repo_name
string
path
string
copies
string
size
string
content
string
license
string
drmarble/android_kernel_bn_encore
fs/btrfs/tree-log.c
1299
87662
/* * Copyright (C) 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> #include <linux/slab.h> #include "ctree.h" #include "transaction.h" #include "disk-io.h" #include "locking.h" #include "print-tree.h" #include "compat.h" #include "tree-log.h" /* magic values for the inode_only field in btrfs_log_inode: * * LOG_INODE_ALL means to log everything * LOG_INODE_EXISTS means to log just enough to recreate the inode * during log replay */ #define LOG_INODE_ALL 0 #define LOG_INODE_EXISTS 1 /* * directory trouble cases * * 1) on rename or unlink, if the inode being unlinked isn't in the fsync * log, we must force a full commit before doing an fsync of the directory * where the unlink was done. * ---> record transid of last unlink/rename per directory * * mkdir foo/some_dir * normal commit * rename foo/some_dir foo2/some_dir * mkdir foo/some_dir * fsync foo/some_dir/some_file * * The fsync above will unlink the original some_dir without recording * it in its new location (foo2). After a crash, some_dir will be gone * unless the fsync of some_file forces a full commit * * 2) we must log any new names for any file or dir that is in the fsync * log. ---> check inode while renaming/linking. * * 2a) we must log any new names for any file or dir during rename * when the directory they are being removed from was logged. 
* ---> check inode and old parent dir during rename * * 2a is actually the more important variant. With the extra logging * a crash might unlink the old name without recreating the new one * * 3) after a crash, we must go through any directories with a link count * of zero and redo the rm -rf * * mkdir f1/foo * normal commit * rm -rf f1/foo * fsync(f1) * * The directory f1 was fully removed from the FS, but fsync was never * called on f1, only its parent dir. After a crash the rm -rf must * be replayed. This must be able to recurse down the entire * directory tree. The inode link count fixup code takes care of the * ugly details. */ /* * stages for the tree walking. The first * stage (0) is to only pin down the blocks we find * the second stage (1) is to make sure that all the inodes * we find in the log are created in the subvolume. * * The last stage is to deal with directories and links and extents * and all the other fun semantics */ #define LOG_WALK_PIN_ONLY 0 #define LOG_WALK_REPLAY_INODES 1 #define LOG_WALK_REPLAY_ALL 2 static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, int inode_only); static int link_to_fixup_dir(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid); static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, u64 dirid, int del_all); /* * tree logging is a special write ahead log used to make sure that * fsyncs and O_SYNCs can happen without doing full tree commits. * * Full tree commits are expensive because they require commonly * modified blocks to be recowed, creating many dirty pages in the * extent tree an 4x-6x higher write load than ext3. * * Instead of doing a tree commit on every fsync, we use the * key ranges and transaction ids to find items for a given file or directory * that have changed in this transaction. 
Those items are copied into * a special tree (one per subvolume root), that tree is written to disk * and then the fsync is considered complete. * * After a crash, items are copied out of the log-tree back into the * subvolume tree. Any file data extents found are recorded in the extent * allocation tree, and the log-tree freed. * * The log tree is read three times, once to pin down all the extents it is * using in ram and once, once to create all the inodes logged in the tree * and once to do all the other items. */ /* * start a sub transaction and setup the log tree * this increments the log tree writer count to make the people * syncing the tree wait for us to finish */ static int start_log_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { int ret; int err = 0; mutex_lock(&root->log_mutex); if (root->log_root) { if (!root->log_start_pid) { root->log_start_pid = current->pid; root->log_multiple_pids = false; } else if (root->log_start_pid != current->pid) { root->log_multiple_pids = true; } root->log_batch++; atomic_inc(&root->log_writers); mutex_unlock(&root->log_mutex); return 0; } root->log_multiple_pids = false; root->log_start_pid = current->pid; mutex_lock(&root->fs_info->tree_log_mutex); if (!root->fs_info->log_root_tree) { ret = btrfs_init_log_root_tree(trans, root->fs_info); if (ret) err = ret; } if (err == 0 && !root->log_root) { ret = btrfs_add_log_tree(trans, root); if (ret) err = ret; } mutex_unlock(&root->fs_info->tree_log_mutex); root->log_batch++; atomic_inc(&root->log_writers); mutex_unlock(&root->log_mutex); return err; } /* * returns 0 if there was a log transaction running and we were able * to join, or returns -ENOENT if there were not transactions * in progress */ static int join_running_log_trans(struct btrfs_root *root) { int ret = -ENOENT; smp_mb(); if (!root->log_root) return -ENOENT; mutex_lock(&root->log_mutex); if (root->log_root) { ret = 0; atomic_inc(&root->log_writers); } mutex_unlock(&root->log_mutex); return ret; 
} /* * This either makes the current running log transaction wait * until you call btrfs_end_log_trans() or it makes any future * log transactions wait until you call btrfs_end_log_trans() */ int btrfs_pin_log_trans(struct btrfs_root *root) { int ret = -ENOENT; mutex_lock(&root->log_mutex); atomic_inc(&root->log_writers); mutex_unlock(&root->log_mutex); return ret; } /* * indicate we're done making changes to the log tree * and wake up anyone waiting to do a sync */ int btrfs_end_log_trans(struct btrfs_root *root) { if (atomic_dec_and_test(&root->log_writers)) { smp_mb(); if (waitqueue_active(&root->log_writer_wait)) wake_up(&root->log_writer_wait); } return 0; } /* * the walk control struct is used to pass state down the chain when * processing the log tree. The stage field tells us which part * of the log tree processing we are currently doing. The others * are state fields used for that specific part */ struct walk_control { /* should we free the extent on disk when done? This is used * at transaction commit time while freeing a log tree */ int free; /* should we write out the extent buffer? This is used * while flushing the log tree to disk during a sync */ int write; /* should we wait for the extent buffer io to finish? Also used * while flushing the log tree to disk for a sync */ int wait; /* pin only walk, we record which extents on disk belong to the * log trees */ int pin; /* what stage of the replay code we're currently in */ int stage; /* the root we are currently replaying */ struct btrfs_root *replay_dest; /* the trans handle for the current replay */ struct btrfs_trans_handle *trans; /* the function that gets used to process blocks we find in the * tree. 
Note the extent_buffer might not be up to date when it is * passed in, and it must be checked or read if you need the data * inside it */ int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen); }; /* * process_func used to pin down extents, write them or wait on them */ static int process_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen) { if (wc->pin) btrfs_pin_extent(log->fs_info->extent_root, eb->start, eb->len, 0); if (btrfs_buffer_uptodate(eb, gen)) { if (wc->write) btrfs_write_tree_block(eb); if (wc->wait) btrfs_wait_tree_block_writeback(eb); } return 0; } /* * Item overwrite used by replay and tree logging. eb, slot and key all refer * to the src data we are copying out. * * root is the tree we are copying into, and path is a scratch * path for use in this function (it should be released on entry and * will be released on exit). * * If the key is already in the destination tree the existing item is * overwritten. If the existing item isn't big enough, it is extended. * If it is too large, it is truncated. * * If the key isn't in the destination yet, a new item is inserted. 
*/ static noinline int overwrite_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { int ret; u32 item_size; u64 saved_i_size = 0; int save_old_i_size = 0; unsigned long src_ptr; unsigned long dst_ptr; int overwrite_root = 0; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) overwrite_root = 1; item_size = btrfs_item_size_nr(eb, slot); src_ptr = btrfs_item_ptr_offset(eb, slot); /* look for the key in the destination tree */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret == 0) { char *src_copy; char *dst_copy; u32 dst_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); if (dst_size != item_size) goto insert; if (item_size == 0) { btrfs_release_path(path); return 0; } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); if (!dst_copy || !src_copy) { btrfs_release_path(path); kfree(dst_copy); kfree(src_copy); return -ENOMEM; } read_extent_buffer(eb, src_copy, src_ptr, item_size); dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, item_size); ret = memcmp(dst_copy, src_copy, item_size); kfree(dst_copy); kfree(src_copy); /* * they have the same contents, just return, this saves * us from cowing blocks in the destination tree and doing * extra writes that may not have been done by a previous * sync */ if (ret == 0) { btrfs_release_path(path); return 0; } } insert: btrfs_release_path(path); /* try to insert the key into the destination tree */ ret = btrfs_insert_empty_item(trans, root, path, key, item_size); /* make sure any existing item is the correct size */ if (ret == -EEXIST) { u32 found_size; found_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); if (found_size > item_size) { btrfs_truncate_item(trans, root, path, item_size, 1); } else if (found_size < item_size) { ret = btrfs_extend_item(trans, root, path, item_size - found_size); } } 
else if (ret) { return ret; } dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); /* don't overwrite an existing inode if the generation number * was logged as zero. This is done when the tree logging code * is just logging an inode to make sure it exists after recovery. * * Also, don't overwrite i_size on directories during replay. * log replay inserts and removes directory items based on the * state of the tree found in the subvolume, and i_size is modified * as it goes */ if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { struct btrfs_inode_item *src_item; struct btrfs_inode_item *dst_item; src_item = (struct btrfs_inode_item *)src_ptr; dst_item = (struct btrfs_inode_item *)dst_ptr; if (btrfs_inode_generation(eb, src_item) == 0) goto no_copy; if (overwrite_root && S_ISDIR(btrfs_inode_mode(eb, src_item)) && S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { save_old_i_size = 1; saved_i_size = btrfs_inode_size(path->nodes[0], dst_item); } } copy_extent_buffer(path->nodes[0], eb, dst_ptr, src_ptr, item_size); if (save_old_i_size) { struct btrfs_inode_item *dst_item; dst_item = (struct btrfs_inode_item *)dst_ptr; btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); } /* make sure the generation is filled in */ if (key->type == BTRFS_INODE_ITEM_KEY) { struct btrfs_inode_item *dst_item; dst_item = (struct btrfs_inode_item *)dst_ptr; if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { btrfs_set_inode_generation(path->nodes[0], dst_item, trans->transid); } } no_copy: btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_release_path(path); return 0; } /* * simple helper to read an inode off the disk from a given root * This can only be called for subvolume roots and not for the log */ static noinline struct inode *read_one_inode(struct btrfs_root *root, u64 objectid) { struct btrfs_key key; struct inode *inode; key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; inode = btrfs_iget(root->fs_info->sb, &key, root, 
NULL); if (IS_ERR(inode)) { inode = NULL; } else if (is_bad_inode(inode)) { iput(inode); inode = NULL; } return inode; } /* replays a single extent in 'eb' at 'slot' with 'key' into the * subvolume 'root'. path is released on entry and should be released * on exit. * * extents in the log tree have not been allocated out of the extent * tree yet. So, this completes the allocation, taking a reference * as required if the extent already exists or creating a new extent * if it isn't in the extent allocation tree yet. * * The extent is inserted into the file, dropping any existing extents * from the file that overlap the new one. */ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { int found_type; u64 mask = root->sectorsize - 1; u64 extent_end; u64 alloc_hint; u64 start = key->offset; u64 saved_nbytes; struct btrfs_file_extent_item *item; struct inode *inode = NULL; unsigned long size; int ret = 0; item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) extent_end = start + btrfs_file_extent_num_bytes(eb, item); else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); extent_end = (start + size + mask) & ~mask; } else { ret = 0; goto out; } inode = read_one_inode(root, key->objectid); if (!inode) { ret = -EIO; goto out; } /* * first check to see if we already have this extent in the * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. 
*/ ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), start, 0); if (ret == 0 && (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC)) { struct btrfs_file_extent_item cmp1; struct btrfs_file_extent_item cmp2; struct btrfs_file_extent_item *existing; struct extent_buffer *leaf; leaf = path->nodes[0]; existing = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); read_extent_buffer(eb, &cmp1, (unsigned long)item, sizeof(cmp1)); read_extent_buffer(leaf, &cmp2, (unsigned long)existing, sizeof(cmp2)); /* * we already have a pointer to this exact extent, * we don't have to do anything */ if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { btrfs_release_path(path); goto out; } } btrfs_release_path(path); saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ ret = btrfs_drop_extents(trans, inode, start, extent_end, &alloc_hint, 1); BUG_ON(ret); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 offset; unsigned long dest_offset; struct btrfs_key ins; ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*item)); BUG_ON(ret); dest_offset = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); copy_extent_buffer(path->nodes[0], eb, dest_offset, (unsigned long)item, sizeof(*item)); ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); ins.type = BTRFS_EXTENT_ITEM_KEY; offset = key->offset - btrfs_file_extent_offset(eb, item); if (ins.objectid > 0) { u64 csum_start; u64 csum_end; LIST_HEAD(ordered_sums); /* * is this extent already allocated in the extent * allocation tree? 
If so, just add a reference */ ret = btrfs_lookup_extent(root, ins.objectid, ins.offset); if (ret == 0) { ret = btrfs_inc_extent_ref(trans, root, ins.objectid, ins.offset, 0, root->root_key.objectid, key->objectid, offset); BUG_ON(ret); } else { /* * insert the extent pointer in the extent * allocation tree */ ret = btrfs_alloc_logged_file_extent(trans, root, root->root_key.objectid, key->objectid, offset, &ins); BUG_ON(ret); } btrfs_release_path(path); if (btrfs_file_extent_compression(eb, item)) { csum_start = ins.objectid; csum_end = csum_start + ins.offset; } else { csum_start = ins.objectid + btrfs_file_extent_offset(eb, item); csum_end = csum_start + btrfs_file_extent_num_bytes(eb, item); } ret = btrfs_lookup_csums_range(root->log_root, csum_start, csum_end - 1, &ordered_sums, 0); BUG_ON(ret); while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums; sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, list); ret = btrfs_csum_file_blocks(trans, root->fs_info->csum_root, sums); BUG_ON(ret); list_del(&sums->list); kfree(sums); } } else { btrfs_release_path(path); } } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { /* inline extents are easy, we just overwrite them */ ret = overwrite_item(trans, root, path, eb, slot, key); BUG_ON(ret); } inode_set_bytes(inode, saved_nbytes); btrfs_update_inode(trans, root, inode); out: if (inode) iput(inode); return ret; } /* * when cleaning up conflicts between the directory names in the * subvolume, directory names in the log and directory names in the * inode back references, we may have to unlink inodes from directories. 
* * This is a helper function to do the unlink of a specific directory * item */ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct inode *dir, struct btrfs_dir_item *di) { struct inode *inode; char *name; int name_len; struct extent_buffer *leaf; struct btrfs_key location; int ret; leaf = path->nodes[0]; btrfs_dir_item_key_to_cpu(leaf, di, &location); name_len = btrfs_dir_name_len(leaf, di); name = kmalloc(name_len, GFP_NOFS); if (!name) return -ENOMEM; read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); btrfs_release_path(path); inode = read_one_inode(root, location.objectid); if (!inode) { kfree(name); return -EIO; } ret = link_to_fixup_dir(trans, root, path, location.objectid); BUG_ON(ret); ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len); BUG_ON(ret); kfree(name); iput(inode); return ret; } /* * helper function to see if a given name and sequence number found * in an inode back reference are already in a directory and correctly * point to this inode */ static noinline int inode_in_dir(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, u64 objectid, u64 index, const char *name, int name_len) { struct btrfs_dir_item *di; struct btrfs_key location; int match = 0; di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, index, name, name_len, 0); if (di && !IS_ERR(di)) { btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); if (location.objectid != objectid) goto out; } else goto out; btrfs_release_path(path); di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); if (di && !IS_ERR(di)) { btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); if (location.objectid != objectid) goto out; } else goto out; match = 1; out: btrfs_release_path(path); return match; } /* * helper function to check a log tree for a named back reference in * an inode. 
This is used to decide if a back reference that is * found in the subvolume conflicts with what we find in the log. * * inode backreferences may have multiple refs in a single item, * during replay we process one reference at a time, and we don't * want to delete valid links to a file from the subvolume if that * link is also in the log. */ static noinline int backref_in_log(struct btrfs_root *log, struct btrfs_key *key, char *name, int namelen) { struct btrfs_path *path; struct btrfs_inode_ref *ref; unsigned long ptr; unsigned long ptr_end; unsigned long name_ptr; int found_name_len; int item_size; int ret; int match = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(NULL, log, key, path, 0, 0); if (ret != 0) goto out; item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); ptr_end = ptr + item_size; while (ptr < ptr_end) { ref = (struct btrfs_inode_ref *)ptr; found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); if (found_name_len == namelen) { name_ptr = (unsigned long)(ref + 1); ret = memcmp_extent_buffer(path->nodes[0], name, name_ptr, namelen); if (ret == 0) { match = 1; goto out; } } ptr = (unsigned long)(ref + 1) + found_name_len; } out: btrfs_free_path(path); return match; } /* * replay one inode back reference item found in the log tree. * eb, slot and key refer to the buffer and key found in the log tree. * root is the destination we are replaying into, and path is for temp * use by this function. (it should be released on return). 
*/ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { struct btrfs_inode_ref *ref; struct btrfs_dir_item *di; struct inode *dir; struct inode *inode; unsigned long ref_ptr; unsigned long ref_end; char *name; int namelen; int ret; int search_done = 0; /* * it is possible that we didn't log all the parent directories * for a given inode. If we don't find the dir, just don't * copy the back ref in. The link count fixup code will take * care of the rest */ dir = read_one_inode(root, key->offset); if (!dir) return -ENOENT; inode = read_one_inode(root, key->objectid); if (!inode) { iput(dir); return -EIO; } ref_ptr = btrfs_item_ptr_offset(eb, slot); ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); again: ref = (struct btrfs_inode_ref *)ref_ptr; namelen = btrfs_inode_ref_name_len(eb, ref); name = kmalloc(namelen, GFP_NOFS); BUG_ON(!name); read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); /* if we already have a perfect match, we're done */ if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), btrfs_inode_ref_index(eb, ref), name, namelen)) { goto out; } /* * look for a conflicting back reference in the metadata. * if we find one we have to unlink that name of the file * before we add our new link. Later on, we overwrite any * existing back reference, and we don't want to create * dangling pointers in the directory. 
*/ if (search_done) goto insert; ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret == 0) { char *victim_name; int victim_name_len; struct btrfs_inode_ref *victim_ref; unsigned long ptr; unsigned long ptr_end; struct extent_buffer *leaf = path->nodes[0]; /* are we trying to overwrite a back ref for the root directory * if so, just jump out, we're done */ if (key->objectid == key->offset) goto out_nowrite; /* check all the names in this back reference to see * if they are in the log. if so, we allow them to stay * otherwise they must be unlinked as a conflict */ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); while (ptr < ptr_end) { victim_ref = (struct btrfs_inode_ref *)ptr; victim_name_len = btrfs_inode_ref_name_len(leaf, victim_ref); victim_name = kmalloc(victim_name_len, GFP_NOFS); BUG_ON(!victim_name); read_extent_buffer(leaf, victim_name, (unsigned long)(victim_ref + 1), victim_name_len); if (!backref_in_log(log, key, victim_name, victim_name_len)) { btrfs_inc_nlink(inode); btrfs_release_path(path); ret = btrfs_unlink_inode(trans, root, dir, inode, victim_name, victim_name_len); } kfree(victim_name); ptr = (unsigned long)(victim_ref + 1) + victim_name_len; } BUG_ON(ret); /* * NOTE: we have searched root tree and checked the * coresponding ref, it does not need to check again. 
*/ search_done = 1; } btrfs_release_path(path); /* look for a conflicting sequence number */ di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), btrfs_inode_ref_index(eb, ref), name, namelen, 0); if (di && !IS_ERR(di)) { ret = drop_one_dir_item(trans, root, path, dir, di); BUG_ON(ret); } btrfs_release_path(path); /* look for a conflicing name */ di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, namelen, 0); if (di && !IS_ERR(di)) { ret = drop_one_dir_item(trans, root, path, dir, di); BUG_ON(ret); } btrfs_release_path(path); insert: /* insert our name */ ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, btrfs_inode_ref_index(eb, ref)); BUG_ON(ret); btrfs_update_inode(trans, root, inode); out: ref_ptr = (unsigned long)(ref + 1) + namelen; kfree(name); if (ref_ptr < ref_end) goto again; /* finally write the back reference in the inode */ ret = overwrite_item(trans, root, path, eb, slot, key); BUG_ON(ret); out_nowrite: btrfs_release_path(path); iput(dir); iput(inode); return 0; } static int insert_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset) { int ret; ret = btrfs_find_orphan_item(root, offset); if (ret > 0) ret = btrfs_insert_orphan_item(trans, root, offset); return ret; } /* * There are a few corners where the link count of the file can't * be properly maintained during replay. So, instead of adding * lots of complexity to the log code, we just scan the backrefs * for any file that has been through replay. * * The scan will update the link count on the inode to reflect the * number of back refs found. If it goes down to zero, the iput * will free the inode. 
*/ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { struct btrfs_path *path; int ret; struct btrfs_key key; u64 nlink = 0; unsigned long ptr; unsigned long ptr_end; int name_len; u64 ino = btrfs_ino(inode); key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; path = btrfs_alloc_path(); if (!path) return -ENOMEM; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] == 0) break; path->slots[0]--; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != ino || key.type != BTRFS_INODE_REF_KEY) break; ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], path->slots[0]); while (ptr < ptr_end) { struct btrfs_inode_ref *ref; ref = (struct btrfs_inode_ref *)ptr; name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); ptr = (unsigned long)(ref + 1) + name_len; nlink++; } if (key.offset == 0) break; key.offset--; btrfs_release_path(path); } btrfs_release_path(path); if (nlink != inode->i_nlink) { inode->i_nlink = nlink; btrfs_update_inode(trans, root, inode); } BTRFS_I(inode)->index_cnt = (u64)-1; if (inode->i_nlink == 0) { if (S_ISDIR(inode->i_mode)) { ret = replay_dir_deletes(trans, root, NULL, path, ino, 1); BUG_ON(ret); } ret = insert_orphan_item(trans, root, ino); BUG_ON(ret); } btrfs_free_path(path); return 0; } static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path) { int ret; struct btrfs_key key; struct inode *inode; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = (u64)-1; while (1) { ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) break; if (ret == 1) { if (path->slots[0] == 0) break; path->slots[0]--; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid 
!= BTRFS_TREE_LOG_FIXUP_OBJECTID || key.type != BTRFS_ORPHAN_ITEM_KEY) break; ret = btrfs_del_item(trans, root, path); if (ret) goto out; btrfs_release_path(path); inode = read_one_inode(root, key.offset); if (!inode) return -EIO; ret = fixup_inode_link_count(trans, root, inode); BUG_ON(ret); iput(inode); /* * fixup on a directory may create new entries, * make sure we always look for the highset possible * offset */ key.offset = (u64)-1; } ret = 0; out: btrfs_release_path(path); return ret; } /* * record a given inode in the fixup dir so we can check its link * count when replay is done. The link count is incremented here * so the inode won't go away until we check it */ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid) { struct btrfs_key key; int ret = 0; struct inode *inode; inode = read_one_inode(root, objectid); if (!inode) return -EIO; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); key.offset = objectid; ret = btrfs_insert_empty_item(trans, root, path, &key, 0); btrfs_release_path(path); if (ret == 0) { btrfs_inc_nlink(inode); btrfs_update_inode(trans, root, inode); } else if (ret == -EEXIST) { ret = 0; } else { BUG(); } iput(inode); return ret; } /* * when replaying the log for a directory, we only insert names * for inodes that actually exist. 
This means an fsync on a directory * does not implicitly fsync all the new files in it */ static noinline int insert_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dirid, u64 index, char *name, int name_len, u8 type, struct btrfs_key *location) { struct inode *inode; struct inode *dir; int ret; inode = read_one_inode(root, location->objectid); if (!inode) return -ENOENT; dir = read_one_inode(root, dirid); if (!dir) { iput(inode); return -EIO; } ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); /* FIXME, put inode into FIXUP list */ iput(inode); iput(dir); return ret; } /* * take a single entry in a log directory item and replay it into * the subvolume. * * if a conflicting item exists in the subdirectory already, * the inode it points to is unlinked and put into the link count * fix up tree. * * If a name from the log points to a file or directory that does * not exist in the FS, it is skipped. fsyncs on directories * do not force down inodes inside that directory, just changes to the * names or unlinks in a directory. 
*/ static noinline int replay_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, struct btrfs_dir_item *di, struct btrfs_key *key) { char *name; int name_len; struct btrfs_dir_item *dst_di; struct btrfs_key found_key; struct btrfs_key log_key; struct inode *dir; u8 log_type; int exists; int ret; dir = read_one_inode(root, key->objectid); if (!dir) return -EIO; name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); if (!name) return -ENOMEM; log_type = btrfs_dir_type(eb, di); read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len); btrfs_dir_item_key_to_cpu(eb, di, &log_key); exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); if (exists == 0) exists = 1; else exists = 0; btrfs_release_path(path); if (key->type == BTRFS_DIR_ITEM_KEY) { dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, name, name_len, 1); } else if (key->type == BTRFS_DIR_INDEX_KEY) { dst_di = btrfs_lookup_dir_index_item(trans, root, path, key->objectid, key->offset, name, name_len, 1); } else { BUG(); } if (IS_ERR_OR_NULL(dst_di)) { /* we need a sequence number to insert, so we only * do inserts for the BTRFS_DIR_INDEX_KEY types */ if (key->type != BTRFS_DIR_INDEX_KEY) goto out; goto insert; } btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); /* the existing item matches the logged item */ if (found_key.objectid == log_key.objectid && found_key.type == log_key.type && found_key.offset == log_key.offset && btrfs_dir_type(path->nodes[0], dst_di) == log_type) { goto out; } /* * don't drop the conflicting directory entry if the inode * for the new entry doesn't exist */ if (!exists) goto out; ret = drop_one_dir_item(trans, root, path, dir, dst_di); BUG_ON(ret); if (key->type == BTRFS_DIR_INDEX_KEY) goto insert; out: btrfs_release_path(path); kfree(name); iput(dir); return 0; insert: btrfs_release_path(path); ret = insert_one_name(trans, root, path, key->objectid, 
key->offset, name, name_len, log_type, &log_key); BUG_ON(ret && ret != -ENOENT); goto out; } /* * find all the names in a directory item and reconcile them into * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than * one name in a directory item, but the same code gets used for * both directory index types */ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { int ret; u32 item_size = btrfs_item_size_nr(eb, slot); struct btrfs_dir_item *di; int name_len; unsigned long ptr; unsigned long ptr_end; ptr = btrfs_item_ptr_offset(eb, slot); ptr_end = ptr + item_size; while (ptr < ptr_end) { di = (struct btrfs_dir_item *)ptr; if (verify_dir_item(root, eb, di)) return -EIO; name_len = btrfs_dir_name_len(eb, di); ret = replay_one_name(trans, root, path, eb, di, key); BUG_ON(ret); ptr = (unsigned long)(di + 1); ptr += name_len; } return 0; } /* * directory replay has two parts. There are the standard directory * items in the log copied from the subvolume, and range items * created in the log while the subvolume was logged. * * The range items tell us which parts of the key space the log * is authoritative for. During replay, if a key in the subvolume * directory is in a logged range item, but not actually in the log * that means it was deleted from the directory before the fsync * and should be removed. 
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	/* (u64)-1 marks the end of the key space: nothing left to scan */
	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		/* step back to the item that would cover *start_ret */
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	/* does the range item [key.offset, found_end] cover *start_ret? */
	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * this looks for a given directory item in the log.
If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	/* walk every name packed into the directory item at path */
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				  name_len);
		/* a NULL log means del_all replay: treat every name as gone */
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (IS_ERR_OR_NULL(log_di)) {
			/* name is not in the log: unlink it from the dir */
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			BUG_ON(ret);
			/*
			 * bump nlink so btrfs_unlink_inode() does not drop the
			 * inode to zero links here; fixup handles the count
			 */
			btrfs_inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			BUG_ON(ret);
			kfree(name);
			iput(inode);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}

/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 *
 * If del_all is set, every directory entry is treated as deleted
 * (used when the whole directory is being removed).
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			/* next range the log is authoritative for */
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			BUG_ON(ret);
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	/* after the DIR_ITEM key space, repeat for the DIR_INDEX key space */
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}

/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	btrfs_read_buffer(eb, gen);

	level = btrfs_header_level(eb);

	/* only leaves carry items to replay */
	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				BUG_ON(ret);
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);

			/* for regular files, make sure corresponding
			 * orphan item exists.  extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				BUG_ON(ret);
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			BUG_ON(ret);
		}
		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_INODE_REF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			/* -ENOENT: the referenced name was since unlinked */
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			BUG_ON(ret);
		}
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * walk down the log tree from the current position, handing each block
 * to wc->process_func.  When wc->free is set the blocks are also cleaned
 * and their reserved extents released (used when tearing the log down).
 */
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level,
				   struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			/* children of level-1 nodes are leaves: process
			 * them here instead of descending further
			 */
			wc->process_func(root, next, wc, ptr_gen);

			path->slots[*level]++;
			if (wc->free) {
				btrfs_read_buffer(next, ptr_gen);

				btrfs_tree_lock(next);
				clean_tree_block(trans, root, next);
				btrfs_set_lock_blocking(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_reserved_extent(root,
							 bytenr, blocksize);
				BUG_ON(ret);
			}
			free_extent_buffer(next);
			continue;
		}
		btrfs_read_buffer(next, ptr_gen);

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}

/*
 * walk back up the log tree after walk_down_log_tree has exhausted a
 * node, processing (and optionally freeing) each finished node on the
 * way.  Returns 0 when there is more tree to walk, 1 when the walk is
 * complete.
 */
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level,
				 struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				clean_tree_block(trans, root, next);
				btrfs_set_lock_blocking(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				BUG_ON(ret);
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log,
			 struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	/* alternate down/up walks until walk_up reports completion */
	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			clean_tree_block(trans, log, next);
			btrfs_set_lock_blocking(next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_reserved_extent(log, next->start,
							 next->len);
			BUG_ON(ret);
		}
	}

	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to update the item for a given subvolumes log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
	return ret;
}

/*
 * block until the log commit for 'transid' (or a newer one) finishes,
 * or until a full transaction commit has been forced.  Called with
 * root->log_mutex held; the mutex is dropped while sleeping.
 */
static int wait_log_commit(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));
	return 0;
}

/*
 * wait until there are no more tasks writing into this log root.
 * Called with root->log_mutex held; the mutex is dropped while sleeping.
 */
static int wait_for_writer(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	DEFINE_WAIT(wait);
	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
	return 0;
}

/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	unsigned long log_transid = 0;

	mutex_lock(&root->log_mutex);
	index1 = root->log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		/* someone else is already committing this log transid;
		 * just wait for them and we are done
		 */
		wait_log_commit(trans, root, root->log_transid);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, root->log_transid - 1);

	/* batch up concurrent fsyncs: keep waiting while new writers
	 * keep bumping log_batch
	 */
	while (1) {
		unsigned long batch = root->log_batch;
		if (root->log_multiple_pids) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == root->log_batch)
			break;
	}

	/* bail out if we need to do a full commit */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		ret = -EAGAIN;
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	log_transid = root->log_transid;
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	BUG_ON(ret);

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_batch = 0;
	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	smp_mb();
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	/* register as a writer of the log root tree while we update it */
	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_batch++;
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		/* only ENOSPC is expected here; fall back to a full commit */
		BUG_ON(ret != -ENOSPC);
		root->fs_info->last_trans_log_full_commit = trans->transid;
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	index2 = log_root_tree->log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		/* another task is committing the log root tree; wait for it
		 * and our log is covered by that commit
		 */
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = 0;
		goto out;
	}
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
				&log_root_tree->dirty_log_pages,
				EXTENT_DIRTY | EXTENT_NEW);
	BUG_ON(ret);
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);

	btrfs_set_super_log_root(&root->fs_info->super_for_commit,
				log_root_tree->node->start);
	btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
				btrfs_header_level(log_root_tree->node));

	log_root_tree->log_batch = 0;
	log_root_tree->log_transid++;
	smp_mb();

	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	btrfs_scrub_pause_super(root);
	write_ctree_super(trans, root->fs_info->tree_root, 1);
	btrfs_scrub_continue_super(root);
	ret = 0;

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	atomic_set(&log_root_tree->log_commit[index2], 0);
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	atomic_set(&root->log_commit[index1], 0);
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}

/*
 * free all the blocks and dirty-page state of a log tree and then the
 * log root itself.  Uses the walk machinery above with wc.free set.
 */
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	BUG_ON(ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	free_extent_buffer(log->node);
	kfree(log);
}

/*
 * free all the extents used by the tree log.
This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

/* free the log root tree itself; the per-subvolume logs must already
 * have been freed via btrfs_free_log()
 */
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}

/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
*/ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, struct inode *dir, u64 index) { struct btrfs_root *log; struct btrfs_dir_item *di; struct btrfs_path *path; int ret; int err = 0; int bytes_del = 0; u64 dir_ino = btrfs_ino(dir); if (BTRFS_I(dir)->logged_trans < trans->transid) return 0; ret = join_running_log_trans(root); if (ret) return 0; mutex_lock(&BTRFS_I(dir)->log_mutex); log = root->log_root; path = btrfs_alloc_path(); if (!path) { err = -ENOMEM; goto out_unlock; } di = btrfs_lookup_dir_item(trans, log, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); goto fail; } if (di) { ret = btrfs_delete_one_dir_name(trans, log, path, di); bytes_del += name_len; BUG_ON(ret); } btrfs_release_path(path); di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); goto fail; } if (di) { ret = btrfs_delete_one_dir_name(trans, log, path, di); bytes_del += name_len; BUG_ON(ret); } /* update the directory size in the log to reflect the names * we have removed */ if (bytes_del) { struct btrfs_key key; key.objectid = dir_ino; key.offset = 0; key.type = BTRFS_INODE_ITEM_KEY; btrfs_release_path(path); ret = btrfs_search_slot(trans, log, &key, path, 0, 1); if (ret < 0) { err = ret; goto fail; } if (ret == 0) { struct btrfs_inode_item *item; u64 i_size; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); i_size = btrfs_inode_size(path->nodes[0], item); if (i_size > bytes_del) i_size -= bytes_del; else i_size = 0; btrfs_set_inode_size(path->nodes[0], item, i_size); btrfs_mark_buffer_dirty(path->nodes[0]); } else ret = 0; btrfs_release_path(path); } fail: btrfs_free_path(path); out_unlock: mutex_unlock(&BTRFS_I(dir)->log_mutex); if (ret == -ENOSPC) { root->fs_info->last_trans_log_full_commit = trans->transid; ret = 0; } btrfs_end_log_trans(root); return err; } /* see comments for 
btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	/* nothing to do unless the inode was logged in this transaction */
	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	/* ENOSPC in the log is handled by forcing a full commit instead */
	if (ret == -ENOSPC) {
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 0;
	}
	btrfs_end_log_trans(root);

	return ret;
}

/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * log all the items included in the current transaction for a given
 * directory.
This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;
	max_key.objectid = ino;
	max_key.offset = (u64)-1;
	max_key.type = key_type;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &min_key, &max_key,
				   path, 0, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			/* copy the boundary item so replay has an anchor
			 * just before the logged range
			 */
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0) {
		WARN_ON(1);
		goto done;
	}

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			/* old leaf: log only the boundary item and stop */
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}

/*
 * logging directories is very similar to logging inodes, We find all the items
 * from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	/* log the whole key space, range by range, for this key type */
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	/* then repeat for the DIR_INDEX key space */
	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}

/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	/* delete items for this objectid one at a time, highest key first */
	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		/* offset (u64)-1 can never be an existing key */
		BUG_ON(ret == 0);
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		ret = btrfs_del_item(trans, log, path);
		if (ret)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	return ret;
}

/*
 * copy nr items starting at start_slot from the leaf 'src' in the
 * subvolume into the log tree.  For LOG_INODE_EXISTS the inode item is
 * neutered (size and generation zeroed), and for file extents the
 * matching checksum items are copied into the log as well.
 */
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *dst_path,
			       struct extent_buffer *src,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;

	INIT_LIST_HEAD(&ordered_sums);

	/* one buffer holds both the sizes array and the keys array */
	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
				   src_offset, ins_sizes[i]);

		if (inode_only == LOG_INODE_EXISTS &&
		    ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);

			/* set the generation to zero so the recover code
			 * can tell the difference between an logging
			 * just to say 'this inode exists' and a logging
			 * to say 'update this inode with these values'
			 */
			btrfs_set_inode_generation(dst_path->nodes[0],
						   inode_item, 0);
		}
		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				/* compressed extents are csummed over the
				 * whole on-disk extent
				 */
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				BUG_ON(ret);
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}
	return ret;
}

/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.
An extra reference is taken on any extents in this * file, allowing us to avoid a whole pile of corner cases around logging * blocks that have been removed from the tree. * * See LOG_INODE_ALL and related defines for a description of what inode_only * does. * * This handles both files and directories. */ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, int inode_only) { struct btrfs_path *path; struct btrfs_path *dst_path; struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = root->log_root; struct extent_buffer *src = NULL; int err = 0; int ret; int nritems; int ins_start_slot = 0; int ins_nr; u64 ino = btrfs_ino(inode); log = root->log_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; dst_path = btrfs_alloc_path(); if (!dst_path) { btrfs_free_path(path); return -ENOMEM; } min_key.objectid = ino; min_key.type = BTRFS_INODE_ITEM_KEY; min_key.offset = 0; max_key.objectid = ino; /* today the code can only do partial logging of directories */ if (!S_ISDIR(inode->i_mode)) inode_only = LOG_INODE_ALL; if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode)) max_key.type = BTRFS_XATTR_ITEM_KEY; else max_key.type = (u8)-1; max_key.offset = (u64)-1; ret = btrfs_commit_inode_delayed_items(trans, inode); if (ret) { btrfs_free_path(path); btrfs_free_path(dst_path); return ret; } mutex_lock(&BTRFS_I(inode)->log_mutex); /* * a brute force approach to making sure we get the most uptodate * copies of everything. 
*/ if (S_ISDIR(inode->i_mode)) { int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; if (inode_only == LOG_INODE_EXISTS) max_key_type = BTRFS_XATTR_ITEM_KEY; ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); } if (ret) { err = ret; goto out_unlock; } path->keep_locks = 1; while (1) { ins_nr = 0; ret = btrfs_search_forward(root, &min_key, &max_key, path, 0, trans->transid); if (ret != 0) break; again: /* note, ins_nr might be > 0 here, cleanup outside the loop */ if (min_key.objectid != ino) break; if (min_key.type > max_key.type) break; src = path->nodes[0]; if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { ins_nr++; goto next_slot; } else if (!ins_nr) { ins_start_slot = path->slots[0]; ins_nr = 1; goto next_slot; } ret = copy_items(trans, log, dst_path, src, ins_start_slot, ins_nr, inode_only); if (ret) { err = ret; goto out_unlock; } ins_nr = 1; ins_start_slot = path->slots[0]; next_slot: nritems = btrfs_header_nritems(path->nodes[0]); path->slots[0]++; if (path->slots[0] < nritems) { btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]); goto again; } if (ins_nr) { ret = copy_items(trans, log, dst_path, src, ins_start_slot, ins_nr, inode_only); if (ret) { err = ret; goto out_unlock; } ins_nr = 0; } btrfs_release_path(path); if (min_key.offset < (u64)-1) min_key.offset++; else if (min_key.type < (u8)-1) min_key.type++; else if (min_key.objectid < (u64)-1) min_key.objectid++; else break; } if (ins_nr) { ret = copy_items(trans, log, dst_path, src, ins_start_slot, ins_nr, inode_only); if (ret) { err = ret; goto out_unlock; } ins_nr = 0; } WARN_ON(ins_nr); if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { btrfs_release_path(path); btrfs_release_path(dst_path); ret = log_directory_changes(trans, root, inode, path, dst_path); if (ret) { err = ret; goto out_unlock; } } BTRFS_I(inode)->logged_trans = trans->transid; out_unlock: mutex_unlock(&BTRFS_I(inode)->log_mutex); 
btrfs_free_path(path); btrfs_free_path(dst_path); return err; } /* * follow the dentry parent pointers up the chain and see if any * of the directories in it require a full commit before they can * be logged. Returns zero if nothing special needs to be done or 1 if * a full commit is required. */ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, struct inode *inode, struct dentry *parent, struct super_block *sb, u64 last_committed) { int ret = 0; struct btrfs_root *root; struct dentry *old_parent = NULL; /* * for regular files, if its inode is already on disk, we don't * have to worry about the parents at all. This is because * we can use the last_unlink_trans field to record renames * and other fun in this file. */ if (S_ISREG(inode->i_mode) && BTRFS_I(inode)->generation <= last_committed && BTRFS_I(inode)->last_unlink_trans <= last_committed) goto out; if (!S_ISDIR(inode->i_mode)) { if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) goto out; inode = parent->d_inode; } while (1) { BTRFS_I(inode)->logged_trans = trans->transid; smp_mb(); if (BTRFS_I(inode)->last_unlink_trans > last_committed) { root = BTRFS_I(inode)->root; /* * make sure any commits to the log are forced * to be full commits */ root->fs_info->last_trans_log_full_commit = trans->transid; ret = 1; break; } if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) break; if (IS_ROOT(parent)) break; parent = dget_parent(parent); dput(old_parent); old_parent = parent; inode = parent->d_inode; } dput(old_parent); out: return ret; } static int inode_in_log(struct btrfs_trans_handle *trans, struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; mutex_lock(&root->log_mutex); if (BTRFS_I(inode)->logged_trans == trans->transid && BTRFS_I(inode)->last_sub_trans <= root->last_log_commit) ret = 1; mutex_unlock(&root->log_mutex); return ret; } /* * helper function around btrfs_log_inode to make sure newly created * parent directories 
 * also end up in the log.  A minimal inode and backref
 * only logging is done of any parent directories that are older than
 * the last committed transaction
 */
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
            struct btrfs_root *root, struct inode *inode,
            struct dentry *parent, int exists_only)
{
    int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
    struct super_block *sb;
    struct dentry *old_parent = NULL;
    int ret = 0;
    u64 last_committed = root->fs_info->last_trans_committed;

    sb = inode->i_sb;

    /* a nonzero return here means "fall back to a full commit" */
    if (btrfs_test_opt(root, NOTREELOG)) {
        ret = 1;
        goto end_no_trans;
    }

    if (root->fs_info->last_trans_log_full_commit >
        root->fs_info->last_trans_committed) {
        ret = 1;
        goto end_no_trans;
    }

    if (root != BTRFS_I(inode)->root ||
        btrfs_root_refs(&root->root_item) == 0) {
        ret = 1;
        goto end_no_trans;
    }

    ret = check_parent_dirs_for_sync(trans, inode, parent,
                     sb, last_committed);
    if (ret)
        goto end_no_trans;

    if (inode_in_log(trans, inode)) {
        ret = BTRFS_NO_LOG_SYNC;
        goto end_no_trans;
    }

    ret = start_log_trans(trans, root);
    if (ret)
        goto end_trans;

    ret = btrfs_log_inode(trans, root, inode, inode_only);
    if (ret)
        goto end_trans;

    /*
     * for regular files, if its inode is already on disk, we don't
     * have to worry about the parents at all.  This is because
     * we can use the last_unlink_trans field to record renames
     * and other fun in this file.
     */
    if (S_ISREG(inode->i_mode) &&
        BTRFS_I(inode)->generation <= last_committed &&
        BTRFS_I(inode)->last_unlink_trans <= last_committed) {
            ret = 0;
            goto end_trans;
    }

    /* walk up the dentry chain, minimally logging each parent dir
     * that is newer than the last committed transaction */
    inode_only = LOG_INODE_EXISTS;
    while (1) {
        if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
            break;

        inode = parent->d_inode;
        if (root != BTRFS_I(inode)->root)
            break;

        if (BTRFS_I(inode)->generation >
            root->fs_info->last_trans_committed) {
            ret = btrfs_log_inode(trans, root, inode, inode_only);
            if (ret)
                goto end_trans;
        }
        if (IS_ROOT(parent))
            break;

        parent = dget_parent(parent);
        dput(old_parent);
        old_parent = parent;
    }
    ret = 0;
end_trans:
    dput(old_parent);
    if (ret < 0) {
        /* only ENOSPC is expected here; anything else would indicate
         * a logic error in the logging code */
        BUG_ON(ret != -ENOSPC);
        root->fs_info->last_trans_log_full_commit = trans->transid;
        ret = 1;
    }
    btrfs_end_log_trans(root);
end_no_trans:
    return ret;
}

/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
              struct btrfs_root *root, struct dentry *dentry)
{
    struct dentry *parent = dget_parent(dentry);
    int ret;

    ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
    dput(parent);

    return ret;
}

/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
    int ret;
    struct btrfs_path *path;
    struct btrfs_trans_handle *trans;
    struct btrfs_key key;
    struct btrfs_key found_key;
    struct btrfs_key tmp_key;
    struct btrfs_root *log;
    struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
    struct walk_control wc = {
        .process_func = process_one_buffer,
        .stage = 0,
    };

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    fs_info->log_root_recovering = 1;

    trans = btrfs_start_transaction(fs_info->tree_root, 0);
    BUG_ON(IS_ERR(trans));

    wc.trans = trans;
    wc.pin = 1;

    /* first pass: pin down all the blocks referenced by the logs */
    ret = walk_log_tree(trans, log_root_tree, &wc);
    BUG_ON(ret);

again:
    key.objectid = BTRFS_TREE_LOG_OBJECTID;
    key.offset = (u64)-1;
    btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

    /* iterate over every log root, from highest offset down to zero */
    while (1) {
        ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
        if (ret < 0)
            break;
        if (ret > 0) {
            if (path->slots[0] == 0)
                break;
            path->slots[0]--;
        }
        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                      path->slots[0]);
        btrfs_release_path(path);
        if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
            break;

        log = btrfs_read_fs_root_no_radix(log_root_tree,
                          &found_key);
        BUG_ON(IS_ERR(log));

        /* the log root's offset is the objectid of the subvolume
         * it belongs to */
        tmp_key.objectid = found_key.offset;
        tmp_key.type = BTRFS_ROOT_ITEM_KEY;
        tmp_key.offset = (u64)-1;

        wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
        BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));

        wc.replay_dest->log_root = log;
        btrfs_record_root_in_trans(trans, wc.replay_dest);
        ret = walk_log_tree(trans, log, &wc);
        BUG_ON(ret);

        if (wc.stage == LOG_WALK_REPLAY_ALL) {
            ret = fixup_inode_link_counts(trans, wc.replay_dest,
                              path);
            BUG_ON(ret);
        }

        key.offset = found_key.offset - 1;
        wc.replay_dest->log_root = NULL;
        free_extent_buffer(log->node);
        free_extent_buffer(log->commit_root);
        kfree(log);

        if (found_key.offset == 0)
            break;
    }
    btrfs_release_path(path);

    /* step one is to pin it all, step two is to replay just inodes */
    if (wc.pin) {
        wc.pin = 0;
        wc.process_func = replay_one_buffer;
        wc.stage = LOG_WALK_REPLAY_INODES;
        goto again;
    }
    /* step three is to replay everything */
    if (wc.stage < LOG_WALK_REPLAY_ALL) {
        wc.stage++;
        goto again;
    }

    btrfs_free_path(path);

    free_extent_buffer(log_root_tree->node);
    log_root_tree->log_root = NULL;
    fs_info->log_root_recovering = 0;

    /* step 4: commit the transaction, which also unpins the blocks */
    btrfs_commit_transaction(trans, fs_info->tree_root);

    kfree(log_root_tree);
    return 0;
}

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files there were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
                 struct inode *dir, struct inode *inode,
                 int for_rename)
{
    /*
     * when we're logging a file, if it hasn't been renamed
     * or unlinked, and its inode is fully committed on disk,
     * we don't have to worry about walking up the directory chain
     * to log its parents.
     *
     * So, we use the last_unlink_trans field to put this transid
     * into the file.  When the file is logged we check it and
     * don't log the parents if the file is fully on disk.
     */
    if (S_ISREG(inode->i_mode))
        BTRFS_I(inode)->last_unlink_trans = trans->transid;

    /*
     * if this directory was already logged any new
     * names for this file/dir will get recorded
     */
    smp_mb();
    if (BTRFS_I(dir)->logged_trans == trans->transid)
        return;

    /*
     * if the inode we're about to unlink was logged,
     * the log will be properly updated for any new names
     */
    if (BTRFS_I(inode)->logged_trans == trans->transid)
        return;

    /*
     * when renaming files across directories, if the directory
     * there we're unlinking from gets fsync'd later on, there's
     * no way to find the destination directory later and fsync it
     * properly.  So, we have to be conservative and force commits
     * so the new name gets discovered.
     */
    if (for_rename)
        goto record;

    /* we can safely do the unlink without any special recording */
    return;

record:
    BTRFS_I(dir)->last_unlink_trans = trans->transid;
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
            struct inode *inode, struct inode *old_dir,
            struct dentry *parent)
{
    struct btrfs_root * root = BTRFS_I(inode)->root;

    /*
     * this will force the logging code to walk the dentry chain
     * up for the file
     */
    if (S_ISREG(inode->i_mode))
        BTRFS_I(inode)->last_unlink_trans = trans->transid;

    /*
     * if this inode hasn't been logged and directory we're renaming it
     * from hasn't been logged, we don't need to log it
     */
    if (BTRFS_I(inode)->logged_trans <=
        root->fs_info->last_trans_committed &&
        (!old_dir || BTRFS_I(old_dir)->logged_trans <=
            root->fs_info->last_trans_committed))
        return 0;

    return btrfs_log_inode_parent(trans, root, inode, parent, 1);
}
gpl-2.0
CyanogenMod/android_kernel_bn_omap
arch/ia64/xen/irq_xen.c
2835
11976
/******************************************************************************
 * arch/ia64/xen/irq_xen.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>

#include <asm/xen/privop.h>

#include "irq_xen.h"

/***************************************************************************
 * pv_irq_ops
 * irq operations
 */

/* ask the hypervisor for a vector for 'irq'; returns the vector or -ENOSPC */
static int
xen_assign_irq_vector(int irq)
{
    struct physdev_irq irq_op;

    irq_op.irq = irq;
    if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
        return -ENOSPC;

    return irq_op.vector;
}

/* release a device vector previously obtained from the hypervisor;
 * vectors outside the IA64 device-vector range are silently ignored */
static void
xen_free_irq_vector(int vector)
{
    struct physdev_irq irq_op;

    if (vector < IA64_FIRST_DEVICE_VECTOR ||
        vector > IA64_LAST_DEVICE_VECTOR)
        return;

    irq_op.vector = vector;
    if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
        /* note: "vecotr" typo is in the original message text */
        printk(KERN_WARNING "%s: xen_free_irq_vecotr fail vector=%d\n",
               __func__, vector);
}

/* per-cpu irq numbers bound to xen event channels; -1 means "not bound" */
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;

/* per-cpu buffers for the "<action name><cpu>" irq names */
#define NAME_SIZE	15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE

struct saved_irq {
    unsigned int irq;
    struct irqaction *action;
};
/* 16 should be far optimistic value, since only several percpu irqs
 * are registered early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;

#ifdef CONFIG_SMP
#include <linux/sched.h>

/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
 * it ends up to issue several memory accesses upon percpu data and
 * thus adds unnecessary traffic to other paths.
 */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}

static irqreturn_t
xen_resched_handler(int irq, void *dev_id)
{
    scheduler_ipi();
    return IRQ_HANDLED;
}

static struct irqaction xen_ipi_irqaction = {
    .handler =	handle_IPI,
    .flags =	IRQF_DISABLED,
    .name =		"IPI"
};

static struct irqaction xen_resched_irqaction = {
    .handler =	xen_resched_handler,
    .flags =	IRQF_DISABLED,
    .name =		"resched"
};

static struct irqaction xen_tlb_irqaction = {
    .handler =	xen_dummy_handler,
    .flags =	IRQF_DISABLED,
    .name =		"tlb_flush"
};
#endif

/*
 * This is xen version percpu irq registration, which needs bind
 * to xen specific evtchn sub-system. One trick here is that xen
 * evtchn binding interface depends on kmalloc because related
 * port needs to be freed at device/cpu down. So we cache the
 * registration on BSP before slab is ready and then deal them
 * at later point. For rest instances happening after slab ready,
 * we hook them to xen evtchn immediately.
 *
 * FIXME: MCA is not supported by far, and thus "nomca" boot param is
 * required.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
            struct irqaction *action, int save)
{
    int irq = 0;

    if (xen_slab_ready) {
        /* map the IA64 vector onto the matching xen virq/ipi
         * binding; each case also records the resulting irq in
         * the per-cpu slot used later for unbinding on cpu down */
        switch (vec) {
        case IA64_TIMER_VECTOR:
            snprintf(per_cpu(xen_timer_name, cpu),
                 sizeof(per_cpu(xen_timer_name, cpu)),
                 "%s%d", action->name, cpu);
            irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
                action->handler, action->flags,
                per_cpu(xen_timer_name, cpu), action->dev_id);
            per_cpu(xen_timer_irq, cpu) = irq;
            break;
        case IA64_IPI_RESCHEDULE:
            snprintf(per_cpu(xen_resched_name, cpu),
                 sizeof(per_cpu(xen_resched_name, cpu)),
                 "%s%d", action->name, cpu);
            irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
                action->handler, action->flags,
                per_cpu(xen_resched_name, cpu), action->dev_id);
            per_cpu(xen_resched_irq, cpu) = irq;
            break;
        case IA64_IPI_VECTOR:
            snprintf(per_cpu(xen_ipi_name, cpu),
                 sizeof(per_cpu(xen_ipi_name, cpu)),
                 "%s%d", action->name, cpu);
            irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
                action->handler, action->flags,
                per_cpu(xen_ipi_name, cpu), action->dev_id);
            per_cpu(xen_ipi_irq, cpu) = irq;
            break;
        case IA64_CMC_VECTOR:
            snprintf(per_cpu(xen_cmc_name, cpu),
                 sizeof(per_cpu(xen_cmc_name, cpu)),
                 "%s%d", action->name, cpu);
            irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
                              action->handler,
                              action->flags,
                              per_cpu(xen_cmc_name, cpu),
                              action->dev_id);
            per_cpu(xen_cmc_irq, cpu) = irq;
            break;
        case IA64_CMCP_VECTOR:
            snprintf(per_cpu(xen_cmcp_name, cpu),
                 sizeof(per_cpu(xen_cmcp_name, cpu)),
                 "%s%d", action->name, cpu);
            irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
                             action->handler,
                             action->flags,
                             per_cpu(xen_cmcp_name, cpu),
                             action->dev_id);
            per_cpu(xen_cmcp_irq, cpu) = irq;
            break;
        case IA64_CPEP_VECTOR:
            snprintf(per_cpu(xen_cpep_name, cpu),
                 sizeof(per_cpu(xen_cpep_name, cpu)),
                 "%s%d", action->name, cpu);
            irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
                             action->handler,
                             action->flags,
                             per_cpu(xen_cpep_name, cpu),
                             action->dev_id);
            per_cpu(xen_cpep_irq, cpu) = irq;
            break;
        case IA64_CPE_VECTOR:
        case IA64_MCA_RENDEZ_VECTOR:
        case IA64_PERFMON_VECTOR:
        case IA64_MCA_WAKEUP_VECTOR:
        case IA64_SPURIOUS_INT_VECTOR:
            /* No need to complain, these aren't supported. */
            break;
        default:
            printk(KERN_WARNING "Percpu irq %d is unsupported "
                   "by xen!\n", vec);
            break;
        }
        BUG_ON(irq < 0);

        if (irq > 0) {
            /*
             * Mark percpu.  Without this, migrate_irqs() will
             * mark the interrupt for migrations and trigger it
             * on cpu hotplug.
             */
            irq_set_status_flags(irq, IRQ_PER_CPU);
        }
    }

    /* For BSP, we cache registered percpu irqs, and then re-walk
     * them when initializing APs */
    if (!cpu && save) {
        BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
        saved_percpu_irqs[saved_irq_cnt].irq = vec;
        saved_percpu_irqs[saved_irq_cnt].action = action;
        saved_irq_cnt++;
        if (!xen_slab_ready)
            late_irq_cnt++;
    }
}

/* pv_irq_ops entry: register a percpu irq on the current cpu and cache it */
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
    __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}

/* late_time_init hook: slab is ready now, so bind the irqs that were
 * cached before the allocator was available */
static void
xen_bind_early_percpu_irq(void)
{
    int i;

    xen_slab_ready = 1;
    /* There's no race when accessing this cached array, since only
     * BSP will face with such step shortly
     */
    for (i = 0; i < late_irq_cnt; i++)
        __xen_register_percpu_irq(smp_processor_id(),
                      saved_percpu_irqs[i].irq,
                      saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious point to check whether slab is ready. So
 * a hack is used here by utilizing a late time hook.
 */

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
               unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    if (action == CPU_DEAD) {
        /* Unregister evtchn.
         */
        /* unbind every per-cpu event channel this dead cpu holds,
         * resetting each cached irq slot back to -1 */
        if (per_cpu(xen_cpep_irq, cpu) >= 0) {
            unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
                           NULL);
            per_cpu(xen_cpep_irq, cpu) = -1;
        }
        if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
            unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
                           NULL);
            per_cpu(xen_cmcp_irq, cpu) = -1;
        }
        if (per_cpu(xen_cmc_irq, cpu) >= 0) {
            unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
            per_cpu(xen_cmc_irq, cpu) = -1;
        }
        if (per_cpu(xen_ipi_irq, cpu) >= 0) {
            unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
            per_cpu(xen_ipi_irq, cpu) = -1;
        }
        if (per_cpu(xen_resched_irq, cpu) >= 0) {
            unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
                           NULL);
            per_cpu(xen_resched_irq, cpu) = -1;
        }
        if (per_cpu(xen_timer_irq, cpu) >= 0) {
            unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
                           NULL);
            per_cpu(xen_timer_irq, cpu) = -1;
        }
    }
    return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
    .notifier_call = unbind_evtchn_callback,
    .priority = 0
};
#endif

/* re-register all cached percpu irqs on a newly started AP */
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
    unsigned int i;

    for (i = 0; i < saved_irq_cnt; i++)
        __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
                      saved_percpu_irqs[i].action, 0);
#endif
}

void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
    unsigned int cpu = smp_processor_id();
    struct callback_register event = {
        .type = CALLBACKTYPE_event,
        .address = { .ip = (unsigned long)&xen_event_callback },
    };

    if (cpu == 0) {
        /* Initialization was already done for boot cpu.  */
#ifdef CONFIG_HOTPLUG_CPU
        /* Register the notifier only once.  */
        register_cpu_notifier(&unbind_evtchn_notifier);
#endif
        return;
    }

    /* This should be piggyback when setup vcpu guest context */
    BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}

void __init
xen_irq_init(void)
{
    struct callback_register event = {
        .type = CALLBACKTYPE_event,
        .address = { .ip = (unsigned long)&xen_event_callback },
    };

    xen_init_IRQ();
    BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
    late_time_init = xen_bind_early_percpu_irq;
}

/* translate an IA64 IPI vector into the matching xen IPI and send it */
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
    /* TODO: we need to call vcpu_up here */
    if (unlikely(vector == ap_wakeup_vector)) {
        /* XXX
         * This should be in __cpu_up(cpu) in ia64 smpboot.c
         * like x86. But don't want to modify it,
         * keep it untouched.
         */
        xen_smp_intr_init_early(cpu);

        xen_send_ipi(cpu, vector);
        /* vcpu_prepare_and_up(cpu); */
        return;
    }
#endif

    switch (vector) {
    case IA64_IPI_VECTOR:
        xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
        break;
    case IA64_IPI_RESCHEDULE:
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
        break;
    case IA64_CMCP_VECTOR:
        xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
        break;
    case IA64_CPEP_VECTOR:
        xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
        break;
    case IA64_TIMER_VECTOR: {
        /* this is used only once by check_sal_cache_flush()
           at boot time */
        static int used = 0;
        if (!used) {
            xen_send_ipi(cpu, IA64_TIMER_VECTOR);
            used = 1;
            break;
        }
        /* fallthrough */
    }
    default:
        printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
               vector);
        notify_remote_via_irq(0); /* defaults to 0 irq */
        break;
    }
}

static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
    register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
    register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
    register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}

static void
xen_resend_irq(unsigned int vector)
{
    (void)resend_irq_on_evtchn(vector);
}

const struct pv_irq_ops xen_irq_ops __initdata = {
    .register_ipi = xen_register_ipi,

    .assign_irq_vector = xen_assign_irq_vector,
    .free_irq_vector = xen_free_irq_vector,
    .register_percpu_irq = xen_register_percpu_irq,

    .resend_irq = xen_resend_irq,
};
gpl-2.0
VigorCM9/vigor_aosp_kernel
fs/jffs2/compr_lzo.c
3347
2326
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by Richard Purdie <rpurdie@openedhand.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/lzo.h>
#include "compr.h"

/* shared compression workspace and staging buffer, guarded by
 * deflate_mutex below */
static void *lzo_mem;
static void *lzo_compress_buf;
static DEFINE_MUTEX(deflate_mutex);	/* for lzo_mem and lzo_compress_buf */

static void free_workspace(void)
{
    vfree(lzo_mem);
    vfree(lzo_compress_buf);
}

static int __init alloc_workspace(void)
{
    lzo_mem = vmalloc(LZO1X_MEM_COMPRESS);
    /* worst case: lzo output can be slightly larger than the input page */
    lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));

    if (!lzo_mem || !lzo_compress_buf) {
        printk(KERN_WARNING "Failed to allocate lzo deflate workspace\n");
        free_workspace();
        return -ENOMEM;
    }

    return 0;
}

/*
 * compress *sourcelen bytes from data_in into cpage_out.
 * On success returns 0 and stores the compressed size in *dstlen;
 * returns -1 if compression failed or did not fit in *dstlen.
 */
static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out,
                  uint32_t *sourcelen, uint32_t *dstlen)
{
    size_t compress_size;
    int ret;

    mutex_lock(&deflate_mutex);
    /* compress into the shared staging buffer first, so a too-large
     * result never overruns the caller's output buffer */
    ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf,
                   &compress_size, lzo_mem);
    if (ret != LZO_E_OK)
        goto fail;

    if (compress_size > *dstlen)
        goto fail;

    memcpy(cpage_out, lzo_compress_buf, compress_size);
    mutex_unlock(&deflate_mutex);

    *dstlen = compress_size;
    return 0;

 fail:
    mutex_unlock(&deflate_mutex);
    return -1;
}

/*
 * decompress srclen bytes from data_in into cpage_out; the output must
 * expand to exactly destlen bytes.  Returns 0 on success, -1 on error.
 */
static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
                 uint32_t srclen, uint32_t destlen)
{
    size_t dl = destlen;
    int ret;

    ret = lzo1x_decompress_safe(data_in, srclen, cpage_out, &dl);

    if (ret != LZO_E_OK || dl != destlen)
        return -1;

    return 0;
}

static struct jffs2_compressor jffs2_lzo_comp = {
    .priority = JFFS2_LZO_PRIORITY,
    .name = "lzo",
    .compr = JFFS2_COMPR_LZO,
    .compress = &jffs2_lzo_compress,
    .decompress = &jffs2_lzo_decompress,
    .disabled = 0,
};

int __init jffs2_lzo_init(void)
{
    int ret;

    ret = alloc_workspace();
    if (ret < 0)
        return ret;

    ret = jffs2_register_compressor(&jffs2_lzo_comp);
    if (ret)
        free_workspace();

    return ret;
}

void jffs2_lzo_exit(void)
{
    jffs2_unregister_compressor(&jffs2_lzo_comp);
    free_workspace();
}
gpl-2.0
rbrune/android_kernel_google_steelhead_orig
net/ipv4/netfilter/nf_nat_pptp.c
3603
9954
/*
 * nf_nat_pptp.c
 *
 * NAT support for PPTP (Point to Point Tunneling Protocol).
 * PPTP is a a protocol for creating virtual private networks.
 * It is a specification defined by Microsoft and some vendors
 * working with Microsoft.  PPTP is built on top of a modified
 * version of the Internet Generic Routing Encapsulation Protocol.
 * GRE is defined in RFC 1701 and RFC 1702.  Documentation of
 * PPTP can be found in RFC 2637
 *
 * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 *
 * TODO: - NAT to a unique tuple, not to TCP source port
 *         (needs netfilter tuple reservation)
 */

#include <linux/module.h>
#include <linux/tcp.h>

#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>

#define NF_NAT_PPTP_VERSION "3.0"

/* read the 16-bit call ID stored at byte offset 'off' of a control request */
#define REQ_CID(req, off)		(*(__be16 *)((char *)(req) + (off)))

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
MODULE_ALIAS("ip_nat_pptp");

/* expectation callback: set up NAT for the GRE data connection once the
 * expected packet arrives, and drop the expectation for the other
 * direction since only one of the two can match first */
static void pptp_nat_expected(struct nf_conn *ct,
                  struct nf_conntrack_expect *exp)
{
    struct net *net = nf_ct_net(ct);
    const struct nf_conn *master = ct->master;
    struct nf_conntrack_expect *other_exp;
    struct nf_conntrack_tuple t;
    const struct nf_ct_pptp_master *ct_pptp_info;
    const struct nf_nat_pptp *nat_pptp_info;
    struct nf_nat_range range;

    ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
    nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;

    /* And here goes the grand finale of corrosion... */
    if (exp->dir == IP_CT_DIR_ORIGINAL) {
        pr_debug("we are PNS->PAC\n");
        /* therefore, build tuple for PAC->PNS */
        t.src.l3num = AF_INET;
        t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
        t.src.u.gre.key = ct_pptp_info->pac_call_id;
        t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
        t.dst.u.gre.key = ct_pptp_info->pns_call_id;
        t.dst.protonum = IPPROTO_GRE;
    } else {
        pr_debug("we are PAC->PNS\n");
        /* build tuple for PNS->PAC */
        t.src.l3num = AF_INET;
        t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
        t.src.u.gre.key = nat_pptp_info->pns_call_id;
        t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
        t.dst.u.gre.key = nat_pptp_info->pac_call_id;
        t.dst.protonum = IPPROTO_GRE;
    }

    pr_debug("trying to unexpect other dir: ");
    nf_ct_dump_tuple_ip(&t);
    other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
    if (other_exp) {
        nf_ct_unexpect_related(other_exp);
        nf_ct_expect_put(other_exp);
        pr_debug("success\n");
    } else {
        pr_debug("not found!\n");
    }

    /* This must be a fresh one. */
    BUG_ON(ct->status & IPS_NAT_DONE_MASK);

    /* Change src to where master sends to */
    range.flags = IP_NAT_RANGE_MAP_IPS;
    range.min_ip = range.max_ip
        = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
    if (exp->dir == IP_CT_DIR_ORIGINAL) {
        range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
        range.min = range.max = exp->saved_proto;
    }
    nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);

    /* For DST manip, map port here to where it's expected. */
    range.flags = IP_NAT_RANGE_MAP_IPS;
    range.min_ip = range.max_ip
        = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
    if (exp->dir == IP_CT_DIR_REPLY) {
        range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
        range.min = range.max = exp->saved_proto;
    }
    nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
}

/* outbound packets == from PNS to PAC */
static int
pptp_outbound_pkt(struct sk_buff *skb,
          struct nf_conn *ct,
          enum ip_conntrack_info ctinfo,
          struct PptpControlHeader *ctlh,
          union pptp_ctrl_union *pptpReq)

{
    struct nf_ct_pptp_master *ct_pptp_info;
    struct nf_nat_pptp *nat_pptp_info;
    u_int16_t msg;
    __be16 new_callid;
    unsigned int cid_off;

    ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
    nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

    new_callid = ct_pptp_info->pns_call_id;

    switch (msg = ntohs(ctlh->messageType)) {
    case PPTP_OUT_CALL_REQUEST:
        cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
        /* FIXME: ideally we would want to reserve a call ID
         * here.  current netfilter NAT core is not able to do
         * this :( For now we use TCP source port. This breaks
         * multiple calls within one control session */

        /* save original call ID in nat_info */
        nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;

        /* don't use tcph->source since we are at a DSTmanip
         * hook (e.g. PREROUTING) and pkt is not mangled yet */
        new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;

        /* save new call ID in ct info */
        ct_pptp_info->pns_call_id = new_callid;
        break;
    case PPTP_IN_CALL_REPLY:
        cid_off = offsetof(union pptp_ctrl_union, icack.callID);
        break;
    case PPTP_CALL_CLEAR_REQUEST:
        cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
        break;
    default:
        pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
             msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
                           pptp_msg_name[0]);
        /* fall through */
    case PPTP_SET_LINK_INFO:
        /* only need to NAT in case PAC is behind NAT box */
    case PPTP_START_SESSION_REQUEST:
    case PPTP_START_SESSION_REPLY:
    case PPTP_STOP_SESSION_REQUEST:
    case PPTP_STOP_SESSION_REPLY:
    case PPTP_ECHO_REQUEST:
    case PPTP_ECHO_REPLY:
        /* no need to alter packet */
        return NF_ACCEPT;
    }

    /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
     * down to here */
    pr_debug("altering call id from 0x%04x to 0x%04x\n",
         ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));

    /* mangle packet */
    if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
                     cid_off + sizeof(struct pptp_pkt_hdr) +
                     sizeof(struct PptpControlHeader),
                     sizeof(new_callid), (char *)&new_callid,
                     sizeof(new_callid)) == 0)
        return NF_DROP;
    return NF_ACCEPT;
}

/* rewrite the GRE-key call IDs in both expectations so the data
 * connection matches the NATed control-session call IDs */
static void
pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
         struct nf_conntrack_expect *expect_reply)
{
    const struct nf_conn *ct = expect_orig->master;
    struct nf_ct_pptp_master *ct_pptp_info;
    struct nf_nat_pptp *nat_pptp_info;

    ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
    nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;

    /* save original PAC call ID in nat_info */
    nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;

    /* alter expectation for PNS->PAC direction */
    expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
    expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
    expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
    expect_orig->dir = IP_CT_DIR_ORIGINAL;

    /* alter expectation for PAC->PNS direction */
    expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
    expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
    expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
    expect_reply->dir = IP_CT_DIR_REPLY;
}

/* inbound packets == from PAC to PNS */
static int
pptp_inbound_pkt(struct sk_buff *skb,
         struct nf_conn *ct,
         enum ip_conntrack_info ctinfo,
         struct PptpControlHeader *ctlh,
         union pptp_ctrl_union *pptpReq)
{
    const struct
nf_nat_pptp *nat_pptp_info; u_int16_t msg; __be16 new_pcid; unsigned int pcid_off; nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; new_pcid = nat_pptp_info->pns_call_id; switch (msg = ntohs(ctlh->messageType)) { case PPTP_OUT_CALL_REPLY: pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID); break; case PPTP_IN_CALL_CONNECT: pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID); break; case PPTP_IN_CALL_REQUEST: /* only need to nat in case PAC is behind NAT box */ return NF_ACCEPT; case PPTP_WAN_ERROR_NOTIFY: pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID); break; case PPTP_CALL_DISCONNECT_NOTIFY: pcid_off = offsetof(union pptp_ctrl_union, disc.callID); break; case PPTP_SET_LINK_INFO: pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); break; default: pr_debug("unknown inbound packet %s\n", msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0]); /* fall through */ case PPTP_START_SESSION_REQUEST: case PPTP_START_SESSION_REPLY: case PPTP_STOP_SESSION_REQUEST: case PPTP_STOP_SESSION_REPLY: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* no need to alter packet */ return NF_ACCEPT; } /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST, * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ /* mangle packet */ pr_debug("altering peer call id from 0x%04x to 0x%04x\n", ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, pcid_off + sizeof(struct pptp_pkt_hdr) + sizeof(struct PptpControlHeader), sizeof(new_pcid), (char *)&new_pcid, sizeof(new_pcid)) == 0) return NF_DROP; return NF_ACCEPT; } static int __init nf_nat_helper_pptp_init(void) { nf_nat_need_gre(); BUG_ON(nf_nat_pptp_hook_outbound != NULL); rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); BUG_ON(nf_nat_pptp_hook_inbound != NULL); rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, 
pptp_exp_gre); BUG_ON(nf_nat_pptp_hook_expectfn != NULL); rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); return 0; } static void __exit nf_nat_helper_pptp_fini(void) { rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL); rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL); rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL); rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL); synchronize_rcu(); } module_init(nf_nat_helper_pptp_init); module_exit(nf_nat_helper_pptp_fini);
gpl-2.0
MetSystem/Xiaomi_Kernel_OpenSource
net/ieee802154/6lowpan.c
3859
33847
/*
 * Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/*
 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/ipv6.h>

#include "6lowpan.h"

/* TTL uncompression values, indexed by the 2-bit IPHC TTL field */
static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};

static LIST_HEAD(lowpan_devices);

/*
 * Uncompression of linklocal:
 *   0 -> 16 bytes from packet
 *   1 -> 2  bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 2  bytes from prefix - zeroes + 2 from packet
 *   3 -> 2  bytes from prefix - infer 8 bytes from lladdr
 *
 *  NOTE: => the uncompress function does change 0xf to 0x10
 *  NOTE: 0x00 => no-autoconfig => unspecified
 */
static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};

/*
 * Uncompression of ctx-based:
 *   0 -> 0 bits  from packet [unspecified / reserved]
 *   1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 8 bytes from prefix - zeroes + 2 from packet
 *   3 -> 8 bytes from prefix - infer 8 bytes from lladdr
 */
static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};

/*
 * Uncompression of ctx-base (multicast):
 *   0 -> 0 bits from packet
 *   1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
 *   2 -> 2 bytes from prefix - zeroes + 3 from packet
 *   3 -> 2 bytes from prefix - infer 1 bytes from lladdr
 */
static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};

/* Link local prefix */
static const u8 lowpan_llprefix[] = {0xfe, 0x80};

/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
};

struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

/* One in-progress reassembly of a fragmented 6lowpan datagram */
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	spinlock_t		lock;		/* concurency lock */
	u16			length;		/* length to be assemled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

static unsigned short fragment_tag;
static LIST_HEAD(lowpan_fragments);
/* NOTE(review): non-static and only spin_lock_init()ed in lowpan_newlink()
 * (re-initialized on every link creation) — verify against mainline. */
spinlock_t flist_lock;

static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/* Reverse the byte order of an 8-byte IEEE 802.15.4 hardware address */
static inline void lowpan_address_flip(u8 *src, u8 *dest)
{
	int i;
	for (i = 0; i < IEEE802154_ADDR_LEN; i++)
		(dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
}

/* list of all 6lowpan devices, uses for package delivering */
/* print data in line */
static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
#ifdef DEBUG
	if (msg)
		pr_debug("(%s) %s: ", caller, msg);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
		       16, 1, buf, len, false);
#endif /* DEBUG */
}

/*
 * print data in a table format:
 *
 * addr: xx xx xx xx xx xx
 * addr: xx xx xx xx xx xx
 * ...
 */
static inline void lowpan_raw_dump_table(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
#ifdef DEBUG
	if (msg)
		pr_debug("(%s) %s:\n", caller, msg);
	print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
#endif /* DEBUG */
}

/*
 * HC06 source/destination address compression for unicast link-local
 * addresses.  Emits 0, 2 or 8 bytes at *hc06_ptr and returns the 2-bit
 * SAM/DAM mode value rotated into position 'shift'.
 */
static u8
lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
		 const unsigned char *lladdr)
{
	u8 val = 0;

	if (is_addr_mac_addr_based(ipaddr, lladdr))
		val = 3; /* 0-bits */
	else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
		/* compress IID to 16 bits xxxx::XXXX */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
		*hc06_ptr += 2;
		val = 2; /* 16-bits */
	} else {
		/* do not compress IID => xxxx::IID */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
		*hc06_ptr += 8;
		val = 1; /* 64-bits */
	}

	return rol8(val, shift);
}

/* Derive the IPv6 interface identifier (low 8 bytes) from the link-layer
 * address, flipping the Universal/Local bit as per RFC 4291 / RFC 2464. */
static void
lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
{
	memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
	/* second bit-flip (Universe/Local) is done according RFC2464 */
	ipaddr->s6_addr[8] ^= 0x02;
}

/*
 * Uncompress addresses based on a prefix and a postfix with zeroes in
 * between. If the postfix is zero in length it will use the link address
 * to configure the IP address (autoconf style).
 * pref_post_count takes a byte where the first nibble specify prefix count
 * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
 */
static int
lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
	u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
{
	u8 prefcount = pref_post_count >> 4;
	u8 postcount = pref_post_count & 0x0f;

	/* full nibble 15 => 16 */
	prefcount = (prefcount == 15 ? 16 : prefcount);
	postcount = (postcount == 15 ? 16 : postcount);

	if (lladdr)
		lowpan_raw_dump_inline(__func__, "linklocal address",
						lladdr, IEEE802154_ALEN);
	if (prefcount > 0)
		memcpy(ipaddr, prefix, prefcount);

	if (prefcount + postcount < 16)
		memset(&ipaddr->s6_addr[prefcount], 0,
					16 - (prefcount + postcount));

	if (postcount > 0) {
		memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
		skb_pull(skb, postcount);
	} else if (prefcount > 0) {
		if (lladdr == NULL)
			return -EINVAL;

		/* no IID based configuration if no prefix and no data */
		lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
	}

	pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
								postcount);
	lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);

	return 0;
}

/*
 * LOWPAN_NHC UDP header compression (RFC 6282 §4.3): elide the well-known
 * part of the UDP ports where possible; the checksum is always carried
 * inline.  Advances *hc06_ptr past the emitted bytes.
 */
static void
lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	pr_debug("(%s): UDP header compression\n", __func__);

	if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
				LOWPAN_NHC_UDP_4BIT_PORT) &&
	    ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
				LOWPAN_NHC_UDP_4BIT_PORT)) {
		pr_debug("(%s): both ports compression to 4 bits\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
		/* NOTE(review): '**(hc06_ptr + 1)' dereferences one past the
		 * pointer variable rather than writing *(*hc06_ptr + 1); and
		 * the source nibble uses '&' where later mainline subtracts
		 * LOWPAN_NHC_UDP_4BIT_PORT — confirm against mainline fix. */
		**(hc06_ptr + 1) = /* subtraction is faster */
			(u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
			((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
		*hc06_ptr += 2;
	} else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
			LOWPAN_NHC_UDP_8BIT_PORT) {
		pr_debug("(%s): remove 8 bits of dest\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
		memcpy(*hc06_ptr + 1, &uh->source, 2);
		**(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
		*hc06_ptr += 4;
	} else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
			LOWPAN_NHC_UDP_8BIT_PORT) {
		pr_debug("(%s): remove 8 bits of source\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
		memcpy(*hc06_ptr + 1, &uh->dest, 2);
		**(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
		*hc06_ptr += 4;
	} else {
		pr_debug("(%s): can't compress header\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
		memcpy(*hc06_ptr + 1, &uh->source, 2);
		memcpy(*hc06_ptr + 3, &uh->dest, 2);
		*hc06_ptr += 5;
	}

	/* checksum is always inline */
	memcpy(*hc06_ptr, &uh->check, 2);
	*hc06_ptr += 2;
}

/* Pop one byte off the front of the skb.
 * NOTE(review): no pskb_may_pull() here — callers are expected to have
 * checked skb->len first; verify all call sites do. */
static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
{
	u8 ret;

	ret = skb->data[0];
	skb_pull(skb, 1);

	return ret;
}

/* Pop a little-endian u16 off the front of the skb */
static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
{
	u16 ret;

	BUG_ON(!pskb_may_pull(skb, 2));

	ret = skb->data[0] | (skb->data[1] << 8);
	skb_pull(skb, 2);

	return ret;
}

/*
 * Inverse of lowpan_compress_udp_header(): rebuild the UDP ports and
 * checksum from the LOWPAN_NHC encoding.  Returns 0 or -EINVAL on an
 * unknown/unsupported NHC format.
 * NOTE(review): writes through udp_hdr(skb) while the compressed bytes are
 * still at skb->data — relies on the caller's header layout; confirm.
 */
static int
lowpan_uncompress_udp_header(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	u8 tmp;

	tmp = lowpan_fetch_skb_u8(skb);

	if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
		pr_debug("(%s): UDP header uncompression\n", __func__);
		switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
		case LOWPAN_NHC_UDP_CS_P_00:
			memcpy(&uh->source, &skb->data[0], 2);
			memcpy(&uh->dest, &skb->data[2], 2);
			skb_pull(skb, 4);
			break;
		case LOWPAN_NHC_UDP_CS_P_01:
			memcpy(&uh->source, &skb->data[0], 2);
			uh->dest = skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
			skb_pull(skb, 3);
			break;
		case LOWPAN_NHC_UDP_CS_P_10:
			uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
			memcpy(&uh->dest, &skb->data[1], 2);
			skb_pull(skb, 3);
			break;
		case LOWPAN_NHC_UDP_CS_P_11:
			uh->source =
			   LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
			uh->dest =
			   LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
			skb_pull(skb, 1);
			break;
		default:
			pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
			goto err;
			break;
		}

		pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
			 __func__, uh->source, uh->dest);

		/* copy checksum */
		memcpy(&uh->check, &skb->data[0], 2);
		skb_pull(skb, 2);
	} else {
		pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
		goto err;
	}

	return 0;
err:
	return -EINVAL;
}

/*
 * header_ops->create: compress the IPv6 (and optionally UDP) header into
 * the IPHC/NHC form in a scratch buffer, replace the uncompressed header
 * in the skb, and hand off to the real WPAN device's hard_header.
 */
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned len)
{
	u8 tmp, iphc0, iphc1, *hc06_ptr;
	struct ipv6hdr *hdr;
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	u8 *head;
	struct ieee802154_addr sa, da;

	if (type != ETH_P_IPV6)
		return 0;
		/* TODO:
		 * if this package isn't ipv6 one, where should it be routed?
		 */
	head = kzalloc(100, GFP_KERNEL);
	if (head == NULL)
		return -ENOMEM;

	hdr = ipv6_hdr(skb);
	hc06_ptr = head + 2;	/* head[0..1] are reserved for iphc0/iphc1 */

	pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength  = %d\n"
		 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
		hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
		hdr->hop_limit);

	lowpan_raw_dump_table(__func__, "raw skb network header dump",
		skb_network_header(skb), sizeof(struct ipv6hdr));

	if (!saddr)
		saddr = dev->dev_addr;

	lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);

	/*
	 * As we copy some bit-length fields, in the IPHC encoding bytes,
	 * we sometimes use |=
	 * If the field is 0, and the current bit value in memory is 1,
	 * this does not work. We therefore reset the IPHC encoding here
	 */
	iphc0 = LOWPAN_DISPATCH_IPHC;
	iphc1 = 0;

	/* TODO: context lookup */

	lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	/*
	 * Traffic class, flow label
	 * If flow label is 0, compress it. If traffic class is 0, compress it
	 * We have to process both in the same time as the offset of traffic
	 * class depends on the presence of version and flow label
	 */

	/* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
	tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
	tmp = ((tmp & 0x03) << 6) | (tmp >> 2);

	if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
	     (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
		/* flow label can be compressed */
		iphc0 |= LOWPAN_IPHC_FL_C;
		if ((hdr->priority == 0) &&
		   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
			/* compress (elide) all */
			iphc0 |= LOWPAN_IPHC_TC_C;
		} else {
			/* compress only the flow label */
			*hc06_ptr = tmp;
			hc06_ptr += 1;
		}
	} else {
		/* Flow label cannot be compressed */
		if ((hdr->priority == 0) &&
		   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
			/* compress only traffic class */
			iphc0 |= LOWPAN_IPHC_TC_C;
			*hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
			memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
			hc06_ptr += 3;
		} else {
			/* compress nothing */
			/* NOTE(review): '&hdr' copies 4 bytes of the POINTER,
			 * not the header; later mainline uses 'hdr' here —
			 * suspected bug, confirm against mainline fix. */
			memcpy(hc06_ptr, &hdr, 4);
			/* replace the top byte with new ECN | DSCP format */
			*hc06_ptr = tmp;
			hc06_ptr += 4;
		}
	}

	/* NOTE: payload length is always compressed */

	/* Next Header is compress if UDP */
	if (hdr->nexthdr == UIP_PROTO_UDP)
		iphc0 |= LOWPAN_IPHC_NH_C;

	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
		*hc06_ptr = hdr->nexthdr;
		hc06_ptr += 1;
	}

	/*
	 * Hop limit
	 * if 1:   compress, encoding is 01
	 * if 64:  compress, encoding is 10
	 * if 255: compress, encoding is 11
	 * else do not compress
	 */
	switch (hdr->hop_limit) {
	case 1:
		iphc0 |= LOWPAN_IPHC_TTL_1;
		break;
	case 64:
		iphc0 |= LOWPAN_IPHC_TTL_64;
		break;
	case 255:
		iphc0 |= LOWPAN_IPHC_TTL_255;
		break;
	default:
		*hc06_ptr = hdr->hop_limit;
		break;
	}

	/* source address compression */
	if (is_addr_unspecified(&hdr->saddr)) {
		pr_debug("(%s): source address is unspecified, setting SAC\n",
								__func__);
		iphc1 |= LOWPAN_IPHC_SAC;
	/* TODO: context lookup */
	} else if (is_addr_link_local(&hdr->saddr)) {
		pr_debug("(%s): source address is link-local\n", __func__);
		iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
				LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
	} else {
		pr_debug("(%s): send the full source address\n", __func__);
		memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
		hc06_ptr += 16;
	}

	/* destination address compression */
	if (is_addr_mcast(&hdr->daddr)) {
		pr_debug("(%s): destination address is multicast", __func__);
		iphc1 |= LOWPAN_IPHC_M;
		if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
			pr_debug("compressed to 1 octet\n");
			iphc1 |= LOWPAN_IPHC_DAM_11;
			/* use last byte */
			*hc06_ptr = hdr->daddr.s6_addr[15];
			hc06_ptr += 1;
		} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
			pr_debug("compressed to 4 octets\n");
			iphc1 |= LOWPAN_IPHC_DAM_10;
			/* second byte + the last three */
			*hc06_ptr = hdr->daddr.s6_addr[1];
			memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
			hc06_ptr += 4;
		} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
			pr_debug("compressed to 6 octets\n");
			iphc1 |= LOWPAN_IPHC_DAM_01;
			/* second byte + the last five */
			*hc06_ptr = hdr->daddr.s6_addr[1];
			memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
			hc06_ptr += 6;
		} else {
			pr_debug("using full address\n");
			iphc1 |= LOWPAN_IPHC_DAM_00;
			memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
			hc06_ptr += 16;
		}
	} else {
		pr_debug("(%s): destination address is unicast: ", __func__);
		/* TODO: context lookup */
		if (is_addr_link_local(&hdr->daddr)) {
			pr_debug("destination address is link-local\n");
			iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
				LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
		} else {
			pr_debug("using full address\n");
			memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
			hc06_ptr += 16;
		}
	}

	/* UDP header compression */
	if (hdr->nexthdr == UIP_PROTO_UDP)
		lowpan_compress_udp_header(&hc06_ptr, skb);

	head[0] = iphc0;
	head[1] = iphc1;

	/* swap the uncompressed IPv6 header for the compressed one */
	skb_pull(skb, sizeof(struct ipv6hdr));
	memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);

	kfree(head);

	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
				skb->len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	{
		/* prepare wpan address data */
		sa.addr_type = IEEE802154_ADDR_LONG;
		sa.pan_id = 0xff;

		da.addr_type = IEEE802154_ADDR_LONG;
		da.pan_id = 0xff;

		memcpy(&(da.hwaddr), daddr, 8);
		memcpy(&(sa.hwaddr), saddr, 8);

		mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
		return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
				type, (void *)&da, (void *)&sa, skb->len);
	}
}

/*
 * Deliver an uncompressed IPv6 datagram to every 6lowpan virtual device
 * stacked on the receiving WPAN device.  Consumes 'skb'; returns a
 * netif_rx() status or -ENOMEM.
 */
static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
{
	struct sk_buff *new;
	struct lowpan_dev_record *entry;
	int stat = NET_RX_SUCCESS;

	new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
								GFP_ATOMIC);
	kfree_skb(skb);

	if (!new)
		return -ENOMEM;

	/* prepend the rebuilt IPv6 header */
	skb_push(new, sizeof(struct ipv6hdr));
	skb_reset_network_header(new);
	skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));

	new->protocol = htons(ETH_P_IPV6);
	new->pkt_type = PACKET_HOST;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
			skb = skb_copy(new, GFP_ATOMIC);
			if (!skb) {
				stat = -ENOMEM;
				break;
			}
			skb->dev = entry->ldev;
			stat = netif_rx(skb);
		}
	rcu_read_unlock();

	kfree_skb(new);

	return stat;
}

/* Reassembly timeout: discard a partially-assembled frame */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("%s: timer expired for frame with tag %d\n", __func__,
								entry->tag);

	spin_lock(&flist_lock);
	list_del(&entry->list);
	spin_unlock(&flist_lock);

	dev_kfree_skb(entry->skb);
	kfree(entry);
}

/*
 * Core receive path: reassemble FRAG1/FRAGN fragments if needed, then
 * undo the IPHC/NHC compression and deliver the resulting IPv6 datagram.
 * Consumes 'skb' on every path.  Returns 0 or -EINVAL.
 */
static int
lowpan_process_data(struct sk_buff *skb)
{
	struct ipv6hdr hdr;
	u8 tmp, iphc0, iphc1, num_context = 0;
	u8 *_saddr, *_daddr;
	int err;

	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
				skb->len);
	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;
	iphc0 = lowpan_fetch_skb_u8(skb);

	/* fragments assembling */
	switch (iphc0 & LOWPAN_DISPATCH_MASK) {
	case LOWPAN_DISPATCH_FRAG1:
	case LOWPAN_DISPATCH_FRAGN:
	{
		struct lowpan_fragment *frame;
		u8 len, offset;
		u16 tag;
		bool found = false;

		len = lowpan_fetch_skb_u8(skb); /* frame length */
		tag = lowpan_fetch_skb_u16(skb);

		/*
		 * check if frame assembling with the same tag is
		 * already in progress
		 */
		spin_lock(&flist_lock);

		list_for_each_entry(frame, &lowpan_fragments, list)
			if (frame->tag == tag) {
				found = true;
				break;
			}

		/* alloc new frame structure */
		if (!found) {
			frame = kzalloc(sizeof(struct lowpan_fragment),
								GFP_ATOMIC);
			if (!frame)
				goto unlock_and_drop;

			INIT_LIST_HEAD(&frame->list);

			/* 11-bit datagram size: low 3 bits from the dispatch
			 * byte, high 8 bits from the next byte */
			frame->length = (iphc0 & 7) | (len << 3);
			frame->tag = tag;

			/* allocate buffer for frame assembling */
			frame->skb = alloc_skb(frame->length +
					sizeof(struct ipv6hdr), GFP_ATOMIC);

			if (!frame->skb) {
				kfree(frame);
				goto unlock_and_drop;
			}

			frame->skb->priority = skb->priority;
			frame->skb->dev = skb->dev;

			/* reserve headroom for uncompressed ipv6 header */
			skb_reserve(frame->skb, sizeof(struct ipv6hdr));
			skb_put(frame->skb, frame->length);

			init_timer(&frame->timer);
			/* time out is the same as for ipv6 - 60 sec */
			frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
			frame->timer.data = (unsigned long)frame;
			frame->timer.function = lowpan_fragment_timer_expired;

			add_timer(&frame->timer);

			list_add_tail(&frame->list, &lowpan_fragments);
		}

		if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
			goto unlock_and_drop;

		offset = lowpan_fetch_skb_u8(skb); /* fetch offset */

		/* if payload fits buffer, copy it */
		if (likely((offset * 8 + skb->len) <= frame->length))
			skb_copy_to_linear_data_offset(frame->skb, offset * 8,
							skb->data, skb->len);
		else
			goto unlock_and_drop;

		frame->bytes_rcv += skb->len;

		/* frame assembling complete */
		if ((frame->bytes_rcv == frame->length) &&
		     frame->timer.expires > jiffies) {
			/* if timer haven't expired - first of all delete it */
			del_timer(&frame->timer);
			list_del(&frame->list);
			spin_unlock(&flist_lock);

			dev_kfree_skb(skb);
			skb = frame->skb;
			kfree(frame);
			/* continue below with the reassembled datagram */
			iphc0 = lowpan_fetch_skb_u8(skb);
			break;
		}
		spin_unlock(&flist_lock);

		return kfree_skb(skb), 0;
	}
	default:
		break;
	}

	iphc1 = lowpan_fetch_skb_u8(skb);

	_saddr = mac_cb(skb)->sa.hwaddr;
	_daddr = mac_cb(skb)->da.hwaddr;

	pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);

	/* another if the CID flag is set */
	if (iphc1 & LOWPAN_IPHC_CID) {
		pr_debug("(%s): CID flag is set, increase header with one\n",
								__func__);
		if (!skb->len)
			goto drop;
		num_context = lowpan_fetch_skb_u8(skb);
	}

	hdr.version = 6;

	/* Traffic Class and Flow Label */
	switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
	/*
	 * Traffic Class and FLow Label carried in-line
	 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
	 */
	case 0: /* 00b */
		if (!skb->len)
			goto drop;
		tmp = lowpan_fetch_skb_u8(skb);
		memcpy(&hdr.flow_lbl, &skb->data[0], 3);
		skb_pull(skb, 3);
		hdr.priority = ((tmp >> 2) & 0x0f);
		hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
					(hdr.flow_lbl[0] & 0x0f);
		break;
	/*
	 * Traffic class carried in-line
	 * ECN + DSCP (1 byte), Flow Label is elided
	 */
	case 1: /* 10b */
		if (!skb->len)
			goto drop;
		tmp = lowpan_fetch_skb_u8(skb);
		hdr.priority = ((tmp >> 2) & 0x0f);
		hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
		hdr.flow_lbl[1] = 0;
		hdr.flow_lbl[2] = 0;
		break;
	/*
	 * Flow Label carried in-line
	 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
	 */
	case 2: /* 01b */
		if (!skb->len)
			goto drop;
		tmp = lowpan_fetch_skb_u8(skb);
		hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
		skb_pull(skb, 2);
		break;
	/* Traffic Class and Flow Label are elided */
	case 3: /* 11b */
		hdr.priority = 0;
		hdr.flow_lbl[0] = 0;
		hdr.flow_lbl[1] = 0;
		hdr.flow_lbl[2] = 0;
		break;
	default:
		break;
	}

	/* Next Header */
	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
		/* Next header is carried inline */
		if (!skb->len)
			goto drop;
		hdr.nexthdr = lowpan_fetch_skb_u8(skb);
		pr_debug("(%s): NH flag is set, next header is carried "
			 "inline: %02x\n", __func__, hdr.nexthdr);
	}

	/* Hop Limit */
	if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
		hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
	else {
		if (!skb->len)
			goto drop;
		hdr.hop_limit = lowpan_fetch_skb_u8(skb);
	}

	/* Extract SAM to the tmp variable */
	tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;

	/* Source address uncompression */
	pr_debug("(%s): source address stateless compression\n", __func__);
	err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
				lowpan_unc_llconf[tmp], skb->data);
	if (err)
		goto drop;

	/* Extract DAM to the tmp variable */
	tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;

	/* check for Multicast Compression */
	if (iphc1 & LOWPAN_IPHC_M) {
		if (iphc1 & LOWPAN_IPHC_DAC) {
			pr_debug("(%s): destination address context-based "
				 "multicast compression\n", __func__);
			/* TODO: implement this */
		} else {
			u8 prefix[] = {0xff, 0x02};

			pr_debug("(%s): destination address non-context-based"
				 " multicast compression\n", __func__);
			if (0 < tmp && tmp < 3) {
				if (!skb->len)
					goto drop;
				else
					prefix[1] = lowpan_fetch_skb_u8(skb);
			}

			err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
					lowpan_unc_mxconf[tmp], NULL);
			if (err)
				goto drop;
		}
	} else {
		pr_debug("(%s): destination address stateless compression\n",
								__func__);
		err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
				lowpan_unc_llconf[tmp], skb->data);
		if (err)
			goto drop;
	}

	/* UDP data uncompression */
	if (iphc0 & LOWPAN_IPHC_NH_C)
		if (lowpan_uncompress_udp_header(skb))
			goto drop;

	/* Not fragmented package */
	hdr.payload_len = htons(skb->len);

	pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
				skb_headroom(skb), skb->len);

	pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength  = %d\n\t"
		 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
		 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);

	lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
							sizeof(hdr));
	return lowpan_skb_deliver(skb, &hdr);

unlock_and_drop:
	spin_unlock(&flist_lock);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static int lowpan_get_mac_header_length(struct sk_buff *skb)
{
	/*
	 * Currently long addressing mode is supported only, so the overall
	 * header size is 21:
	 * FC SeqNum DPAN DA  SA  Sec
	 * 2  +  1  +  2 + 8 + 8 + 0  = 21
	 */
	return 21;
}

/*
 * Emit one 6lowpan fragment: clone the MAC header from 'skb', append the
 * FRAG1/FRAGN header in 'head' and 'plen' bytes of payload starting at
 * 'offset', then queue it on the real device.  plen == 0 marks FRAG1.
 */
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
			int mlen, int plen, int offset)
{
	struct sk_buff *frag;
	int hlen, ret;

	/* if payload length is zero, therefore it's a first fragment */
	hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE :  LOWPAN_FRAGN_HEAD_SIZE);

	lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;
	frag->dev = skb->dev;

	/* copy header, MFR and payload */
	memcpy(skb_put(frag, mlen), skb->data, mlen);
	memcpy(skb_put(frag, hlen), head, hlen);

	if (plen)
		skb_copy_from_linear_data_offset(skb, offset + mlen,
					skb_put(frag, plen), plen);

	lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
								frag->len);

	ret = dev_queue_xmit(frag);

	return ret;
}

/*
 * Split an oversized frame into a FRAG1 fragment followed by FRAGN
 * fragments of LOWPAN_FRAG_SIZE bytes each (RFC 4944 §5.3).
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb)
{
	int  err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = lowpan_get_mac_header_length(skb);
	payload_length = skb->len - header_length;
	tag = fragment_tag++;

	/* first fragment header */
	head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
	head[1] = (payload_length >> 3) & 0xff;
	head[2] = tag & 0xff;
	head[3] = tag >> 8;

	err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);

	/* next fragment header */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while ((payload_length - offset > 0) && (err >= 0)) {
		int len = LOWPAN_FRAG_SIZE;

		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
							len, offset);
		offset += len;
	}

	return err;
}

/*
 * ndo_start_xmit: forward the already-compressed frame to the real WPAN
 * device, fragmenting first when it exceeds the 802.15.4 MTU.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("(%s): package xmit\n", __func__);

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
		goto error;
	}

	if (skb->len <= IEEE802154_MTU) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("(%s): frame is too big, fragmentation is needed\n",
								__func__);
	err = lowpan_skb_fragmentation(skb);
error:
	dev_kfree_skb(skb);
out:
	if (err < 0)
		pr_debug("(%s): ERROR: xmit failed\n", __func__);

	return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}

/* Device destructor: drop the reference taken on the real WPAN device */
static void lowpan_dev_free(struct net_device *dev)
{
	dev_put(lowpan_dev_info(dev)->real_dev);
	free_netdev(dev);
}

/* MLME delegation: the following three just forward to the real device */
static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
}

static u16 lowpan_get_pan_id(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
}

static u16 lowpan_get_short_addr(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
}

static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
};

static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
};

/* rtnl_link_ops->setup: initialize the virtual 6lowpan net_device */
static void lowpan_setup(struct net_device *dev)
{
	pr_debug("(%s)\n", __func__);

	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= lowpan_dev_free;
}

/* rtnl_link_ops->validate: only the hardware address length is checked */
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	pr_debug("(%s)\n", __func__);

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
			return -EINVAL;
	}
	return 0;
}

/*
 * packet_type handler: accept IPHC datagrams and fragments from
 * IEEE 802.15.4 devices and feed them to lowpan_process_data().
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	switch (skb->data[0] & 0xe0) {
	case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
	case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
	case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
		lowpan_process_data(skb);
		break;
	default:
		break;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* rtnl_link_ops->newlink: bind a new 6lowpan device to a real WPAN device.
 * (Definition continues beyond this chunk.) */
static int lowpan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *real_dev;
	struct lowpan_dev_record *entry;

	pr_debug("(%s)\n", __func__);

	if (!tb[IFLA_LINK])
		return -EINVAL;
	/* find and hold real wpan device */
	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	lowpan_dev_info(dev)->real_dev = real_dev;
	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);

	entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
	if (!entry) {
		dev_put(real_dev);
		lowpan_dev_info(dev)->real_dev = NULL;
		return -ENOMEM;
	}

	entry->ldev = dev;

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	INIT_LIST_HEAD(&entry->list);
	list_add_tail(&entry->list, &lowpan_devices);
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	spin_lock_init(&flist_lock);

	register_netdevice(dev);
return 0; } static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; struct lowpan_dev_record *entry, *tmp; struct lowpan_fragment *frame, *tframe; ASSERT_RTNL(); spin_lock(&flist_lock); list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) { del_timer(&frame->timer); list_del(&frame->list); dev_kfree_skb(frame->skb); kfree(frame); } spin_unlock(&flist_lock); mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { list_del(&entry->list); kfree(entry); } } mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx); unregister_netdevice_queue(dev, head); dev_put(real_dev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", .priv_size = sizeof(struct lowpan_dev_info), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, .validate = lowpan_validate, }; static inline int __init lowpan_netlink_init(void) { return rtnl_link_register(&lowpan_link_ops); } static inline void __init lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } static struct packet_type lowpan_packet_type = { .type = __constant_htons(ETH_P_IEEE802154), .func = lowpan_rcv, }; static int __init lowpan_init_module(void) { int err = 0; pr_debug("(%s)\n", __func__); err = lowpan_netlink_init(); if (err < 0) goto out; dev_add_pack(&lowpan_packet_type); out: return err; } static void __exit lowpan_cleanup_module(void) { pr_debug("(%s)\n", __func__); lowpan_netlink_fini(); dev_remove_pack(&lowpan_packet_type); } module_init(lowpan_init_module); module_exit(lowpan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("lowpan");
gpl-2.0
gao-feng/auditns
arch/arm/mach-rpc/ecard.c
3859
26081
/* * linux/arch/arm/kernel/ecard.c * * Copyright 1995-2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Find all installed expansion cards, and handle interrupts from them. * * Created from information from Acorns RiscOS3 PRMs * * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether * podule slot. * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work. * 12-Sep-1997 RMK Created new handling of interrupt enables/disables * - cards can now register their own routine to control * interrupts (recommended). * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled * on reset from Linux. (Caused cards not to respond * under RiscOS without hard reset). * 15-Feb-1998 RMK Added DMA support * 12-Sep-1998 RMK Added EASI support * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment. * 17-Apr-1999 RMK Support for EASI Type C cycles. 
*/ #define ECARD_C #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/reboot.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/mach/irq.h> #include <asm/tlbflush.h> #include "ecard.h" struct ecard_request { void (*fn)(struct ecard_request *); ecard_t *ec; unsigned int address; unsigned int length; unsigned int use_loader; void *buffer; struct completion *complete; }; struct expcard_blacklist { unsigned short manufacturer; unsigned short product; const char *type; }; static ecard_t *cards; static ecard_t *slot_to_expcard[MAX_ECARDS]; static unsigned int ectcr; /* List of descriptions of cards which don't have an extended * identification, or chunk directories containing a description. */ static struct expcard_blacklist __initdata blacklist[] = { { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" } }; asmlinkage extern int ecard_loader_reset(unsigned long base, loader_t loader); asmlinkage extern int ecard_loader_read(int off, unsigned long base, loader_t loader); static inline unsigned short ecard_getu16(unsigned char *v) { return v[0] | v[1] << 8; } static inline signed long ecard_gets24(unsigned char *v) { return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0); } static inline ecard_t *slot_to_ecard(unsigned int slot) { return slot < MAX_ECARDS ? 
slot_to_expcard[slot] : NULL; } /* ===================== Expansion card daemon ======================== */ /* * Since the loader programs on the expansion cards need to be run * in a specific environment, create a separate task with this * environment up, and pass requests to this task as and when we * need to. * * This should allow 99% of loaders to be called from Linux. * * From a security standpoint, we trust the card vendors. This * may be a misplaced trust. */ static void ecard_task_reset(struct ecard_request *req) { struct expansion_card *ec = req->ec; struct resource *res; res = ec->slot_no == 8 ? &ec->resource[ECARD_RES_MEMC] : ec->easi ? &ec->resource[ECARD_RES_EASI] : &ec->resource[ECARD_RES_IOCSYNC]; ecard_loader_reset(res->start, ec->loader); } static void ecard_task_readbytes(struct ecard_request *req) { struct expansion_card *ec = req->ec; unsigned char *buf = req->buffer; unsigned int len = req->length; unsigned int off = req->address; if (ec->slot_no == 8) { void __iomem *base = (void __iomem *) ec->resource[ECARD_RES_MEMC].start; /* * The card maintains an index which increments the address * into a 4096-byte page on each access. We need to keep * track of the counter. */ static unsigned int index; unsigned int page; page = (off >> 12) * 4; if (page > 256 * 4) return; off &= 4095; /* * If we are reading offset 0, or our current index is * greater than the offset, reset the hardware index counter. */ if (off == 0 || index > off) { writeb(0, base); index = 0; } /* * Increment the hardware index counter until we get to the * required offset. The read bytes are discarded. */ while (index < off) { readb(base + page); index += 1; } while (len--) { *buf++ = readb(base + page); index += 1; } } else { unsigned long base = (ec->easi ? 
&ec->resource[ECARD_RES_EASI] : &ec->resource[ECARD_RES_IOCSYNC])->start; void __iomem *pbase = (void __iomem *)base; if (!req->use_loader || !ec->loader) { off *= 4; while (len--) { *buf++ = readb(pbase + off); off += 4; } } else { while(len--) { /* * The following is required by some * expansion card loader programs. */ *(unsigned long *)0x108 = 0; *buf++ = ecard_loader_read(off++, base, ec->loader); } } } } static DECLARE_WAIT_QUEUE_HEAD(ecard_wait); static struct ecard_request *ecard_req; static DEFINE_MUTEX(ecard_mutex); /* * Set up the expansion card daemon's page tables. */ static void ecard_init_pgtables(struct mm_struct *mm) { struct vm_area_struct vma; /* We want to set up the page tables for the following mapping: * Virtual Physical * 0x03000000 0x03000000 * 0x03010000 unmapped * 0x03210000 0x03210000 * 0x03400000 unmapped * 0x08000000 0x08000000 * 0x10000000 unmapped * * FIXME: we don't follow this 100% yet. */ pgd_t *src_pgd, *dst_pgd; src_pgd = pgd_offset(mm, (unsigned long)IO_BASE); dst_pgd = pgd_offset(mm, IO_START); memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE)); src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE); dst_pgd = pgd_offset(mm, EASI_START); memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); vma.vm_flags = VM_EXEC; vma.vm_mm = mm; flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); } static int ecard_init_mm(void) { struct mm_struct * mm = mm_alloc(); struct mm_struct *active_mm = current->active_mm; if (!mm) return -ENOMEM; current->mm = mm; current->active_mm = mm; activate_mm(active_mm, mm); mmdrop(active_mm); ecard_init_pgtables(mm); return 0; } static int ecard_task(void * unused) { /* * Allocate a mm. We're not a lazy-TLB kernel task since we need * to set page table entries where the user space would be. Note * that this also creates the page tables. Failure is not an * option here. 
*/ if (ecard_init_mm()) panic("kecardd: unable to alloc mm\n"); while (1) { struct ecard_request *req; wait_event_interruptible(ecard_wait, ecard_req != NULL); req = xchg(&ecard_req, NULL); if (req != NULL) { req->fn(req); complete(req->complete); } } } /* * Wake the expansion card daemon to action our request. * * FIXME: The test here is not sufficient to detect if the * kcardd is running. */ static void ecard_call(struct ecard_request *req) { DECLARE_COMPLETION_ONSTACK(completion); req->complete = &completion; mutex_lock(&ecard_mutex); ecard_req = req; wake_up(&ecard_wait); /* * Now wait for kecardd to run. */ wait_for_completion(&completion); mutex_unlock(&ecard_mutex); } /* ======================= Mid-level card control ===================== */ static void ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld) { struct ecard_request req; req.fn = ecard_task_readbytes; req.ec = ec; req.address = off; req.length = len; req.use_loader = useld; req.buffer = addr; ecard_call(&req); } int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num) { struct ex_chunk_dir excd; int index = 16; int useld = 0; if (!ec->cid.cd) return 0; while(1) { ecard_readbytes(&excd, ec, index, 8, useld); index += 8; if (c_id(&excd) == 0) { if (!useld && ec->loader) { useld = 1; index = 0; continue; } return 0; } if (c_id(&excd) == 0xf0) { /* link */ index = c_start(&excd); continue; } if (c_id(&excd) == 0x80) { /* loader */ if (!ec->loader) { ec->loader = kmalloc(c_len(&excd), GFP_KERNEL); if (ec->loader) ecard_readbytes(ec->loader, ec, (int)c_start(&excd), c_len(&excd), useld); else return 0; } continue; } if (c_id(&excd) == id && num-- == 0) break; } if (c_id(&excd) & 0x80) { switch (c_id(&excd) & 0x70) { case 0x70: ecard_readbytes((unsigned char *)excd.d.string, ec, (int)c_start(&excd), c_len(&excd), useld); break; case 0x00: break; } } cd->start_offset = c_start(&excd); memcpy(cd->d.string, excd.d.string, 256); return 1; } /* ======================= 
Interrupt control ============================ */ static void ecard_def_irq_enable(ecard_t *ec, int irqnr) { } static void ecard_def_irq_disable(ecard_t *ec, int irqnr) { } static int ecard_def_irq_pending(ecard_t *ec) { return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask; } static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr) { panic("ecard_def_fiq_enable called - impossible"); } static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr) { panic("ecard_def_fiq_disable called - impossible"); } static int ecard_def_fiq_pending(ecard_t *ec) { return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask; } static expansioncard_ops_t ecard_default_ops = { ecard_def_irq_enable, ecard_def_irq_disable, ecard_def_irq_pending, ecard_def_fiq_enable, ecard_def_fiq_disable, ecard_def_fiq_pending }; /* * Enable and disable interrupts from expansion cards. * (interrupts are disabled for these functions). * * They are not meant to be called directly, but via enable/disable_irq. */ static void ecard_irq_unmask(struct irq_data *d) { ecard_t *ec = irq_data_get_irq_chip_data(d); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->claimed && ec->ops->irqenable) ec->ops->irqenable(ec, d->irq); else printk(KERN_ERR "ecard: rejecting request to " "enable IRQs for %d\n", d->irq); } } static void ecard_irq_mask(struct irq_data *d) { ecard_t *ec = irq_data_get_irq_chip_data(d); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->ops && ec->ops->irqdisable) ec->ops->irqdisable(ec, d->irq); } } static struct irq_chip ecard_chip = { .name = "ECARD", .irq_ack = ecard_irq_mask, .irq_mask = ecard_irq_mask, .irq_unmask = ecard_irq_unmask, }; void ecard_enablefiq(unsigned int fiqnr) { ecard_t *ec = slot_to_ecard(fiqnr); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->claimed && ec->ops->fiqenable) ec->ops->fiqenable(ec, fiqnr); else printk(KERN_ERR "ecard: rejecting request to " "enable FIQs for %d\n", fiqnr); } } void ecard_disablefiq(unsigned int fiqnr) { 
ecard_t *ec = slot_to_ecard(fiqnr); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->ops->fiqdisable) ec->ops->fiqdisable(ec, fiqnr); } } static void ecard_dump_irq_state(void) { ecard_t *ec; printk("Expansion card IRQ state:\n"); for (ec = cards; ec; ec = ec->next) { if (ec->slot_no == 8) continue; printk(" %d: %sclaimed, ", ec->slot_no, ec->claimed ? "" : "not "); if (ec->ops && ec->ops->irqpending && ec->ops != &ecard_default_ops) printk("irq %spending\n", ec->ops->irqpending(ec) ? "" : "not "); else printk("irqaddr %p, mask = %02X, status = %02X\n", ec->irqaddr, ec->irqmask, readb(ec->irqaddr)); } } static void ecard_check_lockup(struct irq_desc *desc) { static unsigned long last; static int lockup; /* * If the timer interrupt has not run since the last million * unrecognised expansion card interrupts, then there is * something seriously wrong. Disable the expansion card * interrupts so at least we can continue. * * Maybe we ought to start a timer to re-enable them some time * later? */ if (last == jiffies) { lockup += 1; if (lockup > 1000000) { printk(KERN_ERR "\nInterrupt lockup detected - " "disabling all expansion card interrupts\n"); desc->irq_data.chip->irq_mask(&desc->irq_data); ecard_dump_irq_state(); } } else lockup = 0; /* * If we did not recognise the source of this interrupt, * warn the user, but don't flood the user with these messages. 
*/ if (!last || time_after(jiffies, last + 5*HZ)) { last = jiffies; printk(KERN_WARNING "Unrecognised interrupt from backplane\n"); ecard_dump_irq_state(); } } static void ecard_irq_handler(unsigned int irq, struct irq_desc *desc) { ecard_t *ec; int called = 0; desc->irq_data.chip->irq_mask(&desc->irq_data); for (ec = cards; ec; ec = ec->next) { int pending; if (!ec->claimed || !ec->irq || ec->slot_no == 8) continue; if (ec->ops && ec->ops->irqpending) pending = ec->ops->irqpending(ec); else pending = ecard_default_ops.irqpending(ec); if (pending) { generic_handle_irq(ec->irq); called ++; } } desc->irq_data.chip->irq_unmask(&desc->irq_data); if (called == 0) ecard_check_lockup(desc); } static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed) { void __iomem *address = NULL; int slot = ec->slot_no; if (ec->slot_no == 8) return ECARD_MEMC8_BASE; ectcr &= ~(1 << slot); switch (type) { case ECARD_MEMC: if (slot < 4) address = ECARD_MEMC_BASE + (slot << 14); break; case ECARD_IOC: if (slot < 4) address = ECARD_IOC_BASE + (slot << 14); else address = ECARD_IOC4_BASE + ((slot - 4) << 14); if (address) address += speed << 19; break; case ECARD_EASI: address = ECARD_EASI_BASE + (slot << 24); if (speed == ECARD_FAST) ectcr |= 1 << slot; break; default: break; } #ifdef IOMD_ECTCR iomd_writeb(ectcr, IOMD_ECTCR); #endif return address; } static int ecard_prints(struct seq_file *m, ecard_t *ec) { seq_printf(m, " %d: %s ", ec->slot_no, ec->easi ? "EASI" : " "); if (ec->cid.id == 0) { struct in_chunk_dir incd; seq_printf(m, "[%04X:%04X] ", ec->cid.manufacturer, ec->cid.product); if (!ec->card_desc && ec->cid.cd && ecard_readchunk(&incd, ec, 0xf5, 0)) { ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL); if (ec->card_desc) strcpy((char *)ec->card_desc, incd.d.string); } seq_printf(m, "%s\n", ec->card_desc ? 
ec->card_desc : "*unknown*"); } else seq_printf(m, "Simple card %d\n", ec->cid.id); return 0; } static int ecard_devices_proc_show(struct seq_file *m, void *v) { ecard_t *ec = cards; while (ec) { ecard_prints(m, ec); ec = ec->next; } return 0; } static int ecard_devices_proc_open(struct inode *inode, struct file *file) { return single_open(file, ecard_devices_proc_show, NULL); } static const struct file_operations bus_ecard_proc_fops = { .owner = THIS_MODULE, .open = ecard_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct proc_dir_entry *proc_bus_ecard_dir = NULL; static void ecard_proc_init(void) { proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL); proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops); } #define ec_set_resource(ec,nr,st,sz) \ do { \ (ec)->resource[nr].name = dev_name(&ec->dev); \ (ec)->resource[nr].start = st; \ (ec)->resource[nr].end = (st) + (sz) - 1; \ (ec)->resource[nr].flags = IORESOURCE_MEM; \ } while (0) static void __init ecard_free_card(struct expansion_card *ec) { int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) if (ec->resource[i].flags) release_resource(&ec->resource[i]); kfree(ec); } static struct expansion_card *__init ecard_alloc_card(int type, int slot) { struct expansion_card *ec; unsigned long base; int i; ec = kzalloc(sizeof(ecard_t), GFP_KERNEL); if (!ec) { ec = ERR_PTR(-ENOMEM); goto nomem; } ec->slot_no = slot; ec->easi = type == ECARD_EASI; ec->irq = 0; ec->fiq = 0; ec->dma = NO_DMA; ec->ops = &ecard_default_ops; dev_set_name(&ec->dev, "ecard%d", slot); ec->dev.parent = NULL; ec->dev.bus = &ecard_bus_type; ec->dev.dma_mask = &ec->dma_mask; ec->dma_mask = (u64)0xffffffff; ec->dev.coherent_dma_mask = ec->dma_mask; if (slot < 4) { ec_set_resource(ec, ECARD_RES_MEMC, PODSLOT_MEMC_BASE + (slot << 14), PODSLOT_MEMC_SIZE); base = PODSLOT_IOC0_BASE + (slot << 14); } else base = PODSLOT_IOC4_BASE + ((slot - 4) << 14); #ifdef CONFIG_ARCH_RPC if (slot < 8) { 
ec_set_resource(ec, ECARD_RES_EASI, PODSLOT_EASI_BASE + (slot << 24), PODSLOT_EASI_SIZE); } if (slot == 8) { ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE); } else #endif for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++) ec_set_resource(ec, i + ECARD_RES_IOCSLOW, base + (i << 19), PODSLOT_IOC_SIZE); for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ec->resource[i].flags && request_resource(&iomem_resource, &ec->resource[i])) { dev_err(&ec->dev, "resource(s) not available\n"); ec->resource[i].end -= ec->resource[i].start; ec->resource[i].start = 0; ec->resource[i].flags = 0; } } nomem: return ec; } static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->irq); } static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->dma); } static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); char *str = buf; int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) str += sprintf(str, "%08x %08x %08lx\n", ec->resource[i].start, ec->resource[i].end, ec->resource[i].flags); return str - buf; } static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->cid.manufacturer); } static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->cid.product); } static ssize_t ecard_show_type(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%s\n", ec->easi ? 
"EASI" : "IOC"); } static struct device_attribute ecard_dev_attrs[] = { __ATTR(device, S_IRUGO, ecard_show_device, NULL), __ATTR(dma, S_IRUGO, ecard_show_dma, NULL), __ATTR(irq, S_IRUGO, ecard_show_irq, NULL), __ATTR(resource, S_IRUGO, ecard_show_resources, NULL), __ATTR(type, S_IRUGO, ecard_show_type, NULL), __ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL), __ATTR_NULL, }; int ecard_request_resources(struct expansion_card *ec) { int i, err = 0; for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ecard_resource_end(ec, i) && !request_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i), ec->dev.driver->name)) { err = -EBUSY; break; } } if (err) { while (i--) if (ecard_resource_end(ec, i)) release_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i)); } return err; } EXPORT_SYMBOL(ecard_request_resources); void ecard_release_resources(struct expansion_card *ec) { int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) if (ecard_resource_end(ec, i)) release_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i)); } EXPORT_SYMBOL(ecard_release_resources); void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data) { ec->irq_data = irq_data; barrier(); ec->ops = ops; } EXPORT_SYMBOL(ecard_setirq); void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res, unsigned long offset, unsigned long maxsize) { unsigned long start = ecard_resource_start(ec, res); unsigned long end = ecard_resource_end(ec, res); if (offset > (end - start)) return NULL; start += offset; if (maxsize && end - start > maxsize) end = start + maxsize; return devm_ioremap(&ec->dev, start, end - start); } EXPORT_SYMBOL(ecardm_iomap); /* * Probe for an expansion card. * * If bit 1 of the first byte of the card is set, then the * card does not exist. 
*/ static int __init ecard_probe(int slot, unsigned irq, card_type_t type) { ecard_t **ecp; ecard_t *ec; struct ex_ecid cid; void __iomem *addr; int i, rc; ec = ecard_alloc_card(type, slot); if (IS_ERR(ec)) { rc = PTR_ERR(ec); goto nomem; } rc = -ENODEV; if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL) goto nodev; cid.r_zero = 1; ecard_readbytes(&cid, ec, 0, 16, 0); if (cid.r_zero) goto nodev; ec->cid.id = cid.r_id; ec->cid.cd = cid.r_cd; ec->cid.is = cid.r_is; ec->cid.w = cid.r_w; ec->cid.manufacturer = ecard_getu16(cid.r_manu); ec->cid.product = ecard_getu16(cid.r_prod); ec->cid.country = cid.r_country; ec->cid.irqmask = cid.r_irqmask; ec->cid.irqoff = ecard_gets24(cid.r_irqoff); ec->cid.fiqmask = cid.r_fiqmask; ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff); ec->fiqaddr = ec->irqaddr = addr; if (ec->cid.is) { ec->irqmask = ec->cid.irqmask; ec->irqaddr += ec->cid.irqoff; ec->fiqmask = ec->cid.fiqmask; ec->fiqaddr += ec->cid.fiqoff; } else { ec->irqmask = 1; ec->fiqmask = 4; } for (i = 0; i < ARRAY_SIZE(blacklist); i++) if (blacklist[i].manufacturer == ec->cid.manufacturer && blacklist[i].product == ec->cid.product) { ec->card_desc = blacklist[i].type; break; } ec->irq = irq; /* * hook the interrupt handlers */ if (slot < 8) { irq_set_chip_and_handler(ec->irq, &ecard_chip, handle_level_irq); irq_set_chip_data(ec->irq, ec); set_irq_flags(ec->irq, IRQF_VALID); } #ifdef CONFIG_ARCH_RPC /* On RiscPC, only first two slots have DMA capability */ if (slot < 2) ec->dma = 2 + slot; #endif for (ecp = &cards; *ecp; ecp = &(*ecp)->next); *ecp = ec; slot_to_expcard[slot] = ec; rc = device_register(&ec->dev); if (rc) goto nodev; return 0; nodev: ecard_free_card(ec); nomem: return rc; } /* * Initialise the expansion card system. * Locate all hardware - interrupt management and * actual cards. 
*/ static int __init ecard_init(void) { struct task_struct *task; int slot, irqbase; irqbase = irq_alloc_descs(-1, 0, 8, -1); if (irqbase < 0) return irqbase; task = kthread_run(ecard_task, NULL, "kecardd"); if (IS_ERR(task)) { printk(KERN_ERR "Ecard: unable to create kernel thread: %ld\n", PTR_ERR(task)); irq_free_descs(irqbase, 8); return PTR_ERR(task); } printk("Probing expansion cards\n"); for (slot = 0; slot < 8; slot ++) { if (ecard_probe(slot, irqbase + slot, ECARD_EASI) == -ENODEV) ecard_probe(slot, irqbase + slot, ECARD_IOC); } ecard_probe(8, 11, ECARD_IOC); irq_set_chained_handler(IRQ_EXPANSIONCARD, ecard_irq_handler); ecard_proc_init(); return 0; } subsys_initcall(ecard_init); /* * ECARD "bus" */ static const struct ecard_id * ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec) { int i; for (i = 0; ids[i].manufacturer != 65535; i++) if (ec->cid.manufacturer == ids[i].manufacturer && ec->cid.product == ids[i].product) return ids + i; return NULL; } static int ecard_drv_probe(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); const struct ecard_id *id; int ret; id = ecard_match_device(drv->id_table, ec); ec->claimed = 1; ret = drv->probe(ec, id); if (ret) ec->claimed = 0; return ret; } static int ecard_drv_remove(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); drv->remove(ec); ec->claimed = 0; /* * Restore the default operations. We ensure that the * ops are set before we change the data. */ ec->ops = &ecard_default_ops; barrier(); ec->irq_data = NULL; return 0; } /* * Before rebooting, we must make sure that the expansion card is in a * sensible state, so it can be re-detected. This means that the first * page of the ROM must be visible. We call the expansion cards reset * handler, if any. 
*/ static void ecard_drv_shutdown(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); struct ecard_request req; if (dev->driver) { if (drv->shutdown) drv->shutdown(ec); ec->claimed = 0; } /* * If this card has a loader, call the reset handler. */ if (ec->loader) { req.fn = ecard_task_reset; req.ec = ec; ecard_call(&req); } } int ecard_register_driver(struct ecard_driver *drv) { drv->drv.bus = &ecard_bus_type; return driver_register(&drv->drv); } void ecard_remove_driver(struct ecard_driver *drv) { driver_unregister(&drv->drv); } static int ecard_match(struct device *_dev, struct device_driver *_drv) { struct expansion_card *ec = ECARD_DEV(_dev); struct ecard_driver *drv = ECARD_DRV(_drv); int ret; if (drv->id_table) { ret = ecard_match_device(drv->id_table, ec) != NULL; } else { ret = ec->cid.id == drv->id; } return ret; } struct bus_type ecard_bus_type = { .name = "ecard", .dev_attrs = ecard_dev_attrs, .match = ecard_match, .probe = ecard_drv_probe, .remove = ecard_drv_remove, .shutdown = ecard_drv_shutdown, }; static int ecard_bus_init(void) { return bus_register(&ecard_bus_type); } postcore_initcall(ecard_bus_init); EXPORT_SYMBOL(ecard_readchunk); EXPORT_SYMBOL(ecard_register_driver); EXPORT_SYMBOL(ecard_remove_driver); EXPORT_SYMBOL(ecard_bus_type);
gpl-2.0
manumanfred/kernel_omap
drivers/net/s6gmac.c
4115
34210
/* * Ethernet driver for S6105 on chip network device * (c)2008 emlix GmbH http://www.emlix.com * Authors: Oskar Schirmer <os@emlix.com> * Daniel Gloeckner <dg@emlix.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if.h> #include <linux/stddef.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <variant/hardware.h> #include <variant/dmac.h> #define DRV_NAME "s6gmac" #define DRV_PRMT DRV_NAME ": " /* register declarations */ #define S6_GMAC_MACCONF1 0x000 #define S6_GMAC_MACCONF1_TXENA 0 #define S6_GMAC_MACCONF1_SYNCTX 1 #define S6_GMAC_MACCONF1_RXENA 2 #define S6_GMAC_MACCONF1_SYNCRX 3 #define S6_GMAC_MACCONF1_TXFLOWCTRL 4 #define S6_GMAC_MACCONF1_RXFLOWCTRL 5 #define S6_GMAC_MACCONF1_LOOPBACK 8 #define S6_GMAC_MACCONF1_RESTXFUNC 16 #define S6_GMAC_MACCONF1_RESRXFUNC 17 #define S6_GMAC_MACCONF1_RESTXMACCTRL 18 #define S6_GMAC_MACCONF1_RESRXMACCTRL 19 #define S6_GMAC_MACCONF1_SIMULRES 30 #define S6_GMAC_MACCONF1_SOFTRES 31 #define S6_GMAC_MACCONF2 0x004 #define S6_GMAC_MACCONF2_FULL 0 #define S6_GMAC_MACCONF2_CRCENA 1 #define S6_GMAC_MACCONF2_PADCRCENA 2 #define S6_GMAC_MACCONF2_LENGTHFCHK 4 #define S6_GMAC_MACCONF2_HUGEFRAMENA 5 #define S6_GMAC_MACCONF2_IFMODE 8 #define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1 #define S6_GMAC_MACCONF2_IFMODE_BYTE 2 #define S6_GMAC_MACCONF2_IFMODE_MASK 3 #define S6_GMAC_MACCONF2_PREAMBLELEN 12 #define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F #define S6_GMAC_MACIPGIFG 0x008 #define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0 #define 
S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F #define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8 #define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16 #define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24 #define S6_GMAC_MACHALFDUPLEX 0x00C #define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0 #define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F #define S6_GMAC_MACHALFDUPLEX_RETXMAX 12 #define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F #define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16 #define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17 #define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18 #define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19 #define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20 #define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F #define S6_GMAC_MACMAXFRAMELEN 0x010 #define S6_GMAC_MACMIICONF 0x020 #define S6_GMAC_MACMIICONF_CSEL 0 #define S6_GMAC_MACMIICONF_CSEL_DIV10 0 #define S6_GMAC_MACMIICONF_CSEL_DIV12 1 #define S6_GMAC_MACMIICONF_CSEL_DIV14 2 #define S6_GMAC_MACMIICONF_CSEL_DIV18 3 #define S6_GMAC_MACMIICONF_CSEL_DIV24 4 #define S6_GMAC_MACMIICONF_CSEL_DIV34 5 #define S6_GMAC_MACMIICONF_CSEL_DIV68 6 #define S6_GMAC_MACMIICONF_CSEL_DIV168 7 #define S6_GMAC_MACMIICONF_CSEL_MASK 7 #define S6_GMAC_MACMIICONF_PREAMBLESUPR 4 #define S6_GMAC_MACMIICONF_SCANAUTOINCR 5 #define S6_GMAC_MACMIICMD 0x024 #define S6_GMAC_MACMIICMD_READ 0 #define S6_GMAC_MACMIICMD_SCAN 1 #define S6_GMAC_MACMIIADDR 0x028 #define S6_GMAC_MACMIIADDR_REG 0 #define S6_GMAC_MACMIIADDR_REG_MASK 0x1F #define S6_GMAC_MACMIIADDR_PHY 8 #define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F #define S6_GMAC_MACMIICTRL 0x02C #define S6_GMAC_MACMIISTAT 0x030 #define S6_GMAC_MACMIIINDI 0x034 #define S6_GMAC_MACMIIINDI_BUSY 0 #define S6_GMAC_MACMIIINDI_SCAN 1 #define S6_GMAC_MACMIIINDI_INVAL 2 #define S6_GMAC_MACINTERFSTAT 0x03C #define S6_GMAC_MACINTERFSTAT_LINKFAIL 3 #define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9 #define S6_GMAC_MACSTATADDR1 0x040 #define S6_GMAC_MACSTATADDR2 0x044 #define S6_GMAC_FIFOCONF0 0x048 #define S6_GMAC_FIFOCONF0_HSTRSTWT 0 #define S6_GMAC_FIFOCONF0_HSTRSTSR 1 #define 
S6_GMAC_FIFOCONF0_HSTRSTFR 2 #define S6_GMAC_FIFOCONF0_HSTRSTST 3 #define S6_GMAC_FIFOCONF0_HSTRSTFT 4 #define S6_GMAC_FIFOCONF0_WTMENREQ 8 #define S6_GMAC_FIFOCONF0_SRFENREQ 9 #define S6_GMAC_FIFOCONF0_FRFENREQ 10 #define S6_GMAC_FIFOCONF0_STFENREQ 11 #define S6_GMAC_FIFOCONF0_FTFENREQ 12 #define S6_GMAC_FIFOCONF0_WTMENRPLY 16 #define S6_GMAC_FIFOCONF0_SRFENRPLY 17 #define S6_GMAC_FIFOCONF0_FRFENRPLY 18 #define S6_GMAC_FIFOCONF0_STFENRPLY 19 #define S6_GMAC_FIFOCONF0_FTFENRPLY 20 #define S6_GMAC_FIFOCONF1 0x04C #define S6_GMAC_FIFOCONF2 0x050 #define S6_GMAC_FIFOCONF2_CFGLWM 0 #define S6_GMAC_FIFOCONF2_CFGHWM 16 #define S6_GMAC_FIFOCONF3 0x054 #define S6_GMAC_FIFOCONF3_CFGFTTH 0 #define S6_GMAC_FIFOCONF3_CFGHWMFT 16 #define S6_GMAC_FIFOCONF4 0x058 #define S6_GMAC_FIFOCONF_RSV_PREVDROP 0 #define S6_GMAC_FIFOCONF_RSV_RUNT 1 #define S6_GMAC_FIFOCONF_RSV_FALSECAR 2 #define S6_GMAC_FIFOCONF_RSV_CODEERR 3 #define S6_GMAC_FIFOCONF_RSV_CRCERR 4 #define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5 #define S6_GMAC_FIFOCONF_RSV_LENRANGE 6 #define S6_GMAC_FIFOCONF_RSV_OK 7 #define S6_GMAC_FIFOCONF_RSV_MULTICAST 8 #define S6_GMAC_FIFOCONF_RSV_BROADCAST 9 #define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10 #define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11 #define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12 #define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13 #define S6_GMAC_FIFOCONF_RSV_VLANTAG 14 #define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15 #define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16 #define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF #define S6_GMAC_FIFOCONF5 0x05C #define S6_GMAC_FIFOCONF5_DROPLT64 18 #define S6_GMAC_FIFOCONF5_CFGBYTM 19 #define S6_GMAC_FIFOCONF5_RXDROPSIZE 20 #define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF #define S6_GMAC_STAT_REGS 0x080 #define S6_GMAC_STAT_SIZE_MIN 12 #define S6_GMAC_STATTR64 0x080 #define S6_GMAC_STATTR64_SIZE 18 #define S6_GMAC_STATTR127 0x084 #define S6_GMAC_STATTR127_SIZE 18 #define S6_GMAC_STATTR255 0x088 #define S6_GMAC_STATTR255_SIZE 18 #define S6_GMAC_STATTR511 0x08C #define S6_GMAC_STATTR511_SIZE 18 
#define S6_GMAC_STATTR1K 0x090 #define S6_GMAC_STATTR1K_SIZE 18 #define S6_GMAC_STATTRMAX 0x094 #define S6_GMAC_STATTRMAX_SIZE 18 #define S6_GMAC_STATTRMGV 0x098 #define S6_GMAC_STATTRMGV_SIZE 18 #define S6_GMAC_STATRBYT 0x09C #define S6_GMAC_STATRBYT_SIZE 24 #define S6_GMAC_STATRPKT 0x0A0 #define S6_GMAC_STATRPKT_SIZE 18 #define S6_GMAC_STATRFCS 0x0A4 #define S6_GMAC_STATRFCS_SIZE 12 #define S6_GMAC_STATRMCA 0x0A8 #define S6_GMAC_STATRMCA_SIZE 18 #define S6_GMAC_STATRBCA 0x0AC #define S6_GMAC_STATRBCA_SIZE 22 #define S6_GMAC_STATRXCF 0x0B0 #define S6_GMAC_STATRXCF_SIZE 18 #define S6_GMAC_STATRXPF 0x0B4 #define S6_GMAC_STATRXPF_SIZE 12 #define S6_GMAC_STATRXUO 0x0B8 #define S6_GMAC_STATRXUO_SIZE 12 #define S6_GMAC_STATRALN 0x0BC #define S6_GMAC_STATRALN_SIZE 12 #define S6_GMAC_STATRFLR 0x0C0 #define S6_GMAC_STATRFLR_SIZE 16 #define S6_GMAC_STATRCDE 0x0C4 #define S6_GMAC_STATRCDE_SIZE 12 #define S6_GMAC_STATRCSE 0x0C8 #define S6_GMAC_STATRCSE_SIZE 12 #define S6_GMAC_STATRUND 0x0CC #define S6_GMAC_STATRUND_SIZE 12 #define S6_GMAC_STATROVR 0x0D0 #define S6_GMAC_STATROVR_SIZE 12 #define S6_GMAC_STATRFRG 0x0D4 #define S6_GMAC_STATRFRG_SIZE 12 #define S6_GMAC_STATRJBR 0x0D8 #define S6_GMAC_STATRJBR_SIZE 12 #define S6_GMAC_STATRDRP 0x0DC #define S6_GMAC_STATRDRP_SIZE 12 #define S6_GMAC_STATTBYT 0x0E0 #define S6_GMAC_STATTBYT_SIZE 24 #define S6_GMAC_STATTPKT 0x0E4 #define S6_GMAC_STATTPKT_SIZE 18 #define S6_GMAC_STATTMCA 0x0E8 #define S6_GMAC_STATTMCA_SIZE 18 #define S6_GMAC_STATTBCA 0x0EC #define S6_GMAC_STATTBCA_SIZE 18 #define S6_GMAC_STATTXPF 0x0F0 #define S6_GMAC_STATTXPF_SIZE 12 #define S6_GMAC_STATTDFR 0x0F4 #define S6_GMAC_STATTDFR_SIZE 12 #define S6_GMAC_STATTEDF 0x0F8 #define S6_GMAC_STATTEDF_SIZE 12 #define S6_GMAC_STATTSCL 0x0FC #define S6_GMAC_STATTSCL_SIZE 12 #define S6_GMAC_STATTMCL 0x100 #define S6_GMAC_STATTMCL_SIZE 12 #define S6_GMAC_STATTLCL 0x104 #define S6_GMAC_STATTLCL_SIZE 12 #define S6_GMAC_STATTXCL 0x108 #define S6_GMAC_STATTXCL_SIZE 12 #define 
S6_GMAC_STATTNCL 0x10C #define S6_GMAC_STATTNCL_SIZE 13 #define S6_GMAC_STATTPFH 0x110 #define S6_GMAC_STATTPFH_SIZE 12 #define S6_GMAC_STATTDRP 0x114 #define S6_GMAC_STATTDRP_SIZE 12 #define S6_GMAC_STATTJBR 0x118 #define S6_GMAC_STATTJBR_SIZE 12 #define S6_GMAC_STATTFCS 0x11C #define S6_GMAC_STATTFCS_SIZE 12 #define S6_GMAC_STATTXCF 0x120 #define S6_GMAC_STATTXCF_SIZE 12 #define S6_GMAC_STATTOVR 0x124 #define S6_GMAC_STATTOVR_SIZE 12 #define S6_GMAC_STATTUND 0x128 #define S6_GMAC_STATTUND_SIZE 12 #define S6_GMAC_STATTFRG 0x12C #define S6_GMAC_STATTFRG_SIZE 12 #define S6_GMAC_STATCARRY(n) (0x130 + 4*(n)) #define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n)) #define S6_GMAC_STATCARRY1_RDRP 0 #define S6_GMAC_STATCARRY1_RJBR 1 #define S6_GMAC_STATCARRY1_RFRG 2 #define S6_GMAC_STATCARRY1_ROVR 3 #define S6_GMAC_STATCARRY1_RUND 4 #define S6_GMAC_STATCARRY1_RCSE 5 #define S6_GMAC_STATCARRY1_RCDE 6 #define S6_GMAC_STATCARRY1_RFLR 7 #define S6_GMAC_STATCARRY1_RALN 8 #define S6_GMAC_STATCARRY1_RXUO 9 #define S6_GMAC_STATCARRY1_RXPF 10 #define S6_GMAC_STATCARRY1_RXCF 11 #define S6_GMAC_STATCARRY1_RBCA 12 #define S6_GMAC_STATCARRY1_RMCA 13 #define S6_GMAC_STATCARRY1_RFCS 14 #define S6_GMAC_STATCARRY1_RPKT 15 #define S6_GMAC_STATCARRY1_RBYT 16 #define S6_GMAC_STATCARRY1_TRMGV 25 #define S6_GMAC_STATCARRY1_TRMAX 26 #define S6_GMAC_STATCARRY1_TR1K 27 #define S6_GMAC_STATCARRY1_TR511 28 #define S6_GMAC_STATCARRY1_TR255 29 #define S6_GMAC_STATCARRY1_TR127 30 #define S6_GMAC_STATCARRY1_TR64 31 #define S6_GMAC_STATCARRY2_TDRP 0 #define S6_GMAC_STATCARRY2_TPFH 1 #define S6_GMAC_STATCARRY2_TNCL 2 #define S6_GMAC_STATCARRY2_TXCL 3 #define S6_GMAC_STATCARRY2_TLCL 4 #define S6_GMAC_STATCARRY2_TMCL 5 #define S6_GMAC_STATCARRY2_TSCL 6 #define S6_GMAC_STATCARRY2_TEDF 7 #define S6_GMAC_STATCARRY2_TDFR 8 #define S6_GMAC_STATCARRY2_TXPF 9 #define S6_GMAC_STATCARRY2_TBCA 10 #define S6_GMAC_STATCARRY2_TMCA 11 #define S6_GMAC_STATCARRY2_TPKT 12 #define S6_GMAC_STATCARRY2_TBYT 13 #define 
S6_GMAC_STATCARRY2_TFRG 14 #define S6_GMAC_STATCARRY2_TUND 15 #define S6_GMAC_STATCARRY2_TOVR 16 #define S6_GMAC_STATCARRY2_TXCF 17 #define S6_GMAC_STATCARRY2_TFCS 18 #define S6_GMAC_STATCARRY2_TJBR 19 #define S6_GMAC_HOST_PBLKCTRL 0x140 #define S6_GMAC_HOST_PBLKCTRL_TXENA 0 #define S6_GMAC_HOST_PBLKCTRL_RXENA 1 #define S6_GMAC_HOST_PBLKCTRL_TXSRES 2 #define S6_GMAC_HOST_PBLKCTRL_RXSRES 3 #define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8 #define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12 #define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4 #define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5 #define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6 #define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7 #define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF #define S6_GMAC_HOST_PBLKCTRL_STATENA 16 #define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17 #define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18 #define S6_GMAC_HOST_PBLKCTRL_RGMII 19 #define S6_GMAC_HOST_INTMASK 0x144 #define S6_GMAC_HOST_INTSTAT 0x148 #define S6_GMAC_HOST_INT_TXBURSTOVER 3 #define S6_GMAC_HOST_INT_TXPREWOVER 4 #define S6_GMAC_HOST_INT_RXBURSTUNDER 5 #define S6_GMAC_HOST_INT_RXPOSTRFULL 6 #define S6_GMAC_HOST_INT_RXPOSTRUNDER 7 #define S6_GMAC_HOST_RXFIFOHWM 0x14C #define S6_GMAC_HOST_CTRLFRAMXP 0x150 #define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n)) #define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n)) #define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n)) #define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n)) #define S6_GMAC_BURST_PREWR 0x1B0 #define S6_GMAC_BURST_PREWR_LEN 0 #define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1) #define S6_GMAC_BURST_PREWR_CFE 20 #define S6_GMAC_BURST_PREWR_PPE 21 #define S6_GMAC_BURST_PREWR_FCS 22 #define S6_GMAC_BURST_PREWR_PAD 23 #define S6_GMAC_BURST_POSTRD 0x1D0 #define S6_GMAC_BURST_POSTRD_LEN 0 #define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1) #define S6_GMAC_BURST_POSTRD_DROP 20 /* data handling */ #define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */ #define S6_NUM_RX_SKB 16 #define S6_MAX_FRLEN 1536 struct s6gmac { u32 reg; u32 tx_dma; u32 rx_dma; u32 io; u8 
tx_chan; u8 rx_chan; spinlock_t lock; u8 tx_skb_i, tx_skb_o; u8 rx_skb_i, rx_skb_o; struct sk_buff *tx_skb[S6_NUM_TX_SKB]; struct sk_buff *rx_skb[S6_NUM_RX_SKB]; unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)]; unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)]; struct phy_device *phydev; struct { struct mii_bus *bus; int irq[PHY_MAX_ADDR]; } mii; struct { unsigned int mbit; u8 giga; u8 isup; u8 full; } link; }; static void s6gmac_rx_fillfifo(struct s6gmac *pd) { struct sk_buff *skb; while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) { pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb; s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan, pd->io, (u32)skb->data, S6_MAX_FRLEN); } } static void s6gmac_rx_interrupt(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); u32 pfx; struct sk_buff *skb; while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) > s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) { skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]; pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD); if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) { dev_kfree_skb_irq(skb); } else { skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) & S6_GMAC_BURST_POSTRD_LEN_MASK); skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx(skb); } } } static void s6gmac_tx_interrupt(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) > s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) { dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); } if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) netif_wake_queue(dev); } struct s6gmac_statinf { unsigned reg_size : 4; /* 0: unused */ unsigned reg_off : 6; unsigned net_index : 6; }; #define S6_STATS_B (8 * sizeof(u32)) #define S6_STATS_C(b, r, f) [b] = { \ BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \ 
BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \ >= (1<<4)) + \ r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \ BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \ >= ((1<<6)-1)) + \ (r - S6_GMAC_STAT_REGS) / sizeof(u32), \ BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \ % sizeof(unsigned long)) + \ BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \ / sizeof(unsigned long)) >= (1<<6))) + \ BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \ != sizeof(unsigned long))) + \ (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)}, static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { { S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes) S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets) S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast) S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors) S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors) S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped) }, { S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes) S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets) S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors) S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors) S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions) S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped) S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors) 
S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors) S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors) S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors) S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors) } }; static void s6gmac_stats_collect(struct s6gmac *pd, const struct s6gmac_statinf *inf) { int b; for (b = 0; b < S6_STATS_B; b++) { if (inf[b].reg_size) { pd->stats[inf[b].net_index] += readl(pd->reg + S6_GMAC_STAT_REGS + sizeof(u32) * inf[b].reg_off); } } } static void s6gmac_stats_carry(struct s6gmac *pd, const struct s6gmac_statinf *inf, u32 mask) { int b; while (mask) { b = fls(mask) - 1; mask &= ~(1 << b); pd->carry[inf[b].net_index] += (1 << inf[b].reg_size); } } static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry) { int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) & ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry)); return r; } static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry) { u32 mask; mask = s6gmac_stats_pending(pd, carry); if (mask) { writel(mask, pd->reg + S6_GMAC_STATCARRY(carry)); s6gmac_stats_carry(pd, &statinf[carry][0], mask); } } static irqreturn_t s6gmac_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct s6gmac *pd = netdev_priv(dev); if (!dev) return IRQ_NONE; spin_lock(&pd->lock); if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan)) s6gmac_rx_interrupt(dev); s6gmac_rx_fillfifo(pd); if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan)) s6gmac_tx_interrupt(dev); s6gmac_stats_interrupt(pd, 0); s6gmac_stats_interrupt(pd, 1); spin_unlock(&pd->lock); return IRQ_HANDLED; } static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n, u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi) { writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n)); writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n)); writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n)); writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n)); } static inline void 
s6gmac_stop_device(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); writel(0, pd->reg + S6_GMAC_MACCONF1); } static inline void s6gmac_init_device(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); int is_rgmii = !!(pd->phydev->supported & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)); #if 0 writel(1 << S6_GMAC_MACCONF1_SYNCTX | 1 << S6_GMAC_MACCONF1_SYNCRX | 1 << S6_GMAC_MACCONF1_TXFLOWCTRL | 1 << S6_GMAC_MACCONF1_RXFLOWCTRL | 1 << S6_GMAC_MACCONF1_RESTXFUNC | 1 << S6_GMAC_MACCONF1_RESRXFUNC | 1 << S6_GMAC_MACCONF1_RESTXMACCTRL | 1 << S6_GMAC_MACCONF1_RESRXMACCTRL, pd->reg + S6_GMAC_MACCONF1); #endif writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1); udelay(1000); writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA, pd->reg + S6_GMAC_MACCONF1); writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES | 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES, pd->reg + S6_GMAC_HOST_PBLKCTRL); writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, pd->reg + S6_GMAC_HOST_PBLKCTRL); writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA | (dev->flags & IFF_LOOPBACK ? 1 : 0) << S6_GMAC_MACCONF1_LOOPBACK, pd->reg + S6_GMAC_MACCONF1); writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ? dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN, pd->reg + S6_GMAC_MACMAXFRAMELEN); writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL | 1 << S6_GMAC_MACCONF2_PADCRCENA | 1 << S6_GMAC_MACCONF2_LENGTHFCHK | (pd->link.giga ? 
S6_GMAC_MACCONF2_IFMODE_BYTE : S6_GMAC_MACCONF2_IFMODE_NIBBLE) << S6_GMAC_MACCONF2_IFMODE | 7 << S6_GMAC_MACCONF2_PREAMBLELEN, pd->reg + S6_GMAC_MACCONF2); writel(0, pd->reg + S6_GMAC_MACSTATADDR1); writel(0, pd->reg + S6_GMAC_MACSTATADDR2); writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ | 1 << S6_GMAC_FIFOCONF0_SRFENREQ | 1 << S6_GMAC_FIFOCONF0_FRFENREQ | 1 << S6_GMAC_FIFOCONF0_STFENREQ | 1 << S6_GMAC_FIFOCONF0_FTFENREQ, pd->reg + S6_GMAC_FIFOCONF0); writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH | 128 << S6_GMAC_FIFOCONF3_CFGHWMFT, pd->reg + S6_GMAC_FIFOCONF3); writel((S6_GMAC_FIFOCONF_RSV_MASK & ~( 1 << S6_GMAC_FIFOCONF_RSV_RUNT | 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | 1 << S6_GMAC_FIFOCONF_RSV_OK | 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) | 1 << S6_GMAC_FIFOCONF5_DROPLT64 | pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM | 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE, pd->reg + S6_GMAC_FIFOCONF5); writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT | 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED, pd->reg + S6_GMAC_FIFOCONF4); s6gmac_set_dstaddr(pd, 0, 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF); s6gmac_set_dstaddr(pd, 1, dev->dev_addr[5] | dev->dev_addr[4] << 8 | dev->dev_addr[3] << 16 | dev->dev_addr[2] << 24, dev->dev_addr[1] | dev->dev_addr[0] << 8, 0xFFFFFFFF, 0x0000FFFF); s6gmac_set_dstaddr(pd, 2, 0x00000000, 0x00000100, 0x00000000, 0x00000100); s6gmac_set_dstaddr(pd, 3, 0x00000000, 0x00000000, 0x00000000, 0x00000000); writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA | 1 << S6_GMAC_HOST_PBLKCTRL_RXENA | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | 1 << 
S6_GMAC_HOST_PBLKCTRL_STATCLEAR | is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, pd->reg + S6_GMAC_HOST_PBLKCTRL); } static void s6mii_enable(struct s6gmac *pd) { writel(readl(pd->reg + S6_GMAC_MACCONF1) & ~(1 << S6_GMAC_MACCONF1_SOFTRES), pd->reg + S6_GMAC_MACCONF1); writel((readl(pd->reg + S6_GMAC_MACMIICONF) & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL)) | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL), pd->reg + S6_GMAC_MACMIICONF); } static int s6mii_busy(struct s6gmac *pd, int tmo) { while (readl(pd->reg + S6_GMAC_MACMIIINDI)) { if (--tmo == 0) return -ETIME; udelay(64); } return 0; } static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum) { struct s6gmac *pd = bus->priv; s6mii_enable(pd); if (s6mii_busy(pd, 256)) return -ETIME; writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | regnum << S6_GMAC_MACMIIADDR_REG, pd->reg + S6_GMAC_MACMIIADDR); writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD); writel(0, pd->reg + S6_GMAC_MACMIICMD); if (s6mii_busy(pd, 256)) return -ETIME; return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT); } static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) { struct s6gmac *pd = bus->priv; s6mii_enable(pd); if (s6mii_busy(pd, 256)) return -ETIME; writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | regnum << S6_GMAC_MACMIIADDR_REG, pd->reg + S6_GMAC_MACMIIADDR); writel(value, pd->reg + S6_GMAC_MACMIICTRL); if (s6mii_busy(pd, 256)) return -ETIME; return 0; } static int s6mii_reset(struct mii_bus *bus) { struct s6gmac *pd = bus->priv; s6mii_enable(pd); if (s6mii_busy(pd, PHY_INIT_TIMEOUT)) return -ETIME; return 0; } static void s6gmac_set_rgmii_txclock(struct s6gmac *pd) { u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL); pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC); switch (pd->link.mbit) { case 10: pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC; break; case 100: pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC; break; case 1000: pllsel 
|= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC; break; default: return; } writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL); } static inline void s6gmac_linkisup(struct net_device *dev, int isup) { struct s6gmac *pd = netdev_priv(dev); struct phy_device *phydev = pd->phydev; pd->link.full = phydev->duplex; pd->link.giga = (phydev->speed == 1000); if (pd->link.mbit != phydev->speed) { pd->link.mbit = phydev->speed; s6gmac_set_rgmii_txclock(pd); } pd->link.isup = isup; if (isup) netif_carrier_on(dev); phy_print_status(phydev); } static void s6gmac_adjust_link(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); struct phy_device *phydev = pd->phydev; if (pd->link.isup && (!phydev->link || (pd->link.mbit != phydev->speed) || (pd->link.full != phydev->duplex))) { pd->link.isup = 0; netif_tx_disable(dev); if (!phydev->link) { netif_carrier_off(dev); phy_print_status(phydev); } } if (!pd->link.isup && phydev->link) { if (pd->link.full != phydev->duplex) { u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); if (phydev->duplex) maccfg |= 1 << S6_GMAC_MACCONF2_FULL; else maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL); writel(maccfg, pd->reg + S6_GMAC_MACCONF2); } if (pd->link.giga != (phydev->speed == 1000)) { u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5); u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK << S6_GMAC_MACCONF2_IFMODE); if (phydev->speed == 1000) { fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM; maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE << S6_GMAC_MACCONF2_IFMODE; } else { fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM); maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE << S6_GMAC_MACCONF2_IFMODE; } writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5); writel(maccfg, pd->reg + S6_GMAC_MACCONF2); } if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) netif_wake_queue(dev); s6gmac_linkisup(dev, 1); } } static inline int s6gmac_phy_start(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); int i = 0; struct phy_device *p = NULL; while ((i 
< PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) i++; p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, PHY_INTERFACE_MODE_RGMII); if (IS_ERR(p)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(p); } p->supported &= PHY_GBIT_FEATURES; p->advertising = p->supported; pd->phydev = p; return 0; } static inline void s6gmac_init_stats(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); u32 mask; mask = 1 << S6_GMAC_STATCARRY1_RDRP | 1 << S6_GMAC_STATCARRY1_RJBR | 1 << S6_GMAC_STATCARRY1_RFRG | 1 << S6_GMAC_STATCARRY1_ROVR | 1 << S6_GMAC_STATCARRY1_RUND | 1 << S6_GMAC_STATCARRY1_RCDE | 1 << S6_GMAC_STATCARRY1_RFLR | 1 << S6_GMAC_STATCARRY1_RALN | 1 << S6_GMAC_STATCARRY1_RMCA | 1 << S6_GMAC_STATCARRY1_RFCS | 1 << S6_GMAC_STATCARRY1_RPKT | 1 << S6_GMAC_STATCARRY1_RBYT; writel(mask, pd->reg + S6_GMAC_STATCARRY(0)); writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0)); mask = 1 << S6_GMAC_STATCARRY2_TDRP | 1 << S6_GMAC_STATCARRY2_TNCL | 1 << S6_GMAC_STATCARRY2_TXCL | 1 << S6_GMAC_STATCARRY2_TEDF | 1 << S6_GMAC_STATCARRY2_TPKT | 1 << S6_GMAC_STATCARRY2_TBYT | 1 << S6_GMAC_STATCARRY2_TFRG | 1 << S6_GMAC_STATCARRY2_TUND | 1 << S6_GMAC_STATCARRY2_TOVR | 1 << S6_GMAC_STATCARRY2_TFCS | 1 << S6_GMAC_STATCARRY2_TJBR; writel(mask, pd->reg + S6_GMAC_STATCARRY(1)); writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1)); } static inline void s6gmac_init_dmac(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); s6dmac_disable_chan(pd->tx_dma, pd->tx_chan); s6dmac_disable_chan(pd->rx_dma, pd->rx_chan); s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX); s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX); } static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&pd->lock, flags); writel(skb->len << S6_GMAC_BURST_PREWR_LEN | 0 << S6_GMAC_BURST_PREWR_CFE | 1 << S6_GMAC_BURST_PREWR_PPE | 1 << S6_GMAC_BURST_PREWR_FCS | 
((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD, pd->reg + S6_GMAC_BURST_PREWR); s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan, (u32)skb->data, pd->io, skb->len); if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) netif_stop_queue(dev); if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) { printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n", pd->tx_skb_o, pd->tx_skb_i); BUG(); } pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb; spin_unlock_irqrestore(&pd->lock, flags); return 0; } static void s6gmac_tx_timeout(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&pd->lock, flags); s6gmac_tx_interrupt(dev); spin_unlock_irqrestore(&pd->lock, flags); } static int s6gmac_open(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); unsigned long flags; phy_read_status(pd->phydev); spin_lock_irqsave(&pd->lock, flags); pd->link.mbit = 0; s6gmac_linkisup(dev, pd->phydev->link); s6gmac_init_device(dev); s6gmac_init_stats(dev); s6gmac_init_dmac(dev); s6gmac_rx_fillfifo(pd); s6dmac_enable_chan(pd->rx_dma, pd->rx_chan, 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1); s6dmac_enable_chan(pd->tx_dma, pd->tx_chan, 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1); writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER | 0 << S6_GMAC_HOST_INT_TXPREWOVER | 0 << S6_GMAC_HOST_INT_RXBURSTUNDER | 0 << S6_GMAC_HOST_INT_RXPOSTRFULL | 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER, pd->reg + S6_GMAC_HOST_INTMASK); spin_unlock_irqrestore(&pd->lock, flags); phy_start(pd->phydev); netif_start_queue(dev); return 0; } static int s6gmac_stop(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); unsigned long flags; netif_stop_queue(dev); phy_stop(pd->phydev); spin_lock_irqsave(&pd->lock, flags); s6gmac_init_dmac(dev); s6gmac_stop_device(dev); while (pd->tx_skb_i != pd->tx_skb_o) dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); while (pd->rx_skb_i != pd->rx_skb_o) dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]); 
spin_unlock_irqrestore(&pd->lock, flags); return 0; } static struct net_device_stats *s6gmac_stats(struct net_device *dev) { struct s6gmac *pd = netdev_priv(dev); struct net_device_stats *st = (struct net_device_stats *)&pd->stats; int i; do { unsigned long flags; spin_lock_irqsave(&pd->lock, flags); for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++) pd->stats[i] = pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); s6gmac_stats_collect(pd, &statinf[0][0]); s6gmac_stats_collect(pd, &statinf[1][0]); i = s6gmac_stats_pending(pd, 0) | s6gmac_stats_pending(pd, 1); spin_unlock_irqrestore(&pd->lock, flags); } while (i); st->rx_errors = st->rx_crc_errors + st->rx_frame_errors + st->rx_length_errors + st->rx_missed_errors; st->tx_errors += st->tx_aborted_errors; return st; } static int __devinit s6gmac_probe(struct platform_device *pdev) { struct net_device *dev; struct s6gmac *pd; int res; unsigned long i; struct mii_bus *mb; dev = alloc_etherdev(sizeof(*pd)); if (!dev) { printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n"); return -ENOMEM; } dev->open = s6gmac_open; dev->stop = s6gmac_stop; dev->hard_start_xmit = s6gmac_tx; dev->tx_timeout = s6gmac_tx_timeout; dev->watchdog_timeo = HZ; dev->get_stats = s6gmac_stats; dev->irq = platform_get_irq(pdev, 0); pd = netdev_priv(dev); memset(pd, 0, sizeof(*pd)); spin_lock_init(&pd->lock); pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start; i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start; pd->tx_dma = DMA_MASK_DMAC(i); pd->tx_chan = DMA_INDEX_CHNL(i); i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start; pd->rx_dma = DMA_MASK_DMAC(i); pd->rx_chan = DMA_INDEX_CHNL(i); pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev); if (res) { printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq); goto errirq; } res = register_netdev(dev); if (res) { printk(KERN_ERR DRV_PRMT "error registering device %s\n", 
dev->name); goto errdev; } mb = mdiobus_alloc(); if (!mb) { printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); goto errmii; } mb->name = "s6gmac_mii"; mb->read = s6mii_read; mb->write = s6mii_write; mb->reset = s6mii_reset; mb->priv = pd; snprintf(mb->id, MII_BUS_ID_SIZE, "0"); mb->phy_mask = ~(1 << 0); mb->irq = &pd->mii.irq[0]; for (i = 0; i < PHY_MAX_ADDR; i++) { int n = platform_get_irq(pdev, i + 1); if (n < 0) n = PHY_POLL; pd->mii.irq[i] = n; } mdiobus_register(mb); pd->mii.bus = mb; res = s6gmac_phy_start(dev); if (res) return res; platform_set_drvdata(pdev, dev); return 0; errmii: unregister_netdev(dev); errdev: free_irq(dev->irq, dev); errirq: free_netdev(dev); return res; } static int __devexit s6gmac_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); if (dev) { struct s6gmac *pd = netdev_priv(dev); mdiobus_unregister(pd->mii.bus); unregister_netdev(dev); free_irq(dev->irq, dev); free_netdev(dev); platform_set_drvdata(pdev, NULL); } return 0; } static struct platform_driver s6gmac_driver = { .probe = s6gmac_probe, .remove = __devexit_p(s6gmac_remove), .driver = { .name = "s6gmac", .owner = THIS_MODULE, }, }; static int __init s6gmac_init(void) { printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n"); return platform_driver_register(&s6gmac_driver); } static void __exit s6gmac_exit(void) { platform_driver_unregister(&s6gmac_driver); } module_init(s6gmac_init); module_exit(s6gmac_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
gpl-2.0
hyuh/kernel-dlx
fs/dlm/rcom.c
4883
14903
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "rcom.h"
#include "recover.h"
#include "dir.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "util.h"
/* NOTE: a duplicate #include "member.h" was removed from the end of this
   list; it was included twice. */

/* True once a synchronous rcom reply has been copied into
   ls->ls_recover_buf; used as the wait condition for dlm_wait_function(). */
static int rcom_response(struct dlm_ls *ls)
{
	return test_bit(LSFL_RCOM_READY, &ls->ls_flags);
}

/*
 * Allocate and initialize an outgoing recovery message (dlm_rcom header
 * plus 'len' bytes of payload) destined for 'to_nodeid'.
 *
 * On success returns 0 and hands back the message (*rc_ret) and the
 * lowcomms handle (*mh_ret) needed to commit it; on allocation failure
 * returns -ENOBUFS.  rc_seq is stamped from ls_recover_seq under
 * ls_recover_lock so stale replies can be detected later.
 */
static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
		       struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	char *mb;
	int mb_len = sizeof(struct dlm_rcom) + len;

	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
	if (!mh) {
		log_print("create_rcom to %d type %d len %d ENOBUFS",
			  to_nodeid, type, len);
		return -ENOBUFS;
	}
	memset(mb, 0, mb_len);

	rc = (struct dlm_rcom *) mb;

	rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	rc->rc_header.h_lockspace = ls->ls_global_id;
	rc->rc_header.h_nodeid = dlm_our_nodeid();
	rc->rc_header.h_length = mb_len;
	rc->rc_header.h_cmd = DLM_RCOM;

	rc->rc_type = type;

	spin_lock(&ls->ls_recover_lock);
	rc->rc_seq = ls->ls_recover_seq;
	spin_unlock(&ls->ls_recover_lock);

	*mh_ret = mh;
	*rc_ret = rc;
	return 0;
}

/* Byte-swap the message for the wire and commit it to lowcomms. */
static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
		      struct dlm_rcom *rc)
{
	dlm_rcom_out(rc);
	dlm_lowcomms_commit_buffer(mh);
}

/* Fill in the rcom_status payload of an outgoing DLM_RCOM_STATUS request. */
static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs,
			    uint32_t flags)
{
	rs->rs_flags = cpu_to_le32(flags);
}

/* When replying to a status request, a node also sends back its
   configuration values.  The requesting node then checks that the remote
   node is configured the same way as itself. */

static void set_rcom_config(struct dlm_ls *ls, struct rcom_config *rf,
			    uint32_t num_slots)
{
	rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen);
	rf->rf_lsflags = cpu_to_le32(ls->ls_exflags);

	rf->rf_our_slot = cpu_to_le16(ls->ls_slot);
	rf->rf_num_slots = cpu_to_le16(num_slots);
	rf->rf_generation = cpu_to_le32(ls->ls_generation);
}

/*
 * Verify that the remote node speaks our protocol major version and is
 * configured with the same lvb length and exflags as this lockspace.
 * Returns 0 on match, -EPROTO on any mismatch.
 */
static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
	struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;

	if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
		log_error(ls, "version mismatch: %x nodeid %d: %x",
			  DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid,
			  rc->rc_header.h_version);
		return -EPROTO;
	}

	if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen ||
	    le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) {
		log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
			  ls->ls_lvblen, ls->ls_exflags, nodeid,
			  le32_to_cpu(rf->rf_lvblen),
			  le32_to_cpu(rf->rf_lsflags));
		return -EPROTO;
	}
	return 0;
}

/* Arm the sync-reply machinery: bump the sequence number (stored into the
   outgoing request's rc_id) and set RCOM_WAIT so receive_sync_reply()
   will accept a matching reply. */
static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
{
	spin_lock(&ls->ls_rcom_spin);
	*new_seq = ++ls->ls_rcom_seq;
	set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
	spin_unlock(&ls->ls_rcom_spin);
}

/* Disarm the sync-reply machinery; any reply arriving after this is
   rejected by receive_sync_reply(). */
static void disallow_sync_reply(struct dlm_ls *ls)
{
	spin_lock(&ls->ls_rcom_spin);
	clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
	spin_unlock(&ls->ls_rcom_spin);
}

/*
 * low nodeid gathers one slot value at a time from each node.
 * it sets need_slots=0, and saves rf_our_slot returned from each
 * rcom_config.
 *
 * other nodes gather all slot values at once from the low nodeid.
 * they set need_slots=1, and ignore the rf_our_slot returned from each
 * rcom_config.
 they use the rf_num_slots returned from the low
 * node's rcom_config.
 */

int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error = 0;

	ls->ls_recover_nodeid = nodeid;

	/* asking ourself: answer directly into the recovery buffer,
	   no message needed */
	if (nodeid == dlm_our_nodeid()) {
		rc = ls->ls_recover_buf;
		rc->rc_result = dlm_recover_status(ls);
		goto out;
	}

	error = create_rcom(ls, nodeid, DLM_RCOM_STATUS,
			    sizeof(struct rcom_status), &rc, &mh);
	if (error)
		goto out;

	set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags);

	allow_sync_reply(ls, &rc->rc_id);
	memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);

	send_rcom(ls, mh, rc);

	/* block until receive_sync_reply() copies the reply into
	   ls_recover_buf and sets RCOM_READY */
	error = dlm_wait_function(ls, &rcom_response);
	disallow_sync_reply(ls);
	if (error)
		goto out;

	rc = ls->ls_recover_buf;

	if (rc->rc_result == -ESRCH) {
		/* we pretend the remote lockspace exists with 0 status */
		log_debug(ls, "remote node %d not ready", nodeid);
		rc->rc_result = 0;
		error = 0;
	} else {
		error = check_rcom_config(ls, rc, nodeid);
	}

	/* the caller looks at rc_result for the remote recovery status */
 out:
	return error;
}

/* Answer a DLM_RCOM_STATUS request: reply with our recovery status and
   config; when the requester asked for slots (DLM_RSF_NEED_SLOTS) also
   append the slot table, re-checking under ls_recover_lock that the slot
   count did not change between sizing and copying the reply. */
static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	struct rcom_status *rs;
	uint32_t status;
	int nodeid = rc_in->rc_header.h_nodeid;
	int len = sizeof(struct rcom_config);
	int num_slots = 0;
	int error;

	/* old (pre-slots) senders have no rcom_status payload */
	if (!dlm_slots_version(&rc_in->rc_header)) {
		status = dlm_recover_status(ls);
		goto do_create;
	}

	rs = (struct rcom_status *)rc_in->rc_buf;

	if (!(rs->rs_flags & DLM_RSF_NEED_SLOTS)) {
		status = dlm_recover_status(ls);
		goto do_create;
	}

	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	num_slots = ls->ls_num_slots;
	spin_unlock(&ls->ls_recover_lock);
	len += num_slots * sizeof(struct rcom_slot);

 do_create:
	error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
			    len, &rc, &mh);
	if (error)
		return;

	rc->rc_id = rc_in->rc_id;
	rc->rc_seq_reply = rc_in->rc_seq;
	rc->rc_result = status;

	set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots);

	if (!num_slots)
		goto do_send;

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_num_slots != num_slots) {
		/* slot table changed while unlocked; send an empty
		   (status 0) reply so the requester retries */
		spin_unlock(&ls->ls_recover_lock);
		log_debug(ls, "receive_rcom_status num_slots %d to %d",
			  num_slots, ls->ls_num_slots);
		rc->rc_result = 0;
		set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, 0);
		goto do_send;
	}

	dlm_slots_copy_out(ls, rc);
	spin_unlock(&ls->ls_recover_lock);

 do_send:
	send_rcom(ls, mh, rc);
}

/* Accept a synchronous reply iff we are waiting for one and its rc_id
   matches the sequence number we armed in allow_sync_reply(); copy it
   into ls_recover_buf and wake the waiter.  Anything else is stale. */
static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	spin_lock(&ls->ls_rcom_spin);
	if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
	    rc_in->rc_id != ls->ls_rcom_seq) {
		log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
			  rc_in->rc_type, rc_in->rc_header.h_nodeid,
			  (unsigned long long)rc_in->rc_id,
			  (unsigned long long)ls->ls_rcom_seq);
		goto out;
	}
	memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
	set_bit(LSFL_RCOM_READY, &ls->ls_flags);
	clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_wait_general);
 out:
	spin_unlock(&ls->ls_rcom_spin);
}

/* Ask 'nodeid' for the next batch of master resource names following
   'last_name'; the reply lands synchronously in ls_recover_buf.  Asking
   ourself short-circuits through dlm_copy_master_names(). */
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error = 0;
	int max_size = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom);

	ls->ls_recover_nodeid = nodeid;

	if (nodeid == dlm_our_nodeid()) {
		ls->ls_recover_buf->rc_header.h_length =
			dlm_config.ci_buffer_size;
		dlm_copy_master_names(ls, last_name, last_len,
				      ls->ls_recover_buf->rc_buf,
				      max_size, nodeid);
		goto out;
	}

	error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
	if (error)
		goto out;
	memcpy(rc->rc_buf, last_name, last_len);

	allow_sync_reply(ls, &rc->rc_id);
	memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);

	send_rcom(ls, mh, rc);

	error = dlm_wait_function(ls, &rcom_response);
	disallow_sync_reply(ls);
 out:
	return error;
}

/* Answer a DLM_RCOM_NAMES request with the next chunk of master names. */
static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error, inlen, outlen, nodeid;

	nodeid = rc_in->rc_header.h_nodeid;
	inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
	outlen = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom);

	error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
	if (error)
		return;
	rc->rc_id = rc_in->rc_id;
	rc->rc_seq_reply = rc_in->rc_seq;

	dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
			      nodeid);
	send_rcom(ls, mh, rc);
}

/* Ask the directory node for the master of resource 'r'.  rc_id carries
   the rsb pointer so the async reply can be matched back up. */
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	struct dlm_ls *ls = r->res_ls;
	int error;

	error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length,
			    &rc, &mh);
	if (error)
		goto out;
	memcpy(rc->rc_buf, r->res_name, r->res_length);
	rc->rc_id = (unsigned long) r;

	send_rcom(ls, mh, rc);
 out:
	return error;
}

/* Answer a DLM_RCOM_LOOKUP: resolve the named resource in our directory
   and send the master nodeid (or a negative errno) back in rc_result. */
static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
	int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);

	error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
	if (error)
		return;

	error = dlm_dir_lookup(ls, nodeid, rc_in->rc_buf, len, &ret_nodeid);
	if (error)
		ret_nodeid = error;
	rc->rc_result = ret_nodeid;
	rc->rc_id = rc_in->rc_id;
	rc->rc_seq_reply = rc_in->rc_seq;

	send_rcom(ls, mh, rc);
}

/* Hand a lookup reply off to the recovery code. */
static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	dlm_recover_master_reply(ls, rc_in);
}

/* Serialize one lkb into the wire-format rcom_lock payload. */
static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
			   struct rcom_lock *rl)
{
	memset(rl, 0, sizeof(*rl));

	rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid);
	rl->rl_lkid = cpu_to_le32(lkb->lkb_id);
	rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags);
	rl->rl_flags = cpu_to_le32(lkb->lkb_flags);
	rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
	rl->rl_rqmode = lkb->lkb_rqmode;
	rl->rl_grmode = lkb->lkb_grmode;
	rl->rl_status = lkb->lkb_status;
	rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type);

	if (lkb->lkb_bastfn)
		rl->rl_asts |= DLM_CB_BAST;
	if (lkb->lkb_astfn)
		rl->rl_asts |= DLM_CB_CAST;

	rl->rl_namelen = cpu_to_le16(r->res_length);
	memcpy(rl->rl_name, r->res_name, r->res_length);

	/* FIXME: might we have an lvb without DLM_LKF_VALBLK set ?
	   If so, receive_rcom_lock_args() won't take this copy. */

	if (lkb->lkb_lvbptr)
		memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
}

/* Send one of our locks to the (new) master of its resource during
   recovery; the lvb is appended when the lkb has one. */
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	struct rcom_lock *rl;
	int error, len = sizeof(struct rcom_lock);

	if (lkb->lkb_lvbptr)
		len += ls->ls_lvblen;

	error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh);
	if (error)
		goto out;

	rl = (struct rcom_lock *) rc->rc_buf;
	pack_rcom_lock(r, lkb, rl);
	rc->rc_id = (unsigned long) r;

	send_rcom(ls, mh, rc);
 out:
	return error;
}

/* needs at least dlm_rcom + rcom_lock */
static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error, nodeid = rc_in->rc_header.h_nodeid;

	dlm_recover_master_copy(ls, rc_in);

	error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY,
			    sizeof(struct rcom_lock), &rc, &mh);
	if (error)
		return;

	/* We send back the same rcom_lock struct we received, but
	   dlm_recover_master_copy() has filled in rl_remid and rl_result */

	memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
	rc->rc_id = rc_in->rc_id;
	rc->rc_seq_reply = rc_in->rc_seq;

	send_rcom(ls, mh, rc);
}

/* If the lockspace doesn't exist then still send a status message
   back; it's possible that it just doesn't have its global_id yet.
 */

int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct rcom_config *rf;
	struct dlm_mhandle *mh;
	char *mb;
	int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);

	/* built by hand (no dlm_ls exists yet), so it can't use
	   create_rcom() */
	mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_NOFS, &mb);
	if (!mh)
		return -ENOBUFS;
	memset(mb, 0, mb_len);

	rc = (struct dlm_rcom *) mb;

	rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	rc->rc_header.h_lockspace = rc_in->rc_header.h_lockspace;
	rc->rc_header.h_nodeid = dlm_our_nodeid();
	rc->rc_header.h_length = mb_len;
	rc->rc_header.h_cmd = DLM_RCOM;

	rc->rc_type = DLM_RCOM_STATUS_REPLY;
	rc->rc_id = rc_in->rc_id;
	rc->rc_seq_reply = rc_in->rc_seq;
	/* -ESRCH tells dlm_rcom_status() on the requester that the
	   lockspace is not ready here */
	rc->rc_result = -ESRCH;

	rf = (struct rcom_config *) rc->rc_buf;
	rf->rf_lvblen = cpu_to_le32(~0U);

	dlm_rcom_out(rc);
	dlm_lowcomms_commit_buffer(mh);

	return 0;
}

/* Returns 1 when 'rc' is a reply whose rc_seq_reply does not match the
   current recovery sequence, i.e. a leftover from an earlier, aborted
   recovery pass that must be ignored. */
static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	uint64_t seq;
	int rv = 0;

	switch (rc->rc_type) {
	case DLM_RCOM_STATUS_REPLY:
	case DLM_RCOM_NAMES_REPLY:
	case DLM_RCOM_LOOKUP_REPLY:
	case DLM_RCOM_LOCK_REPLY:
		spin_lock(&ls->ls_recover_lock);
		seq = ls->ls_recover_seq;
		spin_unlock(&ls->ls_recover_lock);
		if (rc->rc_seq_reply != seq) {
			log_debug(ls, "ignoring old reply %x from %d "
				      "seq_reply %llx expect %llx",
				      rc->rc_type, rc->rc_header.h_nodeid,
				      (unsigned long long)rc->rc_seq_reply,
				      (unsigned long long)seq);
			rv = 1;
		}
	}
	return rv;
}

/* Called by dlm_recv; corresponds to dlm_receive_message() but special
   recovery-only comms are sent through here.
 */

void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
	/* minimum length for a message that carries an rcom_lock payload */
	int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock);

	/* while recovery is stopped only STATUS requests are serviced */
	if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
		log_debug(ls, "ignoring recovery message %x from %d",
			  rc->rc_type, nodeid);
		goto out;
	}

	if (is_old_reply(ls, rc))
		goto out;

	switch (rc->rc_type) {
	case DLM_RCOM_STATUS:
		receive_rcom_status(ls, rc);
		break;

	case DLM_RCOM_NAMES:
		receive_rcom_names(ls, rc);
		break;

	case DLM_RCOM_LOOKUP:
		receive_rcom_lookup(ls, rc);
		break;

	case DLM_RCOM_LOCK:
		if (rc->rc_header.h_length < lock_size)
			goto Eshort;
		receive_rcom_lock(ls, rc);
		break;

	case DLM_RCOM_STATUS_REPLY:
		receive_sync_reply(ls, rc);
		break;

	case DLM_RCOM_NAMES_REPLY:
		receive_sync_reply(ls, rc);
		break;

	case DLM_RCOM_LOOKUP_REPLY:
		receive_rcom_lookup_reply(ls, rc);
		break;

	case DLM_RCOM_LOCK_REPLY:
		if (rc->rc_header.h_length < lock_size)
			goto Eshort;
		dlm_recover_process_copy(ls, rc);
		break;

	default:
		log_error(ls, "receive_rcom bad type %d", rc->rc_type);
	}
 out:
	return;

Eshort:
	log_error(ls, "recovery message %x from %d is too short",
		  rc->rc_type, nodeid);
}
gpl-2.0
androthan/android_kernel_samsung_hugo_legacy
drivers/isdn/hardware/eicon/dadapter.c
5139
14853
/*
 * Copyright (c) Eicon Networks, 2002.
 *
 * This source file is supplied for the use with
 * Eicon Networks range of DIVA Server Adapters.
 *
 * Eicon File Revision :    2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "platform.h"
#include "pc.h"
#include "debuglib.h"
#include "di_defs.h"
#include "divasync.h"
#include "dadapter.h"

/* --------------------------------------------------------------------------
   Adapter array change notification framework
   -------------------------------------------------------------------------- */
/* One registered "adapter array changed" listener: the callback plus an
   opaque client context passed back on every notification. */
typedef struct _didd_adapter_change_notification {
	didd_adapter_change_callback_t callback;
	void IDI_CALL_ENTITY_T *context;
} didd_adapter_change_notification_t,
  *IDI_CALL_ENTITY_T pdidd_adapter_change_notification_t;

#define DIVA_DIDD_MAX_NOTIFICATIONS 256

/* Table of registered change listeners; slots with a NULL callback are
   free.  Guarded by didd_spin (defined below). */
static didd_adapter_change_notification_t
		NotificationTable[DIVA_DIDD_MAX_NOTIFICATIONS];

/* --------------------------------------------------------------------------
   Array to held adapter information
   -------------------------------------------------------------------------- */
static DESCRIPTOR HandleTable[NEW_MAX_DESCRIPTORS];
static dword Adapters = 0; /* Number of adapters */

/* --------------------------------------------------------------------------
   Shadow IDI_DIMAINT and 'shadow' debug stuff
   -------------------------------------------------------------------------- */
/* Fallback dprintf used while no debug module (IDI_DIMAINT) is loaded.
   Compiles to a no-op unless EBUG is defined (via -DEBUG). */
static void no_printf (unsigned char *format, ...)
{
#ifdef EBUG
	va_list ap;
	va_start (ap, format);
	debug((format, ap));
	va_end (ap);
#endif
}

/* -------------------------------------------------------------------------
   Portable debug Library
   ------------------------------------------------------------------------- */
#include "debuglib.c"

/* Shadow descriptor for the debug module; its request pointer is swapped
   between no_printf and the real dprintf as DIMAINT loads/unloads. */
static DESCRIPTOR MAdapter = {IDI_DIMAINT,	/* Adapter Type */
			      0x00,		/* Channels */
			      0x0000,		/* Features */
			      (IDI_CALL)no_printf};

/* --------------------------------------------------------------------------
   DAdapter. Only IDI clients with buffer, that is huge enough to
   get all descriptors will receive information about DAdapter
   { byte type, byte channels, word features, IDI_CALL request }
   -------------------------------------------------------------------------- */
static void IDI_CALL_LINK_T diva_dadapter_request (ENTITY IDI_CALL_ENTITY_T *);
static DESCRIPTOR DAdapter = {IDI_DADAPTER,	/* Adapter Type */
			      0x00,		/* Channels */
			      0x0000,		/* Features */
			      diva_dadapter_request };

/* --------------------------------------------------------------------------
   LOCALS
   -------------------------------------------------------------------------- */
static dword diva_register_adapter_callback (
				didd_adapter_change_callback_t callback,
				void IDI_CALL_ENTITY_T *context);
static void diva_remove_adapter_callback (dword handle);
static void diva_notify_adapter_change (DESCRIPTOR *d, int removal);
/* Protects HandleTable/Adapters and NotificationTable. */
static diva_os_spin_lock_t didd_spin;

/* --------------------------------------------------------------------------
   Should be called as first step, after driver init
   -------------------------------------------------------------------------- */
void diva_didd_load_time_init (void) {
	memset (&HandleTable[0], 0x00, sizeof(HandleTable));
	memset (&NotificationTable[0], 0x00, sizeof(NotificationTable));
	diva_os_initialize_spin_lock (&didd_spin, "didd");
}

/*
 --------------------------------------------------------------------------
   Should be called as last step, if driver does unload
   -------------------------------------------------------------------------- */
void diva_didd_load_time_finit (void) {
	diva_os_destroy_spin_lock (&didd_spin, "didd");
}

/* --------------------------------------------------------------------------
   Called in order to register new adapter in adapter array
   return adapter handle (> 0) on success
   return -1 on adapter array overflow
   -------------------------------------------------------------------------- */
static int diva_didd_add_descriptor (DESCRIPTOR *d) {
	diva_os_spin_lock_magic_t irql;
	int i;
	/* the debug module is not stored in HandleTable; it only updates
	   the MAdapter shadow descriptor and the global dprintf hook */
	if (d->type == IDI_DIMAINT) {
		if (d->request) {
			MAdapter.request = d->request;
			dprintf = (DIVA_DI_PRINTF)d->request;
			diva_notify_adapter_change (&MAdapter, 0); /* Inserted */
			DBG_TRC (("DIMAINT registered, dprintf=%08x", d->request))
		} else {
			DBG_TRC (("DIMAINT removed"))
			diva_notify_adapter_change (&MAdapter, 1); /* About to remove */
			MAdapter.request = (IDI_CALL)no_printf;
			dprintf = no_printf;
		}
		return (NEW_MAX_DESCRIPTORS);
	}
	for (i = 0; i < NEW_MAX_DESCRIPTORS; i++) {
		diva_os_enter_spin_lock (&didd_spin, &irql, "didd_add");
		if (HandleTable[i].type == 0) {
			memcpy (&HandleTable[i], d, sizeof(*d));
			Adapters++;
			/* notify outside the spinlock: callbacks may
			   re-enter DIDD (e.g. read the adapter array) */
			diva_os_leave_spin_lock (&didd_spin, &irql, "didd_add");
			diva_notify_adapter_change (d, 0); /* we have new adapter */
			DBG_TRC (("Add adapter[%d], request=%08x", (i+1), d->request))
			return (i+1);
		}
		diva_os_leave_spin_lock (&didd_spin, &irql, "didd_add");
	}
	DBG_ERR (("Can't add adapter, out of resources"))
	return (-1);
}

/* --------------------------------------------------------------------------
   Called in order to remove one registered adapter from array
   return 0 on success
   return -1 if the request pointer was not found
   -------------------------------------------------------------------------- */
static int diva_didd_remove_descriptor (IDI_CALL request) {
	diva_os_spin_lock_magic_t irql;
	int i;
	if (request == MAdapter.request) {
		DBG_TRC(("DIMAINT removed"))
		dprintf = no_printf;
		diva_notify_adapter_change (&MAdapter, 1); /* About to remove */
		MAdapter.request = (IDI_CALL)no_printf;
		return (0);
	}
	for (i = 0; (Adapters && (i < NEW_MAX_DESCRIPTORS)); i++) {
		if (HandleTable[i].request == request) {
			diva_notify_adapter_change (&HandleTable[i], 1); /* About to remove */
			diva_os_enter_spin_lock (&didd_spin, &irql, "didd_rm");
			memset (&HandleTable[i], 0x00, sizeof(HandleTable[0]));
			Adapters--;
			diva_os_leave_spin_lock (&didd_spin, &irql, "didd_rm");
			DBG_TRC (("Remove adapter[%d], request=%08x", (i+1), request))
			return (0);
		}
	}
	DBG_ERR (("Invalid request=%08x, can't remove adapter", request))
	return (-1);
}

/* --------------------------------------------------------------------------
   Read adapter array
   return 1 if not enough space to save all available adapters
   -------------------------------------------------------------------------- */
static int diva_didd_read_adapter_array (DESCRIPTOR *buffer, int length) {
	diva_os_spin_lock_magic_t irql;
	int src, dst;
	memset (buffer, 0x00, length);
	length /= sizeof(DESCRIPTOR);
	DBG_TRC (("DIDD_Read, space = %d, Adapters = %d", length, Adapters+2))
	diva_os_enter_spin_lock (&didd_spin, &irql, "didd_read");
	for (src = 0, dst = 0;
	     (Adapters && (src < NEW_MAX_DESCRIPTORS) && (dst < length));
	     src++) {
		if (HandleTable[src].type) {
			memcpy (&buffer[dst], &HandleTable[src], sizeof(DESCRIPTOR));
			dst++;
		}
	}
	diva_os_leave_spin_lock (&didd_spin, &irql, "didd_read");
	/* the debug (MAdapter) and DAdapter descriptors are always
	   appended after the real adapters, space permitting */
	if (dst < length) {
		memcpy (&buffer[dst], &MAdapter, sizeof(DESCRIPTOR));
		dst++;
	} else {
		DBG_ERR (("Can't write DIMAINT. Array too small"))
	}
	if (dst < length) {
		memcpy (&buffer[dst], &DAdapter, sizeof(DESCRIPTOR));
		dst++;
	} else {
		DBG_ERR (("Can't write DADAPTER. Array too small"))
	}
	DBG_TRC (("Read %d adapters", dst))
	return (dst == length);
}

/* --------------------------------------------------------------------------
   DAdapter request function.
   This function does process only synchronous requests, and is used
   for reception/registration of new interfaces
   -------------------------------------------------------------------------- */
static void IDI_CALL_LINK_T diva_dadapter_request (ENTITY IDI_CALL_ENTITY_T *e) {
	IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
	/* a non-zero Req marks an asynchronous request, which DAdapter
	   does not support */
	if (e->Req) { /* We do not process it, also return error */
		e->Rc = OUT_OF_RESOURCES;
		DBG_ERR (("Can't process async request, Req=%02x", e->Req))
		return;
	}
	/* So, we process sync request; the sync opcode is carried in e->Rc,
	   and 0xff is written back there to signal success */
	switch (e->Rc) {
	case IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY: {
		diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
		pinfo->handle = diva_register_adapter_callback (
				(didd_adapter_change_callback_t)pinfo->callback,
				(void IDI_CALL_ENTITY_T *)pinfo->context);
		e->Rc = 0xff;
	} break;
	case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY: {
		diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
		diva_remove_adapter_callback (pinfo->handle);
		e->Rc = 0xff;
	} break;
	case IDI_SYNC_REQ_DIDD_ADD_ADAPTER: {
		diva_didd_add_adapter_t *pinfo = &syncReq->didd_add_adapter.info;
		if (diva_didd_add_descriptor ((DESCRIPTOR *)pinfo->descriptor) < 0) {
			e->Rc = OUT_OF_RESOURCES;
		} else {
			e->Rc = 0xff;
		}
	} break;
	case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER: {
		diva_didd_remove_adapter_t *pinfo = &syncReq->didd_remove_adapter.info;
		if (diva_didd_remove_descriptor ((IDI_CALL)pinfo->p_request) < 0) {
			e->Rc = OUT_OF_RESOURCES;
		} else {
			e->Rc = 0xff;
		}
	} break;
	case IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY: {
		diva_didd_read_adapter_array_t *pinfo =
				&syncReq->didd_read_adapter_array.info;
		if (diva_didd_read_adapter_array ((DESCRIPTOR *)pinfo->buffer,
						  (int)pinfo->length)) {
			e->Rc = OUT_OF_RESOURCES;
		} else {
			e->Rc = 0xff;
		}
	} break;
	default:
		DBG_ERR (("Can't process sync request, Req=%02x", e->Rc))
		e->Rc = OUT_OF_RESOURCES;
	}
}

/* --------------------------------------------------------------------------
   IDI client does register his notification function
   -------------------------------------------------------------------------- */
/* Store (callback, context) in the first free NotificationTable slot.
   Returns a 1-based handle, or 0 when the table is full. */
static dword diva_register_adapter_callback (
				didd_adapter_change_callback_t callback,
				void IDI_CALL_ENTITY_T *context) {
	diva_os_spin_lock_magic_t irql;
	dword i;

	for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
		diva_os_enter_spin_lock (&didd_spin, &irql, "didd_nfy_add");
		if (!NotificationTable[i].callback) {
			NotificationTable[i].callback = callback;
			NotificationTable[i].context = context;
			diva_os_leave_spin_lock (&didd_spin, &irql, "didd_nfy_add");
			DBG_TRC(("Register adapter notification[%d]=%08x", i+1, callback))
			return (i+1);
		}
		diva_os_leave_spin_lock (&didd_spin, &irql, "didd_nfy_add");
	}
	DBG_ERR (("Can't register adapter notification, overflow"))
	return (0);
}

/* --------------------------------------------------------------------------
   IDI client does unregister his notification function
   -------------------------------------------------------------------------- */
static void diva_remove_adapter_callback (dword handle) {
	diva_os_spin_lock_magic_t irql;
	/* handle is 1-based; --handle converts it back to a table index
	   while also rejecting handle==0 */
	if (handle && ((--handle) < DIVA_DIDD_MAX_NOTIFICATIONS)) {
		diva_os_enter_spin_lock (&didd_spin, &irql, "didd_nfy_rm");
		NotificationTable[handle].callback = NULL;
		NotificationTable[handle].context = NULL;
		diva_os_leave_spin_lock (&didd_spin, &irql, "didd_nfy_rm");
		DBG_TRC(("Remove adapter notification[%d]", (int)(handle+1)))
		return;
	}
	DBG_ERR(("Can't remove adapter notification, handle=%d", handle))
}

/* --------------------------------------------------------------------------
   Notify all client about adapter array change
   Does suppose following behavior in the client side:
   Step 1: Register Notification
   Step 2: Read Adapter Array
   -------------------------------------------------------------------------- */
static void diva_notify_adapter_change (DESCRIPTOR *d, int removal) {
	int i, do_notify;
	didd_adapter_change_notification_t nfy;
	diva_os_spin_lock_magic_t irql;
	for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
		do_notify = 0;
		/* copy the entry under the lock, call it outside the lock
		   so callbacks may safely re-enter DIDD */
		diva_os_enter_spin_lock (&didd_spin, &irql, "didd_nfy");
		if (NotificationTable[i].callback) {
			memcpy (&nfy, &NotificationTable[i], sizeof(nfy));
			do_notify = 1;
		}
		diva_os_leave_spin_lock (&didd_spin, &irql, "didd_nfy");
		if (do_notify) {
			(*(nfy.callback))(nfy.context, d, removal);
		}
	}
}

/* --------------------------------------------------------------------------
   For all systems that are linked by the Kernel Mode Linker this is the
   ONLY function that should be exported by this device driver.
   IDI clients should look for IDI_DADAPTER, and use the request function
   of this adapter (sync request) in order to receive appropriate services:
   - add new adapter
   - remove existing adapter
   - add adapter array notification
   - remove adapter array notification
   (read adapter array is redundant in this case)
   INPUT:
     buffer - pointer to buffer that will receive adapter array
     length - length (in bytes) of space in buffer
   OUTPUT:
     Adapter array will be written to memory described by 'buffer'.
     If the last adapter seen in the returned adapter array is
     IDI_DADAPTER, or if the last adapter in the array has type '0',
     then there was enough space in buffer to accommodate all available
     adapter descriptors.
   *NOTE 1 (debug interface):
     The IDI adapter of type 'IDI_DIMAINT' does register as 'request'
     the famous 'dprintf' function (of type DI_PRINTF, please look at
     include/debuglib.c and include/debuglib.h for details).
     So dprintf is not exported from the debug module directly;
     instead of this IDI_DIMAINT is registered.
     Module load order will be in this case:
       1. DIDD (this file)
       2. DIMAINT does load and register 'IDI_DIMAINT'; at this step
          DIDD should be able to get 'dprintf', save it, and register
          with DIDD by means of the 'dprintf' function.
       3. any other driver is loaded and is able to access adapter
          array and debug interface
     This approach does allow to load/unload the debug interface on
     demand, and save memory, if it is necessary.
   -------------------------------------------------------------------------- */
/* Exported entry point: copy the current adapter descriptor array
   (real adapters + MAdapter + DAdapter) into the caller's buffer. */
void IDI_CALL_LINK_T DIVA_DIDD_Read (void IDI_CALL_ENTITY_T *buffer,
				     int length) {
	diva_didd_read_adapter_array (buffer, length);
}
gpl-2.0
mlehtima/android_kernel_semc_msm7x30
drivers/rtc/rtc-sh.c
5139
20627
/*
 * SuperH On-Chip RTC Support
 *
 * Copyright (C) 2006 - 2009  Paul Mundt
 * Copyright (C) 2006  Jamie Lenehan
 * Copyright (C) 2008  Angelo Castello
 *
 * Based on the old arch/sh/kernel/cpu/rtc.c by:
 *
 *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
 *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <asm/rtc.h>

#define DRV_NAME	"sh-rtc"
#define DRV_VERSION	"0.2.3"

/* Register offsets scale with the part's register width (rtc_reg_size). */
#define RTC_REG(r)	((r) * rtc_reg_size)

#define R64CNT		RTC_REG(0)

#define RSECCNT		RTC_REG(1)	/* RTC sec */
#define RMINCNT		RTC_REG(2)	/* RTC min */
#define RHRCNT		RTC_REG(3)	/* RTC hour */
#define RWKCNT		RTC_REG(4)	/* RTC week */
#define RDAYCNT		RTC_REG(5)	/* RTC day */
#define RMONCNT		RTC_REG(6)	/* RTC month */
#define RYRCNT		RTC_REG(7)	/* RTC year */
#define RSECAR		RTC_REG(8)	/* ALARM sec */
#define RMINAR		RTC_REG(9)	/* ALARM min */
#define RHRAR		RTC_REG(10)	/* ALARM hour */
#define RWKAR		RTC_REG(11)	/* ALARM week */
#define RDAYAR		RTC_REG(12)	/* ALARM day */
#define RMONAR		RTC_REG(13)	/* ALARM month */
#define RCR1		RTC_REG(14)	/* Control */
#define RCR2		RTC_REG(15)	/* Control */

/*
 * Note on RYRAR and RCR3: Up until this point most of the register
 * definitions are consistent across all of the available parts. However,
 * the placement of the optional RYRAR and RCR3 (the RYRAR control
 * register used to control RYRCNT/RYRAR compare) varies considerably
 * across various parts, occasionally being mapped in to a completely
 * unrelated address space.
For proper RYRAR support a separate resource
 * would have to be handed off, but as this is purely optional in
 * practice, we simply opt not to support it, thereby keeping the code
 * quite a bit more simplified.
 */

/* ALARM Bits - or with BCD encoded value */
#define AR_ENB		0x80	/* Enable for alarm cmp */

/* Period Bits */
#define PF_HP		0x100	/* Enable Half Period to support 8,32,128Hz */
#define PF_COUNT	0x200	/* Half periodic counter */
#define PF_OXS		0x400	/* Periodic One x Second */
#define PF_KOU		0x800	/* Kernel or User periodic request 1=kernel */
#define PF_MASK		0xf00

/* RCR1 Bits */
#define RCR1_CF		0x80	/* Carry Flag */
#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
#define RCR1_AF		0x01	/* Alarm Flag */

/* RCR2 Bits */
#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
#define RCR2_PESMASK	0x70	/* Periodic interrupt Set */
#define RCR2_RTCEN	0x08	/* ENable RTC */
#define RCR2_ADJ	0x04	/* ADJustment (30-second) */
#define RCR2_RESET	0x02	/* Reset bit */
#define RCR2_START	0x01	/* Start bit */

/* Per-device driver state, one instance per platform device. */
struct sh_rtc {
	void __iomem *regbase;		/* ioremapped register window */
	unsigned long regsize;		/* size of the register window */
	struct resource *res;		/* mem region claimed in probe */
	int alarm_irq;			/* dedicated alarm IRQ */
	int periodic_irq;		/* periodic IRQ (also the shared IRQ) */
	int carry_irq;			/* carry IRQ; <= 0 => shared-IRQ mode */
	struct clk *clk;		/* optional RTC clock, may be NULL */
	struct rtc_device *rtc_dev;	/* registered RTC class device */
	spinlock_t lock;		/* serializes register access */
	unsigned long capabilities;	/* See asm/rtc.h for cap bits */
	unsigned short periodic_freq;	/* cached RCR2 PES bits | PF_* flags */
};

/*
 * Carry-interrupt handler core: acknowledge RCR1_CF and, when the user
 * requested a one-per-second update IRQ (PF_OXS), forward it as RTC_UF.
 * Caller holds rtc->lock.  Returns non-zero if the interrupt was ours.
 */
static int __sh_rtc_interrupt(struct sh_rtc *rtc)
{
	unsigned int tmp, pending;

	tmp = readb(rtc->regbase + RCR1);
	pending = tmp & RCR1_CF;
	tmp &= ~RCR1_CF;
	writeb(tmp, rtc->regbase + RCR1);

	/* Users have requested One x Second IRQ */
	if (pending && rtc->periodic_freq & PF_OXS)
		rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);

	return pending;
}

/*
 * Alarm-interrupt handler core: acknowledge RCR1_AF, disable further
 * alarm interrupts and notify the RTC core.  Caller holds rtc->lock.
 * Returns non-zero if the interrupt was ours.
 */
static int __sh_rtc_alarm(struct sh_rtc *rtc)
{
	unsigned int tmp, pending;

	tmp = readb(rtc->regbase + RCR1);
	pending = tmp & RCR1_AF;
	tmp &= ~(RCR1_AF | RCR1_AIE);
	writeb(tmp, rtc->regbase + RCR1);

	if (pending)
		rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);

	return pending;
}

/*
 * Periodic-interrupt handler core: acknowledge RCR2_PEF and dispatch the
 * tick either to a kernel irq_task (PF_KOU) or to userspace as RTC_PF.
 * In half-period mode (PF_HP) only every second hardware tick is
 * delivered, emulating the 8/32/128 Hz rates the hardware lacks.
 * Caller holds rtc->lock.  Returns non-zero if the interrupt was ours.
 */
static int
__sh_rtc_periodic(struct sh_rtc *rtc)
{
	struct rtc_device *rtc_dev = rtc->rtc_dev;
	struct rtc_task *irq_task;
	unsigned int tmp, pending;

	tmp = readb(rtc->regbase + RCR2);
	pending = tmp & RCR2_PEF;
	tmp &= ~RCR2_PEF;
	writeb(tmp, rtc->regbase + RCR2);

	if (!pending)
		return 0;

	/* Half-period mode: skip one tick, notify on the next */
	if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT))
		rtc->periodic_freq &= ~PF_COUNT;
	else {
		if (rtc->periodic_freq & PF_HP)
			rtc->periodic_freq |= PF_COUNT;
		if (rtc->periodic_freq & PF_KOU) {
			/* in-kernel consumer registered via irq_task */
			spin_lock(&rtc_dev->irq_task_lock);
			irq_task = rtc_dev->irq_task;
			if (irq_task)
				irq_task->func(irq_task->private_data);
			spin_unlock(&rtc_dev->irq_task_lock);
		} else
			rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
	}

	return pending;
}

/* IRQ thunk for the dedicated carry interrupt line. */
static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_interrupt(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

/* IRQ thunk for the dedicated alarm interrupt line. */
static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_alarm(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

/* IRQ thunk for the dedicated periodic interrupt line. */
static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_periodic(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

/*
 * Shared-IRQ thunk used when carry/alarm/periodic all arrive on one
 * line: poll each source in turn, OR their "handled" results.
 */
static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_interrupt(rtc);
	ret |= __sh_rtc_alarm(rtc);
	ret |= __sh_rtc_periodic(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

/*
 * Enable/disable the kernel-side periodic interrupt (irq_task path).
 * On enable, programs the cached PES2-0 rate bits into RCR2 and marks
 * the request as kernel-owned (PF_KOU).  Always returns 0.
 */
static int sh_rtc_irq_set_state(struct device *dev, int enable)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	spin_lock_irq(&rtc->lock);

	tmp = readb(rtc->regbase + RCR2);

	if (enable) {
		rtc->periodic_freq |= PF_KOU;
		tmp &= ~RCR2_PEF;	/* Clear PES bit */
		tmp |= (rtc->periodic_freq & ~PF_HP);	/* Set PES2-0 */
	} else {
		rtc->periodic_freq &= ~PF_KOU;
		tmp &= ~(RCR2_PESMASK | RCR2_PEF);
	}

	writeb(tmp, rtc->regbase + RCR2);

	spin_unlock_irq(&rtc->lock);

	return 0;
}

/*
 * Map a requested periodic frequency (Hz) onto the hardware PES2-0
 * encoding; 8/32/128 Hz are synthesized via half-period mode (PF_HP).
 * Only caches the value - the rate is programmed by irq_set_state().
 * Returns 0 on success, -ENOTSUPP for unsupported frequencies.
 */
static int sh_rtc_irq_set_freq(struct device *dev, int freq)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	int tmp, ret = 0;

	spin_lock_irq(&rtc->lock);
	tmp = rtc->periodic_freq & PF_MASK;

	switch (freq) {
	case 0:
		rtc->periodic_freq = 0x00;
		break;
	case 1:
		rtc->periodic_freq = 0x60;
		break;
	case 2:
		rtc->periodic_freq = 0x50;
		break;
	case 4:
		rtc->periodic_freq = 0x40;
		break;
	case 8:
		rtc->periodic_freq = 0x30 | PF_HP;
		break;
	case 16:
		rtc->periodic_freq = 0x30;
		break;
	case 32:
		rtc->periodic_freq = 0x20 | PF_HP;
		break;
	case 64:
		rtc->periodic_freq = 0x20;
		break;
	case 128:
		rtc->periodic_freq = 0x10 | PF_HP;
		break;
	case 256:
		rtc->periodic_freq = 0x10;
		break;
	default:
		ret = -ENOTSUPP;
	}

	if (ret == 0)
		rtc->periodic_freq |= tmp;	/* keep previous flag bits */

	spin_unlock_irq(&rtc->lock);
	return ret;
}

/* Set/clear the Alarm Interrupt Enable bit in RCR1. */
static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	spin_lock_irq(&rtc->lock);

	tmp = readb(rtc->regbase + RCR1);

	if (enable)
		tmp |= RCR1_AIE;
	else
		tmp &= ~RCR1_AIE;

	writeb(tmp, rtc->regbase + RCR1);

	spin_unlock_irq(&rtc->lock);
}

/* /proc reporting hook: show carry/periodic interrupt enable state. */
static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	tmp = readb(rtc->regbase + RCR1);
	seq_printf(seq, "carry_IRQ\t: %s\n", (tmp & RCR1_CIE) ? "yes" : "no");

	tmp = readb(rtc->regbase + RCR2);
	seq_printf(seq, "periodic_IRQ\t: %s\n",
		   (tmp & RCR2_PESMASK) ? "yes" : "no");

	return 0;
}

/* Set/clear the Carry Interrupt Enable bit in RCR1. */
static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	spin_lock_irq(&rtc->lock);

	tmp = readb(rtc->regbase + RCR1);

	if (!enable)
		tmp &= ~RCR1_CIE;
	else
		tmp |= RCR1_CIE;

	writeb(tmp, rtc->regbase + RCR1);

	spin_unlock_irq(&rtc->lock);
}

/* rtc_class_ops alarm_irq_enable hook; thin wrapper over setaie(). */
static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	sh_rtc_setaie(dev, enabled);
	return 0;
}

/*
 * Read the current time from the BCD counter registers.  The loop
 * re-reads until no carry occurred mid-read (RCR1_CF stays clear and
 * the 128Hz counter's inverted bit did not flip), so the snapshot is
 * consistent.  Returns the result of rtc_valid_tm() on the value read.
 */
static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_rtc *rtc = platform_get_drvdata(pdev);
	unsigned int sec128, sec2, yr, yr100, cf_bit;

	do {
		unsigned int tmp;

		spin_lock_irq(&rtc->lock);

		tmp = readb(rtc->regbase + RCR1);
		tmp &= ~RCR1_CF; /* Clear CF-bit */
		tmp |= RCR1_CIE;
		writeb(tmp, rtc->regbase + RCR1);

		sec128 = readb(rtc->regbase + R64CNT);

		tm->tm_sec	= bcd2bin(readb(rtc->regbase + RSECCNT));
		tm->tm_min	= bcd2bin(readb(rtc->regbase + RMINCNT));
		tm->tm_hour	= bcd2bin(readb(rtc->regbase + RHRCNT));
		tm->tm_wday	= bcd2bin(readb(rtc->regbase + RWKCNT));
		tm->tm_mday	= bcd2bin(readb(rtc->regbase + RDAYCNT));
		tm->tm_mon	= bcd2bin(readb(rtc->regbase + RMONCNT)) - 1;

		if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
			yr = readw(rtc->regbase + RYRCNT);
			yr100 = bcd2bin(yr >> 8);
			yr &= 0xff;
		} else {
			/* 2-digit year: 0x99 assumed to mean 19xx */
			yr = readb(rtc->regbase + RYRCNT);
			yr100 = bcd2bin((yr == 0x99) ? 0x19 : 0x20);
		}

		tm->tm_year = (yr100 * 100 + bcd2bin(yr)) - 1900;

		sec2 = readb(rtc->regbase + R64CNT);
		cf_bit = readb(rtc->regbase + RCR1) & RCR1_CF;

		spin_unlock_irq(&rtc->lock);
	} while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);

#if RTC_BIT_INVERTED != 0
	if ((sec128 & RTC_BIT_INVERTED))
		tm->tm_sec--;
#endif

	/* only keep the carry interrupt enabled if UIE is on */
	if (!(rtc->periodic_freq & PF_OXS))
		sh_rtc_setcie(dev, 0);

	dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__func__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);

	return rtc_valid_tm(tm);
}

/*
 * Program the counter registers from *tm.  The RTC is stopped and its
 * prescaler reset while the BCD registers are loaded, then restarted.
 * Always returns 0.
 */
static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_rtc *rtc = platform_get_drvdata(pdev);
	unsigned int tmp;
	int year;

	spin_lock_irq(&rtc->lock);

	/* Reset pre-scaler & stop RTC */
	tmp = readb(rtc->regbase + RCR2);
	tmp |= RCR2_RESET;
	tmp &= ~RCR2_START;
	writeb(tmp, rtc->regbase + RCR2);

	writeb(bin2bcd(tm->tm_sec),  rtc->regbase + RSECCNT);
	writeb(bin2bcd(tm->tm_min),  rtc->regbase + RMINCNT);
	writeb(bin2bcd(tm->tm_hour), rtc->regbase + RHRCNT);
	writeb(bin2bcd(tm->tm_wday), rtc->regbase + RWKCNT);
	writeb(bin2bcd(tm->tm_mday), rtc->regbase + RDAYCNT);
	writeb(bin2bcd(tm->tm_mon + 1), rtc->regbase + RMONCNT);

	if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
		year = (bin2bcd((tm->tm_year + 1900) / 100) << 8) |
			bin2bcd(tm->tm_year % 100);
		writew(year, rtc->regbase + RYRCNT);
	} else {
		year = tm->tm_year % 100;
		writeb(bin2bcd(year), rtc->regbase + RYRCNT);
	}

	/* Start RTC */
	tmp = readb(rtc->regbase + RCR2);
	tmp &= ~RCR2_RESET;
	tmp |= RCR2_RTCEN | RCR2_START;
	writeb(tmp, rtc->regbase + RCR2);

	spin_unlock_irq(&rtc->lock);

	return 0;
}

/*
 * Read one alarm register: if its enable bit is set, return the BCD
 * value decoded to binary, otherwise 0xff ("field ignored").
 */
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
	unsigned int byte;
	int value = 0xff;	/* return 0xff for ignored values */

	byte = readb(rtc->regbase + reg_off);
	if (byte & AR_ENB) {
		byte &= ~AR_ENB;	/* strip the enable bit */
		value = bcd2bin(byte);
	}

	return value;
}

/*
 * rtc_class_ops read_alarm hook.  Fields the hardware treats as
 * "don't care" come back as 0xff; the hardware has no alarm year, so
 * tm_year is reported as the 0xffff sentinel.  Always returns 0.
 */
static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_rtc *rtc = platform_get_drvdata(pdev);
	struct rtc_time *tm = &wkalrm->time;

	spin_lock_irq(&rtc->lock);

	tm->tm_sec	= sh_rtc_read_alarm_value(rtc, RSECAR);
	tm->tm_min	= sh_rtc_read_alarm_value(rtc, RMINAR);
	tm->tm_hour	= sh_rtc_read_alarm_value(rtc, RHRAR);
	tm->tm_wday	= sh_rtc_read_alarm_value(rtc, RWKAR);
	tm->tm_mday	= sh_rtc_read_alarm_value(rtc, RDAYAR);
	tm->tm_mon	= sh_rtc_read_alarm_value(rtc, RMONAR);
	if (tm->tm_mon > 0)
		tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
	tm->tm_year     = 0xffff;

	wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;

	spin_unlock_irq(&rtc->lock);

	return 0;
}

/*
 * Write one alarm register: a negative value disables matching on the
 * field, otherwise the value is stored BCD-encoded with AR_ENB set.
 */
static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
					    int value, int reg_off)
{
	/* < 0 for a value that is ignored */
	if (value < 0)
		writeb(0, rtc->regbase + reg_off);
	else
		writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off);
}

/*
 * Normalize and validate an alarm time.  Returns 0 if usable,
 * -EINVAL otherwise; fields are rewritten to -1 for "don't care".
 */
static int sh_rtc_check_alarm(struct rtc_time *tm)
{
	/*
	 * The original rtc says anything > 0xc0 is "don't care" or "match
	 * all" - most users use 0xff but rtc-dev uses -1 for the same thing.
	 * The original rtc doesn't support years - some things use -1 and
	 * some 0xffff. We use -1 to make our tests easier.
	 */
	if (tm->tm_year == 0xffff)
		tm->tm_year = -1;
	if (tm->tm_mon >= 0xff)
		tm->tm_mon = -1;
	if (tm->tm_mday >= 0xff)
		tm->tm_mday = -1;
	if (tm->tm_wday >= 0xff)
		tm->tm_wday = -1;
	if (tm->tm_hour >= 0xff)
		tm->tm_hour = -1;
	if (tm->tm_min >= 0xff)
		tm->tm_min = -1;
	if (tm->tm_sec >= 0xff)
		tm->tm_sec = -1;

	if (tm->tm_year > 9999 ||
		tm->tm_mon >= 12 ||
		tm->tm_mday == 0 || tm->tm_mday >= 32 ||
		tm->tm_wday >= 7 ||
		tm->tm_hour >= 24 ||
		tm->tm_min >= 60 ||
		tm->tm_sec >= 60)
		return -EINVAL;

	return 0;
}

/*
 * rtc_class_ops set_alarm hook: validate the request, clear any
 * pending alarm, program the per-field alarm registers, then re-enable
 * the alarm interrupt if requested.
 */
static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_rtc *rtc = platform_get_drvdata(pdev);
	unsigned int rcr1;
	struct rtc_time *tm = &wkalrm->time;
	int mon, err;

	err = sh_rtc_check_alarm(tm);
	if (unlikely(err < 0))
		return err;

	spin_lock_irq(&rtc->lock);

	/* disable alarm interrupt and clear the alarm flag */
	rcr1 = readb(rtc->regbase + RCR1);
	rcr1 &= ~(RCR1_AF | RCR1_AIE);
	writeb(rcr1, rtc->regbase + RCR1);

	/* set alarm time */
	sh_rtc_write_alarm_value(rtc, tm->tm_sec,  RSECAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_min,  RMINAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR);
	mon = tm->tm_mon;
	if (mon >= 0)
		mon += 1;	/* tm_mon is 0-11, RTC wants 1-12 */
	sh_rtc_write_alarm_value(rtc, mon, RMONAR);

	if (wkalrm->enabled) {
		rcr1 |= RCR1_AIE;
		writeb(rcr1, rtc->regbase + RCR1);
	}

	spin_unlock_irq(&rtc->lock);

	return 0;
}

/* RTC class operations exposed to the RTC core. */
static struct rtc_class_ops sh_rtc_ops = {
	.read_time	= sh_rtc_read_time,
	.set_time	= sh_rtc_set_time,
	.read_alarm	= sh_rtc_read_alarm,
	.set_alarm	= sh_rtc_set_alarm,
	.proc		= sh_rtc_proc,
	.alarm_irq_enable = sh_rtc_alarm_irq_enable,
};

/*
 * Probe: map registers, grab the (shared or per-source) IRQs, enable
 * the optional clock, register with the RTC core and reset the clock
 * to epoch 0 if the stored time is invalid.
 */
static int __init sh_rtc_probe(struct platform_device *pdev)
{
	struct sh_rtc *rtc;
	struct resource *res;
	struct rtc_time r;
	/* NOTE(review): 6 bytes only fits "rtc0".."rtc9" plus NUL -
	 * presumably ids above 9 never occur on this SoC; confirm. */
	char clk_name[6];
	int clk_id, ret;

	rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL);
	if (unlikely(!rtc))
		return -ENOMEM;

	spin_lock_init(&rtc->lock);

	/* get periodic/carry/alarm irqs */
	ret = platform_get_irq(pdev, 0);
	if (unlikely(ret <= 0)) {
		ret = -ENOENT;
		dev_err(&pdev->dev, "No IRQ resource\n");
		goto err_badres;
	}

	rtc->periodic_irq = ret;
	rtc->carry_irq = platform_get_irq(pdev, 1);
	rtc->alarm_irq = platform_get_irq(pdev, 2);

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (unlikely(res == NULL)) {
		ret = -ENOENT;
		dev_err(&pdev->dev, "No IO resource\n");
		goto err_badres;
	}

	rtc->regsize = resource_size(res);

	rtc->res = request_mem_region(res->start, rtc->regsize, pdev->name);
	if (unlikely(!rtc->res)) {
		ret = -EBUSY;
		goto err_badres;
	}

	rtc->regbase = ioremap_nocache(rtc->res->start, rtc->regsize);
	if (unlikely(!rtc->regbase)) {
		ret = -EINVAL;
		goto err_badmap;
	}

	clk_id = pdev->id;
	/* With a single device, the clock id is still "rtc0" */
	if (clk_id < 0)
		clk_id = 0;

	snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);

	rtc->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(rtc->clk)) {
		/*
		 * No error handling for rtc->clk intentionally, not all
		 * platforms will have a unique clock for the RTC, and
		 * the clk API can handle the struct clk pointer being
		 * NULL.
		 */
		rtc->clk = NULL;
	}

	clk_enable(rtc->clk);

	rtc->capabilities = RTC_DEF_CAPABILITIES;
	if (pdev->dev.platform_data) {
		struct sh_rtc_platform_info *pinfo = pdev->dev.platform_data;

		/*
		 * Some CPUs have special capabilities in addition to the
		 * default set. Add those in here.
		 */
		rtc->capabilities |= pinfo->capabilities;
	}

	if (rtc->carry_irq <= 0) {
		/* register shared periodic/carry/alarm irq */
		ret = request_irq(rtc->periodic_irq, sh_rtc_shared,
				  0, "sh-rtc", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request IRQ failed with %d, IRQ %d\n", ret,
				rtc->periodic_irq);
			goto err_unmap;
		}
	} else {
		/* register periodic/carry/alarm irqs */
		ret = request_irq(rtc->periodic_irq, sh_rtc_periodic,
				  0, "sh-rtc period", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request period IRQ failed with %d, IRQ %d\n",
				ret, rtc->periodic_irq);
			goto err_unmap;
		}

		ret = request_irq(rtc->carry_irq, sh_rtc_interrupt,
				  0, "sh-rtc carry", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request carry IRQ failed with %d, IRQ %d\n",
				ret, rtc->carry_irq);
			free_irq(rtc->periodic_irq, rtc);
			goto err_unmap;
		}

		ret = request_irq(rtc->alarm_irq, sh_rtc_alarm,
				  0, "sh-rtc alarm", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request alarm IRQ failed with %d, IRQ %d\n",
				ret, rtc->alarm_irq);
			free_irq(rtc->carry_irq, rtc);
			free_irq(rtc->periodic_irq, rtc);
			goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, rtc);

	/* everything disabled by default */
	sh_rtc_irq_set_freq(&pdev->dev, 0);
	sh_rtc_irq_set_state(&pdev->dev, 0);
	sh_rtc_setaie(&pdev->dev, 0);
	sh_rtc_setcie(&pdev->dev, 0);

	rtc->rtc_dev = rtc_device_register("sh", &pdev->dev,
					   &sh_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc_dev)) {
		ret = PTR_ERR(rtc->rtc_dev);
		/*
		 * NOTE(review): in shared-IRQ mode (carry_irq <= 0) the
		 * carry/alarm IRQs were never requested, yet they are
		 * freed here - looks like a latent bug; verify against
		 * upstream rtc-sh.c before relying on this path.
		 */
		free_irq(rtc->periodic_irq, rtc);
		free_irq(rtc->carry_irq, rtc);
		free_irq(rtc->alarm_irq, rtc);
		goto err_unmap;
	}

	rtc->rtc_dev->max_user_freq = 256;

	/* reset rtc to epoch 0 if time is invalid */
	if (rtc_read_time(rtc->rtc_dev, &r) < 0) {
		rtc_time_to_tm(0, &r);
		rtc_set_time(rtc->rtc_dev, &r);
	}

	device_init_wakeup(&pdev->dev, 1);
	return 0;

err_unmap:
	clk_disable(rtc->clk);
	clk_put(rtc->clk);
	iounmap(rtc->regbase);
err_badmap:
	release_mem_region(rtc->res->start, rtc->regsize);
err_badres:
	kfree(rtc);

	return ret;
}

/* Tear down everything probe set up, in reverse order. */
static int __exit sh_rtc_remove(struct platform_device *pdev)
{
	struct sh_rtc *rtc = platform_get_drvdata(pdev);

	rtc_device_unregister(rtc->rtc_dev);
	sh_rtc_irq_set_state(&pdev->dev, 0);

	sh_rtc_setaie(&pdev->dev, 0);
	sh_rtc_setcie(&pdev->dev, 0);

	free_irq(rtc->periodic_irq, rtc);

	if (rtc->carry_irq > 0) {
		free_irq(rtc->carry_irq, rtc);
		free_irq(rtc->alarm_irq, rtc);
	}

	iounmap(rtc->regbase);
	release_mem_region(rtc->res->start, rtc->regsize);

	clk_disable(rtc->clk);
	clk_put(rtc->clk);

	platform_set_drvdata(pdev, NULL);

	kfree(rtc);

	return 0;
}

/* Arm/disarm the RTC IRQs as system wakeup sources. */
static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_rtc *rtc = platform_get_drvdata(pdev);

	irq_set_irq_wake(rtc->periodic_irq, enabled);
	if (rtc->carry_irq > 0) {
		irq_set_irq_wake(rtc->carry_irq, enabled);
		irq_set_irq_wake(rtc->alarm_irq, enabled);
	}
}

/* PM suspend hook: enable IRQ wake if the device may wake the system. */
static int sh_rtc_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		sh_rtc_set_irq_wake(dev, 1);

	return 0;
}

/* PM resume hook: undo the wake enable done at suspend. */
static int sh_rtc_resume(struct device *dev)
{
	if (device_may_wakeup(dev))
		sh_rtc_set_irq_wake(dev, 0);

	return 0;
}

static const struct dev_pm_ops sh_rtc_dev_pm_ops = {
	.suspend = sh_rtc_suspend,
	.resume = sh_rtc_resume,
};

/* No .probe here: registration goes through platform_driver_probe(). */
static struct platform_driver sh_rtc_platform_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &sh_rtc_dev_pm_ops,
	},
	.remove		= __exit_p(sh_rtc_remove),
};

static int __init sh_rtc_init(void)
{
	return platform_driver_probe(&sh_rtc_platform_driver, sh_rtc_probe);
}

static void __exit sh_rtc_exit(void)
{
	platform_driver_unregister(&sh_rtc_platform_driver);
}

module_init(sh_rtc_init);
module_exit(sh_rtc_exit);

MODULE_DESCRIPTION("SuperH on-chip RTC driver");
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, "
	      "Jamie Lenehan <lenehan@twibble.org>, "
	      "Angelo Castello <angelo.castello@st.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
DooMLoRD/android_kernel_sony_msm8974ab
drivers/pps/clients/pps_parport.c
5395
6180
/*
 * pps_parport.c -- kernel parallel port PPS client
 *
 *
 * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * TODO:
 * implement echo over SEL pin
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irqnr.h>
#include <linux/time.h>
#include <linux/parport.h>
#include <linux/pps_kernel.h>

#define DRVDESC "parallel port PPS client"

/* module parameters */

#define CLEAR_WAIT_MAX		100
#define CLEAR_WAIT_MAX_ERRORS	5

static unsigned int clear_wait = 100;
MODULE_PARM_DESC(clear_wait,
	"Maximum number of port reads when polling for signal clear,"
	" zero turns clear edge capture off entirely");
module_param(clear_wait, uint, 0);


/* internal per port structure */
struct pps_client_pp {
	struct pardevice *pardev;	/* parport device */
	struct pps_device *pps;		/* PPS device */
	unsigned int cw;		/* port clear timeout */
	unsigned int cw_err;		/* number of timeouts */
};

/* Test the ACK status line; the PPS pulse arrives on this pin. */
static inline int signal_is_set(struct parport *port)
{
	return (port->ops->read_status(port) &
			PARPORT_STATUS_ACK) != 0;
}

/* parport interrupt handler */
static void parport_irq(void *handle)
{
	struct pps_event_time ts_assert, ts_clear;
	struct pps_client_pp *dev = handle;
	struct parport *port = dev->pardev->port;
	unsigned int i;
	unsigned long flags;

	/* first of all we get the time stamp... */
	pps_get_ts(&ts_assert);

	if (dev->cw == 0)
		/* clear edge capture disabled */
		goto out_assert;

	/* try capture the clear edge */

	/* We have to disable interrupts here. The idea is to prevent
	 * other interrupts on the same processor to introduce random
	 * lags while polling the port. Reading from IO port is known
	 * to take approximately 1us while other interrupt handlers can
	 * take much more potentially.
	 *
	 * Interrupts won't be disabled for a long time because the
	 * number of polls is limited by clear_wait parameter which is
	 * kept rather low. So it should never be an issue.
	 */
	local_irq_save(flags);
	/* check the signal (no signal means the pulse is lost this time) */
	if (!signal_is_set(port)) {
		local_irq_restore(flags);
		dev_err(dev->pps->dev, "lost the signal\n");
		goto out_assert;
	}

	/* poll the port until the signal is unset */
	for (i = dev->cw; i; i--)
		if (!signal_is_set(port)) {
			pps_get_ts(&ts_clear);
			local_irq_restore(flags);
			dev->cw_err = 0;
			goto out_both;
		}
	local_irq_restore(flags);

	/* timeout: too many consecutive misses disables clear capture */
	dev->cw_err++;
	if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
		dev_err(dev->pps->dev, "disabled clear edge capture after %d"
				" timeouts\n", dev->cw_err);
		dev->cw = 0;
		dev->cw_err = 0;
	}

out_assert:
	/* fire assert event */
	pps_event(dev->pps, &ts_assert,
			PPS_CAPTUREASSERT, NULL);
	return;

out_both:
	/* fire assert event */
	pps_event(dev->pps, &ts_assert,
			PPS_CAPTUREASSERT, NULL);
	/* fire clear event */
	pps_event(dev->pps, &ts_clear,
			PPS_CAPTURECLEAR, NULL);
	return;
}

/*
 * parport attach callback: claim the port exclusively, register a PPS
 * source for it and enable the port interrupt.  On any failure the
 * partially-initialized state is rolled back and the port is skipped.
 */
static void parport_attach(struct parport *port)
{
	struct pps_client_pp *device;
	struct pps_source_info info = {
		.name		= KBUILD_MODNAME,
		.path		= "",
		.mode		= PPS_CAPTUREBOTH | \
				  PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
				  PPS_ECHOASSERT | PPS_ECHOCLEAR | \
				  PPS_CANWAIT | PPS_TSFMT_TSPEC,
		.owner		= THIS_MODULE,
		.dev		= NULL
	};

	device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
	if (!device) {
		pr_err("memory allocation failed, not attaching\n");
		return;
	}

	device->pardev = parport_register_device(port, KBUILD_MODNAME,
			NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
	if (!device->pardev) {
		pr_err("couldn't register with %s\n", port->name);
		goto err_free;
	}

	if (parport_claim_or_block(device->pardev) < 0) {
		pr_err("couldn't claim %s\n", port->name);
		goto err_unregister_dev;
	}

	device->pps = pps_register_source(&info,
			PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
	if (device->pps == NULL) {
		pr_err("couldn't register PPS source\n");
		goto err_release_dev;
	}

	device->cw = clear_wait;

	port->ops->enable_irq(port);

	pr_info("attached to %s\n", port->name);

	return;

err_release_dev:
	parport_release(device->pardev);
err_unregister_dev:
	parport_unregister_device(device->pardev);
err_free:
	kfree(device);
}

/*
 * parport detach callback: identify our device on the port (by driver
 * name), then undo everything parport_attach() did.
 */
static void parport_detach(struct parport *port)
{
	struct pardevice *pardev = port->cad;
	struct pps_client_pp *device;

	/* FIXME: oooh, this is ugly! */
	if (strcmp(pardev->name, KBUILD_MODNAME))
		/* not our port */
		return;

	device = pardev->private;

	port->ops->disable_irq(port);
	pps_unregister_source(device->pps);
	parport_release(pardev);
	parport_unregister_device(pardev);
	kfree(device);
}

static struct parport_driver pps_parport_driver = {
	.name = KBUILD_MODNAME,
	.attach = parport_attach,
	.detach = parport_detach,
};

/* module staff */

static int __init pps_parport_init(void)
{
	int ret;

	pr_info(DRVDESC "\n");

	if (clear_wait > CLEAR_WAIT_MAX) {
		pr_err("clear_wait value should be not greater"
				" then %d\n", CLEAR_WAIT_MAX);
		return -EINVAL;
	}

	ret = parport_register_driver(&pps_parport_driver);
	if (ret) {
		pr_err("unable to register with parport\n");
		return ret;
	}

	return  0;
}

static void __exit pps_parport_exit(void)
{
	parport_unregister_driver(&pps_parport_driver);
}

module_init(pps_parport_init);
module_exit(pps_parport_exit);

MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
MODULE_DESCRIPTION(DRVDESC);
MODULE_LICENSE("GPL");
gpl-2.0
cameron581/android_kernel_lge_msm8974
drivers/net/mii.c
7955
12578
/*

	mii.c: MII interface library

	Maintained by Jeff Garzik <jgarzik@pobox.com>
	Copyright 2001,2002 Jeff Garzik

	Various code came from myson803.c and other files by
	Donald Becker.  Copyright:

		Written 1998-2002 by Donald Becker.

		This software may be used and distributed according
		to the terms of the GNU General Public License (GPL),
		incorporated herein by reference.  Drivers based on
		or derived from this code fall under the GPL and must
		retain the authorship, copyright and license notice.
		This file is not a complete program and may only be
		used when the entire operating system is licensed
		under the GPL.

		The author may be reached as becker@scyld.com, or C/O
		Scyld Computing Corporation
		410 Severn Ave., Suite 210
		Annapolis MD 21403

 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/*
 * Read an autonegotiation register (MII_ADVERTISE or MII_LPA) and
 * translate it to the corresponding ethtool ADVERTISED_* bitmap.
 */
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
	int advert;

	advert = mii->mdio_read(mii->dev, mii->phy_id, addr);

	return mii_lpa_to_ethtool_lpa_t(advert);
}

/**
 * mii_ethtool_gset - get settings that are specified in @ecmd
 * @mii: MII interface
 * @ecmd: requested ethtool_cmd
 *
 * The @ecmd parameter is expected to have been cleared before calling
 * mii_ethtool_gset().
 *
 * Returns 0 for success, negative on error.
 */
int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
	struct net_device *dev = mii->dev;
	u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0;
	u32 nego;

	ecmd->supported =
	    (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
	     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
	     SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
	if (mii->supports_gmii)
		ecmd->supported |= SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* only supports twisted-pair */
	ecmd->port = PORT_MII;

	/* only supports internal transceiver */
	ecmd->transceiver = XCVR_INTERNAL;

	/* this isn't fully supported at higher layers */
	ecmd->phy_address = mii->phy_id;
	ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22;

	ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
	bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR);
	if (mii->supports_gmii) {
		ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
		stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
	}
	if (bmcr & BMCR_ANENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;

		ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
		if (mii->supports_gmii)
			ecmd->advertising |=
					mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

		if (bmsr & BMSR_ANEGCOMPLETE) {
			ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
			ecmd->lp_advertising |=
					mii_stat1000_to_ethtool_lpa_t(stat1000);
		} else {
			ecmd->lp_advertising = 0;
		}

		/* negotiated result = intersection of both sides */
		nego = ecmd->advertising & ecmd->lp_advertising;

		if (nego & (ADVERTISED_1000baseT_Full |
			    ADVERTISED_1000baseT_Half)) {
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full);
		} else if (nego & (ADVERTISED_100baseT_Full |
				   ADVERTISED_100baseT_Half)) {
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full);
		} else {
			ethtool_cmd_speed_set(ecmd, SPEED_10);
			ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full);
		}
	} else {
		/* forced mode: derive speed/duplex directly from BMCR */
		ecmd->autoneg = AUTONEG_DISABLE;

		ethtool_cmd_speed_set(ecmd,
				      ((bmcr & BMCR_SPEED1000 &&
					(bmcr & BMCR_SPEED100) == 0) ?
				       SPEED_1000 :
				       ((bmcr & BMCR_SPEED100) ?
					SPEED_100 : SPEED_10)));
		ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	mii->full_duplex = ecmd->duplex;

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}

/**
 * mii_ethtool_sset - set settings that are specified in @ecmd
 * @mii: MII interface
 * @ecmd: requested ethtool_cmd
 *
 * Returns 0 for success, negative on error.
 */
int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
	struct net_device *dev = mii->dev;
	u32 speed = ethtool_cmd_speed(ecmd);

	if (speed != SPEED_10 &&
	    speed != SPEED_100 &&
	    speed != SPEED_1000)
		return -EINVAL;
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != mii->phy_id)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if ((speed == SPEED_1000) && (!mii->supports_gmii))
		return -EINVAL;

	/* ignore supported, maxtxpkt, maxrxpkt */

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 bmcr, advert, tmp;
		u32 advert2 = 0, tmp2 = 0;

		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full |
					  ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full)) == 0)
			return -EINVAL;

		/* advertise only what has been requested */
		advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
		tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (mii->supports_gmii) {
			advert2 = mii->mdio_read(dev, mii->phy_id,
						 MII_CTRL1000);
			tmp2 = advert2 &
				~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}
		tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);

		if (mii->supports_gmii)
			tmp2 |=
			      ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
		/* only write registers whose contents would change */
		if (advert != tmp) {
			mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
			mii->advertising = tmp;
		}
		if ((mii->supports_gmii) && (advert2 != tmp2))
			mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);

		/* turn on autonegotiation, and force a renegotiate */
		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);

		mii->force_media = 0;
	} else {
		u32 bmcr, tmp;

		/* turn off auto negotiation, set speed and duplexity */
		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
		tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
			       BMCR_SPEED1000 | BMCR_FULLDPLX);
		if (speed == SPEED_1000)
			tmp |= BMCR_SPEED1000;
		else if (speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (ecmd->duplex == DUPLEX_FULL) {
			tmp |= BMCR_FULLDPLX;
			mii->full_duplex = 1;
		} else
			mii->full_duplex = 0;
		if (bmcr != tmp)
			mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);

		mii->force_media = 1;
	}
	return 0;
}

/**
 * mii_check_gmii_support - check if the MII supports Gb interfaces
 * @mii: the MII interface
 */
int mii_check_gmii_support(struct mii_if_info *mii)
{
	int reg;

	reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
	if (reg & BMSR_ESTATEN) {
		reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS);
		if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF))
			return 1;
	}

	return 0;
}

/**
 * mii_link_ok - is link status up/ok
 * @mii: the MII interface
 *
 * Returns 1 if the MII reports link status up/ok, 0 otherwise.
 */
int mii_link_ok (struct mii_if_info *mii)
{
	/* first, a dummy read, needed to latch some MII phys */
	mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
	if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
		return 1;
	return 0;
}

/**
 * mii_nway_restart - restart NWay (autonegotiation) for this interface
 * @mii: the MII interface
 *
 * Returns 0 on success, negative on error.
 */
int mii_nway_restart (struct mii_if_info *mii)
{
	int bmcr;
	int r = -EINVAL;

	/* if autoneg is off, it's an error */
	bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE) {
		bmcr |= BMCR_ANRESTART;
		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
		r = 0;
	}

	return r;
}

/**
 * mii_check_link - check MII link status
 * @mii: MII interface
 *
 * If the link status changed (previous != current), call
 * netif_carrier_on() if current link status is Up or call
 * netif_carrier_off() if current link status is Down.
 */
void mii_check_link (struct mii_if_info *mii)
{
	int cur_link = mii_link_ok(mii);
	int prev_link = netif_carrier_ok(mii->dev);

	if (cur_link && !prev_link)
		netif_carrier_on(mii->dev);
	else if (prev_link && !cur_link)
		netif_carrier_off(mii->dev);
}

/**
 * mii_check_media - check the MII interface for a duplex change
 * @mii: the MII interface
 * @ok_to_print: OK to print link up/down messages
 * @init_media: OK to save duplex mode in @mii
 *
 * Returns 1 if the duplex mode changed, 0 if not.
 * If the media type is forced, always returns 0.
 */
unsigned int mii_check_media (struct mii_if_info *mii,
			      unsigned int ok_to_print,
			      unsigned int init_media)
{
	unsigned int old_carrier, new_carrier;
	int advertise, lpa, media, duplex;
	int lpa2 = 0;

	/* if forced media, go no further */
	if (mii->force_media)
		return 0; /* duplex did not change */

	/* check current and old link status */
	old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
	new_carrier = (unsigned int) mii_link_ok(mii);

	/* if carrier state did not change, this is a "bounce",
	 * just exit as everything is already set correctly
	 */
	if ((!init_media) && (old_carrier == new_carrier))
		return 0; /* duplex did not change */

	/* no carrier, nothing much to do */
	if (!new_carrier) {
		netif_carrier_off(mii->dev);
		if (ok_to_print)
			netdev_info(mii->dev, "link down\n");
		return 0; /* duplex did not change */
	}

	/*
	 * we have carrier, see who's on the other end
	 */
	netif_carrier_on(mii->dev);

	/* get MII advertise and LPA values */
	if ((!init_media) && (mii->advertising))
		advertise = mii->advertising;
	else {
		advertise = mii->mdio_read(mii->dev, mii->phy_id,
					   MII_ADVERTISE);
		mii->advertising = advertise;
	}
	lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
	if (mii->supports_gmii)
		lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);

	/* figure out media and duplex from advertise and LPA values */
	media = mii_nway_result(lpa & advertise);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	if (lpa2 & LPA_1000FULL)
		duplex = 1;

	if (ok_to_print)
		netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
			    lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
			    media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
			    100 : 10,
			    duplex ? "full" : "half",
			    lpa);

	if ((init_media) || (mii->full_duplex != duplex)) {
		mii->full_duplex = duplex;
		return 1; /* duplex changed */
	}

	return 0; /* duplex did not change */
}

/**
 * generic_mii_ioctl - main MII ioctl interface
 * @mii_if: the MII interface
 * @mii_data: MII ioctl data structure
 * @cmd: MII ioctl command
 * @duplex_chg_out: pointer to @duplex_changed status if there was no
 *	ioctl error
 *
 * Returns 0 on success, negative on error.
 */
int generic_mii_ioctl(struct mii_if_info *mii_if,
		      struct mii_ioctl_data *mii_data, int cmd,
		      unsigned int *duplex_chg_out)
{
	int rc = 0;
	unsigned int duplex_changed = 0;

	if (duplex_chg_out)
		*duplex_chg_out = 0;

	mii_data->phy_id &= mii_if->phy_id_mask;
	mii_data->reg_num &= mii_if->reg_num_mask;

	switch(cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = mii_if->phy_id;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out =
			mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
					  mii_data->reg_num);
		break;

	case SIOCSMIIREG: {
		u16 val = mii_data->val_in;

		if (mii_data->phy_id == mii_if->phy_id) {
			/* mirror writes to our PHY into cached state */
			switch(mii_data->reg_num) {
			case MII_BMCR: {
				unsigned int new_duplex = 0;
				if (val & (BMCR_RESET|BMCR_ANENABLE))
					mii_if->force_media = 0;
				else
					mii_if->force_media = 1;
				if (mii_if->force_media &&
				    (val & BMCR_FULLDPLX))
					new_duplex = 1;
				if (mii_if->full_duplex != new_duplex) {
					duplex_changed = 1;
					mii_if->full_duplex = new_duplex;
				}
				break;
			}
			case MII_ADVERTISE:
				mii_if->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
				   mii_data->reg_num, val);
		break;
	}

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
		*duplex_chg_out = 1;

	return rc;
}

MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION ("MII hardware support library");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mii_link_ok);
EXPORT_SYMBOL(mii_nway_restart);
EXPORT_SYMBOL(mii_ethtool_gset);
EXPORT_SYMBOL(mii_ethtool_sset);
EXPORT_SYMBOL(mii_check_link);
EXPORT_SYMBOL(mii_check_media);
EXPORT_SYMBOL(mii_check_gmii_support);
EXPORT_SYMBOL(generic_mii_ioctl);
gpl-2.0
spock1104/android_kernel_zte_msm8930
Documentation/accounting/getdelays.c
7955
13567
/* getdelays.c
 *
 * Utility to get per-pid and per-tgid delay accounting statistics
 * Also illustrates usage of the taskstats interface
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2005
 * Copyright (C) Balbir Singh, IBM Corp. 2006
 * Copyright (c) Jay Lan, SGI. 2006
 *
 * Compile with
 *	gcc -I/usr/src/linux/include getdelays.c -o getdelays
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>

#include <linux/genetlink.h>
#include <linux/taskstats.h>
#include <linux/cgroupstats.h>

/*
 * Generic macros for dealing with netlink sockets. Might be duplicated
 * elsewhere. It is recommended that commercial grade applications use
 * libnl or libnetlink and use the interfaces provided by the library
 */
#define GENLMSG_DATA(glh)	((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
#define GENLMSG_PAYLOAD(glh)	(NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
#define NLA_DATA(na)		((void *)((char*)(na) + NLA_HDRLEN))
#define NLA_PAYLOAD(len)	(len - NLA_HDRLEN)

/* print a message to stderr and exit with the given code */
#define err(code, fmt, arg...)			\
	do {					\
		fprintf(stderr, fmt, ##arg);	\
		exit(code);			\
	} while (0)

/* global option/state flags set from the command line */
int done;
int rcvbufsz;
char name[100];
int dbg;
int print_delays;
int print_io_accounting;
int print_task_context_switch_counts;

__u64 stime, utime;

/* debug print: only emits when -v was given */
#define PRINTF(fmt, arg...) {			\
	    if (dbg) {				\
		printf(fmt, ##arg);		\
	    }					\
	}

/* Maximum size of response requested or message sent */
#define MAX_MSG_SIZE	1024
/* Maximum number of cpus expected to be specified in a cpumask */
#define MAX_CPUS	32

/* generic netlink request/response buffer layout */
struct msgtemplate {
	struct nlmsghdr n;
	struct genlmsghdr g;
	char buf[MAX_MSG_SIZE];
};

char cpumask[100+6*MAX_CPUS];

static void usage(void)
{
	fprintf(stderr, "getdelays [-dilv] [-w logfile] [-r bufsize] "
			"[-m cpumask] [-t tgid] [-p pid]\n");
	fprintf(stderr, "  -d: print delayacct stats\n");
	fprintf(stderr, "  -i: print IO accounting (works only with -p)\n");
	fprintf(stderr, "  -l: listen forever\n");
	fprintf(stderr, "  -v: debug on\n");
	fprintf(stderr, "  -C: container path\n");
}

/*
 * Create a raw netlink socket and bind
 */
static int create_nl_socket(int protocol)
{
	int fd;
	struct sockaddr_nl local;

	fd = socket(AF_NETLINK, SOCK_RAW, protocol);
	if (fd < 0)
		return -1;

	if (rcvbufsz)
		if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
				&rcvbufsz, sizeof(rcvbufsz)) < 0) {
			fprintf(stderr, "Unable to set socket rcv buf size "
					"to %d\n",
				rcvbufsz);
			/* NOTE(review): fd is leaked on this path -
			 * should close(fd) (or goto error) first. */
			return -1;
		}

	memset(&local, 0, sizeof(local));
	local.nl_family = AF_NETLINK;

	if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
		goto error;

	return fd;
error:
	close(fd);
	return -1;
}

/*
 * Build and send one generic-netlink request carrying a single
 * attribute.  Loops on sendto() so short writes are resumed.
 * Returns 0 on success, -1 on a hard send error.
 */
static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
	     __u8 genl_cmd, __u16 nla_type,
	     void *nla_data, int nla_len)
{
	struct nlattr *na;
	struct sockaddr_nl nladdr;
	int r, buflen;
	char *buf;

	struct msgtemplate msg;

	msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
	msg.n.nlmsg_type = nlmsg_type;
	msg.n.nlmsg_flags = NLM_F_REQUEST;
	msg.n.nlmsg_seq = 0;
	msg.n.nlmsg_pid = nlmsg_pid;
	msg.g.cmd = genl_cmd;
	msg.g.version = 0x1;
	na = (struct nlattr *) GENLMSG_DATA(&msg);
	na->nla_type = nla_type;
	na->nla_len = nla_len + 1 + NLA_HDRLEN;
	memcpy(NLA_DATA(na), nla_data, nla_len);
	msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);

	buf = (char *) &msg;
	buflen = msg.n.nlmsg_len ;
	memset(&nladdr, 0, sizeof(nladdr));
	nladdr.nl_family = AF_NETLINK;
	while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
			   sizeof(nladdr))) < buflen) {
		if (r > 0) {
			buf += r;
			buflen -= r;
		} else if (errno != EAGAIN)
			return -1;
	}
	return 0;
}


/*
 * Probe the controller in genetlink to find the family id
 * for the TASKSTATS family
 */
static int get_family_id(int sd)
{
	struct {
		struct nlmsghdr n;
		struct genlmsghdr g;
		char buf[256];
	} ans;

	int id = 0, rc;
	struct nlattr *na;
	int rep_len;

	strcpy(name, TASKSTATS_GENL_NAME);
	rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
			CTRL_ATTR_FAMILY_NAME, (void *)name,
			strlen(TASKSTATS_GENL_NAME)+1);
	if (rc < 0)
		return 0;	/* sendto() failure? */

	rep_len = recv(sd, &ans, sizeof(ans), 0);
	if (ans.n.nlmsg_type == NLMSG_ERROR ||
	    (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
		return 0;

	/* family id is the second attribute in the reply */
	na = (struct nlattr *) GENLMSG_DATA(&ans);
	na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
	if (na->nla_type == CTRL_ATTR_FAMILY_ID) {
		id = *(__u16 *) NLA_DATA(na);
	}
	return id;
}

#define average_ms(t, c) (t / 1000000ULL / (c ?
c : 1)) static void print_delayacct(struct taskstats *t) { printf("\n\nCPU %15s%15s%15s%15s%15s\n" " %15llu%15llu%15llu%15llu%15.3fms\n" "IO %15s%15s%15s\n" " %15llu%15llu%15llums\n" "SWAP %15s%15s%15s\n" " %15llu%15llu%15llums\n" "RECLAIM %12s%15s%15s\n" " %15llu%15llu%15llums\n", "count", "real total", "virtual total", "delay total", "delay average", (unsigned long long)t->cpu_count, (unsigned long long)t->cpu_run_real_total, (unsigned long long)t->cpu_run_virtual_total, (unsigned long long)t->cpu_delay_total, average_ms((double)t->cpu_delay_total, t->cpu_count), "count", "delay total", "delay average", (unsigned long long)t->blkio_count, (unsigned long long)t->blkio_delay_total, average_ms(t->blkio_delay_total, t->blkio_count), "count", "delay total", "delay average", (unsigned long long)t->swapin_count, (unsigned long long)t->swapin_delay_total, average_ms(t->swapin_delay_total, t->swapin_count), "count", "delay total", "delay average", (unsigned long long)t->freepages_count, (unsigned long long)t->freepages_delay_total, average_ms(t->freepages_delay_total, t->freepages_count)); } static void task_context_switch_counts(struct taskstats *t) { printf("\n\nTask %15s%15s\n" " %15llu%15llu\n", "voluntary", "nonvoluntary", (unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw); } static void print_cgroupstats(struct cgroupstats *c) { printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, " "uninterruptible %llu\n", (unsigned long long)c->nr_sleeping, (unsigned long long)c->nr_io_wait, (unsigned long long)c->nr_running, (unsigned long long)c->nr_stopped, (unsigned long long)c->nr_uninterruptible); } static void print_ioacct(struct taskstats *t) { printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n", t->ac_comm, (unsigned long long)t->read_bytes, (unsigned long long)t->write_bytes, (unsigned long long)t->cancelled_write_bytes); } int main(int argc, char *argv[]) { int c, rc, rep_len, aggr_len, len2; int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC; __u16 
id; __u32 mypid; struct nlattr *na; int nl_sd = -1; int len = 0; pid_t tid = 0; pid_t rtid = 0; int fd = 0; int count = 0; int write_file = 0; int maskset = 0; char *logfile = NULL; int loop = 0; int containerset = 0; char containerpath[1024]; int cfd = 0; int forking = 0; sigset_t sigset; struct msgtemplate msg; while (!forking) { c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:"); if (c < 0) break; switch (c) { case 'd': printf("print delayacct stats ON\n"); print_delays = 1; break; case 'i': printf("printing IO accounting\n"); print_io_accounting = 1; break; case 'q': printf("printing task/process context switch rates\n"); print_task_context_switch_counts = 1; break; case 'C': containerset = 1; strncpy(containerpath, optarg, strlen(optarg) + 1); break; case 'w': logfile = strdup(optarg); printf("write to file %s\n", logfile); write_file = 1; break; case 'r': rcvbufsz = atoi(optarg); printf("receive buf size %d\n", rcvbufsz); if (rcvbufsz < 0) err(1, "Invalid rcv buf size\n"); break; case 'm': strncpy(cpumask, optarg, sizeof(cpumask)); maskset = 1; printf("cpumask %s maskset %d\n", cpumask, maskset); break; case 't': tid = atoi(optarg); if (!tid) err(1, "Invalid tgid\n"); cmd_type = TASKSTATS_CMD_ATTR_TGID; break; case 'p': tid = atoi(optarg); if (!tid) err(1, "Invalid pid\n"); cmd_type = TASKSTATS_CMD_ATTR_PID; break; case 'c': /* Block SIGCHLD for sigwait() later */ if (sigemptyset(&sigset) == -1) err(1, "Failed to empty sigset"); if (sigaddset(&sigset, SIGCHLD)) err(1, "Failed to set sigchld in sigset"); sigprocmask(SIG_BLOCK, &sigset, NULL); /* fork/exec a child */ tid = fork(); if (tid < 0) err(1, "Fork failed\n"); if (tid == 0) if (execvp(argv[optind - 1], &argv[optind - 1]) < 0) exit(-1); /* Set the command type and avoid further processing */ cmd_type = TASKSTATS_CMD_ATTR_PID; forking = 1; break; case 'v': printf("debug on\n"); dbg = 1; break; case 'l': printf("listen forever\n"); loop = 1; break; default: usage(); exit(-1); } } if (write_file) { fd = 
open(logfile, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (fd == -1) { perror("Cannot open output file\n"); exit(1); } } if ((nl_sd = create_nl_socket(NETLINK_GENERIC)) < 0) err(1, "error creating Netlink socket\n"); mypid = getpid(); id = get_family_id(nl_sd); if (!id) { fprintf(stderr, "Error getting family id, errno %d\n", errno); goto err; } PRINTF("family id %d\n", id); if (maskset) { rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, &cpumask, strlen(cpumask) + 1); PRINTF("Sent register cpumask, retval %d\n", rc); if (rc < 0) { fprintf(stderr, "error sending register cpumask\n"); goto err; } } if (tid && containerset) { fprintf(stderr, "Select either -t or -C, not both\n"); goto err; } /* * If we forked a child, wait for it to exit. Cannot use waitpid() * as all the delicious data would be reaped as part of the wait */ if (tid && forking) { int sig_received; sigwait(&sigset, &sig_received); } if (tid) { rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, cmd_type, &tid, sizeof(__u32)); PRINTF("Sent pid/tgid, retval %d\n", rc); if (rc < 0) { fprintf(stderr, "error sending tid/tgid cmd\n"); goto done; } } if (containerset) { cfd = open(containerpath, O_RDONLY); if (cfd < 0) { perror("error opening container file"); goto err; } rc = send_cmd(nl_sd, id, mypid, CGROUPSTATS_CMD_GET, CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32)); if (rc < 0) { perror("error sending cgroupstats command"); goto err; } } if (!maskset && !tid && !containerset) { usage(); goto err; } do { rep_len = recv(nl_sd, &msg, sizeof(msg), 0); PRINTF("received %d bytes\n", rep_len); if (rep_len < 0) { fprintf(stderr, "nonfatal reply error: errno %d\n", errno); continue; } if (msg.n.nlmsg_type == NLMSG_ERROR || !NLMSG_OK((&msg.n), rep_len)) { struct nlmsgerr *err = NLMSG_DATA(&msg); fprintf(stderr, "fatal reply error, errno %d\n", err->error); goto done; } PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n", sizeof(struct nlmsghdr), 
msg.n.nlmsg_len, rep_len); rep_len = GENLMSG_PAYLOAD(&msg.n); na = (struct nlattr *) GENLMSG_DATA(&msg); len = 0; while (len < rep_len) { len += NLA_ALIGN(na->nla_len); switch (na->nla_type) { case TASKSTATS_TYPE_AGGR_TGID: /* Fall through */ case TASKSTATS_TYPE_AGGR_PID: aggr_len = NLA_PAYLOAD(na->nla_len); len2 = 0; /* For nested attributes, na follows */ na = (struct nlattr *) NLA_DATA(na); done = 0; while (len2 < aggr_len) { switch (na->nla_type) { case TASKSTATS_TYPE_PID: rtid = *(int *) NLA_DATA(na); if (print_delays) printf("PID\t%d\n", rtid); break; case TASKSTATS_TYPE_TGID: rtid = *(int *) NLA_DATA(na); if (print_delays) printf("TGID\t%d\n", rtid); break; case TASKSTATS_TYPE_STATS: count++; if (print_delays) print_delayacct((struct taskstats *) NLA_DATA(na)); if (print_io_accounting) print_ioacct((struct taskstats *) NLA_DATA(na)); if (print_task_context_switch_counts) task_context_switch_counts((struct taskstats *) NLA_DATA(na)); if (fd) { if (write(fd, NLA_DATA(na), na->nla_len) < 0) { err(1,"write error\n"); } } if (!loop) goto done; break; default: fprintf(stderr, "Unknown nested" " nla_type %d\n", na->nla_type); break; } len2 += NLA_ALIGN(na->nla_len); na = (struct nlattr *) ((char *) na + len2); } break; case CGROUPSTATS_TYPE_CGROUP_STATS: print_cgroupstats(NLA_DATA(na)); break; default: fprintf(stderr, "Unknown nla_type %d\n", na->nla_type); case TASKSTATS_TYPE_NULL: break; } na = (struct nlattr *) (GENLMSG_DATA(&msg) + len); } } while (loop); done: if (maskset) { rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, &cpumask, strlen(cpumask) + 1); printf("Sent deregister mask, retval %d\n", rc); if (rc < 0) err(rc, "error sending deregister cpumask\n"); } err: close(nl_sd); if (fd) close(fd); if (cfd) close(cfd); return 0; }
gpl-2.0
davidmueller13/flo_kernel
drivers/scsi/bvme6000_scsi.c
9235
3367
/* * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux. * * Based on work by Alan Hourihane and Kars de Jong * * Rewritten to use 53c700.c by Richard Hirst <richard@sleepie.demon.co.uk> */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <asm/bvme6000hw.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("Richard Hirst <richard@sleepie.demon.co.uk>"); MODULE_DESCRIPTION("BVME6000 NCR53C710 driver"); MODULE_LICENSE("GPL"); static struct scsi_host_template bvme6000_scsi_driver_template = { .name = "BVME6000 NCR53c710 SCSI", .proc_name = "BVME6000", .this_id = 7, .module = THIS_MODULE, }; static struct platform_device *bvme6000_scsi_device; static __devinit int bvme6000_probe(struct platform_device *dev) { struct Scsi_Host *host; struct NCR_700_Host_Parameters *hostdata; if (!MACH_IS_BVME6000) goto out; hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); if (!hostdata) { printk(KERN_ERR "bvme6000-scsi: " "Failed to allocate host data\n"); goto out; } /* Fill in the required pieces of hostdata */ hostdata->base = (void __iomem *)BVME_NCR53C710_BASE; hostdata->clock = 40; /* XXX - depends on the CPU clock! 
*/ hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; hostdata->dcntl_extra = EA_710; hostdata->ctest7_extra = CTEST7_TT1; /* and register the chip */ host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, &dev->dev); if (!host) { printk(KERN_ERR "bvme6000-scsi: No host detected; " "board configuration problem?\n"); goto out_free; } host->base = BVME_NCR53C710_BASE; host->this_id = 7; host->irq = BVME_IRQ_SCSI; if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi", host)) { printk(KERN_ERR "bvme6000-scsi: request_irq failed\n"); goto out_put_host; } platform_set_drvdata(dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_free: kfree(hostdata); out: return -ENODEV; } static __devexit int bvme6000_device_remove(struct platform_device *dev) { struct Scsi_Host *host = platform_get_drvdata(dev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); scsi_remove_host(host); NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); return 0; } static struct platform_driver bvme6000_scsi_driver = { .driver = { .name = "bvme6000-scsi", .owner = THIS_MODULE, }, .probe = bvme6000_probe, .remove = __devexit_p(bvme6000_device_remove), }; static int __init bvme6000_scsi_init(void) { int err; err = platform_driver_register(&bvme6000_scsi_driver); if (err) return err; bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi", -1, NULL, 0); if (IS_ERR(bvme6000_scsi_device)) { platform_driver_unregister(&bvme6000_scsi_driver); return PTR_ERR(bvme6000_scsi_device); } return 0; } static void __exit bvme6000_scsi_exit(void) { platform_device_unregister(bvme6000_scsi_device); platform_driver_unregister(&bvme6000_scsi_driver); } module_init(bvme6000_scsi_init); module_exit(bvme6000_scsi_exit);
gpl-2.0
KlinkOnE/caf-port
drivers/media/video/cx25840/cx25840-vbi.c
9235
6978
/* cx25840 VBI functions * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/v4l2-common.h> #include <media/cx25840.h> #include "cx25840-core.h" static int odd_parity(u8 c) { c ^= (c >> 4); c ^= (c >> 2); c ^= (c >> 1); return c & 1; } static int decode_vps(u8 * dst, u8 * p) { static const u8 biphase_tbl[] = { 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1, 0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87, 0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3, 0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85, 0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1, 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1, 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0, 0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86, 0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2, 
0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84, 0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0, 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0, }; u8 c, err = 0; int i; for (i = 0; i < 2 * 13; i += 2) { err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]]; c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4); dst[i / 2] = c; } return err & 0xf0; } int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct cx25840_state *state = to_state(sd); static const u16 lcr2vbi[] = { 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */ 0, V4L2_SLICED_WSS_625, 0, /* 4 */ V4L2_SLICED_CAPTION_525, /* 6 */ 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */ 0, 0, 0, 0 }; int is_pal = !(state->std & V4L2_STD_525_60); int i; memset(svbi, 0, sizeof(*svbi)); /* we're done if raw VBI is active */ if ((cx25840_read(client, 0x404) & 0x10) == 0) return 0; if (is_pal) { for (i = 7; i <= 23; i++) { u8 v = cx25840_read(client, 0x424 + i - 7); svbi->service_lines[0][i] = lcr2vbi[v >> 4]; svbi->service_lines[1][i] = lcr2vbi[v & 0xf]; svbi->service_set |= svbi->service_lines[0][i] | svbi->service_lines[1][i]; } } else { for (i = 10; i <= 21; i++) { u8 v = cx25840_read(client, 0x424 + i - 10); svbi->service_lines[0][i] = lcr2vbi[v >> 4]; svbi->service_lines[1][i] = lcr2vbi[v & 0xf]; svbi->service_set |= svbi->service_lines[0][i] | svbi->service_lines[1][i]; } } return 0; } int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt) { struct i2c_client *client = v4l2_get_subdevdata(sd); 
struct cx25840_state *state = to_state(sd); int is_pal = !(state->std & V4L2_STD_525_60); int vbi_offset = is_pal ? 1 : 0; /* Setup standard */ cx25840_std_setup(client); /* VBI Offset */ cx25840_write(client, 0x47f, vbi_offset); cx25840_write(client, 0x404, 0x2e); return 0; } int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct cx25840_state *state = to_state(sd); int is_pal = !(state->std & V4L2_STD_525_60); int vbi_offset = is_pal ? 1 : 0; int i, x; u8 lcr[24]; for (x = 0; x <= 23; x++) lcr[x] = 0x00; /* Setup standard */ cx25840_std_setup(client); /* Sliced VBI */ cx25840_write(client, 0x404, 0x32); /* Ancillary data */ cx25840_write(client, 0x406, 0x13); cx25840_write(client, 0x47f, vbi_offset); if (is_pal) { for (i = 0; i <= 6; i++) svbi->service_lines[0][i] = svbi->service_lines[1][i] = 0; } else { for (i = 0; i <= 9; i++) svbi->service_lines[0][i] = svbi->service_lines[1][i] = 0; for (i = 22; i <= 23; i++) svbi->service_lines[0][i] = svbi->service_lines[1][i] = 0; } for (i = 7; i <= 23; i++) { for (x = 0; x <= 1; x++) { switch (svbi->service_lines[1-x][i]) { case V4L2_SLICED_TELETEXT_B: lcr[i] |= 1 << (4 * x); break; case V4L2_SLICED_WSS_625: lcr[i] |= 4 << (4 * x); break; case V4L2_SLICED_CAPTION_525: lcr[i] |= 6 << (4 * x); break; case V4L2_SLICED_VPS: lcr[i] |= 9 << (4 * x); break; } } } if (is_pal) { for (x = 1, i = 0x424; i <= 0x434; i++, x++) cx25840_write(client, i, lcr[6 + x]); } else { for (x = 1, i = 0x424; i <= 0x430; i++, x++) cx25840_write(client, i, lcr[9 + x]); for (i = 0x431; i <= 0x434; i++) cx25840_write(client, i, 0); } cx25840_write(client, 0x43c, 0x16); cx25840_write(client, 0x474, is_pal ? 
0x2a : 0x22); return 0; } int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi) { struct cx25840_state *state = to_state(sd); u8 *p = vbi->p; int id1, id2, l, err = 0; if (p[0] || p[1] != 0xff || p[2] != 0xff || (p[3] != 0x55 && p[3] != 0x91)) { vbi->line = vbi->type = 0; return 0; } p += 4; id1 = p[-1]; id2 = p[0] & 0xf; l = p[2] & 0x3f; l += state->vbi_line_offset; p += 4; switch (id2) { case 1: id2 = V4L2_SLICED_TELETEXT_B; break; case 4: id2 = V4L2_SLICED_WSS_625; break; case 6: id2 = V4L2_SLICED_CAPTION_525; err = !odd_parity(p[0]) || !odd_parity(p[1]); break; case 9: id2 = V4L2_SLICED_VPS; if (decode_vps(p, p) != 0) err = 1; break; default: id2 = 0; err = 1; break; } vbi->type = err ? 0 : id2; vbi->line = err ? 0 : l; vbi->is_second_field = err ? 0 : (id1 == 0x55); vbi->p = p; return 0; }
gpl-2.0
ArtisteHsu/jetson-tk1-r21.2-kernel
drivers/pci/hotplug/cpqphp_nvram.c
12563
13820
/* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/init.h> #include <asm/uaccess.h> #include "cpqphp.h" #include "cpqphp_nvram.h" #define ROM_INT15_PHY_ADDR 0x0FF859 #define READ_EV 0xD8A4 #define WRITE_EV 0xD8A5 struct register_foo { union { unsigned long lword; /* eax */ unsigned short word; /* ax */ struct { unsigned char low; /* al */ unsigned char high; /* ah */ } byte; } data; unsigned char opcode; /* see below */ unsigned long length; /* if the reg. 
is a pointer, how much data */ } __attribute__ ((packed)); struct all_reg { struct register_foo eax_reg; struct register_foo ebx_reg; struct register_foo ecx_reg; struct register_foo edx_reg; struct register_foo edi_reg; struct register_foo esi_reg; struct register_foo eflags_reg; } __attribute__ ((packed)); struct ev_hrt_header { u8 Version; u8 num_of_ctrl; u8 next; }; struct ev_hrt_ctrl { u8 bus; u8 device; u8 function; u8 mem_avail; u8 p_mem_avail; u8 io_avail; u8 bus_avail; u8 next; }; static u8 evbuffer_init; static u8 evbuffer_length; static u8 evbuffer[1024]; static void __iomem *compaq_int15_entry_point; /* lock for ordering int15_bios_call() */ static spinlock_t int15_lock; /* This is a series of function that deals with * setting & getting the hotplug resource table in some environment variable. */ /* * We really shouldn't be doing this unless there is a _very_ good reason to!!! * greg k-h */ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail) { u8 **tByte; if ((*used + 1) > *avail) return(1); *((u8*)*p_buffer) = value; tByte = (u8**)p_buffer; (*tByte)++; *used+=1; return(0); } static u32 add_dword( u32 **p_buffer, u32 value, u32 *used, u32 *avail) { if ((*used + 4) > *avail) return(1); **p_buffer = value; (*p_buffer)++; *used+=4; return(0); } /* * check_for_compaq_ROM * * this routine verifies that the ROM OEM string is 'COMPAQ' * * returns 0 for non-Compaq ROM, 1 for Compaq ROM */ static int check_for_compaq_ROM (void __iomem *rom_start) { u8 temp1, temp2, temp3, temp4, temp5, temp6; int result = 0; temp1 = readb(rom_start + 0xffea + 0); temp2 = readb(rom_start + 0xffea + 1); temp3 = readb(rom_start + 0xffea + 2); temp4 = readb(rom_start + 0xffea + 3); temp5 = readb(rom_start + 0xffea + 4); temp6 = readb(rom_start + 0xffea + 5); if ((temp1 == 'C') && (temp2 == 'O') && (temp3 == 'M') && (temp4 == 'P') && (temp5 == 'A') && (temp6 == 'Q')) { result = 1; } dbg ("%s - returned %d\n", __func__, result); return result; } static u32 access_EV 
(u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size) { unsigned long flags; int op = operation; int ret_val; if (!compaq_int15_entry_point) return -ENODEV; spin_lock_irqsave(&int15_lock, flags); __asm__ ( "xorl %%ebx,%%ebx\n" \ "xorl %%edx,%%edx\n" \ "pushf\n" \ "push %%cs\n" \ "cli\n" \ "call *%6\n" : "=c" (*buf_size), "=a" (ret_val) : "a" (op), "c" (*buf_size), "S" (ev_name), "D" (buffer), "m" (compaq_int15_entry_point) : "%ebx", "%edx"); spin_unlock_irqrestore(&int15_lock, flags); return((ret_val & 0xFF00) >> 8); } /* * load_HRT * * Read the hot plug Resource Table from NVRAM */ static int load_HRT (void __iomem *rom_start) { u32 available; u32 temp_dword; u8 temp_byte = 0xFF; u32 rc; if (!check_for_compaq_ROM(rom_start)) { return -ENODEV; } available = 1024; /* Now load the EV */ temp_dword = available; rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword); evbuffer_length = temp_dword; /* We're maintaining the resource lists so write FF to invalidate old * info */ temp_dword = 1; rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword); return rc; } /* * store_HRT * * Save the hot plug Resource Table in NVRAM */ static u32 store_HRT (void __iomem *rom_start) { u32 *buffer; u32 *pFill; u32 usedbytes; u32 available; u32 temp_dword; u32 rc; u8 loop; u8 numCtrl = 0; struct controller *ctrl; struct pci_resource *resNode; struct ev_hrt_header *p_EV_header; struct ev_hrt_ctrl *p_ev_ctrl; available = 1024; if (!check_for_compaq_ROM(rom_start)) { return(1); } buffer = (u32*) evbuffer; if (!buffer) return(1); pFill = buffer; usedbytes = 0; p_EV_header = (struct ev_hrt_header *) pFill; ctrl = cpqhp_ctrl_list; /* The revision of this structure */ rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available); if (rc) return(rc); /* The number of controllers */ rc = add_byte( &pFill, 1, &usedbytes, &available); if (rc) return(rc); while (ctrl) { p_ev_ctrl = (struct ev_hrt_ctrl *) pFill; numCtrl++; /* The bus number */ rc = add_byte( &pFill, ctrl->bus, 
&usedbytes, &available); if (rc) return(rc); /* The device Number */ rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available); if (rc) return(rc); /* The function Number */ rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available); if (rc) return(rc); /* Skip the number of available entries */ rc = add_dword( &pFill, 0, &usedbytes, &available); if (rc) return(rc); /* Figure out memory Available */ resNode = ctrl->mem_head; loop = 0; while (resNode) { loop ++; /* base */ rc = add_dword( &pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword( &pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->mem_avail = loop; /* Figure out prefetchable memory Available */ resNode = ctrl->p_mem_head; loop = 0; while (resNode) { loop ++; /* base */ rc = add_dword( &pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword( &pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->p_mem_avail = loop; /* Figure out IO Available */ resNode = ctrl->io_head; loop = 0; while (resNode) { loop ++; /* base */ rc = add_dword( &pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword( &pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->io_avail = loop; /* Figure out bus Available */ resNode = ctrl->bus_head; loop = 0; while (resNode) { loop ++; /* base */ rc = add_dword( &pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword( &pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->bus_avail = loop; ctrl = ctrl->next; } p_EV_header->num_of_ctrl = 
numCtrl; /* Now store the EV */ temp_dword = usedbytes; rc = access_EV(WRITE_EV, "CQTHPS", (u8*) buffer, &temp_dword); dbg("usedbytes = 0x%x, length = 0x%x\n", usedbytes, temp_dword); evbuffer_length = temp_dword; if (rc) { err(msg_unable_to_save); return(1); } return(0); } void compaq_nvram_init (void __iomem *rom_start) { if (rom_start) { compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); } dbg("int15 entry = %p\n", compaq_int15_entry_point); /* initialize our int15 lock */ spin_lock_init(&int15_lock); } int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl) { u8 bus, device, function; u8 nummem, numpmem, numio, numbus; u32 rc; u8 *p_byte; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; struct ev_hrt_ctrl *p_ev_ctrl; struct ev_hrt_header *p_EV_header; if (!evbuffer_init) { /* Read the resource list information in from NVRAM */ if (load_HRT(rom_start)) memset (evbuffer, 0, 1024); evbuffer_init = 1; } /* If we saved information in NVRAM, use it now */ p_EV_header = (struct ev_hrt_header *) evbuffer; /* The following code is for systems where version 1.0 of this * driver has been loaded, but doesn't support the hardware. * In that case, the driver would incorrectly store something * in NVRAM. 
*/ if ((p_EV_header->Version == 2) || ((p_EV_header->Version == 1) && !ctrl->push_flag)) { p_byte = &(p_EV_header->next); p_ev_ctrl = (struct ev_hrt_ctrl *) &(p_EV_header->next); p_byte += 3; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) return 2; bus = p_ev_ctrl->bus; device = p_ev_ctrl->device; function = p_ev_ctrl->function; while ((bus != ctrl->bus) || (device != PCI_SLOT(ctrl->pci_dev->devfn)) || (function != PCI_FUNC(ctrl->pci_dev->devfn))) { nummem = p_ev_ctrl->mem_avail; numpmem = p_ev_ctrl->p_mem_avail; numio = p_ev_ctrl->io_avail; numbus = p_ev_ctrl->bus_avail; p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) return 2; /* Skip forward to the next entry */ p_byte += (nummem + numpmem + numio + numbus) * 8; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) return 2; p_ev_ctrl = (struct ev_hrt_ctrl *) p_byte; p_byte += 3; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) return 2; bus = p_ev_ctrl->bus; device = p_ev_ctrl->device; function = p_ev_ctrl->function; } nummem = p_ev_ctrl->mem_avail; numpmem = p_ev_ctrl->p_mem_avail; numio = p_ev_ctrl->io_avail; numbus = p_ev_ctrl->bus_avail; p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) return 2; while (nummem--) { mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!mem_node) break; mem_node->base = *(u32*)p_byte; dbg("mem base = %8.8x\n",mem_node->base); p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(mem_node); return 2; } mem_node->length = *(u32*)p_byte; dbg("mem length = %8.8x\n",mem_node->length); p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(mem_node); return 2; } mem_node->next = ctrl->mem_head; ctrl->mem_head = mem_node; } while (numpmem--) { p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!p_mem_node) break; p_mem_node->base = *(u32*)p_byte; dbg("pre-mem base = %8.8x\n",p_mem_node->base); p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(p_mem_node); return 2; } 
p_mem_node->length = *(u32*)p_byte; dbg("pre-mem length = %8.8x\n",p_mem_node->length); p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(p_mem_node); return 2; } p_mem_node->next = ctrl->p_mem_head; ctrl->p_mem_head = p_mem_node; } while (numio--) { io_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!io_node) break; io_node->base = *(u32*)p_byte; dbg("io base = %8.8x\n",io_node->base); p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(io_node); return 2; } io_node->length = *(u32*)p_byte; dbg("io length = %8.8x\n",io_node->length); p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(io_node); return 2; } io_node->next = ctrl->io_head; ctrl->io_head = io_node; } while (numbus--) { bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!bus_node) break; bus_node->base = *(u32*)p_byte; p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(bus_node); return 2; } bus_node->length = *(u32*)p_byte; p_byte += 4; if (p_byte > ((u8*)p_EV_header + evbuffer_length)) { kfree(bus_node); return 2; } bus_node->next = ctrl->bus_head; ctrl->bus_head = bus_node; } /* If all of the following fail, we don't have any resources for * hot plug add */ rc = 1; rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (rc) return(rc); } else { if ((evbuffer[0] != 0) && (!ctrl->push_flag)) return 1; } return 0; } int compaq_nvram_store (void __iomem *rom_start) { int rc = 1; if (rom_start == NULL) return -ENODEV; if (evbuffer_init) { rc = store_HRT(rom_start); if (rc) { err(msg_unable_to_save); } } return rc; }
gpl-2.0
CyanogenMod/android_kernel_yu_msm8916
drivers/parisc/wax.c
14611
3227
/* * WAX Device Driver * * (c) Copyright 2000 The Puffin Group Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * by Helge Deller <deller@gmx.de> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> #include <asm/hardware.h> #include "gsc.h" #define WAX_GSC_IRQ 7 /* Hardcoded Interrupt for GSC */ static void wax_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; switch (dev->id.sversion) { case 0x73: irq = 1; break; /* i8042 General */ case 0x8c: irq = 6; break; /* Serial */ case 0x90: irq = 10; break; /* EISA */ default: return; /* Unknown */ } gsc_asic_assign_irq(ctrl, irq, &dev->irq); switch (dev->id.sversion) { case 0x73: irq = 2; break; /* i8042 High-priority */ case 0x90: irq = 0; break; /* EISA NMI */ default: return; /* No secondary IRQ */ } gsc_asic_assign_irq(ctrl, irq, &dev->aux_irq); } static void __init wax_init_irq(struct gsc_asic *wax) { unsigned long base = wax->hpa; /* Wax-off */ gsc_writel(0x00000000, base+OFFSET_IMR); /* clear pending interrupts */ gsc_readl(base+OFFSET_IRR); /* We're not really convinced we want to reset the onboard * devices. Firmware does it for us... 
*/ /* Resets */ // gsc_writel(0xFFFFFFFF, base+0x1000); /* HIL */ // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ } static int __init wax_init_chip(struct parisc_device *dev) { struct gsc_asic *wax; struct parisc_device *parent; struct gsc_irq gsc_irq; int ret; wax = kzalloc(sizeof(*wax), GFP_KERNEL); if (!wax) return -ENOMEM; wax->name = "wax"; wax->hpa = dev->hpa.start; wax->version = 0; /* gsc_readb(wax->hpa+WAX_VER); */ printk(KERN_INFO "%s at 0x%lx found.\n", wax->name, wax->hpa); /* Stop wax hissing for a bit */ wax_init_irq(wax); /* the IRQ wax should use */ dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); kfree(wax); return -EBUSY; } wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); if (ret < 0) { kfree(wax); return ret; } /* enable IRQ's for devices below WAX */ gsc_writel(wax->eim, wax->hpa + OFFSET_IAR); /* Done init'ing, register this driver */ ret = gsc_common_setup(dev, wax); if (ret) { kfree(wax); return ret; } gsc_fixup_irqs(dev, wax, wax_choose_irq); /* On 715-class machines, Wax EISA is a sibling of Wax, not a child. */ parent = parisc_parent(dev); if (parent->id.hw_type != HPHW_IOA) { gsc_fixup_irqs(parent, wax, wax_choose_irq); } return ret; } static struct parisc_device_id wax_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008e }, { 0, } }; MODULE_DEVICE_TABLE(parisc, wax_tbl); struct parisc_driver wax_driver = { .name = "wax", .id_table = wax_tbl, .probe = wax_init_chip, };
gpl-2.0
hvaibhav/beagle-dev
drivers/gpu/drm/qxl/qxl_object.c
20
8250
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

/*
 * TTM destroy callback: tear down a qxl BO once its last TTM reference
 * is gone — evict any hw surface, finalize the fence, unlink from the
 * device's GEM object list, release the GEM base and free the struct.
 */
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	qxl_fence_fini(&bo->fence);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

/* True iff this TTM BO was created by this driver (checked via the
 * destroy callback identity). */
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

/*
 * Fill qbo->placement for the requested domain (VRAM, surface PRIV0,
 * or CPU/system), optionally with NO_EVICT when pinned.  Falls back to
 * system memory if the domain matched nothing.
 */
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;

	qbo->placement.fpfn = 0;
	qbo->placement.lpfn = 0;
	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
}

/*
 * Allocate and initialize a qxl BO of @size bytes in @domain.
 * On success *bo_ptr holds the new BO; on failure a negative errno is
 * returned and *bo_ptr stays NULL.  After a failed ttm_bo_init() the
 * destroy callback has already freed @bo, so it must not be freed here.
 */
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping;
	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	qxl_fence_init(qdev, &bo->fence);
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

/*
 * Map the whole BO into kernel address space (cached in bo->kptr so
 * repeat calls are cheap).  Optionally returns the pointer via @ptr.
 */
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

/*
 * Atomically map a single page of the BO.  For VRAM/PRIV0 BOs an
 * io_mapping atomic mapping is used (must be released with
 * qxl_bo_kunmap_atomic_page); otherwise falls back to the full kmap.
 * Returns NULL if the fallback kmap fails.
 */
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

/* Undo qxl_bo_kmap(); no-op if the BO was never mapped. */
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

/*
 * Release a mapping obtained from qxl_bo_kmap_atomic_page().  Mirrors
 * its two paths: atomic io-mapping unmap for VRAM/PRIV0, full kunmap
 * for the fallback case.
 */
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return ;
 fallback:
	qxl_bo_kunmap(bo);
}

/* Drop a reference; NULLs *bo once the TTM reference is gone. */
void qxl_bo_unref(struct qxl_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

/* Take an extra reference on @bo and return it. */
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	ttm_bo_reference(&bo->tbo);
	return bo;
}

/*
 * Pin the BO into @domain (refcounted — already-pinned BOs just bump
 * the count).  Optionally returns the GPU offset.  Caller must hold
 * the BO reserved (TTM validate requirement).
 */
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p pin failed\n", bo);
	return r;
}

/*
 * Drop one pin reference; when the count reaches zero, clear NO_EVICT
 * from every placement and re-validate so TTM may evict the BO again.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Device-teardown safety net: force-free every GEM object userspace
 * leaked, logging each one.  Only runs when objects remain.
 */
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		mutex_lock(&qdev->ddev->struct_mutex);
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&qdev->ddev->struct_mutex);
	}
}

/* Bring up the TTM memory manager for this device. */
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

/* Tear down the TTM memory manager. */
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

/*
 * Lazily allocate a surface id + hardware surface for a surface-domain
 * BO that doesn't have one yet.  Returns 0 on success or if nothing
 * needed doing.
 */
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/* Evict everything from the surface (PRIV0) memory domain. */
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

/* Evict everything from VRAM. */
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}
gpl-2.0
lisong521/wireshark
epan/dissectors/packet-bvlc.c
20
14054
/* packet-bvlc.c
 * Routines for BACnet/IP (BVLL, BVLC) dissection
 * Copyright 2001, Hartmut Mueller <hartmut@abmlinux.org>, FH Dortmund
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * Copied from README.developer,v 1.23
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "config.h"

#include <epan/packet.h>
#include <epan/prefs.h>

void proto_register_bvlc(void);
void proto_reg_handoff_bvlc(void);

/* Taken from add-135a (BACnet-IP-standard paper):
 *
 * The default UDP port for both directed messages and broadcasts shall
 * be X'BAC0' and all B/IP devices shall support it. In some cases,
 * e.g., a situation where it is desirable for two groups of BACnet devices
 * to coexist independently on the same IP subnet, the UDP port may be
 * configured locally to a different value without it being considered
 * a violation of this protocol.
 */
/* User-configurable extra UDP port (preference), 0 = disabled. */
static guint global_additional_bvlc_udp_port = 0;

/* Protocol and header-field handles, filled in at registration time. */
static int proto_bvlc = -1;
static int hf_bvlc_type = -1;
static int hf_bvlc_function = -1;
static int hf_bvlc_length = -1;
static int hf_bvlc_result = -1;
static int hf_bvlc_bdt_ip = -1;
static int hf_bvlc_bdt_mask = -1;
static int hf_bvlc_bdt_port = -1;
static int hf_bvlc_reg_ttl = -1;
static int hf_bvlc_fdt_ip = -1;
static int hf_bvlc_fdt_port = -1;
static int hf_bvlc_fdt_ttl = -1;
static int hf_bvlc_fdt_timeout = -1;
static int hf_bvlc_fwd_ip = -1;
static int hf_bvlc_fwd_port = -1;

static dissector_handle_t data_handle;

/* Sub-dissector table keyed on the BVLC function code. */
static dissector_table_t bvlc_dissector_table;

/* BVLC function codes per Annex J / Addendum 135a. */
static const value_string bvlc_function_names[] = {
	{ 0x00, "BVLC-Result", },
	{ 0x01, "Write-Broadcast-Distribution-Table", },
	{ 0x02, "Read-Broadcast-Distribution-Table", },
	{ 0x03, "Read-Broadcast-Distribution-Table-Ack", },
	{ 0x04, "Forwarded-NPDU", },
	{ 0x05, "Register-Foreign-Device", },
	{ 0x06, "Read-Foreign-Device-Table", },
	{ 0x07, "Read-Foreign-Device-Table-Ack", },
	{ 0x08, "Delete-Foreign-Device-Table-Entry", },
	{ 0x09, "Distribute-Broadcast-To-Network", },
	{ 0x0a, "Original-Unicast-NPDU", },
	{ 0x0b, "Original-Broadcast-NPDU" },
	{ 0, NULL }
};

/* Result codes carried in a BVLC-Result message. */
static const value_string bvlc_result_names[] = {
	{ 0x00, "Successful completion" },
	{ 0x10, "Write-Broadcast-Distribution-Table NAK" },
	{ 0x20, "Read-Broadcast-Distribution-Table NAK" },
	{ 0x30, "Register-Foreign-Device NAK" },
	{ 0x40, "Read-Foreign-Device-Table NAK" },
	{ 0x50, "Delete-Foreign-Device-Table-Entry NAK" },
	{ 0x60, "Distribute-Broadcast-To-Network NAK" },
	{ 0, NULL }
};

static gint ett_bvlc = -1;
static gint ett_bdt = -1;
static gint ett_fdt = -1;

#define BACNET_IP_ANNEX_J	0x81

static const value_string bvlc_types[] = {
	{ BACNET_IP_ANNEX_J, "BACnet/IP (Annex J)" },
	{ 0, NULL }
};

/*
 * Dissect one BVLC PDU.  Validates the type octet, works out the BVLC
 * header length from the function code, builds the protocol tree for
 * the routing-level functions, and finally hands any trailing NPDU to
 * the per-function sub-dissector table (falling back to "data").
 * Returns 0 to reject the packet, otherwise the reported tvb length.
 */
static int
dissect_bvlc(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
	proto_item *ti;
	proto_item *ti_bdt;
	proto_item *ti_fdt;
	proto_tree *bvlc_tree;
	proto_tree *bdt_tree; /* Broadcast Distribution Table */
	proto_tree *fdt_tree; /* Foreign Device Table */

	gint offset;
	guint8 bvlc_type;
	guint8 bvlc_function;
	guint16 bvlc_length;
	guint16 packet_length;
	guint npdu_length;
	guint length_remaining;
	guint16 bvlc_result;
	tvbuff_t *next_tvb;

	offset = 0;

	bvlc_type =  tvb_get_guint8(tvb, offset);

	/*
	 * Simple sanity check - make sure the type is one we know about.
	 */
	if (try_val_to_str(bvlc_type, bvlc_types) == NULL)
		return 0;

	col_set_str(pinfo->cinfo, COL_PROTOCOL, "BVLC");

	col_set_str(pinfo->cinfo, COL_INFO, "BACnet Virtual Link Control");

	bvlc_function = tvb_get_guint8(tvb, offset+1);
	packet_length = tvb_get_ntohs(tvb, offset+2);
	length_remaining = tvb_reported_length_remaining(tvb, offset);

	if (bvlc_function > 0x08) {
		/*  We have a constant header length of BVLC of 4 in every
		 *  BVLC-packet forewarding an NPDU. Beware: Changes in the
		 *  BACnet-IP-standard may break this.
		 *  At the moment, no functions above 0x0b
		 *  exist (Addendum 135a to ANSI/ASHRAE 135-1995 - BACnet)
		 */
		bvlc_length = 4;
	} else if(bvlc_function == 0x04) {
		/* 4 Bytes + 6 Bytes for B/IP Address of Originating Device */
		bvlc_length = 10;
	} else {
		/*  BVLC-packets with function below 0x09 contain
		 *  routing-level data (e.g. Broadcast Distribution)
		 *  but no NPDU for BACnet, so bvlc_length goes up to the end
		 *  of the captured frame.
		 */
		bvlc_length = packet_length;
	}

	if (bvlc_length < 4) {
		return 0;	/* reject */
	}
	if (tree) {
		ti = proto_tree_add_item(tree, proto_bvlc, tvb, 0,
			bvlc_length, ENC_NA);
		bvlc_tree = proto_item_add_subtree(ti, ett_bvlc);
		proto_tree_add_uint(bvlc_tree, hf_bvlc_type, tvb, offset, 1,
			bvlc_type);
		offset ++;
		proto_tree_add_uint(bvlc_tree, hf_bvlc_function, tvb,
			offset, 1, bvlc_function);
		offset ++;
		/* Flag a header whose length field disagrees with what was
		 * actually captured. */
		if (length_remaining != packet_length)
			proto_tree_add_uint_format_value(bvlc_tree, hf_bvlc_length, tvb, offset,
				2, bvlc_length,
				"%d of %d bytes (invalid length - expected %d bytes)",
				bvlc_length, packet_length, length_remaining);
		else
			proto_tree_add_uint_format_value(bvlc_tree, hf_bvlc_length, tvb, offset,
				2, bvlc_length, "%d of %d bytes BACnet packet length",
				bvlc_length, packet_length);
		offset += 2;
		switch (bvlc_function) {
		case 0x00: /* BVLC-Result */
			bvlc_result = tvb_get_ntohs(tvb, offset);
			/* I don't know why the result code is encoded in 4 nibbles,
			 * but only using one: 0x00r0. Shifting left 4 bits.
			 */
			/* We should bitmask the result correctly when we have a
			 * packet to dissect, see README.developer, 1.6.2, FID */
			proto_tree_add_uint_format_value(bvlc_tree, hf_bvlc_result, tvb,
				offset, 2, bvlc_result,"0x%04x (%s)",
				bvlc_result, val_to_str_const(bvlc_result,
					bvlc_result_names, "Unknown"));
			/*offset += 2;*/
			break;
		case 0x01: /* Write-Broadcast-Distribution-Table */
		case 0x03: /* Read-Broadcast-Distribution-Table-Ack */
			/* List of BDT Entries:	N*10-octet */
			ti_bdt = proto_tree_add_item(bvlc_tree, proto_bvlc, tvb,
				offset, bvlc_length-4, ENC_NA);
			bdt_tree = proto_item_add_subtree(ti_bdt, ett_bdt);
			/* List of BDT Entries:	N*10-octet */
			while ((bvlc_length - offset) > 9) {
				proto_tree_add_item(bdt_tree, hf_bvlc_bdt_ip,
					tvb, offset, 4, ENC_BIG_ENDIAN);
				offset += 4;
				proto_tree_add_item(bdt_tree, hf_bvlc_bdt_port,
					tvb, offset, 2, ENC_BIG_ENDIAN);
				offset += 2;
				proto_tree_add_item(bdt_tree,
					hf_bvlc_bdt_mask, tvb, offset, 4,
					ENC_NA);
				offset += 4;
			}
			/* We check this if we get a BDT-packet somewhere */
			break;
		case 0x02: /* Read-Broadcast-Distribution-Table */
			/* nothing to do here */
			break;
		case 0x05: /* Register-Foreign-Device */
			/* Time-to-Live	2-octets T, Time-to-Live T, in seconds */
			proto_tree_add_item(bvlc_tree, hf_bvlc_reg_ttl,
				tvb, offset, 2, ENC_BIG_ENDIAN);
			/*offset += 2;*/
			break;
		case 0x06: /* Read-Foreign-Device-Table */
			/* nothing to do here */
			break;
		case 0x07: /* Read-Foreign-Device-Table-Ack */
			/* List of FDT Entries:	N*10-octet */
			/* N indicates the number of entries in the FDT whose
			 * contents are being returned. Each returned entry
			 * consists of the 6-octet B/IP address of the registrant;
			 * the 2-octet Time-to-Live value supplied at the time of
			 * registration; and a 2-octet value representing the
			 * number of seconds remaining before the BBMD will purge
			 * the registrant's FDT entry if no re-registration occurs.
			 */
			ti_fdt = proto_tree_add_item(bvlc_tree, proto_bvlc, tvb,
				offset, bvlc_length -4, ENC_NA);
			fdt_tree = proto_item_add_subtree(ti_fdt, ett_fdt);
			/* List of FDT Entries:	N*10-octet */
			while ((bvlc_length - offset) > 9) {
				proto_tree_add_item(fdt_tree, hf_bvlc_fdt_ip,
					tvb, offset, 4, ENC_BIG_ENDIAN);
				offset += 4;
				proto_tree_add_item(fdt_tree, hf_bvlc_fdt_port,
					tvb, offset, 2, ENC_BIG_ENDIAN);
				offset += 2;
				proto_tree_add_item(fdt_tree,
					hf_bvlc_fdt_ttl, tvb, offset, 2,
					ENC_BIG_ENDIAN);
				offset += 2;
				proto_tree_add_item(fdt_tree,
					hf_bvlc_fdt_timeout, tvb, offset, 2,
					ENC_BIG_ENDIAN);
				offset += 2;
			}
			/* We check this if we get a FDT-packet somewhere */
			break;
		case 0x08: /* Delete-Foreign-Device-Table-Entry */
			/* FDT Entry:	6-octets */
			proto_tree_add_item(bvlc_tree, hf_bvlc_fdt_ip,
				tvb, offset, 4, ENC_BIG_ENDIAN);
			offset += 4;
			proto_tree_add_item(bvlc_tree, hf_bvlc_fdt_port,
				tvb, offset, 2, ENC_BIG_ENDIAN);
			/*offset += 2;*/
			break;
			/* We check this if we get a FDT-packet somewhere */
		case 0x04:	/* Forwarded-NPDU
				 * Why is this 0x04? It would have been a better
				 * idea to append all forewarded NPDUs at the
				 * end of the function table in the B/IP-standard!
				 */
			/* proto_tree_add_bytes_format(); */
			proto_tree_add_item(bvlc_tree, hf_bvlc_fwd_ip,
				tvb, offset, 4, ENC_BIG_ENDIAN);
			offset += 4;
			proto_tree_add_item(bvlc_tree, hf_bvlc_fwd_port,
				tvb, offset, 2, ENC_BIG_ENDIAN);
			/*offset += 2;*/
			/* NOTE(review): no break here — case 0x04 falls
			 * through into default, which only breaks; appears
			 * harmless but confirm intent. */
		default:/* Distribute-Broadcast-To-Network
			 * Original-Unicast-NPDU
			 * Original-Broadcast-NPDU
			 * Going to the next dissector...
			 */
			break;
		}

	}
	/* Ok, no routing information BVLC packet. Dissect as
	 * BACnet NPDU
	 */
	npdu_length = packet_length - bvlc_length;
	next_tvb = tvb_new_subset(tvb,bvlc_length,-1,npdu_length);
	/* Code from Guy Harris */
	if (!dissector_try_uint(bvlc_dissector_table,
		bvlc_function, next_tvb, pinfo, tree)) {
		/* Unknown function - dissect the paylod as data */
		call_dissector(data_handle,next_tvb, pinfo, tree);
	}
	return tvb_reported_length(tvb);
}

/*
 * Register the BVLC protocol: header fields, subtrees, the
 * "additional UDP port" preference, the named dissector, and the
 * per-function sub-dissector table.
 */
void
proto_register_bvlc(void)
{
	static hf_register_info hf[] = {
		{ &hf_bvlc_type,
			{ "Type",           "bvlc.type",
			FT_UINT8, BASE_HEX, VALS(bvlc_types), 0,
			NULL, HFILL }
		},
		{ &hf_bvlc_function,
			{ "Function",           "bvlc.function",
			FT_UINT8, BASE_HEX, VALS(bvlc_function_names), 0,
			"BVLC Function", HFILL }
		},
		{ &hf_bvlc_length,
			{ "BVLC-Length",        "bvlc.length",
			FT_UINT16, BASE_DEC, NULL, 0,
			"Length of BVLC", HFILL }
		},
		/* We should bitmask the result correctly when we have a
		 * packet to dissect */
		{ &hf_bvlc_result,
			{ "Result",           "bvlc.result",
			FT_UINT16, BASE_HEX, NULL, 0,
			"Result Code", HFILL }
		},
		{ &hf_bvlc_bdt_ip,
			{ "IP",           "bvlc.bdt_ip",
			FT_IPv4, BASE_NONE, NULL, 0,
			"BDT IP", HFILL }
		},
		{ &hf_bvlc_bdt_port,
			{ "Port",           "bvlc.bdt_port",
			FT_UINT16, BASE_DEC, NULL, 0,
			"BDT Port", HFILL }
		},
		{ &hf_bvlc_bdt_mask,
			{ "Mask",           "bvlc.bdt_mask",
			FT_BYTES, BASE_NONE, NULL, 0,
			"BDT Broadcast Distribution Mask", HFILL }
		},
		{ &hf_bvlc_reg_ttl,
			{ "TTL",           "bvlc.reg_ttl",
			FT_UINT16, BASE_DEC, NULL, 0,
			"Foreign Device Time To Live", HFILL }
		},
		{ &hf_bvlc_fdt_ip,
			{ "IP",           "bvlc.fdt_ip",
			FT_IPv4, BASE_NONE, NULL, 0,
			"FDT IP", HFILL }
		},
		{ &hf_bvlc_fdt_port,
			{ "Port",           "bvlc.fdt_port",
			FT_UINT16, BASE_DEC, NULL, 0,
			"FDT Port", HFILL }
		},
		{ &hf_bvlc_fdt_ttl,
			{ "TTL",           "bvlc.fdt_ttl",
			FT_UINT16, BASE_DEC, NULL, 0,
			"Foreign Device Time To Live", HFILL }
		},
		{ &hf_bvlc_fdt_timeout,
			{ "Timeout",           "bvlc.fdt_timeout",
			FT_UINT16, BASE_DEC, NULL, 0,
			"Foreign Device Timeout (seconds)", HFILL }
		},
		{ &hf_bvlc_fwd_ip,
			{ "IP",           "bvlc.fwd_ip",
			FT_IPv4, BASE_NONE, NULL, 0,
			"FWD IP", HFILL }
		},
		{ &hf_bvlc_fwd_port,
			{ "Port",           "bvlc.fwd_port",
			FT_UINT16, BASE_DEC, NULL, 0,
			"FWD Port", HFILL }
		},
	};

	static gint *ett[] = {
		&ett_bvlc,
		&ett_bdt,
		&ett_fdt,
	};

	module_t *bvlc_module;

	proto_bvlc = proto_register_protocol("BACnet Virtual Link Control",
	    "BVLC", "bvlc");

	proto_register_field_array(proto_bvlc, hf, array_length(hf));
	proto_register_subtree_array(ett, array_length(ett));

	bvlc_module = prefs_register_protocol(proto_bvlc, proto_reg_handoff_bvlc);

	prefs_register_uint_preference(bvlc_module, "additional_udp_port",
					"Additional UDP port", "Set an additional UDP port, "
					"besides the standard X'BAC0' (47808) port.",
					10, &global_additional_bvlc_udp_port);

	new_register_dissector("bvlc", dissect_bvlc, proto_bvlc);

	bvlc_dissector_table = register_dissector_table("bvlc.function",
	    "BVLC Function", FT_UINT8, BASE_HEX);
}

/*
 * Handoff: on first call, hook the dissector onto the standard port
 * 0xBAC0; on preference changes, move the additional-port registration
 * from the old value to the new one.
 */
void
proto_reg_handoff_bvlc(void)
{
	static gboolean bvlc_initialized = FALSE;
	static dissector_handle_t bvlc_handle;
	static guint additional_bvlc_udp_port;

	if (!bvlc_initialized) {
		bvlc_handle = find_dissector("bvlc");
		dissector_add_uint("udp.port", 0xBAC0, bvlc_handle);
		data_handle = find_dissector("data");
		bvlc_initialized = TRUE;
	} else {
		if (additional_bvlc_udp_port != 0) {
			dissector_delete_uint("udp.port",
				additional_bvlc_udp_port, bvlc_handle);
		}
	}

	if (global_additional_bvlc_udp_port != 0) {
		dissector_add_uint("udp.port",
			global_additional_bvlc_udp_port, bvlc_handle);
	}
	additional_bvlc_udp_port = global_additional_bvlc_udp_port;
}

/*
 * Editor modelines  -  http://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * indent-tabs-mode: t
 * End:
 *
 * vi: set shiftwidth=8 tabstop=8 noexpandtab:
 * :indentSize=8:tabSize=8:noTabs=false:
 */
gpl-2.0
SystemTera/SystemTera.Server-S-2.6-Kernel
drivers/staging/ath6kl/os/linux/netbuf.c
20
5510
//------------------------------------------------------------------------------ // Copyright (c) 2004-2010 Atheros Communications Inc. // All rights reserved. // // // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // // // Author(s): ="Atheros" //------------------------------------------------------------------------------ #include <linux/kernel.h> #include <linux/skbuff.h> #include <a_config.h> #include "athdefs.h" #include "a_types.h" #include "a_osapi.h" #include "htc_packet.h" #define AR6000_DATA_OFFSET 64 void a_netbuf_enqueue(A_NETBUF_QUEUE_T *q, void *pkt) { skb_queue_tail((struct sk_buff_head *) q, (struct sk_buff *) pkt); } void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt) { skb_queue_head((struct sk_buff_head *) q, (struct sk_buff *) pkt); } void *a_netbuf_dequeue(A_NETBUF_QUEUE_T *q) { return((void *) skb_dequeue((struct sk_buff_head *) q)); } int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q) { return(skb_queue_len((struct sk_buff_head *) q)); } int a_netbuf_queue_empty(A_NETBUF_QUEUE_T *q) { return(skb_queue_empty((struct sk_buff_head *) q)); } void a_netbuf_queue_init(A_NETBUF_QUEUE_T *q) { skb_queue_head_init((struct sk_buff_head *) q); } void * a_netbuf_alloc(int size) { struct sk_buff *skb; size += 2 * (A_GET_CACHE_LINE_BYTES()); /* add some cacheline space at front and back 
of buffer */ skb = dev_alloc_skb(AR6000_DATA_OFFSET + sizeof(HTC_PACKET) + size); skb_reserve(skb, AR6000_DATA_OFFSET + sizeof(HTC_PACKET) + A_GET_CACHE_LINE_BYTES()); return ((void *)skb); } /* * Allocate an SKB w.o. any encapsulation requirement. */ void * a_netbuf_alloc_raw(int size) { struct sk_buff *skb; skb = dev_alloc_skb(size); return ((void *)skb); } void a_netbuf_free(void *bufPtr) { struct sk_buff *skb = (struct sk_buff *)bufPtr; dev_kfree_skb(skb); } A_UINT32 a_netbuf_to_len(void *bufPtr) { return (((struct sk_buff *)bufPtr)->len); } void * a_netbuf_to_data(void *bufPtr) { return (((struct sk_buff *)bufPtr)->data); } /* * Add len # of bytes to the beginning of the network buffer * pointed to by bufPtr */ A_STATUS a_netbuf_push(void *bufPtr, A_INT32 len) { skb_push((struct sk_buff *)bufPtr, len); return A_OK; } /* * Add len # of bytes to the beginning of the network buffer * pointed to by bufPtr and also fill with data */ A_STATUS a_netbuf_push_data(void *bufPtr, char *srcPtr, A_INT32 len) { skb_push((struct sk_buff *) bufPtr, len); A_MEMCPY(((struct sk_buff *)bufPtr)->data, srcPtr, len); return A_OK; } /* * Add len # of bytes to the end of the network buffer * pointed to by bufPtr */ A_STATUS a_netbuf_put(void *bufPtr, A_INT32 len) { skb_put((struct sk_buff *)bufPtr, len); return A_OK; } /* * Add len # of bytes to the end of the network buffer * pointed to by bufPtr and also fill with data */ A_STATUS a_netbuf_put_data(void *bufPtr, char *srcPtr, A_INT32 len) { char *start = (char*)(((struct sk_buff *)bufPtr)->data + ((struct sk_buff *)bufPtr)->len); skb_put((struct sk_buff *)bufPtr, len); A_MEMCPY(start, srcPtr, len); return A_OK; } /* * Trim the network buffer pointed to by bufPtr to len # of bytes */ A_STATUS a_netbuf_setlen(void *bufPtr, A_INT32 len) { skb_trim((struct sk_buff *)bufPtr, len); return A_OK; } /* * Chop of len # of bytes from the end of the buffer. 
*/ A_STATUS a_netbuf_trim(void *bufPtr, A_INT32 len) { skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len); return A_OK; } /* * Chop of len # of bytes from the end of the buffer and return the data. */ A_STATUS a_netbuf_trim_data(void *bufPtr, char *dstPtr, A_INT32 len) { char *start = (char*)(((struct sk_buff *)bufPtr)->data + (((struct sk_buff *)bufPtr)->len - len)); A_MEMCPY(dstPtr, start, len); skb_trim((struct sk_buff *)bufPtr, ((struct sk_buff *)bufPtr)->len - len); return A_OK; } /* * Returns the number of bytes available to a a_netbuf_push() */ A_INT32 a_netbuf_headroom(void *bufPtr) { return (skb_headroom((struct sk_buff *)bufPtr)); } /* * Removes specified number of bytes from the beginning of the buffer */ A_STATUS a_netbuf_pull(void *bufPtr, A_INT32 len) { skb_pull((struct sk_buff *)bufPtr, len); return A_OK; } /* * Removes specified number of bytes from the beginning of the buffer * and return the data */ A_STATUS a_netbuf_pull_data(void *bufPtr, char *dstPtr, A_INT32 len) { A_MEMCPY(dstPtr, ((struct sk_buff *)bufPtr)->data, len); skb_pull((struct sk_buff *)bufPtr, len); return A_OK; } #ifdef EXPORT_HCI_BRIDGE_INTERFACE EXPORT_SYMBOL(a_netbuf_to_data); EXPORT_SYMBOL(a_netbuf_put); EXPORT_SYMBOL(a_netbuf_pull); EXPORT_SYMBOL(a_netbuf_alloc); EXPORT_SYMBOL(a_netbuf_free); #endif
gpl-2.0
dirkmueller/qemu
hw/arm/versatilepb.c
20
12522
/*
 * ARM Versatile Platform/Application Baseboard System emulation.
 *
 * Copyright (c) 2005-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

#include "hw/sysbus.h"
#include "hw/arm/arm.h"
#include "hw/devices.h"
#include "net/net.h"
#include "sysemu/sysemu.h"
#include "hw/pci/pci.h"
#include "hw/i2c/i2c.h"
#include "hw/boards.h"
#include "sysemu/blockdev.h"
#include "exec/address-spaces.h"
#include "hw/block/flash.h"

/* NOR flash window shared by the PB and AB board variants. */
#define VERSATILE_FLASH_ADDR 0x34000000
#define VERSATILE_FLASH_SIZE (64 * 1024 * 1024)
#define VERSATILE_FLASH_SECT_SIZE (256 * 1024)

/* Primary interrupt controller. */

#define TYPE_VERSATILE_PB_SIC "versatilepb_sic"
#define VERSATILE_PB_SIC(obj) \
    OBJECT_CHECK(vpb_sic_state, (obj), TYPE_VERSATILE_PB_SIC)

/*
 * Secondary interrupt controller (SIC) state.  Inputs are combined under
 * 'mask' onto one parent PIC line (parent[irq]); bits enabled in
 * 'pic_enable' are instead passed straight through to the matching
 * parent line.
 */
typedef struct vpb_sic_state {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    uint32_t level;       /* raw latched state of the 32 input lines */
    uint32_t mask;        /* sources contributing to the combined output */
    uint32_t pic_enable;  /* sources routed directly to the parent PIC */
    qemu_irq parent[32];
    int irq;              /* parent line driven by the combined output */
} vpb_sic_state;

static const VMStateDescription vmstate_vpb_sic = {
    .name = "versatilepb_sic",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, vpb_sic_state),
        VMSTATE_UINT32(mask, vpb_sic_state),
        VMSTATE_UINT32(pic_enable, vpb_sic_state),
        VMSTATE_END_OF_LIST()
    }
};

/* Drive the combined (level & mask) output onto the parent PIC line. */
static void vpb_sic_update(vpb_sic_state *s)
{
    uint32_t flags;

    flags = s->level & s->mask;
    qemu_set_irq(s->parent[s->irq], flags != 0);
}

/* Re-drive the pass-through lines (bits 21..30) after PICENABLE changes. */
static void vpb_sic_update_pic(vpb_sic_state *s)
{
    int i;
    uint32_t mask;

    for (i = 21; i <= 30; i++) {
        mask = 1u << i;
        if (!(s->pic_enable & mask))
            continue;
        qemu_set_irq(s->parent[i], (s->level & mask) != 0);
    }
}

/* GPIO-in handler: latch the new line level, then propagate. */
static void vpb_sic_set_irq(void *opaque, int irq, int level)
{
    vpb_sic_state *s = (vpb_sic_state *)opaque;
    if (level)
        s->level |= 1u << irq;
    else
        s->level &= ~(1u << irq);
    if (s->pic_enable & (1u << irq))
        qemu_set_irq(s->parent[irq], level);
    vpb_sic_update(s);
}

/* MMIO read: registers are at 4-byte strides from the base. */
static uint64_t vpb_sic_read(void *opaque, hwaddr offset,
                             unsigned size)
{
    vpb_sic_state *s = (vpb_sic_state *)opaque;

    switch (offset >> 2) {
    case 0: /* STATUS */
        return s->level & s->mask;
    case 1: /* RAWSTAT */
        return s->level;
    case 2: /* ENABLE */
        return s->mask;
    case 4: /* SOFTINT */
        return s->level & 1;
    case 8: /* PICENABLE */
        return s->pic_enable;
    default:
        printf ("vpb_sic_read: Bad register offset 0x%x\n", (int)offset);
        return 0;
    }
}

/* MMIO write handler; most writes end with a combined-output update. */
static void vpb_sic_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    vpb_sic_state *s = (vpb_sic_state *)opaque;

    switch (offset >> 2) {
    case 2: /* ENSET */
        s->mask |= value;
        break;
    case 3: /* ENCLR */
        s->mask &= ~value;
        break;
    case 4: /* SOFTINTSET */
        /*
         * NOTE(review): SOFTINTSET/SOFTINTCLR modify s->mask while the
         * SOFTINT read above returns s->level & 1 — looks like bit 0 of
         * 'level' was intended here; confirm against the Versatile SIC
         * documentation before changing.
         */
        if (value)
            s->mask |= 1;
        break;
    case 5: /* SOFTINTCLR */
        if (value)
            s->mask &= ~1u;
        break;
    case 8: /* PICENSET */
        /* Only bits 21..30 are pass-through capable. */
        s->pic_enable |= (value & 0x7fe00000);
        vpb_sic_update_pic(s);
        break;
    case 9: /* PICENCLR */
        s->pic_enable &= ~value;
        vpb_sic_update_pic(s);
        break;
    default:
        printf ("vpb_sic_write: Bad register offset 0x%x\n", (int)offset);
        return;
    }
    vpb_sic_update(s);
}

static const MemoryRegionOps vpb_sic_ops = {
    .read = vpb_sic_read,
    .write = vpb_sic_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* SysBus init: 32 GPIO inputs, 32 outgoing IRQ lines, one 4K MMIO region. */
static int vpb_sic_init(SysBusDevice *sbd)
{
    DeviceState *dev = DEVICE(sbd);
    vpb_sic_state *s = VERSATILE_PB_SIC(dev);
    int i;

    qdev_init_gpio_in(dev, vpb_sic_set_irq, 32);
    for (i = 0; i < 32; i++) {
        sysbus_init_irq(sbd, &s->parent[i]);
    }
    s->irq = 31;  /* combined output feeds parent PIC input 31 */
    memory_region_init_io(&s->iomem, OBJECT(s), &vpb_sic_ops, s,
                          "vpb-sic", 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
    return 0;
}

/* Board init.  */

/* The AB and PB boards both use the same core, just with different
   peripherals and expansion busses.  For now we emulate a subset of the
   PB peripherals and just change the board ID.  */

static struct arm_boot_info versatile_binfo;

/* Common machine init for both board variants; board_id selects PB/AB. */
static void versatile_init(QEMUMachineInitArgs *args, int board_id)
{
    ARMCPU *cpu;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    qemu_irq pic[32];
    qemu_irq sic[32];
    DeviceState *dev, *sysctl;
    SysBusDevice *busdev;
    DeviceState *pl041;
    PCIBus *pci_bus;
    NICInfo *nd;
    I2CBus *i2c;
    int n;
    int done_smc = 0;
    DriveInfo *dinfo;

    if (!args->cpu_model) {
        args->cpu_model = "arm926";
    }
    cpu = cpu_arm_init(args->cpu_model);
    if (!cpu) {
        fprintf(stderr, "Unable to find CPU definition\n");
        exit(1);
    }
    memory_region_init_ram(ram, NULL, "versatile.ram", args->ram_size);
    vmstate_register_ram_global(ram);
    /* ??? RAM should repeat to fill physical memory space.  */
    /* SDRAM at address zero.  */
    memory_region_add_subregion(sysmem, 0, ram);

    sysctl = qdev_create(NULL, "realview_sysctl");
    qdev_prop_set_uint32(sysctl, "sys_id", 0x41007004);
    qdev_prop_set_uint32(sysctl, "proc_id", 0x02000000);
    qdev_init_nofail(sysctl);
    sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000);

    /* PL190 primary VIC, wired to the CPU's IRQ and FIQ inputs. */
    dev = sysbus_create_varargs("pl190", 0x10140000,
                                qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ),
                                qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ),
                                NULL);
    for (n = 0; n < 32; n++) {
        pic[n] = qdev_get_gpio_in(dev, n);
    }
    /* Secondary controller; its 32 outputs feed the matching PIC inputs. */
    dev = sysbus_create_simple(TYPE_VERSATILE_PB_SIC, 0x10003000, NULL);
    for (n = 0; n < 32; n++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), n, pic[n]);
        sic[n] = qdev_get_gpio_in(dev, n);
    }

    sysbus_create_simple("pl050_keyboard", 0x10006000, sic[3]);
    sysbus_create_simple("pl050_mouse", 0x10007000, sic[4]);

    dev = qdev_create(NULL, "versatile_pci");
    busdev = SYS_BUS_DEVICE(dev);
    qdev_init_nofail(dev);
    sysbus_mmio_map(busdev, 0, 0x10001000); /* PCI controller regs */
    sysbus_mmio_map(busdev, 1, 0x41000000); /* PCI self-config */
    sysbus_mmio_map(busdev, 2, 0x42000000); /* PCI config */
    sysbus_mmio_map(busdev, 3, 0x43000000); /* PCI I/O */
    sysbus_mmio_map(busdev, 4, 0x44000000); /* PCI memory window 1 */
    sysbus_mmio_map(busdev, 5, 0x50000000); /* PCI memory window 2 */
    sysbus_mmio_map(busdev, 6, 0x60000000); /* PCI memory window 3 */
    sysbus_connect_irq(busdev, 0, sic[27]);
    sysbus_connect_irq(busdev, 1, sic[28]);
    sysbus_connect_irq(busdev, 2, sic[29]);
    sysbus_connect_irq(busdev, 3, sic[30]);
    pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci");

    /* First smc91c111-compatible NIC goes on-board; the rest go on PCI. */
    for(n = 0; n < nb_nics; n++) {
        nd = &nd_table[n];

        if (!done_smc && (!nd->model || strcmp(nd->model, "smc91c111") == 0)) {
            smc91c111_init(nd, 0x10010000, sic[25]);
            done_smc = 1;
        } else {
            pci_nic_init_nofail(nd, pci_bus, "rtl8139", NULL);
        }
    }
    if (usb_enabled(false)) {
        pci_create_simple(pci_bus, -1, "pci-ohci");
    }
    /* One LSI SCSI HBA per populated SCSI bus. */
    n = drive_get_max_bus(IF_SCSI);
    while (n >= 0) {
        pci_create_simple(pci_bus, -1, "lsi53c895a");
        n--;
    }

    sysbus_create_simple("pl011", 0x101f1000, pic[12]);
    sysbus_create_simple("pl011", 0x101f2000, pic[13]);
    sysbus_create_simple("pl011", 0x101f3000, pic[14]);
    sysbus_create_simple("pl011", 0x10009000, sic[6]);

    sysbus_create_simple("pl080", 0x10130000, pic[17]);
    sysbus_create_simple("sp804", 0x101e2000, pic[4]);
    sysbus_create_simple("sp804", 0x101e3000, pic[5]);

    sysbus_create_simple("pl061", 0x101e4000, pic[6]);
    sysbus_create_simple("pl061", 0x101e5000, pic[7]);
    sysbus_create_simple("pl061", 0x101e6000, pic[8]);
    sysbus_create_simple("pl061", 0x101e7000, pic[9]);

    /* The versatile/PB actually has a modified Color LCD controller
       that includes hardware cursor support from the PL111.  */
    dev = sysbus_create_simple("pl110_versatile", 0x10120000, pic[16]);
    /* Wire up the mux control signals from the SYS_CLCD register */
    qdev_connect_gpio_out(sysctl, 0, qdev_get_gpio_in(dev, 0));

    sysbus_create_varargs("pl181", 0x10005000, sic[22], sic[1], NULL);
    sysbus_create_varargs("pl181", 0x1000b000, sic[23], sic[2], NULL);

    /* Add PL031 Real Time Clock. */
    sysbus_create_simple("pl031", 0x101e8000, pic[10]);

    dev = sysbus_create_simple("versatile_i2c", 0x10002000, NULL);
    i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c");
    i2c_create_slave(i2c, "ds1338", 0x68);

    /* Add PL041 AACI Interface to the LM4549 codec */
    pl041 = qdev_create(NULL, "pl041");
    qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512);
    qdev_init_nofail(pl041);
    sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000);
    sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, sic[24]);

    /* Memory map for Versatile/PB:  */
    /* 0x10000000 System registers.  */
    /* 0x10001000 PCI controller config registers.  */
    /* 0x10002000 Serial bus interface.  */
    /* 0x10003000 Secondary interrupt controller.  */
    /* 0x10004000 AACI (audio).  */
    /* 0x10005000 MMCI0.  */
    /* 0x10006000 KMI0 (keyboard).  */
    /* 0x10007000 KMI1 (mouse).  */
    /* 0x10008000 Character LCD Interface.  */
    /* 0x10009000 UART3.  */
    /* 0x1000a000 Smart card 1.  */
    /* 0x1000b000 MMCI1.  */
    /* 0x10010000 Ethernet.  */
    /* 0x10020000 USB.  */
    /* 0x10100000 SSMC.  */
    /* 0x10110000 MPMC.  */
    /* 0x10120000 CLCD Controller.  */
    /* 0x10130000 DMA Controller.  */
    /* 0x10140000 Vectored interrupt controller.  */
    /* 0x101d0000 AHB Monitor Interface.  */
    /* 0x101e0000 System Controller.  */
    /* 0x101e1000 Watchdog Interface.  */
    /* 0x101e2000 Timer 0/1.  */
    /* 0x101e3000 Timer 2/3.  */
    /* 0x101e4000 GPIO port 0.  */
    /* 0x101e5000 GPIO port 1.  */
    /* 0x101e6000 GPIO port 2.  */
    /* 0x101e7000 GPIO port 3.  */
    /* 0x101e8000 RTC.  */
    /* 0x101f0000 Smart card 0.  */
    /* 0x101f1000 UART0.  */
    /* 0x101f2000 UART1.  */
    /* 0x101f3000 UART2.  */
    /* 0x101f4000 SSPI.  */
    /* 0x34000000 NOR Flash */

    dinfo = drive_get(IF_PFLASH, 0, 0);
    if (!pflash_cfi01_register(VERSATILE_FLASH_ADDR, NULL, "versatile.flash",
                          VERSATILE_FLASH_SIZE, dinfo ? dinfo->bdrv : NULL,
                          VERSATILE_FLASH_SECT_SIZE,
                          VERSATILE_FLASH_SIZE / VERSATILE_FLASH_SECT_SIZE,
                          4, 0x0089, 0x0018, 0x0000, 0x0, 0)) {
        fprintf(stderr, "qemu: Error registering flash memory.\n");
    }

    versatile_binfo.ram_size = args->ram_size;
    versatile_binfo.kernel_filename = args->kernel_filename;
    versatile_binfo.kernel_cmdline = args->kernel_cmdline;
    versatile_binfo.initrd_filename = args->initrd_filename;
    versatile_binfo.board_id = board_id;
    arm_load_kernel(cpu, &versatile_binfo);
}

/* Versatile/PB: board ID 0x183. */
static void vpb_init(QEMUMachineInitArgs *args)
{
    versatile_init(args, 0x183);
}

/* Versatile/AB: board ID 0x25e. */
static void vab_init(QEMUMachineInitArgs *args)
{
    versatile_init(args, 0x25e);
}

static QEMUMachine versatilepb_machine = {
    .name = "versatilepb",
    .desc = "ARM Versatile/PB (ARM926EJ-S)",
    .init = vpb_init,
    .block_default_type = IF_SCSI,
};

static QEMUMachine versatileab_machine = {
    .name = "versatileab",
    .desc = "ARM Versatile/AB (ARM926EJ-S)",
    .init = vab_init,
    .block_default_type = IF_SCSI,
};

static void versatile_machine_init(void)
{
    qemu_register_machine(&versatilepb_machine);
    qemu_register_machine(&versatileab_machine);
}

machine_init(versatile_machine_init);

static void vpb_sic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = vpb_sic_init;
    dc->vmsd = &vmstate_vpb_sic;
}

static const TypeInfo vpb_sic_info = {
    .name = TYPE_VERSATILE_PB_SIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(vpb_sic_state),
    .class_init = vpb_sic_class_init,
};

static void versatilepb_register_types(void)
{
    type_register_static(&vpb_sic_info);
}

type_init(versatilepb_register_types)
gpl-2.0
pboonstoppel/linux-3.1-nv-rel15r7-cpuquiet
arch/mips/sgi-ip22/ip28-berr.c
788
14902
/*
 * ip28-berr.c: Bus error handling.
 *
 * Copyright (C) 2002, 2003 Ladislav Michl (ladis@linux-mips.org)
 * Copyright (C) 2005 Peter Fuerst (pf@net.alphadv.de) - IP28
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/addrspace.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/branch.h>
#include <asm/irq_regs.h>
#include <asm/sgi/mc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/ip22.h>
#include <asm/r4kcache.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>

/* Counters exported via ip28_show_be_info() for /proc diagnostics. */
static unsigned int count_be_is_fixup;
static unsigned int count_be_handler;
static unsigned int count_be_interrupt;
static int debug_be_interrupt;

/* Snapshot of MC/HPC3 error registers taken in save_and_clear_buserr(). */
static unsigned int cpu_err_stat;	/* Status reg for CPU */
static unsigned int gio_err_stat;	/* Status reg for GIO */
static unsigned int cpu_err_addr;	/* Error address reg for CPU */
static unsigned int gio_err_addr;	/* Error address reg for GIO */
static unsigned int extio_stat;
static unsigned int hpc3_berr_stat;	/* Bus error interrupt status */

/* Per-DMA-channel snapshot: channel regs base, control, buffer/desc ptrs. */
struct hpc3_stat {
	unsigned long addr;
	unsigned int ctrl;
	unsigned int cbp;
	unsigned int ndptr;
};

static struct {
	struct hpc3_stat pbdma[8];
	struct hpc3_stat scsi[2];
	struct hpc3_stat ethrx, ethtx;
} hpc3;

/* Cache tags captured around the faulting bus address for later dumps. */
static struct {
	unsigned long err_addr;
	struct {
		u32 lo;
		u32 hi;
	} tags[1][2], tagd[4][2], tagi[4][2];	/* Way 0/1 */
} cache_tags;

/*
 * Snapshot S-cache, D-cache and I-cache tags that could correspond to
 * the faulting bus address, for diagnostic printing later.
 */
static inline void save_cache_tags(unsigned busaddr)
{
	unsigned long addr = CAC_BASE | busaddr;
	int i;

	cache_tags.err_addr = addr;
	/*
	 * Starting with a bus-address, save secondary cache (indexed by
	 * PA[23..18:7..6]) tags first.
	 */
	addr &= ~1L;
#define tag cache_tags.tags[0]
	cache_op(Index_Load_Tag_S, addr);
	tag[0].lo = read_c0_taglo();	/* PA[35:18], VA[13:12] */
	tag[0].hi = read_c0_taghi();	/* PA[39:36] */
	cache_op(Index_Load_Tag_S, addr | 1L);
	tag[1].lo = read_c0_taglo();	/* PA[35:18], VA[13:12] */
	tag[1].hi = read_c0_taghi();	/* PA[39:36] */
#undef tag
	/*
	 * Save all primary data cache (indexed by VA[13:5]) tags which
	 * might fit to this bus-address, knowing that VA[11:0] == PA[11:0].
	 * Saving all tags and evaluating them later is easier and safer
	 * than relying on VA[13:12] from the secondary cache tags to pick
	 * matching primary tags here already.
	 */
	addr &= (0xffL << 56) | ((1 << 12) - 1);
#define tag cache_tags.tagd[i]
	for (i = 0; i < 4; ++i, addr += (1 << 12)) {
		cache_op(Index_Load_Tag_D, addr);
		tag[0].lo = read_c0_taglo();	/* PA[35:12] */
		tag[0].hi = read_c0_taghi();	/* PA[39:36] */
		cache_op(Index_Load_Tag_D, addr | 1L);
		tag[1].lo = read_c0_taglo();	/* PA[35:12] */
		tag[1].hi = read_c0_taghi();	/* PA[39:36] */
	}
#undef tag
	/*
	 * Save primary instruction cache (indexed by VA[13:6]) tags
	 * the same way.
	 */
	addr &= (0xffL << 56) | ((1 << 12) - 1);
#define tag cache_tags.tagi[i]
	for (i = 0; i < 4; ++i, addr += (1 << 12)) {
		cache_op(Index_Load_Tag_I, addr);
		tag[0].lo = read_c0_taglo();	/* PA[35:12] */
		tag[0].hi = read_c0_taghi();	/* PA[39:36] */
		cache_op(Index_Load_Tag_I, addr | 1L);
		tag[1].lo = read_c0_taglo();	/* PA[35:12] */
		tag[1].hi = read_c0_taghi();	/* PA[39:36] */
	}
#undef tag
}

#define GIO_ERRMASK	0xff00
#define CPU_ERRMASK	0x3f00

/*
 * Latch all MC/IOC/HPC3 error state plus every DMA channel's pointers,
 * snapshot cache tags for the faulting address, then clear the MC error
 * status so the condition can recur.
 */
static void save_and_clear_buserr(void)
{
	int i;

	/* save status registers */
	cpu_err_addr = sgimc->cerr;
	cpu_err_stat = sgimc->cstat;
	gio_err_addr = sgimc->gerr;
	gio_err_stat = sgimc->gstat;
	extio_stat = sgioc->extio;
	hpc3_berr_stat = hpc3c0->bestat;

	hpc3.scsi[0].addr = (unsigned long)&hpc3c0->scsi_chan0;
	hpc3.scsi[0].ctrl = hpc3c0->scsi_chan0.ctrl; /* HPC3_SCTRL_ACTIVE ? */
	hpc3.scsi[0].cbp = hpc3c0->scsi_chan0.cbptr;
	hpc3.scsi[0].ndptr = hpc3c0->scsi_chan0.ndptr;

	hpc3.scsi[1].addr = (unsigned long)&hpc3c0->scsi_chan1;
	hpc3.scsi[1].ctrl = hpc3c0->scsi_chan1.ctrl; /* HPC3_SCTRL_ACTIVE ? */
	hpc3.scsi[1].cbp = hpc3c0->scsi_chan1.cbptr;
	hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;

	hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
	hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
	hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
	hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;

	hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
	hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
	hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
	hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;

	for (i = 0; i < 8; ++i) {
		/* HPC3_PDMACTRL_ISACT ? */
		hpc3.pbdma[i].addr = (unsigned long)&hpc3c0->pbdma[i];
		hpc3.pbdma[i].ctrl = hpc3c0->pbdma[i].pbdma_ctrl;
		hpc3.pbdma[i].cbp = hpc3c0->pbdma[i].pbdma_bptr;
		hpc3.pbdma[i].ndptr = hpc3c0->pbdma[i].pbdma_dptr;
	}
	/* CPU error address takes precedence over the GIO one. */
	i = 0;
	/*
	 * NOTE(review): CPU_ERRMASK applied to gio_err_stat misses the
	 * GIO-only PIO_RD/PIO_WR bits (GIO_ERRMASK is 0xff00) — possibly
	 * intentional (only these conditions latch a usable address);
	 * confirm against the SGI MC documentation before changing.
	 */
	if (gio_err_stat & CPU_ERRMASK)
		i = gio_err_addr;
	if (cpu_err_stat & CPU_ERRMASK)
		i = cpu_err_addr;
	save_cache_tags(i);

	sgimc->cstat = sgimc->gstat = 0;
}

/* Dump the cache tags previously captured by save_cache_tags(). */
static void print_cache_tags(void)
{
	u32 scb, scw;
	int i;

	printk(KERN_ERR "Cache tags @ %08x:\n", (unsigned)cache_tags.err_addr);

	/* PA[31:12] shifted to PTag0 (PA[35:12]) format */
	scw = (cache_tags.err_addr >> 4) & 0x0fffff00;

	scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 5) - 1);
	for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
		if ((cache_tags.tagd[i][0].lo & 0x0fffff00) != scw &&
		    (cache_tags.tagd[i][1].lo & 0x0fffff00) != scw)
			continue;
		printk(KERN_ERR
		       "D: 0: %08x %08x, 1: %08x %08x  (VA[13:5] %04x)\n",
		       cache_tags.tagd[i][0].hi, cache_tags.tagd[i][0].lo,
		       cache_tags.tagd[i][1].hi, cache_tags.tagd[i][1].lo,
		       scb | (1 << 12)*i);
	}
	scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 6) - 1);
	for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
		if ((cache_tags.tagi[i][0].lo & 0x0fffff00) != scw &&
		    (cache_tags.tagi[i][1].lo & 0x0fffff00) != scw)
			continue;
		printk(KERN_ERR
		       "I: 0: %08x %08x, 1: %08x %08x  (VA[13:6] %04x)\n",
		       cache_tags.tagi[i][0].hi, cache_tags.tagi[i][0].lo,
		       cache_tags.tagi[i][1].hi, cache_tags.tagi[i][1].lo,
		       scb | (1 << 12)*i);
	}
	i = read_c0_config();
	scb = i & (1 << 13) ? 7:6;	/* scblksize = 2^[7..6] */
	scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */

	i = ((1 << scw) - 1) & ~((1 << scb) - 1);
	printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x  (PA[%u:%u] %05x)\n",
	       cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
	       cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
	       scw-1, scb, i & (unsigned)cache_tags.err_addr);
}

/* Map a CP0 Cause ExcCode field to a human-readable exception name. */
static inline const char *cause_excode_text(int cause)
{
	static const char *txt[32] = {
		"Interrupt",
		"TLB modification",
		"TLB (load or instruction fetch)",
		"TLB (store)",
		"Address error (load or instruction fetch)",
		"Address error (store)",
		"Bus error (instruction fetch)",
		"Bus error (data: load or store)",
		"Syscall",
		"Breakpoint",
		"Reserved instruction",
		"Coprocessor unusable",
		"Arithmetic Overflow",
		"Trap",
		"14",
		"Floating-Point",
		"16", "17", "18", "19", "20", "21", "22",
		"Watch Hi/Lo",
		"24", "25", "26", "27", "28", "29", "30", "31",
	};
	return txt[(cause & 0x7c) >> 2];
}

/* Decode and print every latched error source plus exception context. */
static void print_buserr(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int error = 0;

	if (extio_stat & EXTIO_MC_BUSERR) {
		printk(KERN_ERR "MC Bus Error\n");
		error |= 1;
	}
	if (extio_stat & EXTIO_HPC3_BUSERR) {
		printk(KERN_ERR "HPC3 Bus Error 0x%x:<id=0x%x,%s,lane=0x%x>\n",
			hpc3_berr_stat,
			(hpc3_berr_stat & HPC3_BESTAT_PIDMASK) >>
					  HPC3_BESTAT_PIDSHIFT,
			(hpc3_berr_stat & HPC3_BESTAT_CTYPE) ? "PIO" : "DMA",
			hpc3_berr_stat & HPC3_BESTAT_BLMASK);
		error |= 2;
	}
	if (extio_stat & EXTIO_EISA_BUSERR) {
		printk(KERN_ERR "EISA Bus Error\n");
		error |= 4;
	}
	if (cpu_err_stat & CPU_ERRMASK) {
		printk(KERN_ERR "CPU error 0x%x<%s%s%s%s%s%s> @ 0x%08x\n",
			cpu_err_stat,
			cpu_err_stat & SGIMC_CSTAT_RD ? "RD " : "",
			cpu_err_stat & SGIMC_CSTAT_PAR ? "PAR " : "",
			cpu_err_stat & SGIMC_CSTAT_ADDR ? "ADDR " : "",
			cpu_err_stat & SGIMC_CSTAT_SYSAD_PAR ? "SYSAD " : "",
			cpu_err_stat & SGIMC_CSTAT_SYSCMD_PAR ? "SYSCMD " : "",
			cpu_err_stat & SGIMC_CSTAT_BAD_DATA ? "BAD_DATA " : "",
			cpu_err_addr);
		error |= 8;
	}
	if (gio_err_stat & GIO_ERRMASK) {
		printk(KERN_ERR "GIO error 0x%x:<%s%s%s%s%s%s%s%s> @ 0x%08x\n",
			gio_err_stat,
			gio_err_stat & SGIMC_GSTAT_RD ? "RD " : "",
			gio_err_stat & SGIMC_GSTAT_WR ? "WR " : "",
			gio_err_stat & SGIMC_GSTAT_TIME ? "TIME " : "",
			gio_err_stat & SGIMC_GSTAT_PROM ? "PROM " : "",
			gio_err_stat & SGIMC_GSTAT_ADDR ? "ADDR " : "",
			gio_err_stat & SGIMC_GSTAT_BC ? "BC " : "",
			gio_err_stat & SGIMC_GSTAT_PIO_RD ? "PIO_RD " : "",
			gio_err_stat & SGIMC_GSTAT_PIO_WR ? "PIO_WR " : "",
			gio_err_addr);
		error |= 16;
	}
	if (!error)
		printk(KERN_ERR "MC: Hmm, didn't find any error condition.\n");
	else {
		/*
		 * Fix: the second value for "cpuctrl0/1" was cpuctrl0
		 * passed twice (copy-paste bug); print cpuctrl1 as the
		 * format string announces.
		 */
		printk(KERN_ERR "CP0: config %08x, "
			"MC: cpuctrl0/1: %08x/%05x, giopar: %04x\n"
			"MC: cpu/gio_memacc: %08x/%05x, memcfg0/1: %08x/%08x\n",
			read_c0_config(),
			sgimc->cpuctrl0, sgimc->cpuctrl1, sgimc->giopar,
			sgimc->cmacc, sgimc->gmacc,
			sgimc->mconfig0, sgimc->mconfig1);
		print_cache_tags();
	}
	printk(KERN_ALERT "%s, epc == %0*lx, ra == %0*lx\n",
	       cause_excode_text(regs->cp0_cause),
	       field, regs->cp0_epc, field, regs->regs[31]);
}

/*
 * Check, whether MC's (virtual) DMA address caused the bus error.
 * See "Virtual DMA Specification", Draft 1.5, Feb 13 1992, SGI
 */

/* Return 1 iff [addr, addr+sz) lies entirely inside bootmem RAM. */
static int addr_is_ram(unsigned long addr, unsigned sz)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long a = boot_mem_map.map[i].addr;

		if (a <= addr && addr+sz <= a+boot_mem_map.map[i].size)
			return 1;
	}
	return 0;
}

/* Walk one MC micro-TLB entry; return 1 if it maps the faulting address. */
static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
{
	/* This is likely rather similar to correct code ;-) */
	vaddr &= 0x7fffffff;	/* Doc. states that top bit is ignored */
	/* If tlb-entry is valid and VPN-high (bits [30:21] ?) matches... */
	if ((lo & 2) && (vaddr >> 21) == ((hi<<1) >> 22)) {
		u32 ctl = sgimc->dma_ctrl;
		if (ctl & 1) {
			unsigned int pgsz = (ctl & 2) ? 14:12; /* 16k:4k */
			/* PTEIndex is VPN-low (bits [22:14]/[20:12] ?) */
			unsigned long pte = (lo >> 6) << 12; /* PTEBase */
			pte += 8*((vaddr >> pgsz) & 0x1ff);
			if (addr_is_ram(pte, 8)) {
				/*
				 * Note: Since DMA hardware does look up
				 * translation on its own, this PTE *must*
				 * match the TLB/EntryLo-register format !
				 */
				unsigned long a = *(unsigned long *)
						PHYS_TO_XKSEG_UNCACHED(pte);
				a = (a & 0x3f) << 6; /* PFN */
				a += vaddr & ((1 << pgsz) - 1);
				return (cpu_err_addr == a);
			}
		}
	}
	return 0;
}

/* Did the MC's virtual-DMA memory-side address cause the CPU error? */
static int check_vdma_memaddr(void)
{
	if (cpu_err_stat & CPU_ERRMASK) {
		u32 a = sgimc->maddronly;

		if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */
			return (cpu_err_addr == a);

		if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) ||
		    check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) ||
		    check_microtlb(sgimc->dtlb_hi2, sgimc->dtlb_lo2, a) ||
		    check_microtlb(sgimc->dtlb_hi3, sgimc->dtlb_lo3, a))
			return 1;
	}
	return 0;
}

/* Did the MC's virtual-DMA GIO-side address cause the GIO error? */
static int check_vdma_gioaddr(void)
{
	if (gio_err_stat & GIO_ERRMASK) {
		u32 a = sgimc->gio_dma_trans;
		a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a);
		return (gio_err_addr == a);
	}
	return 0;
}

/*
 * MC sends an interrupt whenever bus or parity errors occur. In addition,
 * if the error happened during a CPU read, it also asserts the bus error
 * pin on the R4K. Code in bus error handler save the MC bus error registers
 * and then clear the interrupt when this happens.
 */

static int ip28_be_interrupt(const struct pt_regs *regs)
{
	int i;

	save_and_clear_buserr();
	/*
	 * Try to find out, whether we got here by a mispredicted speculative
	 * load/store operation.  If so, it's not fatal, we can go on.
	 */
	/* Any cause other than "Interrupt" (ExcCode 0) is fatal. */
	if (regs->cp0_cause & CAUSEF_EXCCODE)
		goto mips_be_fatal;

	/* Any cause other than "Bus error interrupt" (IP6) is weird. */
	if ((regs->cp0_cause & CAUSEF_IP6) != CAUSEF_IP6)
		goto mips_be_fatal;

	if (extio_stat & (EXTIO_HPC3_BUSERR | EXTIO_EISA_BUSERR))
		goto mips_be_fatal;

	/* Any state other than "Memory bus error" is fatal. */
	if (cpu_err_stat & CPU_ERRMASK & ~SGIMC_CSTAT_ADDR)
		goto mips_be_fatal;

	/* GIO errors other than timeouts are fatal */
	if (gio_err_stat & GIO_ERRMASK & ~SGIMC_GSTAT_TIME)
		goto mips_be_fatal;

	/*
	 * Now we have an asynchronous bus error, speculatively or DMA caused.
	 * Need to search all DMA descriptors for the error address.
	 */
	for (i = 0; i < sizeof(hpc3)/sizeof(struct hpc3_stat); ++i) {
		struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
		if ((cpu_err_stat & CPU_ERRMASK) &&
		    (cpu_err_addr == hp->ndptr || cpu_err_addr == hp->cbp))
			break;
		if ((gio_err_stat & GIO_ERRMASK) &&
		    (gio_err_addr == hp->ndptr || gio_err_addr == hp->cbp))
			break;
	}
	if (i < sizeof(hpc3)/sizeof(struct hpc3_stat)) {
		struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
		printk(KERN_ERR "at DMA addresses: HPC3 @ %08lx:"
		       " ctl %08x, ndp %08x, cbp %08x\n",
		       CPHYSADDR(hp->addr), hp->ctrl, hp->ndptr, hp->cbp);
		goto mips_be_fatal;
	}
	/* Check MC's virtual DMA stuff. */
	if (check_vdma_memaddr()) {
		printk(KERN_ERR "at GIO DMA: mem address 0x%08x.\n",
			sgimc->maddronly);
		goto mips_be_fatal;
	}
	if (check_vdma_gioaddr()) {
		printk(KERN_ERR "at GIO DMA: gio address 0x%08x.\n",
			sgimc->gmaddronly);
		goto mips_be_fatal;
	}
	/* A speculative bus error... */
	if (debug_be_interrupt) {
		print_buserr(regs);
		printk(KERN_ERR "discarded!\n");
	}
	return MIPS_BE_DISCARD;

mips_be_fatal:
	print_buserr(regs);
	return MIPS_BE_FATAL;
}

/* Top-level bus-error interrupt entry, called from the IP22 IRQ code. */
void ip22_be_interrupt(int irq)
{
	const struct pt_regs *regs = get_irq_regs();

	count_be_interrupt++;

	if (ip28_be_interrupt(regs) != MIPS_BE_DISCARD) {
		/* Assume it would be too dangerous to continue ... */
		die_if_kernel("Oops", regs);
		force_sig(SIGBUS, current);
	} else if (debug_be_interrupt)
		show_regs((struct pt_regs *)regs);
}

static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
{
	/*
	 * We arrive here only in the unusual case of do_be() invocation,
	 * i.e. by a bus error exception without a bus error interrupt.
	 */
	if (is_fixup) {
		count_be_is_fixup++;
		save_and_clear_buserr();
		return MIPS_BE_FIXUP;
	}
	count_be_handler++;
	return ip28_be_interrupt(regs);
}

/* Install the board-specific bus-error handler at boot. */
void __init ip22_be_init(void)
{
	board_be_handler = ip28_be_handler;
}

/* Report bus-error statistics via seq_file (used by /proc). */
int ip28_show_be_info(struct seq_file *m)
{
	seq_printf(m, "IP28 be fixups\t\t: %u\n", count_be_is_fixup);
	seq_printf(m, "IP28 be interrupts\t: %u\n", count_be_interrupt);
	seq_printf(m, "IP28 be handler\t\t: %u\n", count_be_handler);

	return 0;
}

/* Kernel cmdline flag "ip28_debug_be": dump even discarded bus errors. */
static int __init debug_be_setup(char *str)
{
	debug_be_interrupt++;
	return 1;
}

__setup("ip28_debug_be", debug_be_setup);
gpl-2.0
flintman/android_kernel_htc_msm8660-3.4
drivers/gpu/drm/radeon/atom.c
1300
35221
/* * Copyright 2008 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Author: Stanislaw Skowronek */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/unaligned.h> #define ATOM_DEBUG #include "atom.h" #include "atom-names.h" #include "atom-bits.h" #include "radeon.h" #define ATOM_COND_ABOVE 0 #define ATOM_COND_ABOVEOREQUAL 1 #define ATOM_COND_ALWAYS 2 #define ATOM_COND_BELOW 3 #define ATOM_COND_BELOWOREQUAL 4 #define ATOM_COND_EQUAL 5 #define ATOM_COND_NOTEQUAL 6 #define ATOM_PORT_ATI 0 #define ATOM_PORT_PCI 1 #define ATOM_PORT_SYSIO 2 #define ATOM_UNIT_MICROSEC 0 #define ATOM_UNIT_MILLISEC 1 #define PLL_INDEX 2 #define PLL_DATA 3 typedef struct { struct atom_context *ctx; uint32_t *ps, *ws; int ps_shift; uint16_t start; unsigned last_jump; unsigned long last_jump_jiffies; bool abort; } atom_exec_context; int atom_debug = 0; static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); static uint32_t atom_arg_mask[8] = { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 0xFF000000 }; static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; static int atom_dst_to_src[8][4] = { /* translate destination alignment field to the source alignment encoding */ {0, 0, 0, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, }; static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; static int debug_depth = 0; #ifdef ATOM_DEBUG static void debug_print_spaces(int n) { while (n--) printk(" "); } #define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0) #define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0) #else #define DEBUG(...) do { } while (0) #define SDEBUG(...) 
do { } while (0) #endif static uint32_t atom_iio_execute(struct atom_context *ctx, int base, uint32_t index, uint32_t data) { struct radeon_device *rdev = ctx->card->dev->dev_private; uint32_t temp = 0xCDCDCDCD; while (1) switch (CU8(base)) { case ATOM_IIO_NOP: base++; break; case ATOM_IIO_READ: temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); base += 3; break; case ATOM_IIO_WRITE: if (rdev->family == CHIP_RV515) (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; case ATOM_IIO_CLEAR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2)); base += 3; break; case ATOM_IIO_SET: temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2); base += 3; break; case ATOM_IIO_MOVE_INDEX: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); temp |= ((index >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); base += 4; break; case ATOM_IIO_MOVE_DATA: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); temp |= ((data >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); base += 4; break; case ATOM_IIO_MOVE_ATTR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); temp |= ((ctx-> io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8 (base + 1)))) << CU8(base + 3); base += 4; break; case ATOM_IIO_END: return temp; default: printk(KERN_INFO "Unknown IIO opcode.\n"); return 0; } } static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr, uint32_t *saved, int print) { uint32_t idx, val = 0xCDCDCDCD, align, arg; struct atom_context *gctx = ctx->ctx; arg = attr & 7; align = (attr >> 3) & 7; switch (arg) { case ATOM_ARG_REG: idx = U16(*ptr); (*ptr) += 2; if (print) DEBUG("REG[0x%04X]", idx); idx += gctx->reg_block; switch (gctx->io_mode) { case ATOM_IO_MM: val = gctx->card->reg_read(gctx->card, idx); break; case ATOM_IO_PCI: printk(KERN_INFO "PCI registers are not 
implemented.\n"); return 0; case ATOM_IO_SYSIO: printk(KERN_INFO "SYSIO registers are not implemented.\n"); return 0; default: if (!(gctx->io_mode & 0x80)) { printk(KERN_INFO "Bad IO mode.\n"); return 0; } if (!gctx->iio[gctx->io_mode & 0x7F]) { printk(KERN_INFO "Undefined indirect IO read method %d.\n", gctx->io_mode & 0x7F); return 0; } val = atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F], idx, 0); } break; case ATOM_ARG_PS: idx = U8(*ptr); (*ptr)++; /* get_unaligned_le32 avoids unaligned accesses from atombios * tables, noticed on a DEC Alpha. */ val = get_unaligned_le32((u32 *)&ctx->ps[idx]); if (print) DEBUG("PS[0x%02X,0x%04X]", idx, val); break; case ATOM_ARG_WS: idx = U8(*ptr); (*ptr)++; if (print) DEBUG("WS[0x%02X]", idx); switch (idx) { case ATOM_WS_QUOTIENT: val = gctx->divmul[0]; break; case ATOM_WS_REMAINDER: val = gctx->divmul[1]; break; case ATOM_WS_DATAPTR: val = gctx->data_block; break; case ATOM_WS_SHIFT: val = gctx->shift; break; case ATOM_WS_OR_MASK: val = 1 << gctx->shift; break; case ATOM_WS_AND_MASK: val = ~(1 << gctx->shift); break; case ATOM_WS_FB_WINDOW: val = gctx->fb_base; break; case ATOM_WS_ATTRIBUTES: val = gctx->io_attr; break; case ATOM_WS_REGPTR: val = gctx->reg_block; break; default: val = ctx->ws[idx]; } break; case ATOM_ARG_ID: idx = U16(*ptr); (*ptr) += 2; if (print) { if (gctx->data_block) DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block); else DEBUG("ID[0x%04X]", idx); } val = U32(idx + gctx->data_block); break; case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. 
%d\n", gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); val = 0; } else val = gctx->scratch[(gctx->fb_base / 4) + idx]; if (print) DEBUG("FB[0x%02X]", idx); break; case ATOM_ARG_IMM: switch (align) { case ATOM_SRC_DWORD: val = U32(*ptr); (*ptr) += 4; if (print) DEBUG("IMM 0x%08X\n", val); return val; case ATOM_SRC_WORD0: case ATOM_SRC_WORD8: case ATOM_SRC_WORD16: val = U16(*ptr); (*ptr) += 2; if (print) DEBUG("IMM 0x%04X\n", val); return val; case ATOM_SRC_BYTE0: case ATOM_SRC_BYTE8: case ATOM_SRC_BYTE16: case ATOM_SRC_BYTE24: val = U8(*ptr); (*ptr)++; if (print) DEBUG("IMM 0x%02X\n", val); return val; } return 0; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; if (print) DEBUG("PLL[0x%02X]", idx); val = gctx->card->pll_read(gctx->card, idx); break; case ATOM_ARG_MC: idx = U8(*ptr); (*ptr)++; if (print) DEBUG("MC[0x%02X]", idx); val = gctx->card->mc_read(gctx->card, idx); break; } if (saved) *saved = val; val &= atom_arg_mask[align]; val >>= atom_arg_shift[align]; if (print) switch (align) { case ATOM_SRC_DWORD: DEBUG(".[31:0] -> 0x%08X\n", val); break; case ATOM_SRC_WORD0: DEBUG(".[15:0] -> 0x%04X\n", val); break; case ATOM_SRC_WORD8: DEBUG(".[23:8] -> 0x%04X\n", val); break; case ATOM_SRC_WORD16: DEBUG(".[31:16] -> 0x%04X\n", val); break; case ATOM_SRC_BYTE0: DEBUG(".[7:0] -> 0x%02X\n", val); break; case ATOM_SRC_BYTE8: DEBUG(".[15:8] -> 0x%02X\n", val); break; case ATOM_SRC_BYTE16: DEBUG(".[23:16] -> 0x%02X\n", val); break; case ATOM_SRC_BYTE24: DEBUG(".[31:24] -> 0x%02X\n", val); break; } return val; } static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) { uint32_t align = (attr >> 3) & 7, arg = attr & 7; switch (arg) { case ATOM_ARG_REG: case ATOM_ARG_ID: (*ptr) += 2; break; case ATOM_ARG_PLL: case ATOM_ARG_MC: case ATOM_ARG_PS: case ATOM_ARG_WS: case ATOM_ARG_FB: (*ptr)++; break; case ATOM_ARG_IMM: switch (align) { case ATOM_SRC_DWORD: (*ptr) += 4; return; case ATOM_SRC_WORD0: case ATOM_SRC_WORD8: case ATOM_SRC_WORD16: (*ptr) += 2; 
return; case ATOM_SRC_BYTE0: case ATOM_SRC_BYTE8: case ATOM_SRC_BYTE16: case ATOM_SRC_BYTE24: (*ptr)++; return; } return; } } static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) { return atom_get_src_int(ctx, attr, ptr, NULL, 1); } static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) { uint32_t val = 0xCDCDCDCD; switch (align) { case ATOM_SRC_DWORD: val = U32(*ptr); (*ptr) += 4; break; case ATOM_SRC_WORD0: case ATOM_SRC_WORD8: case ATOM_SRC_WORD16: val = U16(*ptr); (*ptr) += 2; break; case ATOM_SRC_BYTE0: case ATOM_SRC_BYTE8: case ATOM_SRC_BYTE16: case ATOM_SRC_BYTE24: val = U8(*ptr); (*ptr)++; break; } return val; } static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t *saved, int print) { return atom_get_src_int(ctx, arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr, saved, print); } static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr) { atom_skip_src_int(ctx, arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr); } static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t val, uint32_t saved) { uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val = val, idx; struct atom_context *gctx = ctx->ctx; old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; val <<= atom_arg_shift[align]; val &= atom_arg_mask[align]; saved &= ~atom_arg_mask[align]; val |= saved; switch (arg) { case ATOM_ARG_REG: idx = U16(*ptr); (*ptr) += 2; DEBUG("REG[0x%04X]", idx); idx += gctx->reg_block; switch (gctx->io_mode) { case ATOM_IO_MM: if (idx == 0) gctx->card->reg_write(gctx->card, idx, val << 2); else gctx->card->reg_write(gctx->card, idx, val); break; case ATOM_IO_PCI: printk(KERN_INFO "PCI registers are not implemented.\n"); return; case ATOM_IO_SYSIO: printk(KERN_INFO "SYSIO registers are not implemented.\n"); return; default: if (!(gctx->io_mode & 0x80)) { printk(KERN_INFO 
"Bad IO mode.\n"); return; } if (!gctx->iio[gctx->io_mode & 0xFF]) { printk(KERN_INFO "Undefined indirect IO write method %d.\n", gctx->io_mode & 0x7F); return; } atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF], idx, val); } break; case ATOM_ARG_PS: idx = U8(*ptr); (*ptr)++; DEBUG("PS[0x%02X]", idx); ctx->ps[idx] = cpu_to_le32(val); break; case ATOM_ARG_WS: idx = U8(*ptr); (*ptr)++; DEBUG("WS[0x%02X]", idx); switch (idx) { case ATOM_WS_QUOTIENT: gctx->divmul[0] = val; break; case ATOM_WS_REMAINDER: gctx->divmul[1] = val; break; case ATOM_WS_DATAPTR: gctx->data_block = val; break; case ATOM_WS_SHIFT: gctx->shift = val; break; case ATOM_WS_OR_MASK: case ATOM_WS_AND_MASK: break; case ATOM_WS_FB_WINDOW: gctx->fb_base = val; break; case ATOM_WS_ATTRIBUTES: gctx->io_attr = val; break; case ATOM_WS_REGPTR: gctx->reg_block = val; break; default: ctx->ws[idx] = val; } break; case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. 
%d\n", gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); } else gctx->scratch[(gctx->fb_base / 4) + idx] = val; DEBUG("FB[0x%02X]", idx); break; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; DEBUG("PLL[0x%02X]", idx); gctx->card->pll_write(gctx->card, idx, val); break; case ATOM_ARG_MC: idx = U8(*ptr); (*ptr)++; DEBUG("MC[0x%02X]", idx); gctx->card->mc_write(gctx->card, idx, val); return; } switch (align) { case ATOM_SRC_DWORD: DEBUG(".[31:0] <- 0x%08X\n", old_val); break; case ATOM_SRC_WORD0: DEBUG(".[15:0] <- 0x%04X\n", old_val); break; case ATOM_SRC_WORD8: DEBUG(".[23:8] <- 0x%04X\n", old_val); break; case ATOM_SRC_WORD16: DEBUG(".[31:16] <- 0x%04X\n", old_val); break; case ATOM_SRC_BYTE0: DEBUG(".[7:0] <- 0x%02X\n", old_val); break; case ATOM_SRC_BYTE8: DEBUG(".[15:8] <- 0x%02X\n", old_val); break; case ATOM_SRC_BYTE16: DEBUG(".[23:16] <- 0x%02X\n", old_val); break; case ATOM_SRC_BYTE24: DEBUG(".[31:24] <- 0x%02X\n", old_val); break; } } static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst += src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst &= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) { printk("ATOM BIOS beeped!\n"); } static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) { int idx = U8((*ptr)++); int r = 0; if (idx < ATOM_TABLE_NAMES_CNT) SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); else SDEBUG(" table: %d\n", idx); if 
(U16(ctx->ctx->cmd_table + 4 + 2 * idx)) r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); if (r) { ctx->abort = true; } } static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t saved; int dptr = *ptr; attr &= 0x38; attr |= atom_def_dst[attr >> 3] << 6; atom_get_dst(ctx, arg, attr, ptr, &saved, 0); SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, 0, saved); } static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); ctx->ctx->cs_equal = (dst == src); ctx->ctx->cs_above = (dst > src); SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE", ctx->ctx->cs_above ? "GT" : "LE"); } static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) { unsigned count = U8((*ptr)++); SDEBUG(" count: %d\n", count); if (arg == ATOM_UNIT_MICROSEC) udelay(count); else if (!drm_can_sleep()) mdelay(count); else msleep(count); } static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); if (src != 0) { ctx->ctx->divmul[0] = dst / src; ctx->ctx->divmul[1] = dst % src; } else { ctx->ctx->divmul[0] = 0; ctx->ctx->divmul[1] = 0; } } static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) { /* functionally, a nop */ } static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) { int execute = 0, target = U16(*ptr); unsigned long cjiffies; (*ptr) += 2; switch (arg) { case ATOM_COND_ABOVE: execute = ctx->ctx->cs_above; break; case ATOM_COND_ABOVEOREQUAL: execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; break; case ATOM_COND_ALWAYS: execute = 1; break; case ATOM_COND_BELOW: execute = !(ctx->ctx->cs_above || 
ctx->ctx->cs_equal); break; case ATOM_COND_BELOWOREQUAL: execute = !ctx->ctx->cs_above; break; case ATOM_COND_EQUAL: execute = ctx->ctx->cs_equal; break; case ATOM_COND_NOTEQUAL: execute = !ctx->ctx->cs_equal; break; } if (arg != ATOM_COND_ALWAYS) SDEBUG(" taken: %s\n", execute ? "yes" : "no"); SDEBUG(" target: 0x%04X\n", target); if (execute) { if (ctx->last_jump == (ctx->start + target)) { cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; if ((jiffies_to_msecs(cjiffies) > 5000)) { DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); ctx->abort = true; } } else { /* jiffies wrap around we will just wait a little longer */ ctx->last_jump_jiffies = jiffies; } } else { ctx->last_jump = ctx->start + target; ctx->last_jump_jiffies = jiffies; } *ptr = ctx->start + target; } } static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, mask, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); SDEBUG(" mask: 0x%08x", mask); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst &= mask; dst |= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t src, saved; int dptr = *ptr; if (((attr >> 3) & 7) != ATOM_SRC_DWORD) atom_get_dst(ctx, arg, attr, ptr, &saved, 0); else { atom_skip_dst(ctx, arg, attr, ptr); saved = 0xCDCDCDCD; } SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, src, saved); } static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); ctx->ctx->divmul[0] = dst * src; } 
/* NOP opcode: consumes no operands and has no effect. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

/* Bitwise OR: dst |= src; 'arg' selects the destination operand class. */
static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;	/* position of dst operand, reused for write-back */
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

/* POST-card opcode: only logged here, no hardware POST port is driven. */
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

/*
 * Select the active data block: 0 = none, 255 = this command table's own
 * base, otherwise an index into the master data table.
 */
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

/* Set the framebuffer scratch window base from a source operand. */
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

/* Select the register IO mode: MMIO, indirect IIO port, PCI or SYSIO. */
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			/* nonzero port selects an indirect IO handler table */
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void
atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) { ctx->ctx->reg_block = U16(*ptr); (*ptr) += 2; SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); } static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; attr &= 0x38; attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; attr &= 0x38; attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); SDEBUG(" shift: %d\n", shift); dst >>= shift; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); /* op needs to full dst value */ dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; dst &= atom_arg_mask[dst_align]; dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); /* op needs to full dst value */ dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); 
dst >>= shift; dst &= atom_arg_mask[dst_align]; dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst -= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t src, val, target; SDEBUG(" switch: "); src = atom_get_src(ctx, attr, ptr); while (U16(*ptr) != ATOM_CASE_END) if (U8(*ptr) == ATOM_CASE_MAGIC) { (*ptr)++; SDEBUG(" case: "); val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr); target = U16(*ptr); if (val == src) { SDEBUG(" target: %04X\n", target); *ptr = ctx->start + target; return; } (*ptr) += 2; } else { printk(KERN_INFO "Bad case.\n"); return; } (*ptr) += 2; } static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); ctx->ctx->cs_equal = ((dst & src) == 0); SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? 
"EQ" : "NE"); } static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst ^= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) { printk(KERN_INFO "unimplemented!\n"); } static struct { void (*func) (atom_exec_context *, int *, int); int arg; } opcode_table[ATOM_OP_CNT] = { { NULL, 0}, { atom_op_move, ATOM_ARG_REG}, { atom_op_move, ATOM_ARG_PS}, { atom_op_move, ATOM_ARG_WS}, { atom_op_move, ATOM_ARG_FB}, { atom_op_move, ATOM_ARG_PLL}, { atom_op_move, ATOM_ARG_MC}, { atom_op_and, ATOM_ARG_REG}, { atom_op_and, ATOM_ARG_PS}, { atom_op_and, ATOM_ARG_WS}, { atom_op_and, ATOM_ARG_FB}, { atom_op_and, ATOM_ARG_PLL}, { atom_op_and, ATOM_ARG_MC}, { atom_op_or, ATOM_ARG_REG}, { atom_op_or, ATOM_ARG_PS}, { atom_op_or, ATOM_ARG_WS}, { atom_op_or, ATOM_ARG_FB}, { atom_op_or, ATOM_ARG_PLL}, { atom_op_or, ATOM_ARG_MC}, { atom_op_shift_left, ATOM_ARG_REG}, { atom_op_shift_left, ATOM_ARG_PS}, { atom_op_shift_left, ATOM_ARG_WS}, { atom_op_shift_left, ATOM_ARG_FB}, { atom_op_shift_left, ATOM_ARG_PLL}, { atom_op_shift_left, ATOM_ARG_MC}, { atom_op_shift_right, ATOM_ARG_REG}, { atom_op_shift_right, ATOM_ARG_PS}, { atom_op_shift_right, ATOM_ARG_WS}, { atom_op_shift_right, ATOM_ARG_FB}, { atom_op_shift_right, ATOM_ARG_PLL}, { atom_op_shift_right, ATOM_ARG_MC}, { atom_op_mul, ATOM_ARG_REG}, { atom_op_mul, ATOM_ARG_PS}, { atom_op_mul, ATOM_ARG_WS}, { atom_op_mul, ATOM_ARG_FB}, { atom_op_mul, ATOM_ARG_PLL}, { atom_op_mul, ATOM_ARG_MC}, { atom_op_div, ATOM_ARG_REG}, { atom_op_div, ATOM_ARG_PS}, { atom_op_div, ATOM_ARG_WS}, { atom_op_div, ATOM_ARG_FB}, { atom_op_div, ATOM_ARG_PLL}, { atom_op_div, ATOM_ARG_MC}, { atom_op_add, ATOM_ARG_REG}, { atom_op_add, ATOM_ARG_PS}, { atom_op_add, ATOM_ARG_WS}, 
{ atom_op_add, ATOM_ARG_FB}, { atom_op_add, ATOM_ARG_PLL}, { atom_op_add, ATOM_ARG_MC}, { atom_op_sub, ATOM_ARG_REG}, { atom_op_sub, ATOM_ARG_PS}, { atom_op_sub, ATOM_ARG_WS}, { atom_op_sub, ATOM_ARG_FB}, { atom_op_sub, ATOM_ARG_PLL}, { atom_op_sub, ATOM_ARG_MC}, { atom_op_setport, ATOM_PORT_ATI}, { atom_op_setport, ATOM_PORT_PCI}, { atom_op_setport, ATOM_PORT_SYSIO}, { atom_op_setregblock, 0}, { atom_op_setfbbase, 0}, { atom_op_compare, ATOM_ARG_REG}, { atom_op_compare, ATOM_ARG_PS}, { atom_op_compare, ATOM_ARG_WS}, { atom_op_compare, ATOM_ARG_FB}, { atom_op_compare, ATOM_ARG_PLL}, { atom_op_compare, ATOM_ARG_MC}, { atom_op_switch, 0}, { atom_op_jump, ATOM_COND_ALWAYS}, { atom_op_jump, ATOM_COND_EQUAL}, { atom_op_jump, ATOM_COND_BELOW}, { atom_op_jump, ATOM_COND_ABOVE}, { atom_op_jump, ATOM_COND_BELOWOREQUAL}, { atom_op_jump, ATOM_COND_ABOVEOREQUAL}, { atom_op_jump, ATOM_COND_NOTEQUAL}, { atom_op_test, ATOM_ARG_REG}, { atom_op_test, ATOM_ARG_PS}, { atom_op_test, ATOM_ARG_WS}, { atom_op_test, ATOM_ARG_FB}, { atom_op_test, ATOM_ARG_PLL}, { atom_op_test, ATOM_ARG_MC}, { atom_op_delay, ATOM_UNIT_MILLISEC}, { atom_op_delay, ATOM_UNIT_MICROSEC}, { atom_op_calltable, 0}, { atom_op_repeat, 0}, { atom_op_clear, ATOM_ARG_REG}, { atom_op_clear, ATOM_ARG_PS}, { atom_op_clear, ATOM_ARG_WS}, { atom_op_clear, ATOM_ARG_FB}, { atom_op_clear, ATOM_ARG_PLL}, { atom_op_clear, ATOM_ARG_MC}, { atom_op_nop, 0}, { atom_op_eot, 0}, { atom_op_mask, ATOM_ARG_REG}, { atom_op_mask, ATOM_ARG_PS}, { atom_op_mask, ATOM_ARG_WS}, { atom_op_mask, ATOM_ARG_FB}, { atom_op_mask, ATOM_ARG_PLL}, { atom_op_mask, ATOM_ARG_MC}, { atom_op_postcard, 0}, { atom_op_beep, 0}, { atom_op_savereg, 0}, { atom_op_restorereg, 0}, { atom_op_setdatablock, 0}, { atom_op_xor, ATOM_ARG_REG}, { atom_op_xor, ATOM_ARG_PS}, { atom_op_xor, ATOM_ARG_WS}, { atom_op_xor, ATOM_ARG_FB}, { atom_op_xor, ATOM_ARG_PLL}, { atom_op_xor, ATOM_ARG_MC}, { atom_op_shl, ATOM_ARG_REG}, { atom_op_shl, ATOM_ARG_PS}, { atom_op_shl, ATOM_ARG_WS}, 
{ atom_op_shl, ATOM_ARG_FB}, { atom_op_shl, ATOM_ARG_PLL}, { atom_op_shl, ATOM_ARG_MC}, { atom_op_shr, ATOM_ARG_REG}, { atom_op_shr, ATOM_ARG_PS}, { atom_op_shr, ATOM_ARG_WS}, { atom_op_shr, ATOM_ARG_FB}, { atom_op_shr, ATOM_ARG_PLL}, { atom_op_shr, ATOM_ARG_MC}, { atom_op_debug, 0},}; static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; unsigned char op; atom_exec_context ectx; int ret = 0; if (!base) return -EINVAL; len = CU16(base + ATOM_CT_SIZE_PTR); ws = CU8(base + ATOM_CT_WS_PTR); ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; ptr = base + ATOM_CT_CODE_PTR; SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); ectx.ctx = ctx; ectx.ps_shift = ps / 4; ectx.start = base; ectx.ps = params; ectx.abort = false; ectx.last_jump = 0; if (ws) ectx.ws = kzalloc(4 * ws, GFP_KERNEL); else ectx.ws = NULL; debug_depth++; while (1) { op = CU8(ptr++); if (op < ATOM_OP_NAMES_CNT) SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); else SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); if (ectx.abort) { DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", base, len, ws, ps, ptr - 1); ret = -EINVAL; goto free; } if (op < ATOM_OP_CNT && op > 0) opcode_table[op].func(&ectx, &ptr, opcode_table[op].arg); else break; if (op == ATOM_OP_EOT) break; } debug_depth--; SDEBUG("<<\n"); free: if (ws) kfree(ectx.ws); return ret; } int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) { int r; mutex_lock(&ctx->mutex); /* reset data block */ ctx->data_block = 0; /* reset reg block */ ctx->reg_block = 0; /* reset fb window */ ctx->fb_base = 0; /* reset io mode */ ctx->io_mode = ATOM_IO_MM; /* reset divmul */ ctx->divmul[0] = 0; ctx->divmul[1] = 0; r = atom_execute_table_locked(ctx, index, params); mutex_unlock(&ctx->mutex); return r; } static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; static void 
atom_index_iio(struct atom_context *ctx, int base) { ctx->iio = kzalloc(2 * 256, GFP_KERNEL); while (CU8(base) == ATOM_IIO_START) { ctx->iio[CU8(base + 1)] = base + 2; base += 2; while (CU8(base) != ATOM_IIO_END) base += atom_iio_len[CU8(base)]; base += 3; } } struct atom_context *atom_parse(struct card_info *card, void *bios) { int base; struct atom_context *ctx = kzalloc(sizeof(struct atom_context), GFP_KERNEL); char *str; char name[512]; int i; if (!ctx) return NULL; ctx->card = card; ctx->bios = bios; if (CU16(0) != ATOM_BIOS_MAGIC) { printk(KERN_INFO "Invalid BIOS magic.\n"); kfree(ctx); return NULL; } if (strncmp (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, strlen(ATOM_ATI_MAGIC))) { printk(KERN_INFO "Invalid ATI magic.\n"); kfree(ctx); return NULL; } base = CU16(ATOM_ROM_TABLE_PTR); if (strncmp (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, strlen(ATOM_ROM_MAGIC))) { printk(KERN_INFO "Invalid ATOM magic.\n"); kfree(ctx); return NULL; } ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); str = CSTR(CU16(base + ATOM_ROM_MSG_PTR)); while (*str && ((*str == '\n') || (*str == '\r'))) str++; /* name string isn't always 0 terminated */ for (i = 0; i < 511; i++) { name[i] = str[i]; if (name[i] < '.' 
|| name[i] > 'z') { name[i] = 0; break; } } printk(KERN_INFO "ATOM BIOS: %s\n", name); return ctx; } int atom_asic_init(struct atom_context *ctx) { struct radeon_device *rdev = ctx->card->dev->dev_private; int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); uint32_t ps[16]; int ret; memset(ps, 0, 64); ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); if (!ps[0] || !ps[1]) return 1; if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); if (ret) return ret; memset(ps, 0, 64); if (rdev->family < CHIP_R600) { if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); } return ret; } void atom_destroy(struct atom_context *ctx) { if (ctx->iio) kfree(ctx->iio); kfree(ctx); } bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t * size, uint8_t * frev, uint8_t * crev, uint16_t * data_start) { int offset = index * 2 + 4; int idx = CU16(ctx->data_table + offset); u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); if (!mdt[index]) return false; if (size) *size = CU16(idx); if (frev) *frev = CU8(idx + 2); if (crev) *crev = CU8(idx + 3); *data_start = idx; return true; } bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, uint8_t * crev) { int offset = index * 2 + 4; int idx = CU16(ctx->cmd_table + offset); u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); if (!mct[index]) return false; if (frev) *frev = CU8(idx + 2); if (crev) *crev = CU8(idx + 3); return true; } int atom_allocate_fb_scratch(struct atom_context *ctx) { int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); uint16_t data_offset; int usage_bytes = 0; struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); DRM_DEBUG("atom firmware 
requested %08x %dkb\n", le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; } ctx->scratch_size_bytes = 0; if (usage_bytes == 0) usage_bytes = 20 * 1024; /* allocate some scratch memory */ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); if (!ctx->scratch) return -ENOMEM; ctx->scratch_size_bytes = usage_bytes; return 0; }
gpl-2.0
omega-roms/I9300_Stock_Kernel_JB_4.3
drivers/usb/host/ehci-msm.c
1556
6342
/* ehci-msm.c - HSUSB Host Controller Driver Implementation * * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. * * Partly derived from ehci-fsl.c and ehci-hcd.c * Copyright (c) 2000-2004 by David Brownell * Copyright (c) 2005 MontaVista Software * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org */ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/pm_runtime.h> #include <linux/usb/otg.h> #include <linux/usb/msm_hsusb_hw.h> #define MSM_USB_BASE (hcd->regs) static struct otg_transceiver *otg; static int ehci_msm_reset(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; ehci->caps = USB_CAPLENGTH; ehci->regs = USB_CAPLENGTH + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); /* cache the data to minimize the chip reads*/ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); hcd->has_tt = 1; ehci->sbrn = HCD_USB2; retval = ehci_halt(ehci); if (retval) return retval; /* data structure init */ retval = ehci_init(hcd); if (retval) return retval; retval = ehci_reset(ehci); if (retval) return retval; /* bursts of unspecified length. 
*/ writel(0, USB_AHBBURST); /* Use the AHB transactor */ writel(0, USB_AHBMODE); /* Disable streaming mode and select host mode */ writel(0x13, USB_USBMODE); ehci_port_power(ehci, 1); return 0; } static struct hc_driver msm_hc_driver = { .description = hcd_name, .product_desc = "Qualcomm On-Chip EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_USB2 | HCD_MEMORY, .reset = ehci_msm_reset, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, /* * PM support */ .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, }; static int ehci_msm_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct resource *res; int ret; dev_dbg(&pdev->dev, "ehci_msm proble\n"); hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); return -ENOMEM; } hcd->irq = platform_get_irq(pdev, 0); if (hcd->irq < 0) { dev_err(&pdev->dev, "Unable to get IRQ resource\n"); ret = hcd->irq; goto put_hcd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Unable to get memory resource\n"); ret = -ENODEV; goto put_hcd; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto put_hcd; } /* * OTG driver takes care of PHY initialization, clock 
management, * powering up VBUS, mapping of registers address space and power * management. */ otg = otg_get_transceiver(); if (!otg) { dev_err(&pdev->dev, "unable to find transceiver\n"); ret = -ENODEV; goto unmap; } ret = otg_set_host(otg, &hcd->self); if (ret < 0) { dev_err(&pdev->dev, "unable to register with transceiver\n"); goto put_transceiver; } device_init_wakeup(&pdev->dev, 1); /* * OTG device parent of HCD takes care of putting * hardware into low power mode. */ pm_runtime_no_callbacks(&pdev->dev); pm_runtime_enable(&pdev->dev); return 0; put_transceiver: otg_put_transceiver(otg); unmap: iounmap(hcd->regs); put_hcd: usb_put_hcd(hcd); return ret; } static int __devexit ehci_msm_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); device_init_wakeup(&pdev->dev, 0); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); otg_set_host(otg, NULL); otg_put_transceiver(otg); usb_put_hcd(hcd); return 0; } #ifdef CONFIG_PM static int ehci_msm_pm_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); bool wakeup = device_may_wakeup(dev); dev_dbg(dev, "ehci-msm PM suspend\n"); /* * EHCI helper function has also the same check before manipulating * port wakeup flags. We do check here the same condition before * calling the same helper function to avoid bringing hardware * from Low power mode when there is no need for adjusting port * wakeup flags. 
 */
	if (hcd->self.root_hub->do_remote_wakeup && !wakeup) {
		/* bring the controller out of runtime suspend so the port
		 * wakeup flags can actually be rewritten */
		pm_runtime_resume(dev);
		ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
				wakeup);
	}

	return 0;
}

/* System resume: undo the port tweaks applied for controller suspend. */
static int ehci_msm_pm_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	dev_dbg(dev, "ehci-msm PM resume\n");
	ehci_prepare_ports_for_controller_resume(hcd_to_ehci(hcd));

	return 0;
}

#else
#define ehci_msm_pm_suspend	NULL
#define ehci_msm_pm_resume	NULL
#endif

static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
	.suspend	= ehci_msm_pm_suspend,
	.resume		= ehci_msm_pm_resume,
};

static struct platform_driver ehci_msm_driver = {
	.probe	= ehci_msm_probe,
	.remove	= __devexit_p(ehci_msm_remove),
	.driver = {
		   .name = "msm_hsusb_host",
		   .pm = &ehci_msm_dev_pm_ops,
	},
};
gpl-2.0
andi34/android_kernel_samsung_espresso
fs/ext4/move_extent.c
1812
41851
/* * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd. * Written by Takashi Sato <t-sato@yk.jp.nec.com> * Akira Fujita <a-fujita@rs.jp.nec.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/fs.h> #include <linux/quotaops.h> #include <linux/slab.h> #include "ext4_jbd2.h" #include "ext4_extents.h" #include "ext4.h" /** * get_ext_path - Find an extent path for designated logical block number. * * @inode: an inode which is searched * @lblock: logical block number to find an extent path * @path: pointer to an extent path pointer (for output) * * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value * on failure. 
 */
static inline int
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
		struct ext4_ext_path **path)
{
	int ret = 0;

	*path = ext4_ext_find_extent(inode, lblock, *path);
	if (IS_ERR(*path)) {
		ret = PTR_ERR(*path);
		*path = NULL;	/* don't hand back an ERR_PTR to callers */
	} else if ((*path)[ext_depth(inode)].p_ext == NULL)
		/* a valid path but no extent covers this logical block */
		ret = -ENODATA;

	return ret;
}

/**
 * copy_extent_status - Copy the extent's initialization status
 *
 * @src:	an extent for getting initialize status
 * @dest:	an extent to be set the status
 */
static void
copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
{
	if (ext4_ext_is_uninitialized(src))
		ext4_ext_mark_uninitialized(dest);
	else
		/* storing the actual length clears the uninitialized bit */
		dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
}

/**
 * mext_next_extent - Search for the next extent and set it to "extent"
 *
 * @inode:	inode which is searched
 * @path:	this will obtain data for the next extent
 * @extent:	pointer to the next extent we have just gotten
 *
 * Search the next extent in the array of ext4_ext_path structure (@path)
 * and set it to ext4_extent structure (@extent). In addition, the member of
 * @path (->p_ext) also points the next extent. Return 0 on success, 1 if
 * ext4_ext_path structure refers to the last extent, or a negative error
 * value on failure.
*/ static int mext_next_extent(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent **extent) { struct ext4_extent_header *eh; int ppos, leaf_ppos = path->p_depth; ppos = leaf_ppos; if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) { /* leaf block */ *extent = ++path[ppos].p_ext; path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); return 0; } while (--ppos >= 0) { if (EXT_LAST_INDEX(path[ppos].p_hdr) > path[ppos].p_idx) { int cur_ppos = ppos; /* index block */ path[ppos].p_idx++; path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); if (path[ppos+1].p_bh) brelse(path[ppos+1].p_bh); path[ppos+1].p_bh = sb_bread(inode->i_sb, path[ppos].p_block); if (!path[ppos+1].p_bh) return -EIO; path[ppos+1].p_hdr = ext_block_hdr(path[ppos+1].p_bh); /* Halfway index block */ while (++cur_ppos < leaf_ppos) { path[cur_ppos].p_idx = EXT_FIRST_INDEX(path[cur_ppos].p_hdr); path[cur_ppos].p_block = ext4_idx_pblock(path[cur_ppos].p_idx); if (path[cur_ppos+1].p_bh) brelse(path[cur_ppos+1].p_bh); path[cur_ppos+1].p_bh = sb_bread(inode->i_sb, path[cur_ppos].p_block); if (!path[cur_ppos+1].p_bh) return -EIO; path[cur_ppos+1].p_hdr = ext_block_hdr(path[cur_ppos+1].p_bh); } path[leaf_ppos].p_ext = *extent = NULL; eh = path[leaf_ppos].p_hdr; if (le16_to_cpu(eh->eh_entries) == 0) /* empty leaf is found */ return -ENODATA; /* leaf block */ path[leaf_ppos].p_ext = *extent = EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr); path[leaf_ppos].p_block = ext4_ext_pblock(path[leaf_ppos].p_ext); return 0; } } /* We found the last extent */ return 1; } /** * mext_check_null_inode - NULL check for two inodes * * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. 
*/ static int mext_check_null_inode(struct inode *inode1, struct inode *inode2, const char *function, unsigned int line) { int ret = 0; if (inode1 == NULL) { __ext4_error(inode2->i_sb, function, line, "Both inodes should not be NULL: " "inode1 NULL inode2 %lu", inode2->i_ino); ret = -EIO; } else if (inode2 == NULL) { __ext4_error(inode1->i_sb, function, line, "Both inodes should not be NULL: " "inode1 %lu inode2 NULL", inode1->i_ino); ret = -EIO; } return ret; } /** * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem * * @orig_inode: original inode structure * @donor_inode: donor inode structure * Acquire write lock of i_data_sem of the two inodes (orig and donor) by * i_ino order. */ static void double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) { struct inode *first = orig_inode, *second = donor_inode; /* * Use the inode number to provide the stable locking order instead * of its address, because the C language doesn't guarantee you can * compare pointers that don't come from the same array. */ if (donor_inode->i_ino < orig_inode->i_ino) { first = donor_inode; second = orig_inode; } down_write(&EXT4_I(first)->i_data_sem); down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); } /** * double_up_write_data_sem - Release two inodes' write lock of i_data_sem * * @orig_inode: original inode structure to be released its lock first * @donor_inode: donor inode structure to be released its lock second * Release write lock of i_data_sem of two inodes (orig and donor). 
*/ static void double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) { up_write(&EXT4_I(orig_inode)->i_data_sem); up_write(&EXT4_I(donor_inode)->i_data_sem); } /** * mext_insert_across_blocks - Insert extents across leaf block * * @handle: journal handle * @orig_inode: original inode * @o_start: first original extent to be changed * @o_end: last original extent to be changed * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * * Allocate a new leaf block and insert extents into it. Return 0 on success, * or a negative error value on failure. */ static int mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext) { struct ext4_ext_path *orig_path = NULL; ext4_lblk_t eblock = 0; int new_flag = 0; int end_flag = 0; int err = 0; if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) { if (o_start == o_end) { /* start_ext new_ext end_ext * donor |---------|-----------|--------| * orig |------------------------------| */ end_flag = 1; } else { /* start_ext new_ext end_ext * donor |---------|----------|---------| * orig |---------------|--------------| */ o_end->ee_block = end_ext->ee_block; o_end->ee_len = end_ext->ee_len; ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); } o_start->ee_len = start_ext->ee_len; eblock = le32_to_cpu(start_ext->ee_block); new_flag = 1; } else if (start_ext->ee_len && new_ext->ee_len && !end_ext->ee_len && o_start == o_end) { /* start_ext new_ext * donor |--------------|---------------| * orig |------------------------------| */ o_start->ee_len = start_ext->ee_len; eblock = le32_to_cpu(start_ext->ee_block); new_flag = 1; } else if (!start_ext->ee_len && new_ext->ee_len && end_ext->ee_len && o_start == o_end) { /* new_ext end_ext * donor 
|--------------|---------------| * orig |------------------------------| */ o_end->ee_block = end_ext->ee_block; o_end->ee_len = end_ext->ee_len; ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); /* * Set 0 to the extent block if new_ext was * the first block. */ if (new_ext->ee_block) eblock = le32_to_cpu(new_ext->ee_block); new_flag = 1; } else { ext4_debug("ext4 move extent: Unexpected insert case\n"); return -EIO; } if (new_flag) { err = get_ext_path(orig_inode, eblock, &orig_path); if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, orig_path, new_ext, 0)) goto out; } if (end_flag) { err = get_ext_path(orig_inode, le32_to_cpu(end_ext->ee_block) - 1, &orig_path); if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, orig_path, end_ext, 0)) goto out; } out: if (orig_path) { ext4_ext_drop_refs(orig_path); kfree(orig_path); } return err; } /** * mext_insert_inside_block - Insert new extent to the extent block * * @o_start: first original extent to be moved * @o_end: last original extent to be moved * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * @eh: extent header of target leaf block * @range_to_move: used to decide how to insert extent * * Insert extents into the leaf block. The extent (@o_start) is overwritten * by inserted extents. 
 */
static void
mext_insert_inside_block(struct ext4_extent *o_start,
			      struct ext4_extent *o_end,
			      struct ext4_extent *start_ext,
			      struct ext4_extent *new_ext,
			      struct ext4_extent *end_ext,
			      struct ext4_extent_header *eh,
			      int range_to_move)
{
	int i = 0;
	unsigned long len;

	/* Move the existing extents */
	if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) {
		/* Shift the tail of the leaf by range_to_move slots. */
		len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) -
			(unsigned long)(o_end + 1);
		memmove(o_end + 1 + range_to_move, o_end + 1, len);
	}

	/* Insert start entry (only its length changes; block/pblock kept) */
	if (start_ext->ee_len)
		o_start[i++].ee_len = start_ext->ee_len;

	/* Insert new entry */
	if (new_ext->ee_len) {
		o_start[i] = *new_ext;
		ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
	}

	/* Insert end entry */
	if (end_ext->ee_len)
		o_start[i] = *end_ext;

	/* Increment the total entries counter on the extent block */
	le16_add_cpu(&eh->eh_entries, range_to_move);
}

/**
 * mext_insert_extents - Insert new extent
 *
 * @handle:	journal handle
 * @orig_inode:	original inode
 * @orig_path:	path indicates first extent to be changed
 * @o_start:	first original extent to be changed
 * @o_end:	last original extent to be changed
 * @start_ext:	first new extent to be inserted
 * @new_ext:	middle of new extent to be inserted
 * @end_ext:	last new extent to be inserted
 *
 * Call the function to insert extents. If we cannot add more extents into
 * the leaf block, we call mext_insert_across_blocks() to create a
 * new leaf block. Otherwise call mext_insert_inside_block(). Return 0
 * on success, or a negative error value on failure.
 */
static int
mext_insert_extents(handle_t *handle, struct inode *orig_inode,
		    struct ext4_ext_path *orig_path,
		    struct ext4_extent *o_start,
		    struct ext4_extent *o_end,
		    struct ext4_extent *start_ext,
		    struct ext4_extent *new_ext,
		    struct ext4_extent *end_ext)
{
	struct  ext4_extent_header *eh;
	unsigned long need_slots, slots_range;
	int	range_to_move, depth, ret;

	/*
	 * The extents need to be inserted
	 * start_extent + new_extent + end_extent.
	 */
	need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) +
		(new_ext->ee_len ? 1 : 0);

	/* The number of slots between start and end */
	slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1)
		/ sizeof(struct ext4_extent);

	/* Range to move the end of extent */
	range_to_move = need_slots - slots_range;
	/* Point at the leaf level of the path. */
	depth = orig_path->p_depth;
	orig_path += depth;
	eh = orig_path->p_hdr;

	if (depth) {
		/* Register to journal */
		ret = ext4_journal_get_write_access(handle, orig_path->p_bh);
		if (ret)
			return ret;
	}

	/* Expansion: the leaf has no room for the extra slots. */
	if (range_to_move > 0 &&
		(range_to_move > le16_to_cpu(eh->eh_max)
			- le16_to_cpu(eh->eh_entries))) {

		ret = mext_insert_across_blocks(handle, orig_inode, o_start,
					o_end, start_ext, new_ext, end_ext);
		if (ret < 0)
			return ret;
	} else
		mext_insert_inside_block(o_start, o_end, start_ext, new_ext,
						end_ext, eh, range_to_move);

	/* Dirty either the leaf buffer (depth > 0) or the inode itself. */
	if (depth) {
		ret = ext4_handle_dirty_metadata(handle, orig_inode,
						 orig_path->p_bh);
		if (ret)
			return ret;
	} else {
		ret = ext4_mark_inode_dirty(handle, orig_inode);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * mext_leaf_block - Move one leaf extent block into the inode.
 *
 * @handle:		journal handle
 * @orig_inode:		original inode
 * @orig_path:		path indicates first extent to be changed
 * @dext:		donor extent
 * @from:		start offset on the target file
 *
 * In order to insert extents into the leaf block, we must divide the extent
 * in the leaf block into three extents. The one is located to be inserted
 * extents, and the others are located around it.
 *
 * Therefore, this function creates structures to save extents of the leaf
 * block, and inserts extents by calling mext_insert_extents() with
 * created extents. Return 0 on success, or a negative error value on failure.
 */
static int
mext_leaf_block(handle_t *handle, struct inode *orig_inode,
		     struct ext4_ext_path *orig_path, struct ext4_extent *dext,
		     ext4_lblk_t *from)
{
	struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
	struct ext4_extent new_ext, start_ext, end_ext;
	ext4_lblk_t new_ext_end;
	int oext_alen, new_ext_alen, end_ext_alen;
	int depth = ext_depth(orig_inode);
	int ret;

	/* start_ext/end_ext begin empty; they are filled per-case below. */
	start_ext.ee_block = end_ext.ee_block = 0;
	o_start = o_end = oext = orig_path[depth].p_ext;
	oext_alen = ext4_ext_get_actual_len(oext);
	start_ext.ee_len = end_ext.ee_len = 0;

	/* new_ext takes the donor's blocks, relocated to offset *from. */
	new_ext.ee_block = cpu_to_le32(*from);
	ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
	new_ext.ee_len = dext->ee_len;
	new_ext_alen = ext4_ext_get_actual_len(&new_ext);
	new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;

	/*
	 * Case: original extent is first
	 * oext      |--------|
	 * new_ext      |--|
	 * start_ext |--|
	 */
	if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) &&
		le32_to_cpu(new_ext.ee_block) <
		le32_to_cpu(oext->ee_block) + oext_alen) {
		start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
					       le32_to_cpu(oext->ee_block));
		start_ext.ee_block = oext->ee_block;
		copy_extent_status(oext, &start_ext);
	} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
		prev_ext = oext - 1;
		/*
		 * We can merge new_ext into previous extent,
		 * if these are contiguous and same extent type.
		 */
		if (ext4_can_extents_be_merged(orig_inode, prev_ext,
					       &new_ext)) {
			o_start = prev_ext;
			start_ext.ee_len = cpu_to_le16(
				ext4_ext_get_actual_len(prev_ext) +
				new_ext_alen);
			start_ext.ee_block = oext->ee_block;
			copy_extent_status(prev_ext, &start_ext);
			/* new_ext is absorbed into start_ext. */
			new_ext.ee_len = 0;
		}
	}

	/*
	 * Case: new_ext_end must be less than oext
	 * oext      |-----------|
	 * new_ext       |-------|
	 */
	if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
		EXT4_ERROR_INODE(orig_inode,
			"new_ext_end(%u) should be less than or equal to "
			"oext->ee_block(%u) + oext_alen(%d) - 1",
			new_ext_end, le32_to_cpu(oext->ee_block),
			oext_alen);
		ret = -EIO;
		goto out;
	}

	/*
	 * Case: new_ext is smaller than original extent
	 * oext    |---------------|
	 * new_ext |-----------|
	 * end_ext     |---|
	 */
	if (le32_to_cpu(oext->ee_block) <= new_ext_end &&
		new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) {
		end_ext.ee_len =
			cpu_to_le16(le32_to_cpu(oext->ee_block) +
			oext_alen - 1 - new_ext_end);
		copy_extent_status(oext, &end_ext);
		end_ext_alen = ext4_ext_get_actual_len(&end_ext);
		/* end_ext keeps the tail of the original extent's blocks. */
		ext4_ext_store_pblock(&end_ext,
			(ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
		end_ext.ee_block =
			cpu_to_le32(le32_to_cpu(o_end->ee_block) +
			oext_alen - end_ext_alen);
	}

	ret = mext_insert_extents(handle, orig_inode, orig_path, o_start,
				o_end, &start_ext, &new_ext, &end_ext);
out:
	return ret;
}

/**
 * mext_calc_swap_extents - Calculate extents for extent swapping.
 *
 * @tmp_dext:		the extent that will belong to the original inode
 * @tmp_oext:		the extent that will belong to the donor inode
 * @orig_off:		block offset of original inode
 * @donor_off:		block offset of donor inode
 * @max_count:		the maximum length of extents
 *
 * Return 0 on success, or a negative error value on failure.
 */
static int
mext_calc_swap_extents(struct ext4_extent *tmp_dext,
			      struct ext4_extent *tmp_oext,
			      ext4_lblk_t orig_off, ext4_lblk_t donor_off,
			      ext4_lblk_t max_count)
{
	ext4_lblk_t diff, orig_diff;
	struct ext4_extent dext_old, oext_old;

	BUG_ON(orig_off != donor_off);

	/* original and donor extents have to cover the same block offset */
	if (orig_off < le32_to_cpu(tmp_oext->ee_block) ||
	    le32_to_cpu(tmp_oext->ee_block) +
			ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off)
		return -ENODATA;

	if (orig_off < le32_to_cpu(tmp_dext->ee_block) ||
	    le32_to_cpu(tmp_dext->ee_block) +
			ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off)
		return -ENODATA;

	/* Keep the originals so the status bits can be cross-copied below. */
	dext_old = *tmp_dext;
	oext_old = *tmp_oext;

	/* When tmp_dext is too large, pick up the target range. */
	diff = donor_off - le32_to_cpu(tmp_dext->ee_block);

	/* Trim the donor extent so it starts exactly at donor_off. */
	ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
	tmp_dext->ee_block =
			cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff);
	tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff);

	if (max_count < ext4_ext_get_actual_len(tmp_dext))
		tmp_dext->ee_len = cpu_to_le16(max_count);

	orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
	ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);

	/* Adjust extent length if donor extent is larger than orig */
	if (ext4_ext_get_actual_len(tmp_dext) >
	    ext4_ext_get_actual_len(tmp_oext) - orig_diff)
		tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) -
						orig_diff);

	/* Both sides now swap exactly the same number of blocks. */
	tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext));

	/* Swap the (un)initialized status along with the blocks. */
	copy_extent_status(&oext_old, tmp_dext);
	copy_extent_status(&dext_old, tmp_oext);

	return 0;
}

/**
 * mext_replace_branches - Replace original extents with new extents
 *
 * @handle:		journal handle
 * @orig_inode:		original inode
 * @donor_inode:	donor inode
 * @from:		block offset of orig_inode
 * @count:		block count to be replaced
 * @err:		pointer to save return value
 *
 * Replace original inode extents and donor inode extents page by page.
 * We implement this replacement in the following three steps:
 * 1. Save the block information of original and donor inodes into
 *    dummy extents.
 * 2. Change the block information of original inode to point at the
 *    donor inode blocks.
 * 3. Change the block information of donor inode to point at the saved
 *    original inode blocks in the dummy extents.
 *
 * Return replaced block count.
 */
static int
mext_replace_branches(handle_t *handle, struct inode *orig_inode,
			   struct inode *donor_inode, ext4_lblk_t from,
			   ext4_lblk_t count, int *err)
{
	struct ext4_ext_path *orig_path = NULL;
	struct ext4_ext_path *donor_path = NULL;
	struct ext4_extent *oext, *dext;
	struct ext4_extent tmp_dext, tmp_oext;
	ext4_lblk_t orig_off = from, donor_off = from;
	int depth;
	int replaced_count = 0;
	int dext_alen;

	/* Protect extent trees against block allocations via delalloc */
	double_down_write_data_sem(orig_inode, donor_inode);

	/* Get the original extent for the block "orig_off" */
	*err = get_ext_path(orig_inode, orig_off, &orig_path);
	if (*err)
		goto out;

	/* Get the donor extent for the head */
	*err = get_ext_path(donor_inode, donor_off, &donor_path);
	if (*err)
		goto out;
	depth = ext_depth(orig_inode);
	oext = orig_path[depth].p_ext;
	tmp_oext = *oext;

	depth = ext_depth(donor_inode);
	dext = donor_path[depth].p_ext;
	tmp_dext = *dext;

	/* Trim both dummy extents to the common swappable range. */
	*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
				      donor_off, count);
	if (*err)
		goto out;

	/* Loop for the donor extents */
	while (1) {
		/* The extent for donor must be found. */
		if (!dext) {
			EXT4_ERROR_INODE(donor_inode,
				   "The extent for donor must be found");
			*err = -EIO;
			goto out;
		} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
			EXT4_ERROR_INODE(donor_inode,
				"Donor offset(%u) and the first block of donor "
				"extent(%u) should be equal",
				donor_off,
				le32_to_cpu(tmp_dext.ee_block));
			*err = -EIO;
			goto out;
		}

		/* Set donor extent to orig extent */
		*err = mext_leaf_block(handle, orig_inode,
					   orig_path, &tmp_dext, &orig_off);
		if (*err)
			goto out;

		/* Set orig extent to donor extent */
		*err = mext_leaf_block(handle, donor_inode,
					   donor_path, &tmp_oext, &donor_off);
		if (*err)
			goto out;

		dext_alen = ext4_ext_get_actual_len(&tmp_dext);
		replaced_count += dext_alen;
		donor_off += dext_alen;
		orig_off += dext_alen;

		/* Already moved the expected blocks */
		if (replaced_count >= count)
			break;

		/* Re-lookup both paths; the trees just changed under us. */
		if (orig_path)
			ext4_ext_drop_refs(orig_path);
		*err = get_ext_path(orig_inode, orig_off, &orig_path);
		if (*err)
			goto out;
		depth = ext_depth(orig_inode);
		oext = orig_path[depth].p_ext;
		tmp_oext = *oext;

		if (donor_path)
			ext4_ext_drop_refs(donor_path);
		*err = get_ext_path(donor_inode, donor_off, &donor_path);
		if (*err)
			goto out;
		depth = ext_depth(donor_inode);
		dext = donor_path[depth].p_ext;
		tmp_dext = *dext;

		*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
					   donor_off, count - replaced_count);
		if (*err)
			goto out;
	}

out:
	if (orig_path) {
		ext4_ext_drop_refs(orig_path);
		kfree(orig_path);
	}
	if (donor_path) {
		ext4_ext_drop_refs(donor_path);
		kfree(donor_path);
	}

	/* The extent caches are stale after the swap. */
	ext4_ext_invalidate_cache(orig_inode);
	ext4_ext_invalidate_cache(donor_inode);

	double_up_write_data_sem(orig_inode, donor_inode);

	return replaced_count;
}

/**
 * move_extent_per_page - Move extent data per page
 *
 * @o_filp:			file structure of original file
 * @donor_inode:		donor inode
 * @orig_page_offset:		page index on original file
 * @data_offset_in_page:	block index where data swapping starts
 * @block_len_in_page:		the number of blocks to be swapped
 * @uninit:			orig extent is uninitialized or not
 * @err:
pointer to save return value * * Save the data in original inode blocks and replace original inode extents * with donor inode extents by calling mext_replace_branches(). * Finally, write out the saved data in new original inode blocks. Return * replaced block count. */ static int move_extent_per_page(struct file *o_filp, struct inode *donor_inode, pgoff_t orig_page_offset, int data_offset_in_page, int block_len_in_page, int uninit, int *err) { struct inode *orig_inode = o_filp->f_dentry->d_inode; struct address_space *mapping = orig_inode->i_mapping; struct buffer_head *bh; struct page *page = NULL; const struct address_space_operations *a_ops = mapping->a_ops; handle_t *handle; ext4_lblk_t orig_blk_offset; long long offs = orig_page_offset << PAGE_CACHE_SHIFT; unsigned long blocksize = orig_inode->i_sb->s_blocksize; unsigned int w_flags = 0; unsigned int tmp_data_size, data_size, replaced_size; void *fsdata; int i, jblocks; int err2 = 0; int replaced_count = 0; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; /* * It needs twice the amount of ordinary journal buffers because * inode and donor_inode may change each different metadata blocks. */ jblocks = ext4_writepage_trans_blocks(orig_inode) * 2; handle = ext4_journal_start(orig_inode, jblocks); if (IS_ERR(handle)) { *err = PTR_ERR(handle); return 0; } if (segment_eq(get_fs(), KERNEL_DS)) w_flags |= AOP_FLAG_UNINTERRUPTIBLE; orig_blk_offset = orig_page_offset * blocks_per_page + data_offset_in_page; /* * If orig extent is uninitialized one, * it's not necessary force the page into memory * and then force it to be written out again. * Just swap data blocks between orig and donor. 
*/ if (uninit) { replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, orig_blk_offset, block_len_in_page, err); goto out2; } offs = (long long)orig_blk_offset << orig_inode->i_blkbits; /* Calculate data_size */ if ((orig_blk_offset + block_len_in_page - 1) == ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { /* Replace the last block */ tmp_data_size = orig_inode->i_size & (blocksize - 1); /* * If data_size equal zero, it shows data_size is multiples of * blocksize. So we set appropriate value. */ if (tmp_data_size == 0) tmp_data_size = blocksize; data_size = tmp_data_size + ((block_len_in_page - 1) << orig_inode->i_blkbits); } else data_size = block_len_in_page << orig_inode->i_blkbits; replaced_size = data_size; *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags, &page, &fsdata); if (unlikely(*err < 0)) goto out; if (!PageUptodate(page)) { mapping->a_ops->readpage(o_filp, page); lock_page(page); } /* * try_to_release_page() doesn't call releasepage in writeback mode. * We should care about the order of writing to the same file * by multiple move extent processes. * It needs to call wait_on_page_writeback() to wait for the * writeback of the page. 
*/ wait_on_page_writeback(page); /* Release old bh and drop refs */ try_to_release_page(page, 0); replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, orig_blk_offset, block_len_in_page, &err2); if (err2) { if (replaced_count) { block_len_in_page = replaced_count; replaced_size = block_len_in_page << orig_inode->i_blkbits; } else goto out; } if (!page_has_buffers(page)) create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0); bh = page_buffers(page); for (i = 0; i < data_offset_in_page; i++) bh = bh->b_this_page; for (i = 0; i < block_len_in_page; i++) { *err = ext4_get_block(orig_inode, (sector_t)(orig_blk_offset + i), bh, 0); if (*err < 0) goto out; if (bh->b_this_page != NULL) bh = bh->b_this_page; } *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size, page, fsdata); page = NULL; out: if (unlikely(page)) { if (PageLocked(page)) unlock_page(page); page_cache_release(page); ext4_journal_stop(handle); } out2: ext4_journal_stop(handle); if (err2) *err = err2; return replaced_count; } /** * mext_check_arguments - Check whether move extent can be done * * @orig_inode: original inode * @donor_inode: donor inode * @orig_start: logical start offset in block for orig * @donor_start: logical start offset in block for donor * @len: the number of blocks to be moved * * Check the arguments of ext4_move_extents() whether the files can be * exchanged with each other. * Return 0 on success, or a negative error value on failure. 
 */
static int
mext_check_arguments(struct inode *orig_inode,
		     struct inode *donor_inode, __u64 orig_start,
		     __u64 donor_start, __u64 *len)
{
	ext4_lblk_t orig_blocks, donor_blocks;
	unsigned int blkbits = orig_inode->i_blkbits;
	unsigned int blocksize = 1 << blkbits;

	/* A setuid/setgid donor could smuggle privileged blocks in. */
	if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
		ext4_debug("ext4 move extent: suid or sgid is set"
			   " to donor file [ino:orig %lu, donor %lu]\n",
			   orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
		return -EPERM;

	/* Ext4 move extent does not support swapfile */
	if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
		ext4_debug("ext4 move extent: The argument files should "
			"not be swapfile [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Files should be in the same ext4 FS */
	if (orig_inode->i_sb != donor_inode->i_sb) {
		ext4_debug("ext4 move extent: The argument files "
			"should be in same FS [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Ext4 move extent supports only extent based file */
	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
		ext4_debug("ext4 move extent: orig file is not extents "
			"based file [ino:orig %lu]\n", orig_inode->i_ino);
		return -EOPNOTSUPP;
	} else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
		ext4_debug("ext4 move extent: donor file is not extents "
			"based file [ino:donor %lu]\n", donor_inode->i_ino);
		return -EOPNOTSUPP;
	}

	if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
		ext4_debug("ext4 move extent: File size is 0 byte\n");
		return -EINVAL;
	}

	/* Start offset should be same */
	if (orig_start != donor_start) {
		ext4_debug("ext4 move extent: orig and donor's start "
			"offset are not same [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Logical offsets must stay within the extent-tree address space. */
	if ((orig_start >= EXT_MAX_BLOCKS) ||
	    (donor_start >= EXT_MAX_BLOCKS) ||
	    (*len > EXT_MAX_BLOCKS) ||
	    (orig_start + *len >= EXT_MAX_BLOCKS))  {
		ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
			"[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Clamp *len so the range fits inside the shorter file. */
	if (orig_inode->i_size > donor_inode->i_size) {
		donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits;
		/* TODO: eliminate this artificial restriction */
		if (orig_start >= donor_blocks) {
			ext4_debug("ext4 move extent: orig start offset "
			"[%llu] should be less than donor file blocks "
			"[%u] [ino:orig %lu, donor %lu]\n",
			orig_start, donor_blocks,
			orig_inode->i_ino, donor_inode->i_ino);
			return -EINVAL;
		}

		/* TODO: eliminate this artificial restriction */
		if (orig_start + *len > donor_blocks) {
			ext4_debug("ext4 move extent: End offset [%llu] should "
				"be less than donor file blocks [%u]."
				"So adjust length from %llu to %llu "
				"[ino:orig %lu, donor %lu]\n",
				orig_start + *len, donor_blocks,
				*len, donor_blocks - orig_start,
				orig_inode->i_ino, donor_inode->i_ino);
			*len = donor_blocks - orig_start;
		}
	} else {
		orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits;
		if (orig_start >= orig_blocks) {
			ext4_debug("ext4 move extent: start offset [%llu] "
				"should be less than original file blocks "
				"[%u] [ino:orig %lu, donor %lu]\n",
				 orig_start, orig_blocks,
				orig_inode->i_ino, donor_inode->i_ino);
			return -EINVAL;
		}

		if (orig_start + *len > orig_blocks) {
			ext4_debug("ext4 move extent: Adjust length "
				"from %llu to %llu. Because it should be "
				"less than original file blocks "
				"[ino:orig %lu, donor %lu]\n",
				*len, orig_blocks - orig_start,
				orig_inode->i_ino, donor_inode->i_ino);
			*len = orig_blocks - orig_start;
		}
	}

	if (!*len) {
		ext4_debug("ext4 move extent: len should not be 0 "
			"[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
			donor_inode->i_ino);
		return -EINVAL;
	}

	return 0;
}

/**
 * mext_inode_double_lock - Lock i_mutex on both @inode1 and @inode2
 *
 * @inode1:	the inode structure
 * @inode2:	the inode structure
 *
 * Lock two inodes' i_mutex by i_ino order.
* If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. */ static int mext_inode_double_lock(struct inode *inode1, struct inode *inode2) { int ret = 0; BUG_ON(inode1 == NULL && inode2 == NULL); ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__); if (ret < 0) goto out; if (inode1 == inode2) { mutex_lock(&inode1->i_mutex); goto out; } if (inode1->i_ino < inode2->i_ino) { mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); } else { mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD); } out: return ret; } /** * mext_inode_double_unlock - Release i_mutex on both @inode1 and @inode2 * * @inode1: the inode that is released first * @inode2: the inode that is released second * * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. */ static int mext_inode_double_unlock(struct inode *inode1, struct inode *inode2) { int ret = 0; BUG_ON(inode1 == NULL && inode2 == NULL); ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__); if (ret < 0) goto out; if (inode1) mutex_unlock(&inode1->i_mutex); if (inode2 && inode2 != inode1) mutex_unlock(&inode2->i_mutex); out: return ret; } /** * ext4_move_extents - Exchange the specified range of a file * * @o_filp: file structure of the original file * @d_filp: file structure of the donor file * @orig_start: start offset in block for orig * @donor_start: start offset in block for donor * @len: the number of blocks to be moved * @moved_len: moved block length * * This function returns 0 and moved block length is set in moved_len * if succeed, otherwise returns error value. * * Note: ext4_move_extents() proceeds the following order. * 1:ext4_move_extents() calculates the last block number of moving extent * function by the start block number (orig_start) and the number of blocks * to be moved (len) specified as arguments. 
 *   If the {orig, donor}_start points a hole, the extent's start offset
 *   pointed by ext_cur (current extent), holecheck_path, orig_path are set
 *   after hole behind.
 * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent
 *   or the ext_cur exceeds the block_end which is last logical block number.
 * 3:To get the length of continues area, call mext_next_extent()
 *   specified with the ext_cur (initial value is holecheck_path) re-cursive,
 *   until find un-continuous extent, the start logical block number exceeds
 *   the block_end or the extent points to the last extent.
 * 4:Exchange the original inode data with donor inode data
 *   from orig_page_offset to seq_end_page.
 *   The start indexes of data are specified as arguments.
 *   That of the original inode is orig_page_offset,
 *   and the donor inode is also orig_page_offset
 *   (To easily handle blocksize != pagesize case, the offset for the
 *   donor inode is block unit).
 * 5:Update holecheck_path and orig_path to points a next proceeding extent,
 *   then returns to step 2.
 * 6:Release holecheck_path, orig_path and set the len to moved_len
 *   which shows the number of moved blocks.
 *   The moved_len is useful for the command to calculate the file offset
 *   for starting next move extent ioctl.
 * 7:Return 0 on success, or a negative error value on failure.
 */
int
ext4_move_extents(struct file *o_filp, struct file *d_filp,
		 __u64 orig_start, __u64 donor_start, __u64 len,
		 __u64 *moved_len)
{
	struct inode *orig_inode = o_filp->f_dentry->d_inode;
	struct inode *donor_inode = d_filp->f_dentry->d_inode;
	struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
	struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
	ext4_lblk_t block_start = orig_start;
	ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
	ext4_lblk_t rest_blocks;
	pgoff_t orig_page_offset = 0, seq_end_page;
	int ret1, ret2, depth, last_extent = 0;
	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
	int data_offset_in_page;
	int block_len_in_page;
	int uninit;

	/* orig and donor should be different file */
	if (orig_inode->i_ino == donor_inode->i_ino) {
		ext4_debug("ext4 move extent: The argument files should not "
			"be same file [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Regular file check */
	if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
		ext4_debug("ext4 move extent: The argument files should be "
			"regular file [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Protect orig and donor inodes against a truncate */
	ret1 = mext_inode_double_lock(orig_inode, donor_inode);
	if (ret1 < 0)
		return ret1;

	/* Protect extent tree against block allocations via delalloc */
	double_down_write_data_sem(orig_inode, donor_inode);

	/* Check the filesystem environment whether move_extent can be done */
	ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
					donor_start, &len);
	if (ret1)
		goto out;

	/* Trim len so the range does not run past i_size. */
	file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
	block_end = block_start + len - 1;
	if (file_end < block_end)
		len -= block_end - file_end;

	ret1 = get_ext_path(orig_inode, block_start, &orig_path);
	if (ret1)
		goto out;

	/* Get path structure to check the hole */
	ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
	if (ret1)
		goto out;

	depth = ext_depth(orig_inode);
	ext_cur = holecheck_path[depth].p_ext;

	/*
	 * Get proper starting location of block replacement if block_start was
	 * within the hole.
	 */
	if (le32_to_cpu(ext_cur->ee_block) +
		ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
		/*
		 * The hole exists between extents or the tail of
		 * original file.
		 */
		last_extent = mext_next_extent(orig_inode,
					holecheck_path, &ext_cur);
		if (last_extent < 0) {
			ret1 = last_extent;
			goto out;
		}
		/* Keep orig_path in step with holecheck_path. */
		last_extent = mext_next_extent(orig_inode, orig_path,
							&ext_dummy);
		if (last_extent < 0) {
			ret1 = last_extent;
			goto out;
		}
		seq_start = le32_to_cpu(ext_cur->ee_block);
	} else if (le32_to_cpu(ext_cur->ee_block) > block_start)
		/* The hole exists at the beginning of original file. */
		seq_start = le32_to_cpu(ext_cur->ee_block);
	else
		seq_start = block_start;

	/* No blocks within the specified range. */
	if (le32_to_cpu(ext_cur->ee_block) > block_end) {
		ext4_debug("ext4 move extent: The specified range of file "
							"may be the hole\n");
		ret1 = -EINVAL;
		goto out;
	}

	/* Adjust start blocks */
	add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
			 ext4_ext_get_actual_len(ext_cur), block_end + 1) -
		     max(le32_to_cpu(ext_cur->ee_block), block_start);

	while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
		seq_blocks += add_blocks;

		/* Adjust tail blocks */
		if (seq_start + seq_blocks - 1 > block_end)
			seq_blocks = block_end - seq_start + 1;

		ext_prev = ext_cur;
		last_extent = mext_next_extent(orig_inode, holecheck_path,
						&ext_cur);
		if (last_extent < 0) {
			ret1 = last_extent;
			break;
		}
		add_blocks = ext4_ext_get_actual_len(ext_cur);

		/*
		 * Extend the length of contiguous block (seq_blocks)
		 * if extents are contiguous.
		 */
		if (ext4_can_extents_be_merged(orig_inode,
					       ext_prev, ext_cur) &&
		    block_end >= le32_to_cpu(ext_cur->ee_block) &&
		    !last_extent)
			continue;

		/* Is original extent is uninitialized */
		uninit = ext4_ext_is_uninitialized(ext_prev);

		data_offset_in_page = seq_start % blocks_per_page;

		/*
		 * Calculate data blocks count that should be swapped
		 * at the first page.
		 */
		if (data_offset_in_page + seq_blocks > blocks_per_page) {
			/* Swapped blocks are across pages */
			block_len_in_page =
					blocks_per_page - data_offset_in_page;
		} else {
			/* Swapped blocks are in a page */
			block_len_in_page = seq_blocks;
		}

		orig_page_offset = seq_start >>
				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
		seq_end_page = (seq_start + seq_blocks - 1) >>
				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
		seq_start = le32_to_cpu(ext_cur->ee_block);
		rest_blocks = seq_blocks;

		/*
		 * Up semaphore to avoid following problems:
		 * a. transaction deadlock among ext4_journal_start,
		 *    ->write_begin via pagefault, and jbd2_journal_commit
		 * b. racing with ->readpage, ->write_begin, and ext4_get_block
		 *    in move_extent_per_page
		 */
		double_up_write_data_sem(orig_inode, donor_inode);

		while (orig_page_offset <= seq_end_page) {

			/* Swap original branches with new branches */
			block_len_in_page = move_extent_per_page(
						o_filp, donor_inode,
						orig_page_offset,
						data_offset_in_page,
						block_len_in_page, uninit,
						&ret1);

			/* Count how many blocks we have exchanged */
			*moved_len += block_len_in_page;
			if (ret1 < 0)
				break;
			if (*moved_len > len) {
				EXT4_ERROR_INODE(orig_inode,
					"We replaced blocks too much! "
					"sum of replaced: %llu requested: %llu",
					*moved_len, len);
				ret1 = -EIO;
				break;
			}

			orig_page_offset++;
			data_offset_in_page = 0;
			rest_blocks -= block_len_in_page;
			if (rest_blocks > blocks_per_page)
				block_len_in_page = blocks_per_page;
			else
				block_len_in_page = rest_blocks;
		}

		double_down_write_data_sem(orig_inode, donor_inode);
		if (ret1 < 0)
			break;

		/* Decrease buffer counter */
		if (holecheck_path)
			ext4_ext_drop_refs(holecheck_path);
		ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
		if (ret1)
			break;
		depth = holecheck_path->p_depth;

		/* Decrease buffer counter */
		if (orig_path)
			ext4_ext_drop_refs(orig_path);
		ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
		if (ret1)
			break;

		ext_cur = holecheck_path[depth].p_ext;
		add_blocks = ext4_ext_get_actual_len(ext_cur);
		seq_blocks = 0;

	}
out:
	/* Drop preallocations left over from the partially-moved inodes. */
	if (*moved_len) {
		ext4_discard_preallocations(orig_inode);
		ext4_discard_preallocations(donor_inode);
	}

	if (orig_path) {
		ext4_ext_drop_refs(orig_path);
		kfree(orig_path);
	}
	if (holecheck_path) {
		ext4_ext_drop_refs(holecheck_path);
		kfree(holecheck_path);
	}
	double_up_write_data_sem(orig_inode, donor_inode);
	ret2 = mext_inode_double_unlock(orig_inode, donor_inode);

	/* NOTE(review): relies on the caller pre-zeroing *moved_len. */
	if (ret1)
		return ret1;
	else if (ret2)
		return ret2;

	return 0;
}
gpl-2.0
Fusion-Devices/android_kernel_lge_g3
arch/arm/mach-msm/clock-8092.c
1812
15808
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/iopoll.h> #include <linux/regulator/consumer.h> #include <mach/rpm-regulator-smd.h> #include <mach/socinfo.h> #include <mach/rpm-smd.h> #include "clock-local2.h" #include "clock-pll.h" #include "clock-rpm.h" #include "clock-voter.h" #include "clock.h" /* * Drivers need to fill in the clock names and device names for the clocks * they need to control. 
*/ static struct clk_lookup msm_clocks_8092[] = { CLK_DUMMY("core_clk", BLSP1_UART_CLK, "msm_serial_hsl.0", OFF), CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "msm_serial_hsl.0", OFF), CLK_DUMMY("core_clk", BLSP1_UART_CLK, "msm_serial_hsl.1", OFF), CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "msm_serial_hsl.1", OFF), CLK_DUMMY("core_clk", SDC1_CLK, "msm_sdcc.1", OFF), CLK_DUMMY("iface_clk", SDC1_P_CLK, "msm_sdcc.1", OFF), CLK_DUMMY("core_clk", SDC2_CLK, "msm_sdcc.2", OFF), CLK_DUMMY("iface_clk", SDC2_P_CLK, "msm_sdcc.2", OFF), CLK_DUMMY("dfab_clk", DFAB_CLK, "msm_sps", OFF), CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, "msm_sps", OFF), CLK_DUMMY("", usb30_master_clk_src.c, "", OFF), CLK_DUMMY("", tsif_ref_clk_src.c, "", OFF), CLK_DUMMY("", ce1_clk_src.c, "", OFF), CLK_DUMMY("", ce2_clk_src.c, "", OFF), CLK_DUMMY("", ce3_clk_src.c, "", OFF), CLK_DUMMY("", geni_ser_clk_src.c, "", OFF), CLK_DUMMY("", gmac_125m_clk_src.c, "", OFF), CLK_DUMMY("", gmac_core_clk_src.c, "", OFF), CLK_DUMMY("", gmac_sys_25m_clk_src.c, "", OFF), CLK_DUMMY("", gp1_clk_src.c, "", OFF), CLK_DUMMY("", gp2_clk_src.c, "", OFF), CLK_DUMMY("", gp3_clk_src.c, "", OFF), CLK_DUMMY("", pcie_aux_clk_src.c, "", OFF), CLK_DUMMY("", pcie_pipe_clk_src.c, "", OFF), CLK_DUMMY("", pdm2_clk_src.c, "", OFF), CLK_DUMMY("", pwm_clk_src.c, "", OFF), CLK_DUMMY("", sata_asic0_clk_src.c, "", OFF), CLK_DUMMY("", sata_pmalive_clk_src.c, "", OFF), CLK_DUMMY("", sata_rx_clk_src.c, "", OFF), CLK_DUMMY("", sata_rx_oob_clk_src.c, "", OFF), CLK_DUMMY("", sdcc1_apps_clk_src.c, "", OFF), CLK_DUMMY("", sdcc2_apps_clk_src.c, "", OFF), CLK_DUMMY("", usb30_mock_utmi_clk_src.c, "", OFF), CLK_DUMMY("", usb_hs_system_clk_src.c, "", OFF), CLK_DUMMY("", usb_hs2_system_clk_src.c, "", OFF), CLK_DUMMY("", usb_hsic_clk_src.c, "", OFF), CLK_DUMMY("", usb_hsic_io_cal_clk_src.c, "", OFF), CLK_DUMMY("", usb_hsic_system_clk_src.c, "", OFF), CLK_DUMMY("", gcc_bam_dma_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_bcss_cfg_ahb_clk.c, "", OFF), CLK_DUMMY("", 
gcc_bimc_gfx_clk.c, "", OFF), CLK_DUMMY("", gcc_bimc_kpss_axi_mstr_clk.c, "", OFF), CLK_DUMMY("", gcc_bimc_sysnoc_axi_mstr_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup1_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup1_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup2_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup2_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup3_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup3_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup4_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup4_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup5_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup5_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup6_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_qup6_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_uart1_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_uart2_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_uart3_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_uart4_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_uart5_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp1_uart6_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup1_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup1_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup2_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup2_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup3_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup3_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup4_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup4_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup5_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup5_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup6_i2c_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_qup6_spi_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_uart1_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_uart2_apps_clk.c, "", OFF), CLK_DUMMY("", 
gcc_blsp2_uart3_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_uart4_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_uart5_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_blsp2_uart6_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_boot_rom_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_ce1_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_ce1_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_ce1_clk.c, "", OFF), CLK_DUMMY("", gcc_ce2_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_ce2_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_ce2_clk.c, "", OFF), CLK_DUMMY("", gcc_ce3_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_ce3_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_ce3_clk.c, "", OFF), CLK_DUMMY("", gcc_xo_clk.c, "", OFF), CLK_DUMMY("", gcc_xo_div4_clk.c, "", OFF), CLK_DUMMY("", gcc_geni_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_geni_ser_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_125m_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_cfg_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_core_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_rx_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_sys_25m_clk.c, "", OFF), CLK_DUMMY("", gcc_gmac_sys_clk.c, "", OFF), CLK_DUMMY("", gcc_gp1_clk.c, "", OFF), CLK_DUMMY("", gcc_gp2_clk.c, "", OFF), CLK_DUMMY("", gcc_gp3_clk.c, "", OFF), CLK_DUMMY("", gcc_klm_core_clk.c, "", OFF), CLK_DUMMY("", gcc_klm_s_clk.c, "", OFF), CLK_DUMMY("", gcc_lpass_q6_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_sys_noc_lpass_mport_clk.c, "", OFF), CLK_DUMMY("", gcc_sys_noc_lpass_sway_clk.c, "", OFF), CLK_DUMMY("", gcc_mmss_a5ss_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_mmss_bimc_gfx_clk.c, "", OFF), CLK_DUMMY("", gcc_pcie_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_pcie_axi_mstr_clk.c, "", OFF), CLK_DUMMY("", gcc_pcie_cfg_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_pcie_pipe_clk.c, "", OFF), CLK_DUMMY("", gcc_pcie_sleep_clk.c, "", OFF), CLK_DUMMY("", gcc_pdm2_clk.c, "", OFF), CLK_DUMMY("", gcc_pdm_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_prng_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_pwm_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_pwm_clk.c, "", 
OFF), CLK_DUMMY("", gcc_sata_asic0_clk.c, "", OFF), CLK_DUMMY("", gcc_sata_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_sata_cfg_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_sata_pmalive_clk.c, "", OFF), CLK_DUMMY("", gcc_sata_rx_clk.c, "", OFF), CLK_DUMMY("", gcc_sata_rx_oob_clk.c, "", OFF), CLK_DUMMY("", gcc_sdcc1_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_sdcc1_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_sdcc2_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_sdcc2_apps_clk.c, "", OFF), CLK_DUMMY("", gcc_spss_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_sys_noc_usb3_axi_clk.c, "", OFF), CLK_DUMMY("", gcc_usb2a_phy_sleep_clk.c, "", OFF), CLK_DUMMY("", gcc_usb2b_phy_sleep_clk.c, "", OFF), CLK_DUMMY("", gcc_usb2c_phy_sleep_clk.c, "", OFF), CLK_DUMMY("", gcc_usb30_master_clk.c, "", OFF), CLK_DUMMY("", gcc_usb30_mock_utmi_clk.c, "", OFF), CLK_DUMMY("", gcc_usb30_sleep_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hs_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hs_system_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hs2_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hs2_system_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hsic_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hsic_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hsic_io_cal_clk.c, "", OFF), CLK_DUMMY("", gcc_usb_hsic_system_clk.c, "", OFF), /* MMSS Clock Dummy */ CLK_DUMMY("", axi_clk_src.c, "", OFF), CLK_DUMMY("", mmpll0_pll_clk_src.c, "", OFF), CLK_DUMMY("", mmpll1_pll_clk_src.c, "", OFF), CLK_DUMMY("", mmpll2_pll_clk_src.c, "", OFF), CLK_DUMMY("", mmpll3_pll_clk_src.c, "", OFF), CLK_DUMMY("", mmpll6_pll_clk_src.c, "", OFF), CLK_DUMMY("", vcodec0_clk_src.c, "", OFF), CLK_DUMMY("", extpclk_clk_src.c, "", OFF), CLK_DUMMY("", lvds_clk_src.c, "", OFF), CLK_DUMMY("", mdp_clk_src.c, "", OFF), CLK_DUMMY("", vbyone_clk_src.c, "", OFF), CLK_DUMMY("", gfx3d_clk_src.c, "", OFF), CLK_DUMMY("", vp_clk_src.c, "", OFF), CLK_DUMMY("", jpeg2_clk_src.c, "", OFF), CLK_DUMMY("", hdmi_clk_src.c, "", OFF), CLK_DUMMY("", vbyone_symbol_clk_src.c, "", OFF), CLK_DUMMY("", mmss_spdm_axi_div_clk.c, "", 
OFF), CLK_DUMMY("", mmss_spdm_gfx3d_div_clk.c, "", OFF), CLK_DUMMY("", mmss_spdm_jpeg2_div_clk.c, "", OFF), CLK_DUMMY("", mmss_spdm_mdp_div_clk.c, "", OFF), CLK_DUMMY("", mmss_spdm_vcodec0_div_clk.c, "", OFF), CLK_DUMMY("", afe_pixel_clk_src.c, "", OFF), CLK_DUMMY("", cfg_clk_src.c, "", OFF), CLK_DUMMY("", hdmi_bus_clk_src.c, "", OFF), CLK_DUMMY("", hdmi_rx_clk_src.c, "", OFF), CLK_DUMMY("", md_clk_src.c, "", OFF), CLK_DUMMY("", ttl_clk_src.c, "", OFF), CLK_DUMMY("", vafe_ext_clk_src.c, "", OFF), CLK_DUMMY("", vcap_vp_clk_src.c, "", OFF), CLK_DUMMY("", gproc_clk_src.c, "", OFF), CLK_DUMMY("", hdmc_frcf_clk_src.c, "", OFF), CLK_DUMMY("", kproc_clk_src.c, "", OFF), CLK_DUMMY("", maple_clk_src.c, "", OFF), CLK_DUMMY("", preproc_clk_src.c, "", OFF), CLK_DUMMY("", sdmc_frcs_clk_src.c, "", OFF), CLK_DUMMY("", sdme_frcf_clk_src.c, "", OFF), CLK_DUMMY("", sdme_vproc_clk_src.c, "", OFF), CLK_DUMMY("", vdp_clk_src.c, "", OFF), CLK_DUMMY("", vpu_bus_clk_src.c, "", OFF), CLK_DUMMY("", vpu_frc_xin_clk_src.c, "", OFF), CLK_DUMMY("", vpu_vdp_xin_clk_src.c, "", OFF), CLK_DUMMY("", avsync_ahb_clk.c, "", OFF), CLK_DUMMY("", avsync_extpclk_clk.c, "", OFF), CLK_DUMMY("", avsync_lvds_clk.c, "", OFF), CLK_DUMMY("", avsync_vbyone_clk.c, "", OFF), CLK_DUMMY("", avsync_vp_clk.c, "", OFF), CLK_DUMMY("", camss_jpeg_jpeg2_clk.c, "", OFF), CLK_DUMMY("", camss_jpeg_jpeg_ahb_clk.c, "", OFF), CLK_DUMMY("", camss_jpeg_jpeg_axi_clk.c, "", OFF), CLK_DUMMY("", camss_micro_ahb_clk.c, "", OFF), CLK_DUMMY("", camss_top_ahb_clk.c, "", OFF), CLK_DUMMY("", mdss_ahb_clk.c, "", OFF), CLK_DUMMY("", mdss_axi_clk.c, "", OFF), CLK_DUMMY("", mdss_extpclk_clk.c, "", OFF), CLK_DUMMY("", mdss_hdmi_ahb_clk.c, "", OFF), CLK_DUMMY("", mdss_hdmi_clk.c, "", OFF), CLK_DUMMY("", mdss_lvds_clk.c, "", OFF), CLK_DUMMY("", mdss_mdp_clk.c, "", OFF), CLK_DUMMY("", mdss_mdp_lut_clk.c, "", OFF), CLK_DUMMY("", mdss_vbyone_clk.c, "", OFF), CLK_DUMMY("", mdss_vbyone_symbol_clk.c, "", OFF), CLK_DUMMY("", mmss_misc_ahb_clk.c, "", OFF), 
CLK_DUMMY("", mmss_mmssnoc_ahb_clk.c, "", OFF), CLK_DUMMY("", mmss_mmssnoc_axi_clk.c, "", OFF), CLK_DUMMY("", mmss_s0_axi_clk.c, "", OFF), CLK_DUMMY("core_clk", ocmemgx_core_clk.c, "fdd00000.qcom,ocmem", OFF), CLK_DUMMY("iface_clk", ocmemcx_ocmemnoc_clk.c, "fdd00000.qcom.ocmem", OFF), CLK_DUMMY("", oxili_ocmemgx_clk.c, "", OFF), CLK_DUMMY("", oxili_gfx3d_clk.c, "", OFF), CLK_DUMMY("", oxilicx_ahb_clk.c, "", OFF), CLK_DUMMY("", bcss_mmss_ifdemod_clk.c, "", OFF), CLK_DUMMY("", vcap_afe_pixel_clk.c, "", OFF), CLK_DUMMY("", vcap_ahb_clk.c, "", OFF), CLK_DUMMY("", vcap_audio_clk.c, "", OFF), CLK_DUMMY("", vcap_axi_clk.c, "", OFF), CLK_DUMMY("", vcap_cfg_clk.c, "", OFF), CLK_DUMMY("", vcap_hdmi_bus_clk.c, "", OFF), CLK_DUMMY("", vcap_hdmi_rx_clk.c, "", OFF), CLK_DUMMY("", vcap_md_clk.c, "", OFF), CLK_DUMMY("", vcap_ttl_clk.c, "", OFF), CLK_DUMMY("", vcap_ttl_debug_clk.c, "", OFF), CLK_DUMMY("", vcap_vafe_ext_clk.c, "", OFF), CLK_DUMMY("", vcap_vp_clk.c, "", OFF), CLK_DUMMY("", venus0_ahb_clk.c, "", OFF), CLK_DUMMY("", venus0_axi_clk.c, "", OFF), CLK_DUMMY("", venus0_core0_vcodec_clk.c, "", OFF), CLK_DUMMY("", venus0_core1_vcodec_clk.c, "", OFF), CLK_DUMMY("", venus0_ocmemnoc_clk.c, "", OFF), CLK_DUMMY("", venus0_vcodec0_clk.c, "", OFF), CLK_DUMMY("", vpu_ahb_clk.c, "", OFF), CLK_DUMMY("", vpu_axi_clk.c, "", OFF), CLK_DUMMY("", vpu_bus_clk.c, "", OFF), CLK_DUMMY("", vpu_cxo_clk.c, "", OFF), CLK_DUMMY("", vpu_frc_xin_clk.c, "", OFF), CLK_DUMMY("", vpu_gproc_clk.c, "", OFF), CLK_DUMMY("", vpu_hdmc_frcf_clk.c, "", OFF), CLK_DUMMY("", vpu_kproc_clk.c, "", OFF), CLK_DUMMY("", vpu_maple_clk.c, "", OFF), CLK_DUMMY("", vpu_preproc_clk.c, "", OFF), CLK_DUMMY("", vpu_sdmc_frcs_clk.c, "", OFF), CLK_DUMMY("", vpu_sdme_frcf_clk.c, "", OFF), CLK_DUMMY("", vpu_sdme_frcs_clk.c, "", OFF), CLK_DUMMY("", vpu_sdme_vproc_clk.c, "", OFF), CLK_DUMMY("", vpu_sleep_clk.c, "", OFF), CLK_DUMMY("", vpu_vdp_clk.c, "", OFF), CLK_DUMMY("", vpu_vdp_xin_clk.c, "", OFF), CLK_DUMMY("iface_clk", NULL, 
"fda64000.qcom,iommu", OFF), CLK_DUMMY("core_clk", NULL, "fda64000.qcom,iommu", OFF), CLK_DUMMY("alt_core_clk", NULL, "fda64000.qcom,iommu", OFF), CLK_DUMMY("iface_clk", NULL, "fd928000.qcom,iommu", OFF), CLK_DUMMY("core_clk", NULL, "fd928000.qcom,iommu", oFF), CLK_DUMMY("core_clk", NULL, "fdb10000.qcom,iommu", OFF), CLK_DUMMY("iface_clk", NULL, "fdb10000.qcom,iommu", OFF), CLK_DUMMY("iface_clk", NULL, "fdc84000.qcom,iommu", OFF), CLK_DUMMY("alt_core_clk", NULL, "fdc84000.qcom,iommu", OFF), CLK_DUMMY("core_clk", NULL, "fdc84000.qcom,iommu", OFF), CLK_DUMMY("iface_clk", NULL, "fdee4000.qcom,iommu", OFF), CLK_DUMMY("core_clk", NULL, "fdee4000.qcom,iommu", OFF), CLK_DUMMY("iface_clk", NULL, "fdfb6000.qcom,iommu", OFF), CLK_DUMMY("core_clk", NULL, "fdfb6000.qcom,iommu", OFF), CLK_DUMMY("alt_core_clk", NULL, "fdfb6000.qcom,iommu", OFF), /* BCSS broadcast */ CLK_DUMMY("", bcc_dem_core_b_clk_src.c, "", OFF), CLK_DUMMY("", adc_01_clk_src.c, "", OFF), CLK_DUMMY("", bcc_adc_0_in_clk.c, "", OFF), CLK_DUMMY("", bcc_dem_ahb_clk.c, "", OFF), CLK_DUMMY("", bcc_klm_ahb_clk.c, "", OFF), CLK_DUMMY("", bcc_lnb_ahb_clk.c, "", OFF), CLK_DUMMY("", bcc_tsc_ahb_clk.c, "", OFF), CLK_DUMMY("", bcc_tspp2_ahb_clk.c, "", OFF), CLK_DUMMY("", bcc_vbif_ahb_clk.c, "", OFF), CLK_DUMMY("", gcc_bcss_ahb_clk.c, "", OFF), CLK_DUMMY("", bcc_dem_atv_rxfe_clk.c, "", OFF), CLK_DUMMY("", bcc_dem_atv_rxfe_resamp_clk.c, "", OFF), CLK_DUMMY("", bcc_dem_core_clk_src.c, "", OFF), CLK_DUMMY("", bcc_dem_core_div2_clk_src.c, "", OFF), CLK_DUMMY("", bcc_dem_core_x2_b_clk_src.c, "", OFF), CLK_DUMMY("", bcc_dem_core_x2_pre_cgf_clk.c, "", OFF), CLK_DUMMY("", bcc_tsc_ci_clk.c, "", OFF), CLK_DUMMY("", bcc_tsc_cicam_ts_clk_src.c, "", OFF), CLK_DUMMY("", bcc_tsc_par_clk.c, "", OFF), CLK_DUMMY("", bcc_tsc_ser_clk_src.c, "", OFF), CLK_DUMMY("", bcc_tspp2_clk_src.c, "", OFF), CLK_DUMMY("", dig_dem_core_b_div2_clk.c, "", OFF), CLK_DUMMY("", atv_x5_pre_cgc_clk.c, "", OFF), CLK_DUMMY("", bcc_albacore_cvbs_clk.c, "", OFF), 
CLK_DUMMY("", bcc_atv_x1_clk.c, "", OFF), CLK_DUMMY("", nidaq_out_clk.c, "", OFF), CLK_DUMMY("", gcc_bcss_axi_clk.c, "", OFF), CLK_DUMMY("", bcc_lnb_core_clk.c, "", OFF), /* USB */ CLK_DUMMY("core_clk", NULL, "msm_otg", OFF), CLK_DUMMY("iface_clk", NULL, "msm_otg", OFF), CLK_DUMMY("xo", NULL, "msm_otg", OFF), }; struct clock_init_data mpq8092_clock_init_data __initdata = { .table = msm_clocks_8092, .size = ARRAY_SIZE(msm_clocks_8092), };
gpl-2.0
glenlee75/linux-at91
arch/mips/alchemy/common/usb.c
2068
15698
/* * USB block power/access management abstraction. * * Au1000+: The OHCI block control register is at the far end of the OHCI memory * area. Au1550 has OHCI on different base address. No need to handle * UDC here. * Au1200: one register to control access and clocks to O/EHCI, UDC and OTG * as well as the PHY for EHCI and UDC. * */ #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/syscore_ops.h> #include <asm/mach-au1x00/au1000.h> /* control register offsets */ #define AU1000_OHCICFG 0x7fffc #define AU1550_OHCICFG 0x07ffc #define AU1200_USBCFG 0x04 /* Au1000 USB block config bits */ #define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */ #define USBHEN_CE (1 << 3) /* OHCI block clock enable */ #define USBHEN_E (1 << 2) /* OHCI block enable */ #define USBHEN_C (1 << 1) /* OHCI block coherency bit */ #define USBHEN_BE (1 << 0) /* OHCI Big-Endian */ /* Au1200 USB config bits */ #define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */ #define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */ #define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */ #define USBCFG_SSD (1 << 23) /* serial short detect en */ #define USBCFG_PPE (1 << 19) /* HS PHY PLL */ #define USBCFG_UCE (1 << 18) /* UDC clock enable */ #define USBCFG_ECE (1 << 17) /* EHCI clock enable */ #define USBCFG_OCE (1 << 16) /* OHCI clock enable */ #define USBCFG_FLA(x) (((x) & 0x3f) << 8) #define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */ #define USBCFG_GME (1 << 6) /* OTG mem access */ #define USBCFG_DBE (1 << 5) /* UDC busmaster enable */ #define USBCFG_DME (1 << 4) /* UDC mem enable */ #define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */ #define USBCFG_EME (1 << 2) /* EHCI mem enable */ #define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */ #define USBCFG_OME (1 << 0) /* OHCI mem enable */ #define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\ USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \ 
USBCFG_GME | USBCFG_DBE | USBCFG_DME | \ USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \ USBCFG_OME) /* Au1300 USB config registers */ #define USB_DWC_CTRL1 0x00 #define USB_DWC_CTRL2 0x04 #define USB_VBUS_TIMER 0x10 #define USB_SBUS_CTRL 0x14 #define USB_MSR_ERR 0x18 #define USB_DWC_CTRL3 0x1C #define USB_DWC_CTRL4 0x20 #define USB_OTG_STATUS 0x28 #define USB_DWC_CTRL5 0x2C #define USB_DWC_CTRL6 0x30 #define USB_DWC_CTRL7 0x34 #define USB_PHY_STATUS 0xC0 #define USB_INT_STATUS 0xC4 #define USB_INT_ENABLE 0xC8 #define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */ #define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */ #define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */ #define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */ #define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */ #define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */ #define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19) #define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18) #define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17) #define USB_DWC_CTRL3_OTG0_CKEN (1 << 16) #define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */ #define USB_INTEN_FORCE 0x20 #define USB_INTEN_PHY 0x10 #define USB_INTEN_UDC 0x08 #define USB_INTEN_EHCI 0x04 #define USB_INTEN_OHCI1 0x02 #define USB_INTEN_OHCI0 0x01 static DEFINE_SPINLOCK(alchemy_usb_lock); static inline void __au1300_usb_phyctl(void __iomem *base, int enable) { unsigned long r, s; r = __raw_readl(base + USB_DWC_CTRL2); s = __raw_readl(base + USB_DWC_CTRL3); s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN | USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN; if (enable) { /* simply enable all PHYs */ r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS | USB_DWC_CTRL2_PHYRS; __raw_writel(r, base + USB_DWC_CTRL2); wmb(); } else if (!s) { /* no USB block active, do disable all PHYs */ r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS | USB_DWC_CTRL2_PHYRS); __raw_writel(r, base + USB_DWC_CTRL2); wmb(); } } static inline void __au1300_ohci_control(void __iomem *base, int enable, int 
id) { unsigned long r; if (enable) { __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */ wmb(); r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */ r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN : USB_DWC_CTRL3_OHCI1_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); /* power up the PHYs */ r = __raw_readl(base + USB_INT_ENABLE); r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1; __raw_writel(r, base + USB_INT_ENABLE); wmb(); /* reset the OHCI start clock bit */ __raw_writel(0, base + USB_DWC_CTRL7); wmb(); } else { r = __raw_readl(base + USB_INT_ENABLE); r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1); __raw_writel(r, base + USB_INT_ENABLE); wmb(); r = __raw_readl(base + USB_DWC_CTRL3); r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN : USB_DWC_CTRL3_OHCI1_CKEN); __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); } } static inline void __au1300_ehci_control(void __iomem *base, int enable) { unsigned long r; if (enable) { r = __raw_readl(base + USB_DWC_CTRL3); r |= USB_DWC_CTRL3_EHCI0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r |= USB_DWC_CTRL1_HSTRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); __au1300_usb_phyctl(base, enable); r = __raw_readl(base + USB_INT_ENABLE); r |= USB_INTEN_EHCI; __raw_writel(r, base + USB_INT_ENABLE); wmb(); } else { r = __raw_readl(base + USB_INT_ENABLE); r &= ~USB_INTEN_EHCI; __raw_writel(r, base + USB_INT_ENABLE); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r &= ~USB_DWC_CTRL1_HSTRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); r = __raw_readl(base + USB_DWC_CTRL3); r &= ~USB_DWC_CTRL3_EHCI0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); } } static inline void __au1300_udc_control(void __iomem *base, int enable) { unsigned long r; if (enable) { r = __raw_readl(base + USB_DWC_CTRL1); r |= USB_DWC_CTRL1_DCRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); 
__au1300_usb_phyctl(base, enable); r = __raw_readl(base + USB_INT_ENABLE); r |= USB_INTEN_UDC; __raw_writel(r, base + USB_INT_ENABLE); wmb(); } else { r = __raw_readl(base + USB_INT_ENABLE); r &= ~USB_INTEN_UDC; __raw_writel(r, base + USB_INT_ENABLE); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r &= ~USB_DWC_CTRL1_DCRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); __au1300_usb_phyctl(base, enable); } } static inline void __au1300_otg_control(void __iomem *base, int enable) { unsigned long r; if (enable) { r = __raw_readl(base + USB_DWC_CTRL3); r |= USB_DWC_CTRL3_OTG0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r &= ~USB_DWC_CTRL1_OTGD; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); __au1300_usb_phyctl(base, enable); } else { r = __raw_readl(base + USB_DWC_CTRL1); r |= USB_DWC_CTRL1_OTGD; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); r = __raw_readl(base + USB_DWC_CTRL3); r &= ~USB_DWC_CTRL3_OTG0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); } } static inline int au1300_usb_control(int block, int enable) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR); int ret = 0; switch (block) { case ALCHEMY_USB_OHCI0: __au1300_ohci_control(base, enable, 0); break; case ALCHEMY_USB_OHCI1: __au1300_ohci_control(base, enable, 1); break; case ALCHEMY_USB_EHCI0: __au1300_ehci_control(base, enable); break; case ALCHEMY_USB_UDC0: __au1300_udc_control(base, enable); break; case ALCHEMY_USB_OTG0: __au1300_otg_control(base, enable); break; default: ret = -ENODEV; } return ret; } static inline void au1300_usb_init(void) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR); /* set some sane defaults. Note: we don't fiddle with DWC_CTRL4 * here at all: Port 2 routing (EHCI or UDC) must be set either * by boot firmware or platform init code; I can't autodetect * a sane setting. 
*/ __raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */ wmb(); __raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */ wmb(); __raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */ wmb(); __raw_writel(~0, base + USB_INT_STATUS); /* clear int status */ wmb(); /* set coherent access bit */ __raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL); wmb(); } static inline void __au1200_ohci_control(void __iomem *base, int enable) { unsigned long r = __raw_readl(base + AU1200_USBCFG); if (enable) { __raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG); wmb(); udelay(2000); } else { __raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG); wmb(); udelay(1000); } } static inline void __au1200_ehci_control(void __iomem *base, int enable) { unsigned long r = __raw_readl(base + AU1200_USBCFG); if (enable) { __raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG); wmb(); udelay(1000); } else { if (!(r & USBCFG_UCE)) /* UDC also off? */ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */ __raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG); wmb(); udelay(1000); } } static inline void __au1200_udc_control(void __iomem *base, int enable) { unsigned long r = __raw_readl(base + AU1200_USBCFG); if (enable) { __raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG); wmb(); } else { if (!(r & USBCFG_ECE)) /* EHCI also off? 
*/ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */ __raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG); wmb(); } } static inline int au1200_coherency_bug(void) { #if defined(CONFIG_DMA_COHERENT) /* Au1200 AB USB does not support coherent memory */ if (!(read_c0_prid() & 0xff)) { printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n"); printk(KERN_INFO "Au1200 USB: update your board or re-configure" " the kernel\n"); return -ENODEV; } #endif return 0; } static inline int au1200_usb_control(int block, int enable) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); int ret = 0; switch (block) { case ALCHEMY_USB_OHCI0: ret = au1200_coherency_bug(); if (ret && enable) goto out; __au1200_ohci_control(base, enable); break; case ALCHEMY_USB_UDC0: __au1200_udc_control(base, enable); break; case ALCHEMY_USB_EHCI0: ret = au1200_coherency_bug(); if (ret && enable) goto out; __au1200_ehci_control(base, enable); break; default: ret = -ENODEV; } out: return ret; } /* initialize USB block(s) to a known working state */ static inline void au1200_usb_init(void) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); __raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG); wmb(); udelay(1000); } static inline void au1000_usb_init(unsigned long rb, int reg) { void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg); unsigned long r = __raw_readl(base); #if defined(__BIG_ENDIAN) r |= USBHEN_BE; #endif r |= USBHEN_C; __raw_writel(r, base); wmb(); udelay(1000); } static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg) { void __iomem *base = (void __iomem *)KSEG1ADDR(rb); unsigned long r = __raw_readl(base + creg); if (enable) { __raw_writel(r | USBHEN_CE, base + creg); wmb(); udelay(1000); __raw_writel(r | USBHEN_CE | USBHEN_E, base + creg); wmb(); udelay(1000); /* wait for reset complete (read reg twice: au1500 erratum) */ while (__raw_readl(base + creg), !(__raw_readl(base + creg) & USBHEN_RD)) udelay(1000); } 
else { __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg); wmb(); } } static inline int au1000_usb_control(int block, int enable, unsigned long rb, int creg) { int ret = 0; switch (block) { case ALCHEMY_USB_OHCI0: __au1xx0_ohci_control(enable, rb, creg); break; default: ret = -ENODEV; } return ret; } /* * alchemy_usb_control - control Alchemy on-chip USB blocks * @block: USB block to target * @enable: set 1 to enable a block, 0 to disable */ int alchemy_usb_control(int block, int enable) { unsigned long flags; int ret; spin_lock_irqsave(&alchemy_usb_lock, flags); switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: ret = au1000_usb_control(block, enable, AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); break; case ALCHEMY_CPU_AU1550: ret = au1000_usb_control(block, enable, AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); break; case ALCHEMY_CPU_AU1200: ret = au1200_usb_control(block, enable); break; case ALCHEMY_CPU_AU1300: ret = au1300_usb_control(block, enable); break; default: ret = -ENODEV; } spin_unlock_irqrestore(&alchemy_usb_lock, flags); return ret; } EXPORT_SYMBOL_GPL(alchemy_usb_control); static unsigned long alchemy_usb_pmdata[2]; static void au1000_usb_pm(unsigned long br, int creg, int susp) { void __iomem *base = (void __iomem *)KSEG1ADDR(br); if (susp) { alchemy_usb_pmdata[0] = __raw_readl(base + creg); /* There appears to be some undocumented reset register.... 
*/ __raw_writel(0, base + 0x04); wmb(); __raw_writel(0, base + creg); wmb(); } else { __raw_writel(alchemy_usb_pmdata[0], base + creg); wmb(); } } static void au1200_usb_pm(int susp) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR); if (susp) { /* save OTG_CAP/MUX registers which indicate port routing */ /* FIXME: write an OTG driver to do that */ alchemy_usb_pmdata[0] = __raw_readl(base + 0x00); alchemy_usb_pmdata[1] = __raw_readl(base + 0x04); } else { /* restore access to all MMIO areas */ au1200_usb_init(); /* restore OTG_CAP/MUX registers */ __raw_writel(alchemy_usb_pmdata[0], base + 0x00); __raw_writel(alchemy_usb_pmdata[1], base + 0x04); wmb(); } } static void au1300_usb_pm(int susp) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR); /* remember Port2 routing */ if (susp) { alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4); } else { au1300_usb_init(); __raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4); wmb(); } } static void alchemy_usb_pm(int susp) { switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp); break; case ALCHEMY_CPU_AU1550: au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp); break; case ALCHEMY_CPU_AU1200: au1200_usb_pm(susp); break; case ALCHEMY_CPU_AU1300: au1300_usb_pm(susp); break; } } static int alchemy_usb_suspend(void) { alchemy_usb_pm(1); return 0; } static void alchemy_usb_resume(void) { alchemy_usb_pm(0); } static struct syscore_ops alchemy_usb_pm_ops = { .suspend = alchemy_usb_suspend, .resume = alchemy_usb_resume, }; static int __init alchemy_usb_init(void) { switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); break; case ALCHEMY_CPU_AU1550: au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); break; case 
ALCHEMY_CPU_AU1200: au1200_usb_init(); break; case ALCHEMY_CPU_AU1300: au1300_usb_init(); break; } register_syscore_ops(&alchemy_usb_pm_ops); return 0; } arch_initcall(alchemy_usb_init);
gpl-2.0
Validus-Kernel/android_kernel_moto_shamu
sound/soc/tegra/tegra_alc5632.c
2068
6754
/* * tegra_alc5632.c -- Toshiba AC100(PAZ00) machine ASoC driver * * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net> * Copyright (C) 2012 - NVIDIA, Inc. * * Authors: Leon Romanovsky <leon@leon.nu> * Andrey Danin <danindrey@mail.ru> * Marc Dietrich <marvin24@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <asm/mach-types.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "../codecs/alc5632.h" #include "tegra_asoc_utils.h" #define DRV_NAME "tegra-alc5632" struct tegra_alc5632 { struct tegra_asoc_utils_data util_data; int gpio_hp_det; }; static int tegra_alc5632_asoc_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_codec *codec = codec_dai->codec; struct snd_soc_card *card = codec->card; struct tegra_alc5632 *alc5632 = snd_soc_card_get_drvdata(card); int srate, mclk; int err; srate = params_rate(params); mclk = 512 * srate; err = tegra_asoc_utils_set_rate(&alc5632->util_data, srate, mclk); if (err < 0) { dev_err(card->dev, "Can't configure clocks\n"); return err; } err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk, SND_SOC_CLOCK_IN); if (err < 0) { dev_err(card->dev, "codec_dai clock not set\n"); return err; } return 0; } static struct snd_soc_ops tegra_alc5632_asoc_ops = { .hw_params = tegra_alc5632_asoc_hw_params, }; static struct snd_soc_jack tegra_alc5632_hs_jack; static struct snd_soc_jack_pin tegra_alc5632_hs_jack_pins[] = { { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, { .pin = "Headset Stereophone", .mask = 
SND_JACK_HEADPHONE, }, }; static struct snd_soc_jack_gpio tegra_alc5632_hp_jack_gpio = { .name = "Headset detection", .report = SND_JACK_HEADSET, .debounce_time = 150, }; static const struct snd_soc_dapm_widget tegra_alc5632_dapm_widgets[] = { SND_SOC_DAPM_SPK("Int Spk", NULL), SND_SOC_DAPM_HP("Headset Stereophone", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Digital Mic", NULL), }; static const struct snd_kcontrol_new tegra_alc5632_controls[] = { SOC_DAPM_PIN_SWITCH("Int Spk"), }; static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_codec *codec = codec_dai->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(codec->card); snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET, &tegra_alc5632_hs_jack); snd_soc_jack_add_pins(&tegra_alc5632_hs_jack, ARRAY_SIZE(tegra_alc5632_hs_jack_pins), tegra_alc5632_hs_jack_pins); if (gpio_is_valid(machine->gpio_hp_det)) { tegra_alc5632_hp_jack_gpio.gpio = machine->gpio_hp_det; snd_soc_jack_add_gpios(&tegra_alc5632_hs_jack, 1, &tegra_alc5632_hp_jack_gpio); } snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1"); return 0; } static struct snd_soc_dai_link tegra_alc5632_dai = { .name = "ALC5632", .stream_name = "ALC5632 PCM", .codec_dai_name = "alc5632-hifi", .init = tegra_alc5632_asoc_init, .ops = &tegra_alc5632_asoc_ops, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, }; static struct snd_soc_card snd_soc_tegra_alc5632 = { .name = "tegra-alc5632", .owner = THIS_MODULE, .dai_link = &tegra_alc5632_dai, .num_links = 1, .controls = tegra_alc5632_controls, .num_controls = ARRAY_SIZE(tegra_alc5632_controls), .dapm_widgets = tegra_alc5632_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tegra_alc5632_dapm_widgets), .fully_routed = true, }; static int tegra_alc5632_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct 
snd_soc_card *card = &snd_soc_tegra_alc5632; struct tegra_alc5632 *alc5632; int ret; alc5632 = devm_kzalloc(&pdev->dev, sizeof(struct tegra_alc5632), GFP_KERNEL); if (!alc5632) { dev_err(&pdev->dev, "Can't allocate tegra_alc5632\n"); return -ENOMEM; } card->dev = &pdev->dev; platform_set_drvdata(pdev, card); snd_soc_card_set_drvdata(card, alc5632); alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); if (alc5632->gpio_hp_det == -EPROBE_DEFER) return -EPROBE_DEFER; ret = snd_soc_of_parse_card_name(card, "nvidia,model"); if (ret) goto err; ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing"); if (ret) goto err; tegra_alc5632_dai.codec_of_node = of_parse_phandle( pdev->dev.of_node, "nvidia,audio-codec", 0); if (!tegra_alc5632_dai.codec_of_node) { dev_err(&pdev->dev, "Property 'nvidia,audio-codec' missing or invalid\n"); ret = -EINVAL; goto err; } tegra_alc5632_dai.cpu_of_node = of_parse_phandle(np, "nvidia,i2s-controller", 0); if (!tegra_alc5632_dai.cpu_of_node) { dev_err(&pdev->dev, "Property 'nvidia,i2s-controller' missing or invalid\n"); ret = -EINVAL; goto err; } tegra_alc5632_dai.platform_of_node = tegra_alc5632_dai.cpu_of_node; ret = tegra_asoc_utils_init(&alc5632->util_data, &pdev->dev); if (ret) goto err; ret = snd_soc_register_card(card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); goto err_fini_utils; } return 0; err_fini_utils: tegra_asoc_utils_fini(&alc5632->util_data); err: return ret; } static int tegra_alc5632_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(card); snd_soc_jack_free_gpios(&tegra_alc5632_hs_jack, 1, &tegra_alc5632_hp_jack_gpio); snd_soc_unregister_card(card); tegra_asoc_utils_fini(&machine->util_data); return 0; } static const struct of_device_id tegra_alc5632_of_match[] = { { .compatible = "nvidia,tegra-audio-alc5632", }, {}, }; static struct platform_driver 
tegra_alc5632_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .pm = &snd_soc_pm_ops, .of_match_table = tegra_alc5632_of_match, }, .probe = tegra_alc5632_probe, .remove = tegra_alc5632_remove, }; module_platform_driver(tegra_alc5632_driver); MODULE_AUTHOR("Leon Romanovsky <leon@leon.nu>"); MODULE_DESCRIPTION("Tegra+ALC5632 machine ASoC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); MODULE_DEVICE_TABLE(of, tegra_alc5632_of_match);
gpl-2.0
ashishtanwer/NFTable-porting-on-Android-Goldfish
drivers/usb/serial/mct_u232.c
2068
22148
/* * MCT (Magic Control Technology Corp.) USB RS232 Converter Driver * * Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is largely derived from the Belkin USB Serial Adapter Driver * (see belkin_sa.[ch]). All of the information about the device was acquired * by using SniffUSB on Windows98. For technical details see mct_u232.h. * * William G. Greathouse and Greg Kroah-Hartman provided great help on how to * do the reverse engineering and how to write a USB serial device driver. * * TO BE DONE, TO BE CHECKED: * DTR/RTS signal handling may be incomplete or incorrect. I have mainly * implemented what I have seen with SniffUSB or found in belkin_sa.c. * For further TODOs check also belkin_sa.c. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include "mct_u232.h" #define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>" #define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver" /* * Function prototypes */ static int mct_u232_port_probe(struct usb_serial_port *port); static int mct_u232_port_remove(struct usb_serial_port *remove); static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); static void mct_u232_close(struct usb_serial_port *port); static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); static void mct_u232_read_int_callback(struct urb *urb); static void mct_u232_set_termios(struct tty_struct *tty, struct 
usb_serial_port *port, struct ktermios *old); static void mct_u232_break_ctl(struct tty_struct *tty, int break_state); static int mct_u232_tiocmget(struct tty_struct *tty); static int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void mct_u232_throttle(struct tty_struct *tty); static void mct_u232_unthrottle(struct tty_struct *tty); /* * All of the device info needed for the MCT USB-RS232 converter. */ static const struct usb_device_id id_table[] = { { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) }, { USB_DEVICE(MCT_U232_BELKIN_F5U109_VID, MCT_U232_BELKIN_F5U109_PID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver mct_u232_device = { .driver = { .owner = THIS_MODULE, .name = "mct_u232", }, .description = "MCT U232", .id_table = id_table, .num_ports = 1, .open = mct_u232_open, .close = mct_u232_close, .dtr_rts = mct_u232_dtr_rts, .throttle = mct_u232_throttle, .unthrottle = mct_u232_unthrottle, .read_int_callback = mct_u232_read_int_callback, .set_termios = mct_u232_set_termios, .break_ctl = mct_u232_break_ctl, .tiocmget = mct_u232_tiocmget, .tiocmset = mct_u232_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .port_probe = mct_u232_port_probe, .port_remove = mct_u232_port_remove, .get_icount = usb_serial_generic_get_icount, }; static struct usb_serial_driver * const serial_drivers[] = { &mct_u232_device, NULL }; struct mct_u232_private { struct urb *read_urb; spinlock_t lock; unsigned int control_state; /* Modem Line Setting (TIOCM) */ unsigned char last_lcr; /* Line Control Register */ unsigned char last_lsr; /* Line Status Register */ unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ }; #define THROTTLED 0x01 /* * Handle vendor specific USB requests */ #define WDR_TIMEOUT 5000 /* default urb timeout */ /* * Later day 
2.6.0-test kernels have new baud rates like B230400 which * we do not know how to support. We ignore them for the moment. */ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value, speed_t *result) { *result = value; if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID || le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_BELKIN_F5U109_PID) { switch (value) { case 300: return 0x01; case 600: return 0x02; /* this one not tested */ case 1200: return 0x03; case 2400: return 0x04; case 4800: return 0x06; case 9600: return 0x08; case 19200: return 0x09; case 38400: return 0x0a; case 57600: return 0x0b; case 115200: return 0x0c; default: *result = 9600; return 0x08; } } else { /* FIXME: Can we use any divider - should we do divider = 115200/value; real baud = 115200/divider */ switch (value) { case 300: break; case 600: break; case 1200: break; case 2400: break; case 4800: break; case 9600: break; case 19200: break; case 38400: break; case 57600: break; case 115200: break; default: value = 9600; *result = 9600; } return 115200/value; } } static int mct_u232_set_baud_rate(struct tty_struct *tty, struct usb_serial *serial, struct usb_serial_port *port, speed_t value) { unsigned int divisor; int rc; unsigned char *buf; unsigned char cts_enable_byte = 0; speed_t speed; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; divisor = mct_u232_calculate_baud_rate(serial, value, &speed); put_unaligned_le32(cpu_to_le32(divisor), buf); rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_BAUD_RATE_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE, WDR_TIMEOUT); if (rc < 0) /*FIXME: What value speed results */ dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n", value, rc); else tty_encode_baud_rate(tty, speed, speed); dev_dbg(&port->dev, "set_baud_rate: value: 0x%x, divisor: 0x%x\n", value, divisor); /* Mimic the MCT-supplied Windows driver 
(version 1.21P.0104), which always sends two extra USB 'device request' messages after the 'baud rate change' message. The actual functionality of the request codes in these messages is not fully understood but these particular codes are never seen in any operation besides a baud rate change. Both of these messages send a single byte of data. In the first message, the value of this byte is always zero. The second message has been determined experimentally to control whether data will be transmitted to a device which is not asserting the 'CTS' signal. If the second message's data byte is zero, data will be transmitted even if 'CTS' is not asserted (i.e. no hardware flow control). if the second message's data byte is nonzero (a value of 1 is used by this driver), data will not be transmitted to a device which is not asserting 'CTS'. */ buf[0] = 0; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_UNKNOWN1_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST, rc); if (port && C_CRTSCTS(tty)) cts_enable_byte = 1; dev_dbg(&port->dev, "set_baud_rate: send second control message, data = %02X\n", cts_enable_byte); buf[0] = cts_enable_byte; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_CTS_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_CTS_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc); kfree(buf); return rc; } /* mct_u232_set_baud_rate */ static int mct_u232_set_line_ctrl(struct usb_serial_port *port, unsigned char lcr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[0] = lcr; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), 
MCT_U232_SET_LINE_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc); dev_dbg(&port->dev, "set_line_ctrl: 0x%x\n", lcr); kfree(buf); return rc; } /* mct_u232_set_line_ctrl */ static int mct_u232_set_modem_ctrl(struct usb_serial_port *port, unsigned int control_state) { int rc; unsigned char mcr; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; mcr = MCT_U232_MCR_NONE; if (control_state & TIOCM_DTR) mcr |= MCT_U232_MCR_DTR; if (control_state & TIOCM_RTS) mcr |= MCT_U232_MCR_RTS; buf[0] = mcr; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), MCT_U232_SET_MODEM_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, WDR_TIMEOUT); kfree(buf); dev_dbg(&port->dev, "set_modem_ctrl: state=0x%x ==> mcr=0x%x\n", control_state, mcr); if (rc < 0) { dev_err(&port->dev, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); return rc; } return 0; } /* mct_u232_set_modem_ctrl */ static int mct_u232_get_modem_stat(struct usb_serial_port *port, unsigned char *msr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) { *msr = 0; return -ENOMEM; } rc = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), MCT_U232_GET_MODEM_STAT_REQUEST, MCT_U232_GET_REQUEST_TYPE, 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE, WDR_TIMEOUT); if (rc < 0) { dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc); *msr = 0; } else { *msr = buf[0]; } dev_dbg(&port->dev, "get_modem_stat: 0x%x\n", *msr); kfree(buf); return rc; } /* mct_u232_get_modem_stat */ static void mct_u232_msr_to_icount(struct async_icount *icount, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DDSR) icount->dsr++; if (msr & MCT_U232_MSR_DCTS) icount->cts++; if (msr & MCT_U232_MSR_DRI) icount->rng++; if (msr 
& MCT_U232_MSR_DCD) icount->dcd++; } /* mct_u232_msr_to_icount */ static void mct_u232_msr_to_state(struct usb_serial_port *port, unsigned int *control_state, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DSR) *control_state |= TIOCM_DSR; else *control_state &= ~TIOCM_DSR; if (msr & MCT_U232_MSR_CTS) *control_state |= TIOCM_CTS; else *control_state &= ~TIOCM_CTS; if (msr & MCT_U232_MSR_RI) *control_state |= TIOCM_RI; else *control_state &= ~TIOCM_RI; if (msr & MCT_U232_MSR_CD) *control_state |= TIOCM_CD; else *control_state &= ~TIOCM_CD; dev_dbg(&port->dev, "msr_to_state: msr=0x%x ==> state=0x%x\n", msr, *control_state); } /* mct_u232_msr_to_state */ /* * Driver's tty interface functions */ static int mct_u232_port_probe(struct usb_serial_port *port) { struct mct_u232_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Use second interrupt-in endpoint for reading. */ priv->read_urb = port->serial->port[1]->interrupt_in_urb; priv->read_urb->context = port; spin_lock_init(&priv->lock); usb_set_serial_port_data(port, priv); return 0; } static int mct_u232_port_remove(struct usb_serial_port *port) { struct mct_u232_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); return 0; } static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); int retval = 0; unsigned int control_state; unsigned long flags; unsigned char last_lcr; unsigned char last_msr; /* Compensate for a hardware bug: although the Sitecom U232-P25 * device reports a maximum output packet size of 32 bytes, * it seems to be able to accept only 16 bytes (and that's what * SniffUSB says too...) 
*/ if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID) port->bulk_out_size = 16; /* Do a defined restart: the normal serial device seems to * always turn on DTR and RTS here, so do the same. I'm not * sure if this is really necessary. But it should not harm * either. */ spin_lock_irqsave(&priv->lock, flags); if (tty && (tty->termios.c_cflag & CBAUD)) priv->control_state = TIOCM_DTR | TIOCM_RTS; else priv->control_state = 0; priv->last_lcr = (MCT_U232_DATA_BITS_8 | MCT_U232_PARITY_NONE | MCT_U232_STOP_BITS_1); control_state = priv->control_state; last_lcr = priv->last_lcr; spin_unlock_irqrestore(&priv->lock, flags); mct_u232_set_modem_ctrl(port, control_state); mct_u232_set_line_ctrl(port, last_lcr); /* Read modem status and update control state */ mct_u232_get_modem_stat(port, &last_msr); spin_lock_irqsave(&priv->lock, flags); priv->last_msr = last_msr; mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr); spin_unlock_irqrestore(&priv->lock, flags); retval = usb_submit_urb(priv->read_urb, GFP_KERNEL); if (retval) { dev_err(&port->dev, "usb_submit_urb(read) failed pipe 0x%x err %d\n", port->read_urb->pipe, retval); goto error; } retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (retval) { usb_kill_urb(priv->read_urb); dev_err(&port->dev, "usb_submit_urb(read int) failed pipe 0x%x err %d", port->interrupt_in_urb->pipe, retval); goto error; } return 0; error: return retval; } /* mct_u232_open */ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on) { unsigned int control_state; struct mct_u232_private *priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); if (on) priv->control_state |= TIOCM_DTR | TIOCM_RTS; else priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } static void mct_u232_close(struct usb_serial_port *port) { struct mct_u232_private *priv = 
usb_get_serial_port_data(port); usb_kill_urb(priv->read_urb); usb_kill_urb(port->interrupt_in_urb); usb_serial_generic_close(port); } /* mct_u232_close */ static void mct_u232_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; int retval; int status = urb->status; unsigned long flags; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); /* * Work-a-round: handle the 'usual' bulk-in pipe here */ if (urb->transfer_buffer_length > 2) { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, urb->actual_length); tty_flip_buffer_push(&port->port); } goto exit; } /* * The interrupt-in pipe signals exceptional conditions (modem line * signal changes and errors). data[0] holds MSR, data[1] holds LSR. */ spin_lock_irqsave(&priv->lock, flags); priv->last_msr = data[MCT_U232_MSR_INDEX]; /* Record Control Line states */ mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr); mct_u232_msr_to_icount(&port->icount, priv->last_msr); #if 0 /* Not yet handled. See belkin_sa.c for further information */ /* Now to report any errors */ priv->last_lsr = data[MCT_U232_LSR_INDEX]; /* * fill in the flip buffer here, but I do not know the relation * to the current/next receive buffer or characters. I need * to look in to this before committing any code. 
*/ if (priv->last_lsr & MCT_U232_LSR_ERR) { tty = tty_port_tty_get(&port->port); /* Overrun Error */ if (priv->last_lsr & MCT_U232_LSR_OE) { } /* Parity Error */ if (priv->last_lsr & MCT_U232_LSR_PE) { } /* Framing Error */ if (priv->last_lsr & MCT_U232_LSR_FE) { } /* Break Indicator */ if (priv->last_lsr & MCT_U232_LSR_BI) { } tty_kref_put(tty); } #endif wake_up_interruptible(&port->port.delta_msr_wait); spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&port->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } /* mct_u232_read_int_callback */ static void mct_u232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = &tty->termios; unsigned int cflag = termios->c_cflag; unsigned int old_cflag = old_termios->c_cflag; unsigned long flags; unsigned int control_state; unsigned char last_lcr; /* get a local copy of the current port settings */ spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); last_lcr = 0; /* * Update baud rate. * Do not attempt to cache old rates and skip settings, * disconnects screw such tricks up completely. * Premature optimization is the root of all evil. 
*/ /* reassert DTR and RTS on transition from B0 */ if ((old_cflag & CBAUD) == B0) { dev_dbg(&port->dev, "%s: baud was B0\n", __func__); control_state |= TIOCM_DTR | TIOCM_RTS; mct_u232_set_modem_ctrl(port, control_state); } mct_u232_set_baud_rate(tty, serial, port, tty_get_baud_rate(tty)); if ((cflag & CBAUD) == B0) { dev_dbg(&port->dev, "%s: baud is B0\n", __func__); /* Drop RTS and DTR */ control_state &= ~(TIOCM_DTR | TIOCM_RTS); mct_u232_set_modem_ctrl(port, control_state); } /* * Update line control register (LCR) */ /* set the parity */ if (cflag & PARENB) last_lcr |= (cflag & PARODD) ? MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN; else last_lcr |= MCT_U232_PARITY_NONE; /* set the number of data bits */ switch (cflag & CSIZE) { case CS5: last_lcr |= MCT_U232_DATA_BITS_5; break; case CS6: last_lcr |= MCT_U232_DATA_BITS_6; break; case CS7: last_lcr |= MCT_U232_DATA_BITS_7; break; case CS8: last_lcr |= MCT_U232_DATA_BITS_8; break; default: dev_err(&port->dev, "CSIZE was not CS5-CS8, using default of 8\n"); last_lcr |= MCT_U232_DATA_BITS_8; break; } termios->c_cflag &= ~CMSPAR; /* set the number of stop bits */ last_lcr |= (cflag & CSTOPB) ? 
MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1; mct_u232_set_line_ctrl(port, last_lcr); /* save off the modified port settings */ spin_lock_irqsave(&priv->lock, flags); priv->control_state = control_state; priv->last_lcr = last_lcr; spin_unlock_irqrestore(&priv->lock, flags); } /* mct_u232_set_termios */ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char lcr; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); lcr = priv->last_lcr; if (break_state) lcr |= MCT_U232_SET_BREAK; spin_unlock_irqrestore(&priv->lock, flags); mct_u232_set_line_ctrl(port, lcr); } /* mct_u232_break_ctl */ static int mct_u232_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); return control_state; } static int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; if (set & TIOCM_RTS) control_state |= TIOCM_RTS; if (set & TIOCM_DTR) control_state |= TIOCM_DTR; if (clear & TIOCM_RTS) control_state &= ~TIOCM_RTS; if (clear & TIOCM_DTR) control_state &= ~TIOCM_DTR; priv->control_state = control_state; spin_unlock_irqrestore(&priv->lock, flags); return mct_u232_set_modem_ctrl(port, control_state); } static void mct_u232_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; spin_lock_irq(&priv->lock); 
priv->rx_flags |= THROTTLED; if (C_CRTSCTS(tty)) { priv->control_state &= ~TIOCM_RTS; control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } else { spin_unlock_irq(&priv->lock); } } static void mct_u232_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; spin_lock_irq(&priv->lock); if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) { priv->rx_flags &= ~THROTTLED; priv->control_state |= TIOCM_RTS; control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } else { spin_unlock_irq(&priv->lock); } } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
klin1344/FusionUlt-AOSP_OLD
arch/x86/vdso/vma.c
2324
2753
/* * Set up the VMAs to tell the VM about the vDSO. * Copyright 2007 Andi Kleen, SUSE Labs. * Subject to the GPL, v.2 */ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/random.h> #include <linux/elf.h> #include <asm/vsyscall.h> #include <asm/vgtod.h> #include <asm/proto.h> #include <asm/vdso.h> unsigned int __read_mostly vdso_enabled = 1; extern char vdso_start[], vdso_end[]; extern unsigned short vdso_sync_cpuid; static struct page **vdso_pages; static unsigned vdso_size; static int __init init_vdso_vars(void) { int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; int i; vdso_size = npages << PAGE_SHIFT; vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); if (!vdso_pages) goto oom; for (i = 0; i < npages; i++) { struct page *p; p = alloc_page(GFP_KERNEL); if (!p) goto oom; vdso_pages[i] = p; copy_page(page_address(p), vdso_start + i*PAGE_SIZE); } return 0; oom: printk("Cannot allocate vdso\n"); vdso_enabled = 0; return -ENOMEM; } subsys_initcall(init_vdso_vars); struct linux_binprm; /* Put the vdso above the (randomized) stack with another randomized offset. This way there is no hole in the middle of address space. To save memory make sure it is still in the same PTE as the stack top. This doesn't give that many random bits */ static unsigned long vdso_addr(unsigned long start, unsigned len) { unsigned long addr, end; unsigned offset; end = (start + PMD_SIZE - 1) & PMD_MASK; if (end >= TASK_SIZE_MAX) end = TASK_SIZE_MAX; end -= len; /* This loses some more bits than a modulo, but is cheaper */ offset = get_random_int() & (PTRS_PER_PTE - 1); addr = start + (offset << PAGE_SHIFT); if (addr >= end) addr = end; return addr; } /* Setup a VMA at program startup for the vsyscall page. 
Not called for compat tasks */ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, vdso_size); addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } current->mm->context.vdso = (void *)addr; ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); if (ret) { current->mm->context.vdso = NULL; goto up_fail; } up_fail: up_write(&mm->mmap_sem); return ret; } static __init int vdso_setup(char *s) { vdso_enabled = simple_strtoul(s, NULL, 0); return 0; } __setup("vdso=", vdso_setup);
gpl-2.0
Hellybean/android_kernel_amazon_otter-common
drivers/net/wireless/ath/ath9k/eeprom_4k.c
2324
31455
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" #include "ar9002_phy.h" static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); } static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); } #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.map4k; int addr, eep_start_loc = 64; for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { ath_dbg(common, ATH_DBG_EEPROM, "Unable to read eeprom region\n"); return false; } eep_data++; } return true; } static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah) { u16 *eep_data = (u16 *)&ah->eeprom.map4k; ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K); return true; } static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { ath_dbg(common, ATH_DBG_EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) return 
__ath9k_hw_usb_4k_fill_eeprom(ah); else return __ath9k_hw_4k_fill_eeprom(ah); } #undef SIZE_EEPROM_4K static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *eep = (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; u16 *eepdata, temp, magic, magic2; u32 sum = 0, el; bool need_swap = false; int i, addr; if (!ath9k_hw_use_flash(ah)) { if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { ath_err(common, "Reading Magic # failed\n"); return false; } ath_dbg(common, ATH_DBG_EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); if (magic2 == AR5416_EEPROM_MAGIC) { need_swap = true; eepdata = (u16 *) (&ah->eeprom); for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { temp = swab16(*eepdata); *eepdata = temp; eepdata++; } } else { ath_err(common, "Invalid EEPROM Magic. Endianness mismatch.\n"); return -EINVAL; } } } ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? "True" : "False"); if (need_swap) el = swab16(ah->eeprom.map4k.baseEepHeader.length); else el = ah->eeprom.map4k.baseEepHeader.length; if (el > sizeof(struct ar5416_eeprom_4k)) el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); else el = el / sizeof(u16); eepdata = (u16 *)(&ah->eeprom); for (i = 0; i < el; i++) sum ^= *eepdata++; if (need_swap) { u32 integer; u16 word; ath_dbg(common, ATH_DBG_EEPROM, "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; word = swab16(eep->baseEepHeader.checksum); eep->baseEepHeader.checksum = word; word = swab16(eep->baseEepHeader.version); eep->baseEepHeader.version = word; word = swab16(eep->baseEepHeader.regDmn[0]); eep->baseEepHeader.regDmn[0] = word; word = swab16(eep->baseEepHeader.regDmn[1]); eep->baseEepHeader.regDmn[1] = word; word = swab16(eep->baseEepHeader.rfSilent); eep->baseEepHeader.rfSilent = word; word = swab16(eep->baseEepHeader.blueToothOptions); eep->baseEepHeader.blueToothOptions = word; word = swab16(eep->baseEepHeader.deviceCap); eep->baseEepHeader.deviceCap = word; integer = swab32(eep->modalHeader.antCtrlCommon); eep->modalHeader.antCtrlCommon = integer; for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { integer = swab32(eep->modalHeader.antCtrlChain[i]); eep->modalHeader.antCtrlChain[i] = integer; } for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { word = swab16(eep->modalHeader.spurChans[i].spurChan); eep->modalHeader.spurChans[i].spurChan = word; } } if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", sum, ah->eep_ops->get_eeprom_ver(ah)); return -EINVAL; } return 0; #undef EEPROM_4K_SIZE } static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; struct base_eep_header_4k *pBase = &eep->baseEepHeader; u16 ver_minor; ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK; switch (param) { case EEP_NFTHRESH_2: return pModal->noiseFloorThreshCh[0]; case EEP_MAC_LSW: return pBase->macAddr[0] << 8 | pBase->macAddr[1]; case EEP_MAC_MID: return pBase->macAddr[2] << 8 | pBase->macAddr[3]; case EEP_MAC_MSW: return pBase->macAddr[4] << 8 | pBase->macAddr[5]; case EEP_REG_0: return pBase->regDmn[0]; case 
EEP_REG_1: return pBase->regDmn[1]; case EEP_OP_CAP: return pBase->deviceCap; case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: return pBase->rfSilent; case EEP_OB_2: return pModal->ob_0; case EEP_DB_2: return pModal->db1_1; case EEP_MINOR_REV: return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: return pBase->rxMask; case EEP_FRAC_N_5G: return 0; case EEP_PWR_TABLE_OFFSET: return AR5416_PWR_TABLE_OFFSET_DB; case EEP_MODAL_VER: return pModal->version; case EEP_ANT_DIV_CTL1: return pModal->antdiv_ctl1; case EEP_TXGAIN_TYPE: if (ver_minor >= AR5416_EEP_MINOR_VER_19) return pBase->txGainType; else return AR5416_EEP_TXGAIN_ORIGINAL; default: return 0; } } static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, struct ath9k_channel *chan, int16_t *pTxPowerIndexOffset) { struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct cal_data_per_freq_4k *pRawDataset; u8 *pCalBChans = NULL; u16 pdGainOverlap_t2; static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; u16 numPiers, i, j; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; u32 reg32, regOffset, regChainOffset; xpdMask = pEepData->modalHeader.xpdGain; if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; } else { pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); } pCalBChans = pEepData->calFreqPier2G; numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; numXpdGain = 0; for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS) break; xpdGainValues[numXpdGain] = (u16)(AR5416_PD_GAINS_IN_MASK - i); numXpdGain++; } } REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, (numXpdGain - 1) & 0x3); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, 
AR_PHY_TPCRG1_PD_GAIN_1, xpdGainValues[0]); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, xpdGainValues[1]); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { if (AR_SREV_5416_20_OR_LATER(ah) && (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0)) { regChainOffset = (i == 1) ? 0x2000 : 0x1000; } else regChainOffset = i * 0x1000; if (pEepData->baseEepHeader.txMask & (1 << i)) { pRawDataset = pEepData->calPierData2G[i]; ath9k_hw_get_gain_boundaries_pdadcs(ah, chan, pRawDataset, pCalBChans, numPiers, pdGainOverlap_t2, gainBoundaries, pdadcValues, numXpdGain); ENABLE_REGWRITE_BUFFER(ah); if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, SM(pdGainOverlap_t2, AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | SM(gainBoundaries[0], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) | SM(gainBoundaries[1], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) | SM(gainBoundaries[2], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) | SM(gainBoundaries[3], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); } regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; for (j = 0; j < 32; j++) { reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) | ((pdadcValues[4 * j + 1] & 0xFF) << 8) | ((pdadcValues[4 * j + 2] & 0xFF) << 16)| ((pdadcValues[4 * j + 3] & 0xFF) << 24); REG_WRITE(ah, regOffset, reg32); ath_dbg(common, ATH_DBG_EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); ath_dbg(common, ATH_DBG_EEPROM, "PDADC: Chain %d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d |\n", i, 4 * j, pdadcValues[4 * j], 4 * j + 1, pdadcValues[4 * j + 1], 4 * j + 2, pdadcValues[4 * j + 2], 4 * j + 3, pdadcValues[4 * j + 3]); regOffset += 4; } REGWRITE_BUFFER_FLUSH(ah); } } *pTxPowerIndexOffset = 0; } static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, u16 AntennaReduction, u16 twiceMaxRegulatoryPower, u16 
powerLimit) { #define CMP_TEST_GRP \ (((cfgCtl & ~CTL_MODE_M)| (pCtlMode[ctlMode] & CTL_MODE_M)) == \ pEepData->ctlIndex[i]) \ || (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); int i; int16_t twiceLargestAntenna; u16 twiceMinEdgePower; u16 twiceMaxEdgePower = MAX_RATE_POWER; u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; u16 numCtlModes; const u16 *pCtlMode; u16 ctlMode, freq; struct chan_centers centers; struct cal_ctl_data_4k *rep; struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; static const u16 tpScaleReductionTable[5] = { 0, 3, 6, 9, MAX_RATE_POWER }; struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { 0, { 0, 0, 0, 0} }; struct cal_target_power_leg targetPowerOfdmExt = { 0, { 0, 0, 0, 0} }, targetPowerCckExt = { 0, { 0, 0, 0, 0 } }; struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 0, {0, 0, 0, 0} }; static const u16 ctlModesFor11g[] = { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40 }; ath9k_hw_get_channel_centers(ah, chan, &centers); twiceLargestAntenna = pEepData->modalHeader.antennaGainCh[0]; twiceLargestAntenna = (int16_t)min(AntennaReduction - twiceLargestAntenna, 0); maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) { maxRegAllowedPower -= (tpScaleReductionTable[(regulatory->tp_scale)] * 2); } scaledPower = min(powerLimit, maxRegAllowedPower); scaledPower = max((u16)0, scaledPower); numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; pCtlMode = ctlModesFor11g; ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCck, 4, false); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdm, 4, false); ath9k_hw_get_target_powers(ah, chan, 
pEepData->calTargetPower2GHT20, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerHt20, 8, false); if (IS_CHAN_HT40(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT40, AR5416_NUM_2G_40_TARGET_POWERS, &targetPowerHt40, 8, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCckExt, 4, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdmExt, 4, true); } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || (pCtlMode[ctlMode] == CTL_2GHT40); if (isHt40CtlMode) freq = centers.synth_center; else if (pCtlMode[ctlMode] & EXT_ADDITIVE) freq = centers.ext_center; else freq = centers.ctl_center; if (ah->eep_ops->get_eeprom_ver(ah) == 14 && ah->eep_ops->get_eeprom_rev(ah) <= 2) twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { if (CMP_TEST_GRP) { rep = &(pEepData->ctlData[i]); twiceMinEdgePower = ath9k_hw_get_max_edge_power( freq, rep->ctlEdges[ ar5416_get_ntxchains(ah->txchainmask) - 1], IS_CHAN_2GHZ(chan), AR5416_EEP4K_NUM_BAND_EDGES); if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { twiceMaxEdgePower = min(twiceMaxEdgePower, twiceMinEdgePower); } else { twiceMaxEdgePower = twiceMinEdgePower; break; } } } minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); switch (pCtlMode[ctlMode]) { case CTL_11B: for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { targetPowerCck.tPow2x[i] = min((u16)targetPowerCck.tPow2x[i], minCtlPower); } break; case CTL_11G: for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { targetPowerOfdm.tPow2x[i] = min((u16)targetPowerOfdm.tPow2x[i], minCtlPower); } break; case CTL_2GHT20: for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { targetPowerHt20.tPow2x[i] = min((u16)targetPowerHt20.tPow2x[i], minCtlPower); } break; 
case CTL_11B_EXT: targetPowerCckExt.tPow2x[0] = min((u16)targetPowerCckExt.tPow2x[0], minCtlPower); break; case CTL_11G_EXT: targetPowerOfdmExt.tPow2x[0] = min((u16)targetPowerOfdmExt.tPow2x[0], minCtlPower); break; case CTL_2GHT40: for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { targetPowerHt40.tPow2x[i] = min((u16)targetPowerHt40.tPow2x[i], minCtlPower); } break; default: break; } } ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = ratesArray[rate18mb] = ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0]; ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; ratesArray[rate1l] = targetPowerCck.tPow2x[0]; ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1]; ratesArray[rate5_5s] = ratesArray[rate5_5l] = targetPowerCck.tPow2x[2]; ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3]; if (IS_CHAN_HT40(chan)) { for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { ratesArray[rateHt40_0 + i] = targetPowerHt40.tPow2x[i]; } ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; } #undef CMP_TEST_GRP } static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 twiceMaxRegulatoryPower, u8 powerLimit, bool test) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &pEepData->modalHeader; int16_t ratesArray[Ar5416RateSize]; int16_t txPowerIndexOffset = 0; u8 ht40PowerIncForPdadc = 2; int i; memset(ratesArray, 0, sizeof(ratesArray)); if 
((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; } ath9k_hw_set_4k_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, twiceAntennaReduction, twiceMaxRegulatoryPower, powerLimit); ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset); regulatory->max_power_level = 0; for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); if (ratesArray[i] > MAX_RATE_POWER) ratesArray[i] = MAX_RATE_POWER; if (ratesArray[i] > regulatory->max_power_level) regulatory->max_power_level = ratesArray[i]; } if (test) return; /* Update regulatory */ i = rate6mb; if (IS_CHAN_HT40(chan)) i = rateHt40_0; else if (IS_CHAN_HT20(chan)) i = rateHt20_0; regulatory->max_power_level = ratesArray[i]; if (AR_SREV_9280_20_OR_LATER(ah)) { for (i = 0; i < Ar5416RateSize; i++) ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; } ENABLE_REGWRITE_BUFFER(ah); /* OFDM power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, ATH9K_POW_SM(ratesArray[rate18mb], 24) | ATH9K_POW_SM(ratesArray[rate12mb], 16) | ATH9K_POW_SM(ratesArray[rate9mb], 8) | ATH9K_POW_SM(ratesArray[rate6mb], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, ATH9K_POW_SM(ratesArray[rate54mb], 24) | ATH9K_POW_SM(ratesArray[rate48mb], 16) | ATH9K_POW_SM(ratesArray[rate36mb], 8) | ATH9K_POW_SM(ratesArray[rate24mb], 0)); /* CCK power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, ATH9K_POW_SM(ratesArray[rate2s], 24) | ATH9K_POW_SM(ratesArray[rate2l], 16) | ATH9K_POW_SM(ratesArray[rateXr], 8) | ATH9K_POW_SM(ratesArray[rate1l], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, ATH9K_POW_SM(ratesArray[rate11s], 24) | ATH9K_POW_SM(ratesArray[rate11l], 16) | ATH9K_POW_SM(ratesArray[rate5_5s], 8) | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); /* HT20 power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, ATH9K_POW_SM(ratesArray[rateHt20_3], 24) | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) | 
ATH9K_POW_SM(ratesArray[rateHt20_1], 8) | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, ATH9K_POW_SM(ratesArray[rateHt20_7], 24) | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); /* HT40 power per rate */ if (IS_CHAN_HT40(chan)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, ATH9K_POW_SM(ratesArray[rateHt40_3] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_2] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_1] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_0] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, ATH9K_POW_SM(ratesArray[rateHt40_7] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_6] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_5] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_4] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) | ATH9K_POW_SM(ratesArray[rateExtCck], 16) | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); } REGWRITE_BUFFER_FLUSH(ah); } static void ath9k_hw_4k_set_addac(struct ath_hw *ah, struct ath9k_channel *chan) { struct modal_eep_4k_header *pModal; struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; u8 biaslevel; if (ah->hw_version.macVersion != AR_SREV_VERSION_9160) return; if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7) return; pModal = &eep->modalHeader; if (pModal->xpaBiasLvl != 0xff) { biaslevel = pModal->xpaBiasLvl; INI_RA(&ah->iniAddac, 7, 1) = (INI_RA(&ah->iniAddac, 7, 1) & (~0x18)) | biaslevel << 3; } } static void ath9k_hw_4k_set_gain(struct ath_hw *ah, struct modal_eep_4k_header *pModal, struct ar5416_eeprom_4k *eep, u8 txRxAttenLocal) { REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, pModal->antCtrlChain[0]); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & 
~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); /* Set the block 1 value to block 0 value */ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); } REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); } /* * Read EEPROM header info and program the device for correct operation * given the channel value. 
*/ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { struct modal_eep_4k_header *pModal; struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; u8 txRxAttenLocal; u8 ob[5], db1[5], db2[5]; u8 ant_div_control1, ant_div_control2; u32 regVal; pModal = &eep->modalHeader; txRxAttenLocal = 23; REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); /* Single chain for 4K EEPROM*/ ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal); /* Initialize Ant Diversity settings from EEPROM */ if (pModal->version >= 3) { ant_div_control1 = pModal->antdiv_ctl1; ant_div_control2 = pModal->antdiv_ctl2; regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regVal &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL)); regVal |= SM(ant_div_control1, AR_PHY_9285_ANT_DIV_CTL); regVal |= SM(ant_div_control2, AR_PHY_9285_ANT_DIV_ALT_LNACONF); regVal |= SM((ant_div_control2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF); regVal |= SM((ant_div_control1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB); regVal |= SM((ant_div_control1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB); REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal); regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regVal = REG_READ(ah, AR_PHY_CCK_DETECT); regVal &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); regVal |= SM((ant_div_control1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal); regVal = REG_READ(ah, AR_PHY_CCK_DETECT); } if (pModal->version >= 2) { ob[0] = pModal->ob_0; ob[1] = pModal->ob_1; ob[2] = pModal->ob_2; ob[3] = pModal->ob_3; ob[4] = pModal->ob_4; db1[0] = pModal->db1_0; db1[1] = pModal->db1_1; db1[2] = pModal->db1_2; db1[3] = pModal->db1_3; db1[4] = pModal->db1_4; db2[0] = pModal->db2_0; db2[1] = pModal->db2_1; db2[2] = pModal->db2_2; db2[3] = pModal->db2_3; db2[4] = pModal->db2_4; } else if (pModal->version == 1) { ob[0] = pModal->ob_0; ob[1] = ob[2] = ob[3] = ob[4] = pModal->ob_1; db1[0] = pModal->db1_0; db1[1] = 
db1[2] = db1[3] = db1[4] = pModal->db1_1; db2[0] = pModal->db2_0; db2[1] = db2[2] = db2[3] = db2[4] = pModal->db2_1; } else { int i; for (i = 0; i < 5; i++) { ob[i] = pModal->ob_0; db1[i] = pModal->db1_0; db2[i] = pModal->db1_0; } } if (AR_SREV_9271(ah)) { ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_OB_cck, AR9271_AN_RF2G3_OB_cck_S, ob[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_OB_psk, AR9271_AN_RF2G3_OB_psk_S, ob[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_OB_qam, AR9271_AN_RF2G3_OB_qam_S, ob[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_DB_1, AR9271_AN_RF2G3_DB_1_S, db1[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9271_AN_RF2G4_DB_2, AR9271_AN_RF2G4_DB_2_S, db2[0]); } else { ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_0, AR9285_AN_RF2G3_OB_0_S, ob[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_1, AR9285_AN_RF2G3_OB_1_S, ob[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_2, AR9285_AN_RF2G3_OB_2_S, ob[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_3, AR9285_AN_RF2G3_OB_3_S, ob[3]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_4, AR9285_AN_RF2G3_OB_4_S, ob[4]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_DB1_0, AR9285_AN_RF2G3_DB1_0_S, db1[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_DB1_1, AR9285_AN_RF2G3_DB1_1_S, db1[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_DB1_2, AR9285_AN_RF2G3_DB1_2_S, db1[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB1_3, AR9285_AN_RF2G4_DB1_3_S, db1[3]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB1_4, AR9285_AN_RF2G4_DB1_4_S, db1[4]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_0, AR9285_AN_RF2G4_DB2_0_S, db2[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_1, 
AR9285_AN_RF2G4_DB2_1_S, db2[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_2, AR9285_AN_RF2G4_DB2_2_S, db2[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_3, AR9285_AN_RF2G4_DB2_3_S, db2[3]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_4, AR9285_AN_RF2G4_DB2_4_S, db2[4]); } REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, pModal->switchSettling); REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, pModal->adcDesiredSize); REG_WRITE(ah, AR_PHY_RF_CTL4, SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn); if (AR_SREV_9271_10(ah)) REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn); REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, pModal->thresh62); REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn); } if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) { if (IS_CHAN_HT40(chan)) REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, pModal->swSettleHt40); } if (AR_SREV_9271(ah) || AR_SREV_9285(ah)) { u8 bb_desired_scale = (pModal->bb_scale_smrt_antenna & EEP_4K_BB_DESIRED_SCALE_MASK); if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) { u32 pwrctrl, mask, clr; mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25); pwrctrl = mask * bb_desired_scale; clr = mask * 0x1f; REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr); REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, 
clr); REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr); mask = BIT(0)|BIT(5)|BIT(15); pwrctrl = mask * bb_desired_scale; clr = mask * 0x1f; REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr); mask = BIT(0)|BIT(5); pwrctrl = mask * bb_desired_scale; clr = mask * 0x1f; REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr); REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr); } } } static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { #define EEP_MAP4K_SPURCHAN \ (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan) struct ath_common *common = ath9k_hw_common(ah); u16 spur_val = AR_NO_SPUR; ath_dbg(common, ATH_DBG_ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { case SPUR_DISABLE: break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; ath_dbg(common, ATH_DBG_ANI, "Getting spur val from new loc. %d\n", spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_MAP4K_SPURCHAN; break; } return spur_val; #undef EEP_MAP4K_SPURCHAN } const struct eeprom_ops eep_4k_ops = { .check_eeprom = ath9k_hw_4k_check_eeprom, .get_eeprom = ath9k_hw_4k_get_eeprom, .fill_eeprom = ath9k_hw_4k_fill_eeprom, .get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver, .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev, .set_board_values = ath9k_hw_4k_set_board_values, .set_addac = ath9k_hw_4k_set_addac, .set_txpower = ath9k_hw_4k_set_txpower, .get_spur_channel = ath9k_hw_4k_get_spur_channel };
gpl-2.0
janimo/android_kernel_huawei_u9200
drivers/net/wireless/bcm4329/linux_osl.c
2836
11942
/* * Linux OS Independent Layer * * Copyright (C) 1999-2010, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: linux_osl.c,v 1.125.12.3.8.7 2010/05/04 21:10:04 Exp $ */ #define LINUX_OSL #include <typedefs.h> #include <bcmendian.h> #include <linuxver.h> #include <bcmdefs.h> #include <osl.h> #include <bcmutils.h> #include <linux/delay.h> #include <pcicfg.h> #include <linux/mutex.h> #define PCI_CFG_RETRY 10 #define OS_HANDLE_MAGIC 0x1234abcd #define BCM_MEM_FILENAME_LEN 24 #ifdef DHD_USE_STATIC_BUF #define MAX_STATIC_BUF_NUM 16 #define STATIC_BUF_SIZE (PAGE_SIZE*2) #define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE) typedef struct bcm_static_buf { struct mutex static_sem; unsigned char *buf_ptr; unsigned char buf_use[MAX_STATIC_BUF_NUM]; } bcm_static_buf_t; static bcm_static_buf_t *bcm_static_buf = 0; #define MAX_STATIC_PKT_NUM 8 typedef struct bcm_static_pkt { struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM]; struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM]; struct mutex osl_pkt_sem; unsigned char pkt_use[MAX_STATIC_PKT_NUM*2]; } bcm_static_pkt_t; static bcm_static_pkt_t *bcm_static_skb = 0; #endif typedef struct bcm_mem_link { struct bcm_mem_link *prev; struct bcm_mem_link *next; uint size; int line; char file[BCM_MEM_FILENAME_LEN]; } bcm_mem_link_t; struct osl_info { osl_pubinfo_t pub; uint magic; void *pdev; uint malloced; uint failed; uint bustype; bcm_mem_link_t *dbgmem_list; }; static int16 linuxbcmerrormap[] = { 0, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -E2BIG, -E2BIG, -EBUSY, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EFAULT, -ENOMEM, -EOPNOTSUPP, -EMSGSIZE, -EINVAL, -EPERM, -ENOMEM, -EINVAL, -ERANGE, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EINVAL, -EIO, -ENODEV, -EINVAL, -EIO, -EIO, -EINVAL, -EINVAL, #if BCME_LAST != -41 #error "You need to add a OS error translation in the linuxbcmerrormap \ for new error code defined in bcmutils.h" #endif }; int osl_error(int bcmerror) { if (bcmerror > 0) bcmerror = 0; else if (bcmerror < BCME_LAST) bcmerror = BCME_ERROR; return 
linuxbcmerrormap[-bcmerror]; } void * dhd_os_prealloc(int section, unsigned long size); osl_t * osl_attach(void *pdev, uint bustype, bool pkttag) { osl_t *osh; gfp_t flags; flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; osh = kmalloc(sizeof(osl_t), flags); ASSERT(osh); bzero(osh, sizeof(osl_t)); ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); osh->magic = OS_HANDLE_MAGIC; osh->malloced = 0; osh->failed = 0; osh->dbgmem_list = NULL; osh->pdev = pdev; osh->pub.pkttag = pkttag; osh->bustype = bustype; switch (bustype) { case PCI_BUS: case SI_BUS: case PCMCIA_BUS: osh->pub.mmbus = TRUE; break; case JTAG_BUS: case SDIO_BUS: case USB_BUS: case SPI_BUS: osh->pub.mmbus = FALSE; break; default: ASSERT(FALSE); break; } #ifdef DHD_USE_STATIC_BUF if (!bcm_static_buf) { if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE+ STATIC_BUF_TOTAL_LEN))) { printk("can not alloc static buf!\n"); } else { /* printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf); */ } mutex_init(&bcm_static_buf->static_sem); bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; } if (!bcm_static_skb) { int i; void *skb_buff_ptr = 0; bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); skb_buff_ptr = dhd_os_prealloc(4, 0); bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16); for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++) bcm_static_skb->pkt_use[i] = 0; mutex_init(&bcm_static_skb->osl_pkt_sem); } #endif return osh; } void osl_detach(osl_t *osh) { if (osh == NULL) return; #ifdef DHD_USE_STATIC_BUF if (bcm_static_buf) { bcm_static_buf = 0; } if (bcm_static_skb) { bcm_static_skb = 0; } #endif ASSERT(osh->magic == OS_HANDLE_MAGIC); kfree(osh); } void* osl_pktget(osl_t *osh, uint len) { struct sk_buff *skb; gfp_t flags; flags = (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL; if ((skb = __dev_alloc_skb(len, flags))) { skb_put(skb, len); skb->priority = 0; osh->pub.pktalloced++; } return ((void*) skb); } void osl_pktfree(osl_t *osh, void *p, bool send) { struct sk_buff *skb, *nskb; skb = (struct sk_buff*) p; if (send && osh->pub.tx_fn) osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); while (skb) { nskb = skb->next; skb->next = NULL; if (skb->destructor) { dev_kfree_skb_any(skb); } else { dev_kfree_skb(skb); } osh->pub.pktalloced--; skb = nskb; } } #ifdef DHD_USE_STATIC_BUF void* osl_pktget_static(osl_t *osh, uint len) { int i = 0; struct sk_buff *skb; if (len > (PAGE_SIZE*2)) { printk("Do we really need this big skb??\n"); return osl_pktget(osh, len); } mutex_lock(&bcm_static_skb->osl_pkt_sem); if (len <= PAGE_SIZE) { for (i = 0; i < MAX_STATIC_PKT_NUM; i++) { if (bcm_static_skb->pkt_use[i] == 0) break; } if (i != MAX_STATIC_PKT_NUM) { bcm_static_skb->pkt_use[i] = 1; mutex_unlock(&bcm_static_skb->osl_pkt_sem); skb = bcm_static_skb->skb_4k[i]; skb->tail = skb->data + len; skb->len = len; return skb; } } for (i = 0; i < MAX_STATIC_PKT_NUM; i++) { if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0) break; } if (i != MAX_STATIC_PKT_NUM) { bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1; mutex_unlock(&bcm_static_skb->osl_pkt_sem); skb = bcm_static_skb->skb_8k[i]; skb->tail = skb->data + len; skb->len = len; return skb; } mutex_unlock(&bcm_static_skb->osl_pkt_sem); printk("all static pkt in use!\n"); return osl_pktget(osh, len); } void osl_pktfree_static(osl_t *osh, void *p, bool send) { int i; for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++) { if (p == bcm_static_skb->skb_4k[i]) { mutex_lock(&bcm_static_skb->osl_pkt_sem); bcm_static_skb->pkt_use[i] = 0; mutex_unlock(&bcm_static_skb->osl_pkt_sem); return; } } return osl_pktfree(osh, p, send); } #endif uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size) { uint val = 0; uint retry = PCI_CFG_RETRY; ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ASSERT(size == 4); do { 
pci_read_config_dword(osh->pdev, offset, &val); if (val != 0xffffffff) break; } while (retry--); return (val); } void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) { uint retry = PCI_CFG_RETRY; ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ASSERT(size == 4); do { pci_write_config_dword(osh->pdev, offset, val); if (offset != PCI_BAR0_WIN) break; if (osl_pci_read_config(osh, offset, size) == val) break; } while (retry--); } uint osl_pci_bus(osl_t *osh) { ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); return ((struct pci_dev *)osh->pdev)->bus->number; } uint osl_pci_slot(osl_t *osh) { ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); } static void osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) { } void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) { osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); } void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) { osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); } void* osl_malloc(osl_t *osh, uint size) { void *addr; gfp_t flags; if (osh) ASSERT(osh->magic == OS_HANDLE_MAGIC); #ifdef DHD_USE_STATIC_BUF if (bcm_static_buf) { int i = 0; if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) { mutex_lock(&bcm_static_buf->static_sem); for (i = 0; i < MAX_STATIC_BUF_NUM; i++) { if (bcm_static_buf->buf_use[i] == 0) break; } if (i == MAX_STATIC_BUF_NUM) { mutex_unlock(&bcm_static_buf->static_sem); printk("all static buff in use!\n"); goto original; } bcm_static_buf->buf_use[i] = 1; mutex_unlock(&bcm_static_buf->static_sem); bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); if (osh) osh->malloced += size; return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i)); } } original: #endif flags = (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL; if ((addr = kmalloc(size, flags)) == NULL) { if (osh) osh->failed++; return (NULL); } if (osh) osh->malloced += size; return (addr); } void osl_mfree(osl_t *osh, void *addr, uint size) { #ifdef DHD_USE_STATIC_BUF if (bcm_static_buf) { if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) { int buf_idx = 0; buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; mutex_lock(&bcm_static_buf->static_sem); bcm_static_buf->buf_use[buf_idx] = 0; mutex_unlock(&bcm_static_buf->static_sem); if (osh) { ASSERT(osh->magic == OS_HANDLE_MAGIC); osh->malloced -= size; } return; } } #endif if (osh) { ASSERT(osh->magic == OS_HANDLE_MAGIC); osh->malloced -= size; } kfree(addr); } uint osl_malloced(osl_t *osh) { ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); return (osh->malloced); } uint osl_malloc_failed(osl_t *osh) { ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); return (osh->failed); } void* osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap) { ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap)); } void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa) { ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); } uint osl_dma_map(osl_t *osh, void *va, uint size, int direction) { int dir; ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; return (pci_map_single(osh->pdev, va, size, dir)); } void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction) { int dir; ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); dir = (direction == DMA_TX)? 
PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; pci_unmap_single(osh->pdev, (uint32)pa, size, dir); } void osl_delay(uint usec) { uint d; while (usec > 0) { d = MIN(usec, 1000); udelay(d); usec -= d; } } void * osl_pktdup(osl_t *osh, void *skb) { void * p; gfp_t flags; flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; if ((p = skb_clone((struct sk_buff*)skb, flags)) == NULL) return NULL; if (osh->pub.pkttag) bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ); osh->pub.pktalloced++; return (p); }
gpl-2.0
scarsxxx/kernel
drivers/net/wireless/iwlwifi/iwl-io.c
2836
7979
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include "iwl-io.h" #define IWL_POLL_INTERVAL 10 /* microseconds */ static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) { iwl_write32(priv, reg, iwl_read32(priv, reg) | mask); } static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) { iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask); } void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); __iwl_set_bit(priv, reg, mask); spin_unlock_irqrestore(&priv->reg_lock, flags); } void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); __iwl_clear_bit(priv, reg, mask); spin_unlock_irqrestore(&priv->reg_lock, flags); } int iwl_poll_bit(struct iwl_priv *priv, u32 addr, u32 bits, u32 mask, int timeout) { int t = 0; do { if ((iwl_read32(priv, addr) & mask) == (bits & mask)) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } int iwl_grab_nic_access_silent(struct iwl_priv *priv) { int ret; lockdep_assert_held(&priv->reg_lock); /* this bit wakes up the NIC */ __iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; * 3945 and 4965 have volatile SRAM, and must save/restore contents * to/from host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), * to keep device from sleeping. 
* * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call * is just for hardware register access; but GP1 MAC_SLEEP check is a * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. */ ret = iwl_poll_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (ret < 0) { iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); return -EIO; } return 0; } int iwl_grab_nic_access(struct iwl_priv *priv) { int ret = iwl_grab_nic_access_silent(priv); if (ret) { u32 val = iwl_read32(priv, CSR_GP_CNTRL); IWL_ERR(priv, "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); } return ret; } void iwl_release_nic_access(struct iwl_priv *priv) { lockdep_assert_held(&priv->reg_lock); __iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg) { u32 value; unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); iwl_grab_nic_access(priv); value = iwl_read32(priv, reg); iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, flags); return value; } void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); if (!iwl_grab_nic_access(priv)) { iwl_write32(priv, reg, value); iwl_release_nic_access(priv); } spin_unlock_irqrestore(&priv->reg_lock, flags); } int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask, int timeout) { int t = 0; do { if ((iwl_read_direct32(priv, addr) & mask) == mask) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg) { iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 
<< 24)); rmb(); return iwl_read32(priv, HBUS_TARG_PRPH_RDAT); } static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val) { iwl_write32(priv, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24))); wmb(); iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val); } u32 iwl_read_prph(struct iwl_priv *priv, u32 reg) { unsigned long flags; u32 val; spin_lock_irqsave(&priv->reg_lock, flags); iwl_grab_nic_access(priv); val = __iwl_read_prph(priv, reg); iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, flags); return val; } void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); if (!iwl_grab_nic_access(priv)) { __iwl_write_prph(priv, addr, val); iwl_release_nic_access(priv); } spin_unlock_irqrestore(&priv->reg_lock, flags); } void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); iwl_grab_nic_access(priv); __iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask); iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, flags); } void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, u32 bits, u32 mask) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); iwl_grab_nic_access(priv); __iwl_write_prph(priv, reg, (__iwl_read_prph(priv, reg) & mask) | bits); iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, flags); } void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) { unsigned long flags; u32 val; spin_lock_irqsave(&priv->reg_lock, flags); iwl_grab_nic_access(priv); val = __iwl_read_prph(priv, reg); __iwl_write_prph(priv, reg, (val & ~mask)); iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, flags); } void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr, void *buf, int words) { unsigned long flags; int offs; u32 *vals = buf; spin_lock_irqsave(&priv->reg_lock, flags); iwl_grab_nic_access(priv); 
iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr); rmb(); for (offs = 0; offs < words; offs++) vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT); iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, flags); } u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr) { u32 value; _iwl_read_targ_mem_words(priv, addr, &value, 1); return value; } void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) { unsigned long flags; spin_lock_irqsave(&priv->reg_lock, flags); if (!iwl_grab_nic_access(priv)) { iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr); wmb(); iwl_write32(priv, HBUS_TARG_MEM_WDAT, val); iwl_release_nic_access(priv); } spin_unlock_irqrestore(&priv->reg_lock, flags); }
gpl-2.0
ryrzy/shooter_u_ics_kernel_3.0.xx
sound/soc/txx9/txx9aclc-generic.c
3092
2228
/* * Generic TXx9 ACLC machine driver * * Copyright (C) 2009 Atsushi Nemoto * * Based on RBTX49xx patch from CELF patch archive. * (C) Copyright TOSHIBA CORPORATION 2004-2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is a very generic AC97 sound machine driver for boards which * have (AC97) audio at ACLC (e.g. RBTX49XX boards). */ #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include "txx9aclc.h" static struct snd_soc_dai_link txx9aclc_generic_dai = { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "txx9aclc-ac97", .codec_dai_name = "ac97-hifi", .platform_name = "txx9aclc-pcm-audio", .codec_name = "ac97-codec", }; static struct snd_soc_card txx9aclc_generic_card = { .name = "Generic TXx9 ACLC Audio", .dai_link = &txx9aclc_generic_dai, .num_links = 1, }; static struct platform_device *soc_pdev; static int __init txx9aclc_generic_probe(struct platform_device *pdev) { int ret; soc_pdev = platform_device_alloc("soc-audio", -1); if (!soc_pdev) return -ENOMEM; platform_set_drvdata(soc_pdev, &txx9aclc_generic_card); ret = platform_device_add(soc_pdev); if (ret) { platform_device_put(soc_pdev); return ret; } return 0; } static int __exit txx9aclc_generic_remove(struct platform_device *pdev) { platform_device_unregister(soc_pdev); return 0; } static struct platform_driver txx9aclc_generic_driver = { .remove = txx9aclc_generic_remove, .driver = { .name = "txx9aclc-generic", .owner = THIS_MODULE, }, }; static int __init txx9aclc_generic_init(void) { return platform_driver_probe(&txx9aclc_generic_driver, txx9aclc_generic_probe); } static void __exit txx9aclc_generic_exit(void) { platform_driver_unregister(&txx9aclc_generic_driver); } module_init(txx9aclc_generic_init); module_exit(txx9aclc_generic_exit); MODULE_AUTHOR("Atsushi 
Nemoto <anemo@mba.ocn.ne.jp>"); MODULE_DESCRIPTION("Generic TXx9 ACLC ALSA SoC audio driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:txx9aclc-generic");
gpl-2.0
S34Qu4K3/P6-U06-JellyBean-Kernel-3.0.8---China-Version
drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
3092
3585
/* * DigitalNow TinyTwin remote controller keytable * * Copyright (C) 2010 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <media/rc-map.h> static struct rc_map_table digitalnow_tinytwin[] = { { 0x0000, KEY_MUTE }, /* [symbol speaker] */ { 0x0001, KEY_VOLUMEUP }, { 0x0002, KEY_POWER2 }, /* TV [power button] */ { 0x0003, KEY_2 }, { 0x0004, KEY_3 }, { 0x0005, KEY_4 }, { 0x0006, KEY_6 }, { 0x0007, KEY_7 }, { 0x0008, KEY_8 }, { 0x0009, KEY_NUMERIC_STAR }, /* [*] */ { 0x000a, KEY_0 }, { 0x000b, KEY_NUMERIC_POUND }, /* [#] */ { 0x000c, KEY_RIGHT }, /* [right arrow] */ { 0x000d, KEY_HOMEPAGE }, /* [symbol home] Start */ { 0x000e, KEY_RED }, /* [red] Videos */ { 0x0010, KEY_POWER }, /* PC [power button] */ { 0x0011, KEY_YELLOW }, /* [yellow] Pictures */ { 0x0012, KEY_DOWN }, /* [down arrow] */ { 0x0013, KEY_GREEN }, /* [green] Music */ { 0x0014, KEY_CYCLEWINDOWS }, /* BACK */ { 0x0015, KEY_FAVORITES }, /* MORE */ { 0x0016, KEY_UP }, /* [up arrow] */ { 0x0017, KEY_LEFT }, /* [left arrow] */ { 0x0018, KEY_OK }, /* OK */ { 0x0019, KEY_BLUE }, /* [blue] MyTV */ { 0x001a, KEY_REWIND }, /* REW [<<] */ { 0x001b, KEY_PLAY }, /* PLAY */ { 0x001c, KEY_5 }, { 0x001d, KEY_9 }, { 0x001e, KEY_VOLUMEDOWN }, { 0x001f, KEY_1 }, { 0x0040, KEY_STOP }, /* STOP */ { 0x0042, KEY_PAUSE }, /* PAUSE */ { 
0x0043, KEY_SCREEN }, /* Aspect */ { 0x0044, KEY_FORWARD }, /* FWD [>>] */ { 0x0045, KEY_NEXT }, /* SKIP */ { 0x0048, KEY_RECORD }, /* RECORD */ { 0x0049, KEY_VIDEO }, /* RTV */ { 0x004a, KEY_EPG }, /* Guide */ { 0x004b, KEY_CHANNELUP }, { 0x004c, KEY_HELP }, /* Help */ { 0x004d, KEY_RADIO }, /* Radio */ { 0x004f, KEY_CHANNELDOWN }, { 0x0050, KEY_DVD }, /* DVD */ { 0x0051, KEY_AUDIO }, /* Audio */ { 0x0052, KEY_TITLE }, /* Title */ { 0x0053, KEY_NEW }, /* [symbol PIP?] */ { 0x0057, KEY_MENU }, /* Mouse */ { 0x005a, KEY_PREVIOUS }, /* REPLAY */ }; static struct rc_map_list digitalnow_tinytwin_map = { .map = { .scan = digitalnow_tinytwin, .size = ARRAY_SIZE(digitalnow_tinytwin), .rc_type = RC_TYPE_NEC, .name = RC_MAP_DIGITALNOW_TINYTWIN, } }; static int __init init_rc_map_digitalnow_tinytwin(void) { return rc_map_register(&digitalnow_tinytwin_map); } static void __exit exit_rc_map_digitalnow_tinytwin(void) { rc_map_unregister(&digitalnow_tinytwin_map); } module_init(init_rc_map_digitalnow_tinytwin) module_exit(exit_rc_map_digitalnow_tinytwin) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
jianC/kernel_htc_lexikon-3.0
drivers/pcmcia/tcic.c
3348
23966
/*====================================================================== Device driver for Databook TCIC-2 PCMCIA controller tcic.c 1.111 2000/02/15 04:13:12 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/system.h> #include <pcmcia/ss.h> #include "tcic.h" MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* The base port address of the TCIC-2 chip */ static unsigned long tcic_base = TCIC_BASE; /* Specify a socket number to ignore */ static int ignore = -1; /* Probe for safe interrupts? */ static int do_scan = 1; /* Bit map of interrupts to choose from */ static u_int irq_mask = 0xffff; static int irq_list[16]; static unsigned int irq_list_count; /* The card status change interrupt -- 0 means autoselect */ static int cs_irq; /* Poll status interval -- 0 means default to interrupt */ static int poll_interval; /* Delay for card status double-checking */ static int poll_quick = HZ/20; /* CCLK external clock time, in nanoseconds. 
70 ns = 14.31818 MHz */ static int cycle_time = 70; module_param(tcic_base, ulong, 0444); module_param(ignore, int, 0444); module_param(do_scan, int, 0444); module_param(irq_mask, int, 0444); module_param_array(irq_list, int, &irq_list_count, 0444); module_param(cs_irq, int, 0444); module_param(poll_interval, int, 0444); module_param(poll_quick, int, 0444); module_param(cycle_time, int, 0444); /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev); static void tcic_timer(u_long data); static struct pccard_operations tcic_operations; struct tcic_socket { u_short psock; u_char last_sstat; u_char id; struct pcmcia_socket socket; }; static struct timer_list poll_timer; static int tcic_timer_pending; static int sockets; static struct tcic_socket socket_table[2]; /*====================================================================*/ /* Trick when selecting interrupts: the TCIC sktirq pin is supposed to map to irq 11, but is coded as 0 or 1 in the irq registers. */ #define TCIC_IRQ(x) ((x) ? (((x) == 11) ? 
1 : (x)) : 15) #ifdef DEBUG_X static u_char tcic_getb(u_char reg) { u_char val = inb(tcic_base+reg); printk(KERN_DEBUG "tcic_getb(%#lx) = %#x\n", tcic_base+reg, val); return val; } static u_short tcic_getw(u_char reg) { u_short val = inw(tcic_base+reg); printk(KERN_DEBUG "tcic_getw(%#lx) = %#x\n", tcic_base+reg, val); return val; } static void tcic_setb(u_char reg, u_char data) { printk(KERN_DEBUG "tcic_setb(%#lx, %#x)\n", tcic_base+reg, data); outb(data, tcic_base+reg); } static void tcic_setw(u_char reg, u_short data) { printk(KERN_DEBUG "tcic_setw(%#lx, %#x)\n", tcic_base+reg, data); outw(data, tcic_base+reg); } #else #define tcic_getb(reg) inb(tcic_base+reg) #define tcic_getw(reg) inw(tcic_base+reg) #define tcic_setb(reg, data) outb(data, tcic_base+reg) #define tcic_setw(reg, data) outw(data, tcic_base+reg) #endif static void tcic_setl(u_char reg, u_int data) { #ifdef DEBUG_X printk(KERN_DEBUG "tcic_setl(%#x, %#lx)\n", tcic_base+reg, data); #endif outw(data & 0xffff, tcic_base+reg); outw(data >> 16, tcic_base+reg+2); } static void tcic_aux_setb(u_short reg, u_char data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setb(TCIC_AUX, data); } static u_short tcic_aux_getw(u_short reg) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); return tcic_getw(TCIC_AUX); } static void tcic_aux_setw(u_short reg, u_short data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setw(TCIC_AUX, data); } /*====================================================================*/ /* Time conversion functions */ static int to_cycles(int ns) { if (ns < 14) return 0; else return 2*(ns-14)/cycle_time; } /*====================================================================*/ static volatile u_int irq_hits; static irqreturn_t __init tcic_irq_count(int irq, void *dev) { irq_hits++; return IRQ_HANDLED; } static u_int __init try_irq(int irq) { u_short 
cfg; irq_hits = 0; if (request_irq(irq, tcic_irq_count, 0, "irq scan", tcic_irq_count) != 0) return -1; mdelay(10); if (irq_hits) { free_irq(irq, tcic_irq_count); return -1; } /* Generate one interrupt */ cfg = TCIC_SYSCFG_AUTOBUSY | 0x0a00; tcic_aux_setw(TCIC_AUX_SYSCFG, cfg | TCIC_IRQ(irq)); tcic_setb(TCIC_IENA, TCIC_IENA_ERR | TCIC_IENA_CFG_HIGH); tcic_setb(TCIC_ICSR, TCIC_ICSR_ERR | TCIC_ICSR_JAM); udelay(1000); free_irq(irq, tcic_irq_count); /* Turn off interrupts */ tcic_setb(TCIC_IENA, TCIC_IENA_CFG_OFF); while (tcic_getb(TCIC_ICSR)) tcic_setb(TCIC_ICSR, TCIC_ICSR_JAM); tcic_aux_setw(TCIC_AUX_SYSCFG, cfg); return (irq_hits != 1); } static u_int __init irq_scan(u_int mask0) { u_int mask1; int i; #ifdef __alpha__ #define PIC 0x4d0 /* Don't probe level-triggered interrupts -- reserved for PCI */ int level_mask = inb_p(PIC) | (inb_p(PIC+1) << 8); if (level_mask) mask0 &= ~level_mask; #endif mask1 = 0; if (do_scan) { for (i = 0; i < 16; i++) if ((mask0 & (1 << i)) && (try_irq(i) == 0)) mask1 |= (1 << i); for (i = 0; i < 16; i++) if ((mask1 & (1 << i)) && (try_irq(i) != 0)) { mask1 ^= (1 << i); } } if (mask1) { printk("scanned"); } else { /* Fallback: just find interrupts that aren't in use */ for (i = 0; i < 16; i++) if ((mask0 & (1 << i)) && (request_irq(i, tcic_irq_count, 0, "x", tcic_irq_count) == 0)) { mask1 |= (1 << i); free_irq(i, tcic_irq_count); } printk("default"); } printk(") = "); for (i = 0; i < 16; i++) if (mask1 & (1<<i)) printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i); printk(" "); return mask1; } /*====================================================================== See if a card is present, powered up, in IO mode, and already bound to a (non-PCMCIA) Linux driver. We make an exception for cards that look like serial devices. 
======================================================================*/ static int __init is_active(int s) { u_short scf1, ioctl, base, num; u_char pwr, sstat; u_int addr; tcic_setl(TCIC_ADDR, (s << TCIC_ADDR_SS_SHFT) | TCIC_ADDR_INDREG | TCIC_SCF1(s)); scf1 = tcic_getw(TCIC_DATA); pwr = tcic_getb(TCIC_PWR); sstat = tcic_getb(TCIC_SSTAT); addr = TCIC_IWIN(s, 0); tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X); base = tcic_getw(TCIC_DATA); tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X); ioctl = tcic_getw(TCIC_DATA); if (ioctl & TCIC_ICTL_TINY) num = 1; else { num = (base ^ (base-1)); base = base & (base-1); } if ((sstat & TCIC_SSTAT_CD) && (pwr & TCIC_PWR_VCC(s)) && (scf1 & TCIC_SCF1_IOSTS) && (ioctl & TCIC_ICTL_ENA) && ((base & 0xfeef) != 0x02e8)) { struct resource *res = request_region(base, num, "tcic-2"); if (!res) /* region is busy */ return 1; release_region(base, num); } return 0; } /*====================================================================== This returns the revision code for the specified socket. 
======================================================================*/ static int __init get_tcic_id(void) { u_short id; tcic_aux_setw(TCIC_AUX_TEST, TCIC_TEST_DIAG); id = tcic_aux_getw(TCIC_AUX_ILOCK); id = (id & TCIC_ILOCKTEST_ID_MASK) >> TCIC_ILOCKTEST_ID_SH; tcic_aux_setw(TCIC_AUX_TEST, 0); return id; } /*====================================================================*/ static struct platform_driver tcic_driver = { .driver = { .name = "tcic-pcmcia", .owner = THIS_MODULE, }, }; static struct platform_device tcic_device = { .name = "tcic-pcmcia", .id = 0, }; static int __init init_tcic(void) { int i, sock, ret = 0; u_int mask, scan; if (platform_driver_register(&tcic_driver)) return -1; printk(KERN_INFO "Databook TCIC-2 PCMCIA probe: "); sock = 0; if (!request_region(tcic_base, 16, "tcic-2")) { printk("could not allocate ports,\n "); platform_driver_unregister(&tcic_driver); return -ENODEV; } else { tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } if (sock == 0) { /* See if resetting the controller does any good */ tcic_setb(TCIC_SCTRL, TCIC_SCTRL_RESET); tcic_setb(TCIC_SCTRL, 0); tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } } } if (sock == 0) { printk("not found.\n"); release_region(tcic_base, 16); platform_driver_unregister(&tcic_driver); return -ENODEV; } sockets = 0; for (i = 0; i < sock; i++) { if ((i == ignore) || is_active(i)) continue; socket_table[sockets].psock = i; socket_table[sockets].id = get_tcic_id(); socket_table[sockets].socket.owner = THIS_MODULE; /* only 16-bit cards, memory windows must be size-aligned */ /* No PCI or CardBus support */ socket_table[sockets].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN; /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ socket_table[sockets].socket.irq_mask = 0x4cf8; /* 4K minimum window size */ socket_table[sockets].socket.map_size = 
0x1000; sockets++; } switch (socket_table[0].id) { case TCIC_ID_DB86082: printk("DB86082"); break; case TCIC_ID_DB86082A: printk("DB86082A"); break; case TCIC_ID_DB86084: printk("DB86084"); break; case TCIC_ID_DB86084A: printk("DB86084A"); break; case TCIC_ID_DB86072: printk("DB86072"); break; case TCIC_ID_DB86184: printk("DB86184"); break; case TCIC_ID_DB86082B: printk("DB86082B"); break; default: printk("Unknown ID 0x%02x", socket_table[0].id); } /* Set up polling */ poll_timer.function = &tcic_timer; poll_timer.data = 0; init_timer(&poll_timer); /* Build interrupt mask */ printk(KERN_CONT ", %d sockets\n", sockets); printk(KERN_INFO " irq list ("); if (irq_list_count == 0) mask = irq_mask; else for (i = mask = 0; i < irq_list_count; i++) mask |= (1<<irq_list[i]); /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ mask &= 0x4cf8; /* Scan interrupts */ mask = irq_scan(mask); for (i=0;i<sockets;i++) socket_table[i].socket.irq_mask = mask; /* Check for only two interrupts available */ scan = (mask & (mask-1)); if (((scan & (scan-1)) == 0) && (poll_interval == 0)) poll_interval = HZ; if (poll_interval == 0) { /* Avoid irq 12 unless it is explicitly requested */ u_int cs_mask = mask & ((cs_irq) ? 
(1<<cs_irq) : ~(1<<12)); for (i = 15; i > 0; i--) if ((cs_mask & (1 << i)) && (request_irq(i, tcic_interrupt, 0, "tcic", tcic_interrupt) == 0)) break; cs_irq = i; if (cs_irq == 0) poll_interval = HZ; } if (socket_table[0].socket.irq_mask & (1 << 11)) printk("sktirq is irq 11, "); if (cs_irq != 0) printk("status change on irq %d\n", cs_irq); else printk("polled status, interval = %d ms\n", poll_interval * 1000 / HZ); for (i = 0; i < sockets; i++) { tcic_setw(TCIC_ADDR+2, socket_table[i].psock << TCIC_SS_SHFT); socket_table[i].last_sstat = tcic_getb(TCIC_SSTAT); } /* jump start interrupt handler, if needed */ tcic_interrupt(0, NULL); platform_device_register(&tcic_device); for (i = 0; i < sockets; i++) { socket_table[i].socket.ops = &tcic_operations; socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; socket_table[i].socket.dev.parent = &tcic_device.dev; ret = pcmcia_register_socket(&socket_table[i].socket); if (ret && i) pcmcia_unregister_socket(&socket_table[0].socket); } return ret; return 0; } /* init_tcic */ /*====================================================================*/ static void __exit exit_tcic(void) { int i; del_timer_sync(&poll_timer); if (cs_irq != 0) { tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); free_irq(cs_irq, tcic_interrupt); } release_region(tcic_base, 16); for (i = 0; i < sockets; i++) { pcmcia_unregister_socket(&socket_table[i].socket); } platform_device_unregister(&tcic_device); platform_driver_unregister(&tcic_driver); } /* exit_tcic */ /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev) { int i, quick = 0; u_char latch, sstat; u_short psock; u_int events; static volatile int active = 0; if (active) { printk(KERN_NOTICE "tcic: reentered interrupt handler!\n"); return IRQ_NONE; } else active = 1; pr_debug("tcic_interrupt()\n"); for (i = 0; i < sockets; i++) { psock = socket_table[i].psock; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) 
| TCIC_ADDR_INDREG | TCIC_SCF1(psock)); sstat = tcic_getb(TCIC_SSTAT); latch = sstat ^ socket_table[psock].last_sstat; socket_table[i].last_sstat = sstat; if (tcic_getb(TCIC_ICSR) & TCIC_ICSR_CDCHG) { tcic_setb(TCIC_ICSR, TCIC_ICSR_CLEAR); quick = 1; } if (latch == 0) continue; events = (latch & TCIC_SSTAT_CD) ? SS_DETECT : 0; events |= (latch & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { events |= (latch & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { events |= (latch & TCIC_SSTAT_RDY) ? SS_READY : 0; events |= (latch & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; events |= (latch & TCIC_SSTAT_LBAT2) ? SS_BATWARN : 0; } if (events) { pcmcia_parse_events(&socket_table[i].socket, events); } } /* Schedule next poll, if needed */ if (((cs_irq == 0) || quick) && (!tcic_timer_pending)) { poll_timer.expires = jiffies + (quick ? poll_quick : poll_interval); add_timer(&poll_timer); tcic_timer_pending = 1; } active = 0; pr_debug("interrupt done\n"); return IRQ_HANDLED; } /* tcic_interrupt */ static void tcic_timer(u_long data) { pr_debug("tcic_timer()\n"); tcic_timer_pending = 0; tcic_interrupt(0, NULL); } /* tcic_timer */ /*====================================================================*/ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) | TCIC_ADDR_INDREG | TCIC_SCF1(psock)); reg = tcic_getb(TCIC_SSTAT); *value = (reg & TCIC_SSTAT_CD) ? SS_DETECT : 0; *value |= (reg & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { *value |= (reg & TCIC_SSTAT_RDY) ? SS_READY : 0; *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; *value |= (reg & TCIC_SSTAT_LBAT2) ? 
SS_BATWARN : 0; } reg = tcic_getb(TCIC_PWR); if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) *value |= SS_POWERON; dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value); return 0; } /* tcic_get_status */ /*====================================================================*/ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; u_short scf1, scf2; dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); reg = tcic_getb(TCIC_PWR); reg &= ~(TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock)); if (state->Vcc == 50) { switch (state->Vpp) { case 0: reg |= TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock); break; case 50: reg |= TCIC_PWR_VCC(psock); break; case 120: reg |= TCIC_PWR_VPP(psock); break; default: return -EINVAL; } } else if (state->Vcc != 0) return -EINVAL; if (reg != tcic_getb(TCIC_PWR)) tcic_setb(TCIC_PWR, reg); reg = TCIC_ILOCK_HOLD_CCLK | TCIC_ILOCK_CWAIT; if (state->flags & SS_OUTPUT_ENA) { tcic_setb(TCIC_SCTRL, TCIC_SCTRL_ENA); reg |= TCIC_ILOCK_CRESENA; } else tcic_setb(TCIC_SCTRL, 0); if (state->flags & SS_RESET) reg |= TCIC_ILOCK_CRESET; tcic_aux_setb(TCIC_AUX_ILOCK, reg); tcic_setw(TCIC_ADDR, TCIC_SCF1(psock)); scf1 = TCIC_SCF1_FINPACK; scf1 |= TCIC_IRQ(state->io_irq); if (state->flags & SS_IOCARD) { scf1 |= TCIC_SCF1_IOSTS; if (state->flags & SS_SPKR_ENA) scf1 |= TCIC_SCF1_SPKR; if (state->flags & SS_DMA_MODE) scf1 |= TCIC_SCF1_DREQ2 << TCIC_SCF1_DMA_SHIFT; } tcic_setw(TCIC_DATA, scf1); /* Some general setup stuff, and configure status interrupt */ reg = TCIC_WAIT_ASYNC | TCIC_WAIT_SENSE | to_cycles(250); tcic_aux_setb(TCIC_AUX_WCTL, reg); tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00| TCIC_IRQ(cs_irq)); /* Card status change interrupt mask */ 
tcic_setw(TCIC_ADDR, TCIC_SCF2(psock)); scf2 = TCIC_SCF2_MALL; if (state->csc_mask & SS_DETECT) scf2 &= ~TCIC_SCF2_MCD; if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) reg &= ~TCIC_SCF2_MLBAT1; } else { if (state->csc_mask & SS_BATDEAD) reg &= ~TCIC_SCF2_MLBAT1; if (state->csc_mask & SS_BATWARN) reg &= ~TCIC_SCF2_MLBAT2; if (state->csc_mask & SS_READY) reg &= ~TCIC_SCF2_MRDY; } tcic_setw(TCIC_DATA, scf2); /* For the ISA bus, the irq should be active-high totem-pole */ tcic_setb(TCIC_IENA, TCIC_IENA_CDCHG | TCIC_IENA_CFG_HIGH); return 0; } /* tcic_set_socket */ /*====================================================================*/ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_int addr; u_short base, len, ioctl; dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_IWIN(psock, io->map); base = io->start; len = io->stop - io->start; /* Check to see that len+1 is power of two, etc */ if ((len & (len+1)) || (base & len)) return -EINVAL; base |= (len+1)>>1; tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X); tcic_setw(TCIC_DATA, base); ioctl = (psock << TCIC_ICTL_SS_SHFT); ioctl |= (len == 0) ? TCIC_ICTL_TINY : 0; ioctl |= (io->flags & MAP_ACTIVE) ? TCIC_ICTL_ENA : 0; ioctl |= to_cycles(io->speed) & TCIC_ICTL_WSCNT_MASK; if (!(io->flags & MAP_AUTOSZ)) { ioctl |= TCIC_ICTL_QUIET; ioctl |= (io->flags & MAP_16BIT) ? 
TCIC_ICTL_BW_16 : TCIC_ICTL_BW_8; } tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X); tcic_setw(TCIC_DATA, ioctl); return 0; } /* tcic_set_io_map */ /*====================================================================*/ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_short addr, ctl; u_long base, len, mmap; dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->res->start, (unsigned long long)mem->res->end, mem->card_start); if ((mem->map > 3) || (mem->card_start > 0x3ffffff) || (mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) || (mem->res->start > mem->res->end) || (mem->speed > 1000)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_MWIN(psock, mem->map); base = mem->res->start; len = mem->res->end - mem->res->start; if ((len & (len+1)) || (base & len)) return -EINVAL; if (len == 0x0fff) base = (base >> TCIC_MBASE_HA_SHFT) | TCIC_MBASE_4K_BIT; else base = (base | (len+1)>>1) >> TCIC_MBASE_HA_SHFT; tcic_setw(TCIC_ADDR, addr + TCIC_MBASE_X); tcic_setw(TCIC_DATA, base); mmap = mem->card_start - mem->res->start; mmap = (mmap >> TCIC_MMAP_CA_SHFT) & TCIC_MMAP_CA_MASK; if (mem->flags & MAP_ATTRIB) mmap |= TCIC_MMAP_REG; tcic_setw(TCIC_ADDR, addr + TCIC_MMAP_X); tcic_setw(TCIC_DATA, mmap); ctl = TCIC_MCTL_QUIET | (psock << TCIC_MCTL_SS_SHFT); ctl |= to_cycles(mem->speed) & TCIC_MCTL_WSCNT_MASK; ctl |= (mem->flags & MAP_16BIT) ? 0 : TCIC_MCTL_B8; ctl |= (mem->flags & MAP_WRPROT) ? TCIC_MCTL_WP : 0; ctl |= (mem->flags & MAP_ACTIVE) ? 
TCIC_MCTL_ENA : 0; tcic_setw(TCIC_ADDR, addr + TCIC_MCTL_X); tcic_setw(TCIC_DATA, ctl); return 0; } /* tcic_set_mem_map */ /*====================================================================*/ static int tcic_init(struct pcmcia_socket *s) { int i; struct resource res = { .start = 0, .end = 0x1000 }; pccard_io_map io = { 0, 0, 0, 0, 1 }; pccard_mem_map mem = { .res = &res, }; for (i = 0; i < 2; i++) { io.map = i; tcic_set_io_map(s, &io); } for (i = 0; i < 5; i++) { mem.map = i; tcic_set_mem_map(s, &mem); } return 0; } static struct pccard_operations tcic_operations = { .init = tcic_init, .get_status = tcic_get_status, .set_socket = tcic_set_socket, .set_io_map = tcic_set_io_map, .set_mem_map = tcic_set_mem_map, }; /*====================================================================*/ module_init(init_tcic); module_exit(exit_tcic);
gpl-2.0
MetSystem/Xiaomi_Kernel_OpenSource
drivers/usb/storage/jumpshot.c
4628
18478
/* Driver for Lexar "Jumpshot" Compact Flash reader * * jumpshot driver v0.1: * * First release * * Current development and maintenance by: * (c) 2000 Jimmie Mayfield (mayfield+usb@sackheads.org) * * Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver * which I used as a template for this driver. * * Some bugfixes and scatter-gather code by Gregory P. Smith * (greg-usb@electricrain.com) * * Fix for media change by Joerg Schneider (js@joergschneider.com) * * Developed with the assistance of: * * (C) 2002 Alan Stern <stern@rowland.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * This driver attempts to support the Lexar Jumpshot USB CompactFlash * reader. Like many other USB CompactFlash readers, the Jumpshot contains * a USB-to-ATA chip. * * This driver supports reading and writing. If you're truly paranoid, * however, you can force the driver into a write-protected state by setting * the WP enable bits in jumpshot_handle_mode_sense. See the comments * in that routine. 
*/ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Lexar \"Jumpshot\" Compact Flash reader"); MODULE_AUTHOR("Jimmie Mayfield <mayfield+usb@sackheads.org>"); MODULE_LICENSE("GPL"); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24) } static struct usb_device_id jumpshot_usb_ids[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev jumpshot_unusual_dev_list[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV struct jumpshot_info { unsigned long sectors; /* total sector count */ unsigned long ssize; /* sector size in bytes */ /* the following aren't used yet */ unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ }; static inline int jumpshot_bulk_read(struct us_data *us, unsigned char *data, unsigned int len) { if (len == 0) return USB_STOR_XFER_GOOD; US_DEBUGP("jumpshot_bulk_read: len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, len, NULL); } static inline int jumpshot_bulk_write(struct us_data *us, unsigned char *data, unsigned int len) { if (len 
== 0) return USB_STOR_XFER_GOOD; US_DEBUGP("jumpshot_bulk_write: len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, data, len, NULL); } static int jumpshot_get_status(struct us_data *us) { int rc; if (!us) return USB_STOR_TRANSPORT_ERROR; // send the setup rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, 0, 0xA0, 0, 7, us->iobuf, 1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (us->iobuf[0] != 0x50) { US_DEBUGP("jumpshot_get_status: 0x%2x\n", us->iobuf[0]); return USB_STOR_TRANSPORT_ERROR; } return USB_STOR_TRANSPORT_GOOD; } static int jumpshot_read_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Jumpshot // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; // Since we don't read more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. 
alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 | ((sector >> 24) & 0x0F); command[6] = 0x20; // send the setup + command result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 1, command, 7); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result result = jumpshot_bulk_read(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; US_DEBUGP("jumpshot_read_data: %d bytes\n", len); // Store the data in the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, TO_XFER_BUF); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int jumpshot_write_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result, waitcount; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Jumpshot // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. // if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; // Since we don't write more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. 
alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; // Get the data from the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, FROM_XFER_BUF); command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 | ((sector >> 24) & 0x0F); command[6] = 0x30; // send the setup + command result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 1, command, 7); if (result != USB_STOR_XFER_GOOD) goto leave; // send the data result = jumpshot_bulk_write(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result. apparently the bulk write can complete // before the jumpshot drive is finished writing. so we loop // here until we get a good return code waitcount = 0; do { result = jumpshot_get_status(us); if (result != USB_STOR_TRANSPORT_GOOD) { // I have not experimented to find the smallest value. // msleep(50); } } while ((result != USB_STOR_TRANSPORT_GOOD) && (waitcount < 10)); if (result != USB_STOR_TRANSPORT_GOOD) US_DEBUGP("jumpshot_write_data: Gah! Waitcount = 10. 
Bad write!?\n"); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return result; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int jumpshot_id_device(struct us_data *us, struct jumpshot_info *info) { unsigned char *command = us->iobuf; unsigned char *reply; int rc; if (!info) return USB_STOR_TRANSPORT_ERROR; command[0] = 0xE0; command[1] = 0xEC; reply = kmalloc(512, GFP_NOIO); if (!reply) return USB_STOR_TRANSPORT_ERROR; // send the setup rc = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 6, command, 2); if (rc != USB_STOR_XFER_GOOD) { US_DEBUGP("jumpshot_id_device: Gah! " "send_control for read_capacity failed\n"); rc = USB_STOR_TRANSPORT_ERROR; goto leave; } // read the reply rc = jumpshot_bulk_read(us, reply, 512); if (rc != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } info->sectors = ((u32)(reply[117]) << 24) | ((u32)(reply[116]) << 16) | ((u32)(reply[115]) << 8) | ((u32)(reply[114]) ); rc = USB_STOR_TRANSPORT_GOOD; leave: kfree(reply); return rc; } static int jumpshot_handle_mode_sense(struct us_data *us, struct scsi_cmnd * srb, int sense_6) { static unsigned char rw_err_page[12] = { 0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0 }; static unsigned char cache_page[12] = { 0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static unsigned char rbac_page[12] = { 0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0 }; static unsigned char timer_page[8] = { 0x1C, 0x6, 0, 0, 0, 0 }; unsigned char pc, page_code; unsigned int i = 0; struct jumpshot_info *info = (struct jumpshot_info *) (us->extra); unsigned char *ptr = us->iobuf; pc = srb->cmnd[2] >> 6; page_code = srb->cmnd[2] & 0x3F; switch (pc) { case 0x0: US_DEBUGP("jumpshot_handle_mode_sense: Current values\n"); break; case 0x1: US_DEBUGP("jumpshot_handle_mode_sense: Changeable values\n"); break; case 0x2: US_DEBUGP("jumpshot_handle_mode_sense: Default values\n"); break; case 0x3: US_DEBUGP("jumpshot_handle_mode_sense: Saves values\n"); break; } 
memset(ptr, 0, 8); if (sense_6) { ptr[2] = 0x00; // WP enable: 0x80 i = 4; } else { ptr[3] = 0x00; // WP enable: 0x80 i = 8; } switch (page_code) { case 0x0: // vendor-specific mode info->sense_key = 0x05; info->sense_asc = 0x24; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; case 0x1: memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; case 0x8: memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); break; case 0x1B: memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); break; case 0x1C: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); break; case 0x3F: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; } if (sense_6) ptr[0] = i - 1; else ((__be16 *) ptr)[0] = cpu_to_be16(i - 2); usb_stor_set_xfer_buf(ptr, i, srb); return USB_STOR_TRANSPORT_GOOD; } static void jumpshot_info_destructor(void *extra) { // this routine is a placeholder... // currently, we don't allocate any extra blocks so we're okay } // Transport for the Lexar 'Jumpshot' // static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us) { struct jumpshot_info *info; int rc; unsigned long block, blocks; unsigned char *ptr = us->iobuf; static unsigned char inquiry_response[8] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (!us->extra) { us->extra = kzalloc(sizeof(struct jumpshot_info), GFP_NOIO); if (!us->extra) { US_DEBUGP("jumpshot_transport: Gah! Can't allocate storage for jumpshot info struct!\n"); return USB_STOR_TRANSPORT_ERROR; } us->extra_destructor = jumpshot_info_destructor; } info = (struct jumpshot_info *) (us->extra); if (srb->cmnd[0] == INQUIRY) { US_DEBUGP("jumpshot_transport: INQUIRY. 
Returning bogus response.\n"); memcpy(ptr, inquiry_response, sizeof(inquiry_response)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_CAPACITY) { info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec rc = jumpshot_get_status(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; rc = jumpshot_id_device(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; US_DEBUGP("jumpshot_transport: READ_CAPACITY: %ld sectors, %ld bytes per sector\n", info->sectors, info->ssize); // build the reply // ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); ((__be32 *) ptr)[1] = cpu_to_be32(info->ssize); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SELECT_10) { US_DEBUGP("jumpshot_transport: Gah! MODE_SELECT_10.\n"); return USB_STOR_TRANSPORT_ERROR; } if (srb->cmnd[0] == READ_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); US_DEBUGP("jumpshot_transport: READ_10: read block 0x%04lx count %ld\n", block, blocks); return jumpshot_read_data(us, info, block, blocks); } if (srb->cmnd[0] == READ_12) { // I don't think we'll ever see a READ_12 but support it anyway... 
// block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); US_DEBUGP("jumpshot_transport: READ_12: read block 0x%04lx count %ld\n", block, blocks); return jumpshot_read_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); US_DEBUGP("jumpshot_transport: WRITE_10: write block 0x%04lx count %ld\n", block, blocks); return jumpshot_write_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_12) { // I don't think we'll ever see a WRITE_12 but support it anyway... // block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); US_DEBUGP("jumpshot_transport: WRITE_12: write block 0x%04lx count %ld\n", block, blocks); return jumpshot_write_data(us, info, block, blocks); } if (srb->cmnd[0] == TEST_UNIT_READY) { US_DEBUGP("jumpshot_transport: TEST_UNIT_READY.\n"); return jumpshot_get_status(us); } if (srb->cmnd[0] == REQUEST_SENSE) { US_DEBUGP("jumpshot_transport: REQUEST_SENSE.\n"); memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SENSE) { US_DEBUGP("jumpshot_transport: MODE_SENSE_6 detected\n"); return jumpshot_handle_mode_sense(us, srb, 1); } if (srb->cmnd[0] == MODE_SENSE_10) { US_DEBUGP("jumpshot_transport: MODE_SENSE_10 detected\n"); return jumpshot_handle_mode_sense(us, srb, 0); } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { // sure. whatever. 
not like we can stop the user from popping // the media out of the device (no locking doors, etc) // return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == START_STOP) { /* this is used by sd.c'check_scsidisk_media_change to detect media change */ US_DEBUGP("jumpshot_transport: START_STOP.\n"); /* the first jumpshot_id_device after a media change returns an error (determined experimentally) */ rc = jumpshot_id_device(us, info); if (rc == USB_STOR_TRANSPORT_GOOD) { info->sense_key = NO_SENSE; srb->result = SUCCESS; } else { info->sense_key = UNIT_ATTENTION; srb->result = SAM_STAT_CHECK_CONDITION; } return rc; } US_DEBUGP("jumpshot_transport: Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static int jumpshot_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - jumpshot_usb_ids) + jumpshot_unusual_dev_list); if (result) return result; us->transport_name = "Lexar Jumpshot Control/Bulk"; us->transport = jumpshot_transport; us->transport_reset = usb_stor_Bulk_reset; us->max_lun = 1; result = usb_stor_probe2(us); return result; } static struct usb_driver jumpshot_driver = { .name = "ums-jumpshot", .probe = jumpshot_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = jumpshot_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_driver(jumpshot_driver);
gpl-2.0
CyanogenMod/android_kernel_samsung_jf
drivers/i2c/busses/i2c-omap.c
4884
33588
/* * TI OMAP I2C master mode driver * * Copyright (C) 2003 MontaVista Software, Inc. * Copyright (C) 2005 Nokia Corporation * Copyright (C) 2004 - 2007 Texas Instruments. * * Originally written by MontaVista Software, Inc. * Additional contributions by: * Tony Lindgren <tony@atomide.com> * Imre Deak <imre.deak@nokia.com> * Juha Yrjölä <juha.yrjola@solidboot.com> * Syed Khasim <x0khasim@ti.com> * Nishant Menon <nm@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_i2c.h> #include <linux/of_device.h> #include <linux/slab.h> #include <linux/i2c-omap.h> #include <linux/pm_runtime.h> /* I2C controller revisions */ #define OMAP_I2C_OMAP1_REV_2 0x20 /* I2C controller revisions present on specific hardware */ #define OMAP_I2C_REV_ON_2430 0x36 #define OMAP_I2C_REV_ON_3430 0x3C #define OMAP_I2C_REV_ON_3530_4430 0x40 /* timeout waiting for the controller to respond */ #define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000)) /* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */ enum { OMAP_I2C_REV_REG = 0, OMAP_I2C_IE_REG, OMAP_I2C_STAT_REG, OMAP_I2C_IV_REG, OMAP_I2C_WE_REG, OMAP_I2C_SYSS_REG, OMAP_I2C_BUF_REG, OMAP_I2C_CNT_REG, OMAP_I2C_DATA_REG, OMAP_I2C_SYSC_REG, OMAP_I2C_CON_REG, OMAP_I2C_OA_REG, OMAP_I2C_SA_REG, OMAP_I2C_PSC_REG, OMAP_I2C_SCLL_REG, OMAP_I2C_SCLH_REG, OMAP_I2C_SYSTEST_REG, OMAP_I2C_BUFSTAT_REG, /* only on OMAP4430 */ OMAP_I2C_IP_V2_REVNB_LO, OMAP_I2C_IP_V2_REVNB_HI, OMAP_I2C_IP_V2_IRQSTATUS_RAW, OMAP_I2C_IP_V2_IRQENABLE_SET, OMAP_I2C_IP_V2_IRQENABLE_CLR, }; /* I2C Interrupt Enable Register (OMAP_I2C_IE): */ #define OMAP_I2C_IE_XDR (1 << 14) /* TX Buffer drain int enable */ #define OMAP_I2C_IE_RDR (1 << 13) /* RX Buffer drain int enable */ #define OMAP_I2C_IE_XRDY (1 << 4) /* TX data ready int enable */ #define OMAP_I2C_IE_RRDY (1 << 3) /* RX data ready int enable */ #define OMAP_I2C_IE_ARDY (1 << 2) /* Access ready int enable */ #define OMAP_I2C_IE_NACK (1 << 1) /* No ack interrupt enable */ #define OMAP_I2C_IE_AL (1 << 0) /* Arbitration lost int ena */ /* I2C Status Register (OMAP_I2C_STAT): */ #define OMAP_I2C_STAT_XDR (1 << 14) /* TX Buffer draining */ #define OMAP_I2C_STAT_RDR (1 << 13) /* RX Buffer draining */ #define OMAP_I2C_STAT_BB 
(1 << 12) /* Bus busy */ #define OMAP_I2C_STAT_ROVR (1 << 11) /* Receive overrun */ #define OMAP_I2C_STAT_XUDF (1 << 10) /* Transmit underflow */ #define OMAP_I2C_STAT_AAS (1 << 9) /* Address as slave */ #define OMAP_I2C_STAT_AD0 (1 << 8) /* Address zero */ #define OMAP_I2C_STAT_XRDY (1 << 4) /* Transmit data ready */ #define OMAP_I2C_STAT_RRDY (1 << 3) /* Receive data ready */ #define OMAP_I2C_STAT_ARDY (1 << 2) /* Register access ready */ #define OMAP_I2C_STAT_NACK (1 << 1) /* No ack interrupt enable */ #define OMAP_I2C_STAT_AL (1 << 0) /* Arbitration lost int ena */ /* I2C WE wakeup enable register */ #define OMAP_I2C_WE_XDR_WE (1 << 14) /* TX drain wakup */ #define OMAP_I2C_WE_RDR_WE (1 << 13) /* RX drain wakeup */ #define OMAP_I2C_WE_AAS_WE (1 << 9) /* Address as slave wakeup*/ #define OMAP_I2C_WE_BF_WE (1 << 8) /* Bus free wakeup */ #define OMAP_I2C_WE_STC_WE (1 << 6) /* Start condition wakeup */ #define OMAP_I2C_WE_GC_WE (1 << 5) /* General call wakeup */ #define OMAP_I2C_WE_DRDY_WE (1 << 3) /* TX/RX data ready wakeup */ #define OMAP_I2C_WE_ARDY_WE (1 << 2) /* Reg access ready wakeup */ #define OMAP_I2C_WE_NACK_WE (1 << 1) /* No acknowledgment wakeup */ #define OMAP_I2C_WE_AL_WE (1 << 0) /* Arbitration lost wakeup */ #define OMAP_I2C_WE_ALL (OMAP_I2C_WE_XDR_WE | OMAP_I2C_WE_RDR_WE | \ OMAP_I2C_WE_AAS_WE | OMAP_I2C_WE_BF_WE | \ OMAP_I2C_WE_STC_WE | OMAP_I2C_WE_GC_WE | \ OMAP_I2C_WE_DRDY_WE | OMAP_I2C_WE_ARDY_WE | \ OMAP_I2C_WE_NACK_WE | OMAP_I2C_WE_AL_WE) /* I2C Buffer Configuration Register (OMAP_I2C_BUF): */ #define OMAP_I2C_BUF_RDMA_EN (1 << 15) /* RX DMA channel enable */ #define OMAP_I2C_BUF_RXFIF_CLR (1 << 14) /* RX FIFO Clear */ #define OMAP_I2C_BUF_XDMA_EN (1 << 7) /* TX DMA channel enable */ #define OMAP_I2C_BUF_TXFIF_CLR (1 << 6) /* TX FIFO Clear */ /* I2C Configuration Register (OMAP_I2C_CON): */ #define OMAP_I2C_CON_EN (1 << 15) /* I2C module enable */ #define OMAP_I2C_CON_BE (1 << 14) /* Big endian mode */ #define OMAP_I2C_CON_OPMODE_HS (1 << 12) 
/* High Speed support */ #define OMAP_I2C_CON_STB (1 << 11) /* Start byte mode (master) */ #define OMAP_I2C_CON_MST (1 << 10) /* Master/slave mode */ #define OMAP_I2C_CON_TRX (1 << 9) /* TX/RX mode (master only) */ #define OMAP_I2C_CON_XA (1 << 8) /* Expand address */ #define OMAP_I2C_CON_RM (1 << 2) /* Repeat mode (master only) */ #define OMAP_I2C_CON_STP (1 << 1) /* Stop cond (master only) */ #define OMAP_I2C_CON_STT (1 << 0) /* Start condition (master) */ /* I2C SCL time value when Master */ #define OMAP_I2C_SCLL_HSSCLL 8 #define OMAP_I2C_SCLH_HSSCLH 8 /* I2C System Test Register (OMAP_I2C_SYSTEST): */ #ifdef DEBUG #define OMAP_I2C_SYSTEST_ST_EN (1 << 15) /* System test enable */ #define OMAP_I2C_SYSTEST_FREE (1 << 14) /* Free running mode */ #define OMAP_I2C_SYSTEST_TMODE_MASK (3 << 12) /* Test mode select */ #define OMAP_I2C_SYSTEST_TMODE_SHIFT (12) /* Test mode select */ #define OMAP_I2C_SYSTEST_SCL_I (1 << 3) /* SCL line sense in */ #define OMAP_I2C_SYSTEST_SCL_O (1 << 2) /* SCL line drive out */ #define OMAP_I2C_SYSTEST_SDA_I (1 << 1) /* SDA line sense in */ #define OMAP_I2C_SYSTEST_SDA_O (1 << 0) /* SDA line drive out */ #endif /* OCP_SYSSTATUS bit definitions */ #define SYSS_RESETDONE_MASK (1 << 0) /* OCP_SYSCONFIG bit definitions */ #define SYSC_CLOCKACTIVITY_MASK (0x3 << 8) #define SYSC_SIDLEMODE_MASK (0x3 << 3) #define SYSC_ENAWAKEUP_MASK (1 << 2) #define SYSC_SOFTRESET_MASK (1 << 1) #define SYSC_AUTOIDLE_MASK (1 << 0) #define SYSC_IDLEMODE_SMART 0x2 #define SYSC_CLOCKACTIVITY_FCLK 0x2 /* Errata definitions */ #define I2C_OMAP_ERRATA_I207 (1 << 0) #define I2C_OMAP3_1P153 (1 << 1) struct omap_i2c_dev { struct device *dev; void __iomem *base; /* virtual */ int irq; int reg_shift; /* bit shift for I2C register addresses */ struct completion cmd_complete; struct resource *ioarea; u32 latency; /* maximum mpu wkup latency */ void (*set_mpu_wkup_lat)(struct device *dev, long latency); u32 speed; /* Speed of bus in kHz */ u32 dtrev; /* extra revision from DT 
*/ u32 flags; u16 cmd_err; u8 *buf; u8 *regs; size_t buf_len; struct i2c_adapter adapter; u8 fifo_size; /* use as flag and value * fifo_size==0 implies no fifo * if set, should be trsh+1 */ u8 rev; unsigned b_hw:1; /* bad h/w fixes */ u16 iestate; /* Saved interrupt register */ u16 pscstate; u16 scllstate; u16 sclhstate; u16 bufstate; u16 syscstate; u16 westate; u16 errata; }; static const u8 reg_map_ip_v1[] = { [OMAP_I2C_REV_REG] = 0x00, [OMAP_I2C_IE_REG] = 0x01, [OMAP_I2C_STAT_REG] = 0x02, [OMAP_I2C_IV_REG] = 0x03, [OMAP_I2C_WE_REG] = 0x03, [OMAP_I2C_SYSS_REG] = 0x04, [OMAP_I2C_BUF_REG] = 0x05, [OMAP_I2C_CNT_REG] = 0x06, [OMAP_I2C_DATA_REG] = 0x07, [OMAP_I2C_SYSC_REG] = 0x08, [OMAP_I2C_CON_REG] = 0x09, [OMAP_I2C_OA_REG] = 0x0a, [OMAP_I2C_SA_REG] = 0x0b, [OMAP_I2C_PSC_REG] = 0x0c, [OMAP_I2C_SCLL_REG] = 0x0d, [OMAP_I2C_SCLH_REG] = 0x0e, [OMAP_I2C_SYSTEST_REG] = 0x0f, [OMAP_I2C_BUFSTAT_REG] = 0x10, }; static const u8 reg_map_ip_v2[] = { [OMAP_I2C_REV_REG] = 0x04, [OMAP_I2C_IE_REG] = 0x2c, [OMAP_I2C_STAT_REG] = 0x28, [OMAP_I2C_IV_REG] = 0x34, [OMAP_I2C_WE_REG] = 0x34, [OMAP_I2C_SYSS_REG] = 0x90, [OMAP_I2C_BUF_REG] = 0x94, [OMAP_I2C_CNT_REG] = 0x98, [OMAP_I2C_DATA_REG] = 0x9c, [OMAP_I2C_SYSC_REG] = 0x10, [OMAP_I2C_CON_REG] = 0xa4, [OMAP_I2C_OA_REG] = 0xa8, [OMAP_I2C_SA_REG] = 0xac, [OMAP_I2C_PSC_REG] = 0xb0, [OMAP_I2C_SCLL_REG] = 0xb4, [OMAP_I2C_SCLH_REG] = 0xb8, [OMAP_I2C_SYSTEST_REG] = 0xbC, [OMAP_I2C_BUFSTAT_REG] = 0xc0, [OMAP_I2C_IP_V2_REVNB_LO] = 0x00, [OMAP_I2C_IP_V2_REVNB_HI] = 0x04, [OMAP_I2C_IP_V2_IRQSTATUS_RAW] = 0x24, [OMAP_I2C_IP_V2_IRQENABLE_SET] = 0x2c, [OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30, }; static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev, int reg, u16 val) { __raw_writew(val, i2c_dev->base + (i2c_dev->regs[reg] << i2c_dev->reg_shift)); } static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg) { return __raw_readw(i2c_dev->base + (i2c_dev->regs[reg] << i2c_dev->reg_shift)); } static void omap_i2c_unidle(struct 
omap_i2c_dev *dev)
{
	/*
	 * Restore hardware context after idle.  On IPs that lose register
	 * state when clocks are cut (OMAP_I2C_FLAG_RESET_REGS_POSTIDLE),
	 * reprogram the prescaler, SCL timing, FIFO thresholds, sysconfig
	 * and wakeup enables saved by omap_i2c_init()/omap_i2c_idle().
	 */
	if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
		omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
		omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
		omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
		omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, dev->bufstate);
		omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, dev->syscstate);
		omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
	}

	/*
	 * Don't write to this register if the IE state is 0 as it can
	 * cause deadlock.
	 */
	if (dev->iestate)
		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
}

/*
 * Quiesce the controller before runtime suspend: save and mask the
 * interrupt enables and acknowledge any pending status so no IRQ
 * fires while the device is idled.
 */
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
	u16 iv;

	/* Remember enabled interrupts so omap_i2c_unidle() can restore them */
	dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
	if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
		omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
	else
		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);

	if (dev->rev < OMAP_I2C_OMAP1_REV_2) {
		iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
	} else {
		/* Writing the saved IE mask to STAT acks those sources */
		omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, dev->iestate);

		/* Flush posted write */
		omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
	}
}

/*
 * Soft-reset the controller and program prescaler, SCL timing, FIFO
 * thresholds and interrupt enables for the configured bus speed.
 * Returns 0 on success or -ETIMEDOUT if the reset never completes.
 */
static int omap_i2c_init(struct omap_i2c_dev *dev)
{
	u16 psc = 0, scll = 0, sclh = 0, buf = 0;
	u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
	unsigned long fclk_rate = 12000000;
	unsigned long timeout;
	unsigned long internal_clk = 0;
	struct clk *fclk;

	if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
		/* Disable I2C controller before soft reset */
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
			omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
				~(OMAP_I2C_CON_EN));

		omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
		/* For some reason we need to set the EN bit before the
		 * reset done bit gets set.
*/ timeout = jiffies + OMAP_I2C_TIMEOUT; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG) & SYSS_RESETDONE_MASK)) { if (time_after(jiffies, timeout)) { dev_warn(dev->dev, "timeout waiting " "for controller reset\n"); return -ETIMEDOUT; } msleep(1); } /* SYSC register is cleared by the reset; rewrite it */ if (dev->rev == OMAP_I2C_REV_ON_2430) { omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_AUTOIDLE_MASK); } else if (dev->rev >= OMAP_I2C_REV_ON_3430) { dev->syscstate = SYSC_AUTOIDLE_MASK; dev->syscstate |= SYSC_ENAWAKEUP_MASK; dev->syscstate |= (SYSC_IDLEMODE_SMART << __ffs(SYSC_SIDLEMODE_MASK)); dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK << __ffs(SYSC_CLOCKACTIVITY_MASK)); omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, dev->syscstate); /* * Enabling all wakup sources to stop I2C freezing on * WFI instruction. * REVISIT: Some wkup sources might not be needed. */ dev->westate = OMAP_I2C_WE_ALL; omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); } } omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) { /* * The I2C functional clock is the armxor_ck, so there's * no need to get "armxor_ck" separately. Now, if OMAP2420 * always returns 12MHz for the functional clock, we can * do this bit unconditionally. */ fclk = clk_get(dev->dev, "fck"); fclk_rate = clk_get_rate(fclk); clk_put(fclk); /* TRM for 5912 says the I2C clock must be prescaled to be * between 7 - 12 MHz. The XOR input clock is typically * 12, 13 or 19.2 MHz. So we should have code that produces: * * XOR MHz Divider Prescaler * 12 1 0 * 13 2 1 * 19.2 2 1 */ if (fclk_rate > 12000000) psc = fclk_rate / 12000000; } if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) { /* * HSI2C controller internal clk rate should be 19.2 Mhz for * HS and for all modes on 2430. On 34xx we can use lower rate * to get longer filter period for better noise suppression. * The filter is iclk (fclk for HS) period. 
*/ if (dev->speed > 400 || dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK) internal_clk = 19200; else if (dev->speed > 100) internal_clk = 9600; else internal_clk = 4000; fclk = clk_get(dev->dev, "fck"); fclk_rate = clk_get_rate(fclk) / 1000; clk_put(fclk); /* Compute prescaler divisor */ psc = fclk_rate / internal_clk; psc = psc - 1; /* If configured for High Speed */ if (dev->speed > 400) { unsigned long scl; /* For first phase of HS mode */ scl = internal_clk / 400; fsscll = scl - (scl / 3) - 7; fssclh = (scl / 3) - 5; /* For second phase of HS mode */ scl = fclk_rate / dev->speed; hsscll = scl - (scl / 3) - 7; hssclh = (scl / 3) - 5; } else if (dev->speed > 100) { unsigned long scl; /* Fast mode */ scl = internal_clk / dev->speed; fsscll = scl - (scl / 3) - 7; fssclh = (scl / 3) - 5; } else { /* Standard mode */ fsscll = internal_clk / (dev->speed * 2) - 7; fssclh = internal_clk / (dev->speed * 2) - 5; } scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll; sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh; } else { /* Program desired operating rate */ fclk_rate /= (psc + 1) * 1000; if (psc > 2) psc = 2; scll = fclk_rate / (dev->speed * 2) - 7 + psc; sclh = fclk_rate / (dev->speed * 2) - 7 + psc; } /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc); /* SCL low and high time values */ omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll); omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh); if (dev->fifo_size) { /* Note: setup required fifo size - 1. 
RTRSH and XTRSH */ buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR | (dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf); } /* Take the I2C module out of reset: */ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); dev->errata = 0; if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) dev->errata |= I2C_OMAP_ERRATA_I207; /* Enable interrupts */ dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | OMAP_I2C_IE_AL) | ((dev->fifo_size) ? (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { dev->pscstate = psc; dev->scllstate = scll; dev->sclhstate = sclh; dev->bufstate = buf; } return 0; } /* * Waiting on Bus Busy */ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev) { unsigned long timeout; timeout = jiffies + OMAP_I2C_TIMEOUT; while (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) { if (time_after(jiffies, timeout)) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); return -ETIMEDOUT; } msleep(1); } return 0; } /* * Low level master read/write transaction. */ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); int r; u16 w; dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); if (msg->len == 0) return -EINVAL; omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr); /* REVISIT: Could the STB bit of I2C_CON be used with probing? 
*/ dev->buf = msg->buf; dev->buf_len = msg->len; omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); /* Clear the FIFO Buffers */ w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG); w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w); init_completion(&dev->cmd_complete); dev->cmd_err = 0; w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; /* High speed configuration */ if (dev->speed > 400) w |= OMAP_I2C_CON_OPMODE_HS; if (msg->flags & I2C_M_TEN) w |= OMAP_I2C_CON_XA; if (!(msg->flags & I2C_M_RD)) w |= OMAP_I2C_CON_TRX; if (!dev->b_hw && stop) w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); /* * Don't write stt and stp together on some hardware. */ if (dev->b_hw && stop) { unsigned long delay = jiffies + OMAP_I2C_TIMEOUT; u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); while (con & OMAP_I2C_CON_STT) { con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); /* Let the user know if i2c is in a bad state */ if (time_after(jiffies, delay)) { dev_err(dev->dev, "controller timed out " "waiting for start condition to finish\n"); return -ETIMEDOUT; } cpu_relax(); } w |= OMAP_I2C_CON_STP; w &= ~OMAP_I2C_CON_STT; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } /* * REVISIT: We should abort the transfer on signals, but the bus goes * into arbitration and we're currently unable to recover from it. 
*/ r = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); dev->buf_len = 0; if (r < 0) return r; if (r == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); return -ETIMEDOUT; } if (likely(!dev->cmd_err)) return 0; /* We have an error */ if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) { omap_i2c_init(dev); return -EIO; } if (dev->cmd_err & OMAP_I2C_STAT_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; if (stop) { w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } return -EREMOTEIO; } return -EIO; } /* * Prepare controller for a transaction and call omap_i2c_xfer_msg * to do the work during IRQ processing. */ static int omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); int i; int r; pm_runtime_get_sync(dev->dev); r = omap_i2c_wait_for_bb(dev); if (r < 0) goto out; if (dev->set_mpu_wkup_lat != NULL) dev->set_mpu_wkup_lat(dev->dev, dev->latency); for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); if (r != 0) break; } if (dev->set_mpu_wkup_lat != NULL) dev->set_mpu_wkup_lat(dev->dev, -1); if (r == 0) r = num; omap_i2c_wait_for_bb(dev); out: pm_runtime_put(dev->dev); return r; } static u32 omap_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static inline void omap_i2c_complete_cmd(struct omap_i2c_dev *dev, u16 err) { dev->cmd_err |= err; complete(&dev->cmd_complete); } static inline void omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat) { omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); } static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat) { /* * I2C Errata(Errata Nos. OMAP2: 1.67, OMAP3: 1.8) * Not applicable for OMAP4. 
* Under certain rare conditions, RDR could be set again * when the bus is busy, then ignore the interrupt and * clear the interrupt. */ if (stat & OMAP_I2C_STAT_RDR) { /* Step 1: If RDR is set, clear it */ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); /* Step 2: */ if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB)) { /* Step 3: */ if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_RDR) { omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); dev_dbg(dev->dev, "RDR when bus is busy.\n"); } } } } /* rev1 devices are apparently only on some 15xx */ #ifdef CONFIG_ARCH_OMAP15XX static irqreturn_t omap_i2c_omap1_isr(int this_irq, void *dev_id) { struct omap_i2c_dev *dev = dev_id; u16 iv, w; if (pm_runtime_suspended(dev->dev)) return IRQ_NONE; iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); switch (iv) { case 0x00: /* None */ break; case 0x01: /* Arbitration lost */ dev_err(dev->dev, "Arbitration lost\n"); omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_AL); break; case 0x02: /* No acknowledgement */ omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_NACK); omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP); break; case 0x03: /* Register access ready */ omap_i2c_complete_cmd(dev, 0); break; case 0x04: /* Receive data ready */ if (dev->buf_len) { w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG); *dev->buf++ = w; dev->buf_len--; if (dev->buf_len) { *dev->buf++ = w >> 8; dev->buf_len--; } } else dev_err(dev->dev, "RRDY IRQ while no data requested\n"); break; case 0x05: /* Transmit data ready */ if (dev->buf_len) { w = *dev->buf++; dev->buf_len--; if (dev->buf_len) { w |= *dev->buf++ << 8; dev->buf_len--; } omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); } else dev_err(dev->dev, "XRDY IRQ while no data to send\n"); break; default: return IRQ_NONE; } return IRQ_HANDLED; } #else #define omap_i2c_omap1_isr NULL #endif /* * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing * data to DATA_REG. 
Otherwise some data bytes can be lost while transferring * them from the memory to the I2C interface. */ static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err) { unsigned long timeout = 10000; while (--timeout && !(*stat & OMAP_I2C_STAT_XUDF)) { if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); *err |= OMAP_I2C_STAT_XUDF; return -ETIMEDOUT; } cpu_relax(); *stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); } if (!timeout) { dev_err(dev->dev, "timeout waiting on XUDF bit\n"); return 0; } return 0; } static irqreturn_t omap_i2c_isr(int this_irq, void *dev_id) { struct omap_i2c_dev *dev = dev_id; u16 bits; u16 stat, w; int err, count = 0; if (pm_runtime_suspended(dev->dev)) return IRQ_NONE; bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); while ((stat = (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG))) & bits) { dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat); if (count++ == 100) { dev_warn(dev->dev, "Too much work in one IRQ\n"); break; } err = 0; complete: /* * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be * acked after the data operation is complete. 
* Ref: TRM SWPU114Q Figure 18-31 */ omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat & ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); if (stat & OMAP_I2C_STAT_NACK) err |= OMAP_I2C_STAT_NACK; if (stat & OMAP_I2C_STAT_AL) { dev_err(dev->dev, "Arbitration lost\n"); err |= OMAP_I2C_STAT_AL; } /* * ProDB0017052: Clear ARDY bit twice */ if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | OMAP_I2C_STAT_ARDY)); omap_i2c_complete_cmd(dev, err); return IRQ_HANDLED; } if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { u8 num_bytes = 1; if (dev->errata & I2C_OMAP_ERRATA_I207) i2c_omap_errata_i207(dev, stat); if (dev->fifo_size) { if (stat & OMAP_I2C_STAT_RRDY) num_bytes = dev->fifo_size; else /* read RXSTAT on RDR interrupt */ num_bytes = (omap_i2c_read_reg(dev, OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F; } while (num_bytes) { num_bytes--; w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG); if (dev->buf_len) { *dev->buf++ = w; dev->buf_len--; /* * Data reg in 2430, omap3 and * omap4 is 8 bit wide */ if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) { if (dev->buf_len) { *dev->buf++ = w >> 8; dev->buf_len--; } } } else { if (stat & OMAP_I2C_STAT_RRDY) dev_err(dev->dev, "RRDY IRQ while no data" " requested\n"); if (stat & OMAP_I2C_STAT_RDR) dev_err(dev->dev, "RDR IRQ while no data" " requested\n"); break; } } omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)); continue; } if (stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)) { u8 num_bytes = 1; if (dev->fifo_size) { if (stat & OMAP_I2C_STAT_XRDY) num_bytes = dev->fifo_size; else /* read TXSTAT on XDR interrupt */ num_bytes = omap_i2c_read_reg(dev, OMAP_I2C_BUFSTAT_REG) & 0x3F; } while (num_bytes) { num_bytes--; w = 0; if (dev->buf_len) { w = *dev->buf++; dev->buf_len--; /* * Data reg in 2430, omap3 and * omap4 is 8 bit wide */ if (dev->flags & 
OMAP_I2C_FLAG_16BIT_DATA_REG) { if (dev->buf_len) { w |= *dev->buf++ << 8; dev->buf_len--; } } } else { if (stat & OMAP_I2C_STAT_XRDY) dev_err(dev->dev, "XRDY IRQ while no " "data to send\n"); if (stat & OMAP_I2C_STAT_XDR) dev_err(dev->dev, "XDR IRQ while no " "data to send\n"); break; } if ((dev->errata & I2C_OMAP3_1P153) && errata_omap3_1p153(dev, &stat, &err)) goto complete; omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); } omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); continue; } if (stat & OMAP_I2C_STAT_ROVR) { dev_err(dev->dev, "Receive overrun\n"); dev->cmd_err |= OMAP_I2C_STAT_ROVR; } if (stat & OMAP_I2C_STAT_XUDF) { dev_err(dev->dev, "Transmit underflow\n"); dev->cmd_err |= OMAP_I2C_STAT_XUDF; } } return count ? IRQ_HANDLED : IRQ_NONE; } static const struct i2c_algorithm omap_i2c_algo = { .master_xfer = omap_i2c_xfer, .functionality = omap_i2c_func, }; #ifdef CONFIG_OF static struct omap_i2c_bus_platform_data omap3_pdata = { .rev = OMAP_I2C_IP_VERSION_1, .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_i2c_bus_platform_data omap4_pdata = { .rev = OMAP_I2C_IP_VERSION_2, }; static const struct of_device_id omap_i2c_of_match[] = { { .compatible = "ti,omap4-i2c", .data = &omap4_pdata, }, { .compatible = "ti,omap3-i2c", .data = &omap3_pdata, }, { }, }; MODULE_DEVICE_TABLE(of, omap_i2c_of_match); #endif static int __devinit omap_i2c_probe(struct platform_device *pdev) { struct omap_i2c_dev *dev; struct i2c_adapter *adap; struct resource *mem, *irq, *ioarea; struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data; struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; irq_handler_t isr; int r; /* NOTE: driver uses the static register mapping */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if 
(!irq) { dev_err(&pdev->dev, "no irq resource?\n"); return -ENODEV; } ioarea = request_mem_region(mem->start, resource_size(mem), pdev->name); if (!ioarea) { dev_err(&pdev->dev, "I2C region already claimed\n"); return -EBUSY; } dev = kzalloc(sizeof(struct omap_i2c_dev), GFP_KERNEL); if (!dev) { r = -ENOMEM; goto err_release_region; } match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev); if (match) { u32 freq = 100000; /* default to 100000 Hz */ pdata = match->data; dev->dtrev = pdata->rev; dev->flags = pdata->flags; of_property_read_u32(node, "clock-frequency", &freq); /* convert DT freq value in Hz into kHz for speed */ dev->speed = freq / 1000; } else if (pdata != NULL) { dev->speed = pdata->clkrate; dev->flags = pdata->flags; dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; dev->dtrev = pdata->rev; } dev->dev = &pdev->dev; dev->irq = irq->start; dev->base = ioremap(mem->start, resource_size(mem)); if (!dev->base) { r = -ENOMEM; goto err_free_mem; } platform_set_drvdata(pdev, dev); dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; if (dev->dtrev == OMAP_I2C_IP_VERSION_2) dev->regs = (u8 *)reg_map_ip_v2; else dev->regs = (u8 *)reg_map_ip_v1; pm_runtime_enable(dev->dev); pm_runtime_get_sync(dev->dev); dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff; if (dev->rev <= OMAP_I2C_REV_ON_3430) dev->errata |= I2C_OMAP3_1P153; if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) { u16 s; /* Set up the fifo size - Get total size */ s = (omap_i2c_read_reg(dev, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3; dev->fifo_size = 0x8 << s; /* * Set up notification threshold as half the total available * size. This is to ensure that we can handle the status on int * call back latencies. 
*/ dev->fifo_size = (dev->fifo_size / 2); if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) dev->b_hw = 0; /* Disable hardware fixes */ else dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ if (dev->set_mpu_wkup_lat != NULL) dev->latency = (1000000 * dev->fifo_size) / (1000 * dev->speed / 8); } /* reset ASAP, clearing any IRQs */ omap_i2c_init(dev); isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? omap_i2c_omap1_isr : omap_i2c_isr; r = request_irq(dev->irq, isr, 0, pdev->name, dev); if (r) { dev_err(dev->dev, "failure requesting irq %i\n", dev->irq); goto err_unuse_clocks; } dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id, dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed); pm_runtime_put(dev->dev); adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON; strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; /* i2c device drivers may be active on return from add_adapter() */ adap->nr = pdev->id; r = i2c_add_numbered_adapter(adap); if (r) { dev_err(dev->dev, "failure adding adapter\n"); goto err_free_irq; } of_i2c_register_devices(adap); return 0; err_free_irq: free_irq(dev->irq, dev); err_unuse_clocks: omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); pm_runtime_put(dev->dev); iounmap(dev->base); err_free_mem: platform_set_drvdata(pdev, NULL); kfree(dev); err_release_region: release_mem_region(mem->start, resource_size(mem)); return r; } static int omap_i2c_remove(struct platform_device *pdev) { struct omap_i2c_dev *dev = platform_get_drvdata(pdev); struct resource *mem; platform_set_drvdata(pdev, NULL); free_irq(dev->irq, dev); i2c_del_adapter(&dev->adapter); omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); iounmap(dev->base); kfree(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); return 0; } #ifdef 
CONFIG_PM_RUNTIME
/*
 * Runtime-PM suspend hook: mask interrupts and save controller state
 * via omap_i2c_idle().
 */
static int omap_i2c_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);

	omap_i2c_idle(_dev);

	return 0;
}

/*
 * Runtime-PM resume hook: restore the context saved by
 * omap_i2c_runtime_suspend() via omap_i2c_unidle().
 */
static int omap_i2c_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);

	omap_i2c_unidle(_dev);

	return 0;
}

static struct dev_pm_ops omap_i2c_pm_ops = {
	.runtime_suspend = omap_i2c_runtime_suspend,
	.runtime_resume = omap_i2c_runtime_resume,
};
#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
#else
/* No runtime PM: register no dev_pm_ops at all */
#define OMAP_I2C_PM_OPS NULL
#endif

static struct platform_driver omap_i2c_driver = {
	.probe		= omap_i2c_probe,
	.remove		= omap_i2c_remove,
	.driver		= {
		.name	= "omap_i2c",
		.owner	= THIS_MODULE,
		.pm	= OMAP_I2C_PM_OPS,
		.of_match_table = of_match_ptr(omap_i2c_of_match),
	},
};

/* I2C may be needed to bring up other drivers */
static int __init omap_i2c_init_driver(void)
{
	return platform_driver_register(&omap_i2c_driver);
}
/* subsys_initcall (not module_init): buses must exist before consumers */
subsys_initcall(omap_i2c_init_driver);

static void __exit omap_i2c_exit_driver(void)
{
	platform_driver_unregister(&omap_i2c_driver);
}
module_exit(omap_i2c_exit_driver);

MODULE_AUTHOR("MontaVista Software, Inc. (and others)");
MODULE_DESCRIPTION("TI OMAP I2C bus adapter");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_i2c");
gpl-2.0
whdghks913/android_kernel_pantech_ef47s
drivers/media/video/cx23885/cx23885-video.c
4884
49584
/* * Driver for the Conexant CX23885 PCIe bridge * * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/kthread.h> #include <asm/div64.h> #include "cx23885.h" #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "cx23885-ioctl.h" #include "tuner-xc2028.h" #include <media/cx25840.h> MODULE_DESCRIPTION("v4l2 driver module for cx23885 based TV cards"); MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); MODULE_LICENSE("GPL"); /* ------------------------------------------------------------------ */ static unsigned int video_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... 
(CX23885_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "video device numbers"); MODULE_PARM_DESC(vbi_nr, "vbi device numbers"); MODULE_PARM_DESC(radio_nr, "radio device numbers"); static unsigned int video_debug; module_param(video_debug, int, 0644); MODULE_PARM_DESC(video_debug, "enable debug messages [video]"); static unsigned int irq_debug; module_param(irq_debug, int, 0644); MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]"); static unsigned int vid_limit = 16; module_param(vid_limit, int, 0644); MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes"); #define dprintk(level, fmt, arg...)\ do { if (video_debug >= level)\ printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\ } while (0) /* ------------------------------------------------------------------- */ /* static data */ #define FORMAT_FLAGS_PACKED 0x01 #if 0 static struct cx23885_fmt formats[] = { { .name = "8 bpp, gray", .fourcc = V4L2_PIX_FMT_GREY, .depth = 8, .flags = FORMAT_FLAGS_PACKED, }, { .name = "15 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB555, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .name = "15 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB555X, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .name = "16 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB565, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .name = "16 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB565X, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .name = "24 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR24, .depth = 24, .flags = FORMAT_FLAGS_PACKED, }, { .name = "32 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR32, .depth = 32, .flags = FORMAT_FLAGS_PACKED, }, { .name = "32 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB32, .depth = 32, .flags = FORMAT_FLAGS_PACKED, }, { .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .name = "4:2:2, 
packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, }; #else static struct cx23885_fmt formats[] = { { #if 0 .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { #endif .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .flags = FORMAT_FLAGS_PACKED, } }; #endif static struct cx23885_fmt *format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); i++) if (formats[i].fourcc == fourcc) return formats+i; printk(KERN_ERR "%s(%c%c%c%c) NOT FOUND\n", __func__, (fourcc & 0xff), ((fourcc >> 8) & 0xff), ((fourcc >> 16) & 0xff), ((fourcc >> 24) & 0xff) ); return NULL; } /* ------------------------------------------------------------------- */ static const struct v4l2_queryctrl no_ctl = { .name = "42", .flags = V4L2_CTRL_FLAG_DISABLED, }; static struct cx23885_ctrl cx23885_ctls[] = { /* --- video --- */ { .v = { .id = V4L2_CID_BRIGHTNESS, .name = "Brightness", .minimum = 0x00, .maximum = 0xff, .step = 1, .default_value = 0x7f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 128, .reg = LUMA_CTRL, .mask = 0x00ff, .shift = 0, }, { .v = { .id = V4L2_CID_CONTRAST, .name = "Contrast", .minimum = 0, .maximum = 0x7f, .step = 1, .default_value = 0x3f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 0, .reg = LUMA_CTRL, .mask = 0xff00, .shift = 8, }, { .v = { .id = V4L2_CID_HUE, .name = "Hue", .minimum = -127, .maximum = 128, .step = 1, .default_value = 0x0, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 128, .reg = CHROMA_CTRL, .mask = 0xff0000, .shift = 16, }, { /* strictly, this only describes only U saturation. * V saturation is handled specially through code. 
*/ .v = { .id = V4L2_CID_SATURATION, .name = "Saturation", .minimum = 0, .maximum = 0x7f, .step = 1, .default_value = 0x3f, .type = V4L2_CTRL_TYPE_INTEGER, }, .off = 0, .reg = CHROMA_CTRL, .mask = 0x00ff, .shift = 0, }, { /* --- audio --- */ .v = { .id = V4L2_CID_AUDIO_MUTE, .name = "Mute", .minimum = 0, .maximum = 1, .default_value = 1, .type = V4L2_CTRL_TYPE_BOOLEAN, }, .reg = PATH1_CTL1, .mask = (0x1f << 24), .shift = 24, }, { .v = { .id = V4L2_CID_AUDIO_VOLUME, .name = "Volume", .minimum = 0, .maximum = 65535, .step = 65535 / 100, .default_value = 65535, .type = V4L2_CTRL_TYPE_INTEGER, }, .reg = PATH1_VOL_CTL, .mask = 0xff, .shift = 0, } }; static const int CX23885_CTLS = ARRAY_SIZE(cx23885_ctls); /* Must be sorted from low to high control ID! */ static const u32 cx23885_user_ctrls[] = { V4L2_CID_USER_CLASS, V4L2_CID_BRIGHTNESS, V4L2_CID_CONTRAST, V4L2_CID_SATURATION, V4L2_CID_HUE, V4L2_CID_AUDIO_VOLUME, V4L2_CID_AUDIO_MUTE, 0 }; static const u32 *ctrl_classes[] = { cx23885_user_ctrls, NULL }; void cx23885_video_wakeup(struct cx23885_dev *dev, struct cx23885_dmaqueue *q, u32 count) { struct cx23885_buffer *buf; int bc; for (bc = 0;; bc++) { if (list_empty(&q->active)) break; buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue); /* count comes from the hw and is is 16bit wide -- * this trick handles wrap-arounds correctly for * up to 32767 buffers in flight... 
 */
		if ((s16) (count - buf->count) < 0)
			break;
		do_gettimeofday(&buf->vb.ts);
		dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
			count, buf->count);
		buf->vb.state = VIDEOBUF_DONE;
		list_del(&buf->vb.queue);
		wake_up(&buf->vb.done);
	}
	/* Re-arm or cancel the stall timer depending on remaining buffers */
	if (list_empty(&q->active))
		del_timer(&q->timeout);
	else
		mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
	if (bc != 1)
		printk(KERN_ERR "%s: %d buffers handled (should be 1)\n",
			__func__, bc);
}

/*
 * Record the new TV standard and propagate it to every registered
 * subdevice.  Always returns 0.
 */
int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
{
	dprintk(1, "%s(norm = 0x%08x) name: [%s]\n",
		__func__,
		(unsigned int)norm,
		v4l2_norm_to_name(norm));

	dev->tvnorm = norm;

	call_all(dev, core, s_std, norm);

	return 0;
}

/*
 * Allocate a video_device, clone it from @template and name it after
 * the board plus @type.  Returns NULL on allocation failure; the
 * caller owns (and must release) the returned device.
 */
static struct video_device *cx23885_vdev_init(struct cx23885_dev *dev,
				    struct pci_dev *pci,
				    struct video_device *template,
				    char *type)
{
	struct video_device *vfd;
	dprintk(1, "%s()\n", __func__);

	vfd = video_device_alloc();
	if (NULL == vfd)
		return NULL;
	*vfd = *template;
	vfd->v4l2_dev = &dev->v4l2_dev;
	vfd->release = video_device_release;
	snprintf(vfd->name, sizeof(vfd->name), "%s (%s)",
		 cx23885_boards[dev->board].name, type);
	video_set_drvdata(vfd, dev);
	return vfd;
}

/*
 * VIDIOC_QUERYCTRL helper: look up @qctrl->id in cx23885_ctls[].
 * IDs outside the user-control range yield -EINVAL; valid-but-unknown
 * IDs are reported as the disabled placeholder control (no_ctl).
 */
static int cx23885_ctrl_query(struct v4l2_queryctrl *qctrl)
{
	int i;

	if (qctrl->id < V4L2_CID_BASE ||
	    qctrl->id >= V4L2_CID_LASTP1)
		return -EINVAL;
	for (i = 0; i < CX23885_CTLS; i++)
		if (cx23885_ctls[i].v.id == qctrl->id)
			break;
	if (i == CX23885_CTLS) {
		*qctrl = no_ctl;
		return 0;
	}
	*qctrl = cx23885_ctls[i].v;
	return 0;
}

/* ------------------------------------------------------------------- */
/* resource management                                                 */

/*
 * Try to claim resource @bit for file handle @fh.
 * Returns 1 when the handle owns it (or already did), 0 when another
 * handle holds it.
 */
static int res_get(struct cx23885_dev *dev, struct cx23885_fh *fh,
	unsigned int bit)
{
	dprintk(1, "%s()\n", __func__);
	if (fh->resources & bit)
		/* have it already allocated */
		return 1;

	/* is it free?
 */
	mutex_lock(&dev->lock);
	if (dev->resources & bit) {
		/* no, someone else uses it */
		mutex_unlock(&dev->lock);
		return 0;
	}
	/* it's free, grab it */
	fh->resources |= bit;
	dev->resources |= bit;
	dprintk(1, "res: get %d\n", bit);
	mutex_unlock(&dev->lock);
	return 1;
}

/* Nonzero when file handle @fh already owns resource @bit. */
static int res_check(struct cx23885_fh *fh, unsigned int bit)
{
	return fh->resources & bit;
}

/* Nonzero when any file handle owns resource @bit. */
static int res_locked(struct cx23885_dev *dev, unsigned int bit)
{
	return dev->resources & bit;
}

/*
 * Release resource @bits previously claimed through res_get().
 * BUGs if @fh does not actually own all of @bits.
 */
static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
	unsigned int bits)
{
	BUG_ON((fh->resources & bits) != bits);
	dprintk(1, "%s()\n", __func__);

	mutex_lock(&dev->lock);
	fh->resources &= ~bits;
	dev->resources &= ~bits;
	dprintk(1, "res: put %d\n", bits);
	mutex_unlock(&dev->lock);
}

/*
 * Write one 8-bit register of the "flatiron" audio part (I2C addr
 * 0x98>>1 on bus 2).  Returns the i2c_transfer() result.
 */
static int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
{
	/* 8 bit registers, 8 bit values */
	u8 buf[] = { reg, data };

	struct i2c_msg msg = { .addr = 0x98 >> 1, .flags = 0,
		.buf = buf, .len = 2 };

	return i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg, 1);
}

/*
 * Read one 8-bit register of the "flatiron" audio part.  Transfer
 * errors are only logged; the (then stale) buffer byte is returned.
 */
static u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
{
	/* 8 bit registers, 8 bit values */
	int ret;
	u8 b0[] = { reg };
	u8 b1[] = { 0 };

	struct i2c_msg msg[] = {
		{ .addr = 0x98 >> 1, .flags = 0, .buf = b0, .len = 1 },
		{ .addr = 0x98 >> 1, .flags = I2C_M_RD, .buf = b1, .len = 1 }
	};

	ret = i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg[0], 2);
	if (ret != 2)
		printk(KERN_ERR "%s() error\n", __func__);

	return b1[0];
}

/* Dump the first 0x24 flatiron registers at debug level 1. */
static void cx23885_flatiron_dump(struct cx23885_dev *dev)
{
	int i;
	dprintk(1, "Flatiron dump\n");
	for (i = 0; i < 0x24; i++) {
		dprintk(1, "FI[%02x] = %02x\n", i,
			cx23885_flatiron_read(dev, i));
	}
}

/*
 * Select flatiron audio channel 1 or 2 via the CH_SEL field; any
 * other @input returns -EINVAL.
 */
static int cx23885_flatiron_mux(struct cx23885_dev *dev, int input)
{
	u8 val;
	dprintk(1, "%s(input = %d)\n", __func__, input);

	if (input == 1)
		val = cx23885_flatiron_read(dev, CH_PWR_CTRL1) & ~FLD_CH_SEL;
	else if (input == 2)
		val = cx23885_flatiron_read(dev, CH_PWR_CTRL1) | FLD_CH_SEL;
	else
		return -EINVAL;

	val |= 0x20; /* Enable clock to
delta-sigma and dec filter */ cx23885_flatiron_write(dev, CH_PWR_CTRL1, val); /* Wake up */ cx23885_flatiron_write(dev, CH_PWR_CTRL2, 0); if (video_debug) cx23885_flatiron_dump(dev); return 0; } static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input) { dprintk(1, "%s() video_mux: %d [vmux=%d, gpio=0x%x,0x%x,0x%x,0x%x]\n", __func__, input, INPUT(input)->vmux, INPUT(input)->gpio0, INPUT(input)->gpio1, INPUT(input)->gpio2, INPUT(input)->gpio3); dev->input = input; if (dev->board == CX23885_BOARD_MYGICA_X8506 || dev->board == CX23885_BOARD_MAGICPRO_PROHDTVE2 || dev->board == CX23885_BOARD_MYGICA_X8507) { /* Select Analog TV */ if (INPUT(input)->type == CX23885_VMUX_TELEVISION) cx23885_gpio_clear(dev, GPIO_0); } /* Tell the internal A/V decoder */ v4l2_subdev_call(dev->sd_cx25840, video, s_routing, INPUT(input)->vmux, 0, 0); if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) || (dev->board == CX23885_BOARD_MPX885) || (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) { /* Configure audio routing */ v4l2_subdev_call(dev->sd_cx25840, audio, s_routing, INPUT(input)->amux, 0, 0); if (INPUT(input)->amux == CX25840_AUDIO7) cx23885_flatiron_mux(dev, 1); else if (INPUT(input)->amux == CX25840_AUDIO6) cx23885_flatiron_mux(dev, 2); } return 0; } static int cx23885_audio_mux(struct cx23885_dev *dev, unsigned int input) { dprintk(1, "%s(input=%d)\n", __func__, input); /* The baseband video core of the cx23885 has two audio inputs. * LR1 and LR2. In almost every single case so far only HVR1xxx * cards we've only ever supported LR1. Time to support LR2, * which is available via the optional white breakout header on * the board. * We'll use a could of existing enums in the card struct to allow * devs to specify which baseband input they need, or just default * to what we've always used. 
*/ if (INPUT(input)->amux == CX25840_AUDIO7) cx23885_flatiron_mux(dev, 1); else if (INPUT(input)->amux == CX25840_AUDIO6) cx23885_flatiron_mux(dev, 2); else { /* Not specifically defined, assume the default. */ cx23885_flatiron_mux(dev, 1); } return 0; } /* ------------------------------------------------------------------ */ static int cx23885_start_video_dma(struct cx23885_dev *dev, struct cx23885_dmaqueue *q, struct cx23885_buffer *buf) { dprintk(1, "%s()\n", __func__); /* Stop the dma/fifo before we tamper with it's risc programs */ cx_clear(VID_A_DMA_CTL, 0x11); /* setup fifo + format */ cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01], buf->bpl, buf->risc.dma); /* reset counter */ cx_write(VID_A_GPCNT_CTL, 3); q->count = 1; /* enable irq */ cx23885_irq_add_enable(dev, 0x01); cx_set(VID_A_INT_MSK, 0x000011); /* start dma */ cx_set(DEV_CNTRL2, (1<<5)); cx_set(VID_A_DMA_CTL, 0x11); /* FIFO and RISC enable */ return 0; } static int cx23885_restart_video_queue(struct cx23885_dev *dev, struct cx23885_dmaqueue *q) { struct cx23885_buffer *buf, *prev; struct list_head *item; dprintk(1, "%s()\n", __func__); if (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue); dprintk(2, "restart_queue [%p/%d]: restart dma\n", buf, buf->vb.i); cx23885_start_video_dma(dev, q, buf); list_for_each(item, &q->active) { buf = list_entry(item, struct cx23885_buffer, vb.queue); buf->count = q->count++; } mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); return 0; } prev = NULL; for (;;) { if (list_empty(&q->queued)) return 0; buf = list_entry(q->queued.next, struct cx23885_buffer, vb.queue); if (NULL == prev) { list_move_tail(&buf->vb.queue, &q->active); cx23885_start_video_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2, "[%p/%d] restart_queue - first active\n", buf, buf->vb.i); } else if (prev->vb.width == buf->vb.width && prev->vb.height == 
buf->vb.height && prev->fmt == buf->fmt) { list_move_tail(&buf->vb.queue, &q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63 - 32 */ dprintk(2, "[%p/%d] restart_queue - move to active\n", buf, buf->vb.i); } else { return 0; } prev = buf; } } static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct cx23885_fh *fh = q->priv_data; *size = fh->fmt->depth*fh->width*fh->height >> 3; if (0 == *count) *count = 32; if (*size * *count > vid_limit * 1024 * 1024) *count = (vid_limit * 1024 * 1024) / *size; return 0; } static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx23885_fh *fh = q->priv_data; struct cx23885_dev *dev = fh->dev; struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); int rc, init_buffer = 0; u32 line0_offset, line1_offset; struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); int field_tff; BUG_ON(NULL == fh->fmt); if (fh->width < 48 || fh->width > norm_maxw(dev->tvnorm) || fh->height < 32 || fh->height > norm_maxh(dev->tvnorm)) return -EINVAL; buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; if (buf->fmt != fh->fmt || buf->vb.width != fh->width || buf->vb.height != fh->height || buf->vb.field != field) { buf->fmt = fh->fmt; buf->vb.width = fh->width; buf->vb.height = fh->height; buf->vb.field = field; init_buffer = 1; } if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { init_buffer = 1; rc = videobuf_iolock(q, &buf->vb, NULL); if (0 != rc) goto fail; } if (init_buffer) { buf->bpl = buf->vb.width * buf->fmt->depth >> 3; switch (buf->vb.field) { case V4L2_FIELD_TOP: cx23885_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, UNSET, buf->bpl, 0, buf->vb.height); break; case V4L2_FIELD_BOTTOM: cx23885_risc_buffer(dev->pci, &buf->risc, dma->sglist, 
UNSET, 0, buf->bpl, 0, buf->vb.height); break; case V4L2_FIELD_INTERLACED: if (dev->tvnorm & V4L2_STD_NTSC) /* NTSC or */ field_tff = 1; else field_tff = 0; if (cx23885_boards[dev->board].force_bff) /* PAL / SECAM OR 888 in NTSC MODE */ field_tff = 0; if (field_tff) { /* cx25840 transmits NTSC bottom field first */ dprintk(1, "%s() Creating TFF/NTSC risc\n", __func__); line0_offset = buf->bpl; line1_offset = 0; } else { /* All other formats are top field first */ dprintk(1, "%s() Creating BFF/PAL/SECAM risc\n", __func__); line0_offset = 0; line1_offset = buf->bpl; } cx23885_risc_buffer(dev->pci, &buf->risc, dma->sglist, line0_offset, line1_offset, buf->bpl, buf->bpl, buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_TB: cx23885_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, buf->bpl * (buf->vb.height >> 1), buf->bpl, 0, buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_BT: cx23885_risc_buffer(dev->pci, &buf->risc, dma->sglist, buf->bpl * (buf->vb.height >> 1), 0, buf->bpl, 0, buf->vb.height >> 1); break; default: BUG(); } } dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", buf, buf->vb.i, fh->width, fh->height, fh->fmt->depth, fh->fmt->name, (unsigned long)buf->risc.dma); buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: cx23885_free_buffer(q, buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); struct cx23885_buffer *prev; struct cx23885_fh *fh = vq->priv_data; struct cx23885_dev *dev = fh->dev; struct cx23885_dmaqueue *q = &dev->vidq; /* add jump to stopper */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */ if (!list_empty(&q->queued)) { list_add_tail(&buf->vb.queue, &q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf, buf->vb.i); } else if 
(list_empty(&q->active)) { list_add_tail(&buf->vb.queue, &q->active); cx23885_start_video_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2, "[%p/%d] buffer_queue - first active\n", buf, buf->vb.i); } else { prev = list_entry(q->active.prev, struct cx23885_buffer, vb.queue); if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_add_tail(&buf->vb.queue, &q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); /* 64 bit bits 63-32 */ prev->risc.jmp[2] = cpu_to_le32(0); dprintk(2, "[%p/%d] buffer_queue - append to active\n", buf, buf->vb.i); } else { list_add_tail(&buf->vb.queue, &q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf, buf->vb.i); } } } static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); cx23885_free_buffer(q, buf); } static struct videobuf_queue_ops cx23885_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; static struct videobuf_queue *get_queue(struct cx23885_fh *fh) { switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &fh->vidq; case V4L2_BUF_TYPE_VBI_CAPTURE: return &fh->vbiq; default: BUG(); return NULL; } } static int get_resource(struct cx23885_fh *fh) { switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: return RESOURCE_VIDEO; case V4L2_BUF_TYPE_VBI_CAPTURE: return RESOURCE_VBI; default: BUG(); return 0; } } static int video_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx23885_dev *dev = video_drvdata(file); struct cx23885_fh *fh; enum v4l2_buf_type type = 0; int radio = 0; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case 
VFL_TYPE_VBI: type = V4L2_BUF_TYPE_VBI_CAPTURE; break; case VFL_TYPE_RADIO: radio = 1; break; } dprintk(1, "open dev=%s radio=%d type=%s\n", video_device_node_name(vdev), radio, v4l2_type_names[type]); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (NULL == fh) return -ENOMEM; file->private_data = fh; fh->dev = dev; fh->radio = radio; fh->type = type; fh->width = 320; fh->height = 240; fh->fmt = format_by_fourcc(V4L2_PIX_FMT_YUYV); videobuf_queue_sg_init(&fh->vidq, &cx23885_video_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx23885_buffer), fh, NULL); videobuf_queue_sg_init(&fh->vbiq, &cx23885_vbi_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct cx23885_buffer), fh, NULL); dprintk(1, "post videobuf_queue_init()\n"); return 0; } static ssize_t video_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct cx23885_fh *fh = file->private_data; switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (res_locked(fh->dev, RESOURCE_VIDEO)) return -EBUSY; return videobuf_read_one(&fh->vidq, data, count, ppos, file->f_flags & O_NONBLOCK); case V4L2_BUF_TYPE_VBI_CAPTURE: if (!res_get(fh->dev, fh, RESOURCE_VBI)) return -EBUSY; return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1, file->f_flags & O_NONBLOCK); default: BUG(); return 0; } } static unsigned int video_poll(struct file *file, struct poll_table_struct *wait) { struct cx23885_fh *fh = file->private_data; struct cx23885_buffer *buf; unsigned int rc = POLLERR; if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { if (!res_get(fh->dev, fh, RESOURCE_VBI)) return POLLERR; return videobuf_poll_stream(file, &fh->vbiq, wait); } mutex_lock(&fh->vidq.vb_lock); if (res_check(fh, RESOURCE_VIDEO)) { /* streaming capture */ if (list_empty(&fh->vidq.stream)) goto done; buf = list_entry(fh->vidq.stream.next, struct cx23885_buffer, vb.stream); } else { /* read() capture 
*/ buf = (struct cx23885_buffer *)fh->vidq.read_buf; if (NULL == buf) goto done; } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) rc = POLLIN|POLLRDNORM; else rc = 0; done: mutex_unlock(&fh->vidq.vb_lock); return rc; } static int video_release(struct file *file) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; /* turn off overlay */ if (res_check(fh, RESOURCE_OVERLAY)) { /* FIXME */ res_free(dev, fh, RESOURCE_OVERLAY); } /* stop video capture */ if (res_check(fh, RESOURCE_VIDEO)) { videobuf_queue_cancel(&fh->vidq); res_free(dev, fh, RESOURCE_VIDEO); } if (fh->vidq.read_buf) { buffer_release(&fh->vidq, fh->vidq.read_buf); kfree(fh->vidq.read_buf); } /* stop vbi capture */ if (res_check(fh, RESOURCE_VBI)) { if (fh->vbiq.streaming) videobuf_streamoff(&fh->vbiq); if (fh->vbiq.reading) videobuf_read_stop(&fh->vbiq); res_free(dev, fh, RESOURCE_VBI); } videobuf_mmap_free(&fh->vidq); videobuf_mmap_free(&fh->vbiq); file->private_data = NULL; kfree(fh); /* We are not putting the tuner to sleep here on exit, because * we want to use the mpeg encoder in another session to capture * tuner video. Closing this will result in no video to the encoder. 
*/ return 0; } static int video_mmap(struct file *file, struct vm_area_struct *vma) { struct cx23885_fh *fh = file->private_data; return videobuf_mmap_mapper(get_queue(fh), vma); } /* ------------------------------------------------------------------ */ /* VIDEO CTRL IOCTLS */ int cx23885_get_control(struct cx23885_dev *dev, struct v4l2_control *ctl) { dprintk(1, "%s() calling cx25840(VIDIOC_G_CTRL)\n", __func__); call_all(dev, core, g_ctrl, ctl); return 0; } int cx23885_set_control(struct cx23885_dev *dev, struct v4l2_control *ctl) { dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)\n", __func__); call_all(dev, core, s_ctrl, ctl); return 0; } static void init_controls(struct cx23885_dev *dev) { struct v4l2_control ctrl; int i; for (i = 0; i < CX23885_CTLS; i++) { ctrl.id = cx23885_ctls[i].v.id; ctrl.value = cx23885_ctls[i].v.default_value; cx23885_set_control(dev, &ctrl); } } /* ------------------------------------------------------------------ */ /* VIDEO IOCTLS */ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_fh *fh = priv; f->fmt.pix.width = fh->width; f->fmt.pix.height = fh->height; f->fmt.pix.field = fh->vidq.field; f->fmt.pix.pixelformat = fh->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * fh->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; struct cx23885_fmt *fmt; enum v4l2_field field; unsigned int maxw, maxh; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) return -EINVAL; field = f->fmt.pix.field; maxw = norm_maxw(dev->tvnorm); maxh = norm_maxh(dev->tvnorm); if (V4L2_FIELD_ANY == field) { field = (f->fmt.pix.height > maxh/2) ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_BOTTOM; } switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: maxh = maxh / 2; break; case V4L2_FIELD_INTERLACED: break; default: return -EINVAL; } f->fmt.pix.field = field; v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, &f->fmt.pix.height, 32, maxh, 0, 0); f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; struct v4l2_mbus_framefmt mbus_fmt; int err; dprintk(2, "%s()\n", __func__); err = vidioc_try_fmt_vid_cap(file, priv, f); if (0 != err) return err; fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); fh->width = f->fmt.pix.width; fh->height = f->fmt.pix.height; fh->vidq.field = f->fmt.pix.field; dprintk(2, "%s() width=%d height=%d field=%d\n", __func__, fh->width, fh->height, fh->vidq.field); v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED); call_all(dev, video, s_mbus_fmt, &mbus_fmt); v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt); return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; strcpy(cap->driver, "cx23885"); strlcpy(cap->card, cx23885_boards[dev->board].name, sizeof(cap->card)); sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_VBI_CAPTURE; if (UNSET != dev->tuner_type) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(formats))) return -EINVAL; strlcpy(f->description, formats[f->index].name, sizeof(f->description)); f->pixelformat = formats[f->index].fourcc; return 0; } static int 
vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct cx23885_fh *fh = priv; return videobuf_reqbufs(get_queue(fh), p); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct cx23885_fh *fh = priv; return videobuf_querybuf(get_queue(fh), p); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct cx23885_fh *fh = priv; return videobuf_qbuf(get_queue(fh), p); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct cx23885_fh *fh = priv; return videobuf_dqbuf(get_queue(fh), p, file->f_flags & O_NONBLOCK); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; dprintk(1, "%s()\n", __func__); if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; if (unlikely(i != fh->type)) return -EINVAL; if (unlikely(!res_get(dev, fh, get_resource(fh)))) return -EBUSY; /* Don't start VBI streaming unless vida streaming * has already started. 
*/ if ((fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) && ((cx_read(VID_A_DMA_CTL) & 0x11) == 0)) return -EINVAL; return videobuf_streamon(get_queue(fh)); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; int err, res; dprintk(1, "%s()\n", __func__); if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; if (i != fh->type) return -EINVAL; res = get_resource(fh); err = videobuf_streamoff(get_queue(fh)); if (err < 0) return err; res_free(dev, fh, res); return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; dprintk(1, "%s()\n", __func__); call_all(dev, core, g_std, id); return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *tvnorms) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; dprintk(1, "%s()\n", __func__); mutex_lock(&dev->lock); cx23885_set_tvnorm(dev, *tvnorms); mutex_unlock(&dev->lock); return 0; } int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i) { static const char *iname[] = { [CX23885_VMUX_COMPOSITE1] = "Composite1", [CX23885_VMUX_COMPOSITE2] = "Composite2", [CX23885_VMUX_COMPOSITE3] = "Composite3", [CX23885_VMUX_COMPOSITE4] = "Composite4", [CX23885_VMUX_SVIDEO] = "S-Video", [CX23885_VMUX_COMPONENT] = "Component", [CX23885_VMUX_TELEVISION] = "Television", [CX23885_VMUX_CABLE] = "Cable TV", [CX23885_VMUX_DVB] = "DVB", [CX23885_VMUX_DEBUG] = "for debug only", }; unsigned int n; dprintk(1, "%s()\n", __func__); n = i->index; if (n >= MAX_CX23885_INPUT) return -EINVAL; if (0 == INPUT(n)->type) return -EINVAL; i->index = n; i->type = V4L2_INPUT_TYPE_CAMERA; strcpy(i->name, iname[INPUT(n)->type]); if ((CX23885_VMUX_TELEVISION == INPUT(n)->type) || (CX23885_VMUX_CABLE == INPUT(n)->type)) { i->type = V4L2_INPUT_TYPE_TUNER; i->std = CX23885_NORMS; } /* Two selectable audio 
inputs for non-tv inputs */ if (INPUT(n)->type != CX23885_VMUX_TELEVISION) i->audioset = 0x3; if (dev->input == n) { /* enum'd input matches our configured input. * Ask the video decoder to process the call * and give it an oppertunity to update the * status field. */ call_all(dev, video, g_input_status, &i->status); } return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; dprintk(1, "%s()\n", __func__); return cx23885_enum_input(dev, i); } int cx23885_get_input(struct file *file, void *priv, unsigned int *i) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; *i = dev->input; dprintk(1, "%s() returns %d\n", __func__, *i); return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { return cx23885_get_input(file, priv, i); } int cx23885_set_input(struct file *file, void *priv, unsigned int i) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; dprintk(1, "%s(%d)\n", __func__, i); if (i >= MAX_CX23885_INPUT) { dprintk(1, "%s() -EINVAL\n", __func__); return -EINVAL; } if (INPUT(i)->type == 0) return -EINVAL; mutex_lock(&dev->lock); cx23885_video_mux(dev, i); /* By default establish the default audio input for the card also */ /* Caller is free to use VIDIOC_S_AUDIO to override afterwards */ cx23885_audio_mux(dev, i); mutex_unlock(&dev->lock); return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { return cx23885_set_input(file, priv, i); } static int vidioc_log_status(struct file *file, void *priv) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; printk(KERN_INFO "%s/0: ============ START LOG STATUS ============\n", dev->name); call_all(dev, core, log_status); printk(KERN_INFO "%s/0: ============= END LOG STATUS =============\n", dev->name); return 0; } static int cx23885_query_audinput(struct file *file, void *priv, struct v4l2_audio *i) { struct cx23885_dev *dev = 
((struct cx23885_fh *)priv)->dev; static const char *iname[] = { [0] = "Baseband L/R 1", [1] = "Baseband L/R 2", }; unsigned int n; dprintk(1, "%s()\n", __func__); n = i->index; if (n >= 2) return -EINVAL; memset(i, 0, sizeof(*i)); i->index = n; strcpy(i->name, iname[n]); i->capability = V4L2_AUDCAP_STEREO; i->mode = V4L2_AUDMODE_AVL; return 0; } static int vidioc_enum_audinput(struct file *file, void *priv, struct v4l2_audio *i) { return cx23885_query_audinput(file, priv, i); } static int vidioc_g_audinput(struct file *file, void *priv, struct v4l2_audio *i) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; i->index = dev->audinput; dprintk(1, "%s(input=%d)\n", __func__, i->index); return cx23885_query_audinput(file, priv, i); } static int vidioc_s_audinput(struct file *file, void *priv, struct v4l2_audio *i) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; if (i->index >= 2) return -EINVAL; dprintk(1, "%s(%d)\n", __func__, i->index); dev->audinput = i->index; /* Skip the audio defaults from the cards struct, caller wants * directly touch the audio mux hardware. 
*/ cx23885_flatiron_mux(dev, dev->audinput + 1); return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qctrl) { qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (unlikely(qctrl->id == 0)) return -EINVAL; return cx23885_ctrl_query(qctrl); } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; return cx23885_get_control(dev, ctl); } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctl) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; return cx23885_set_control(dev, ctl); } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); call_all(dev, tuner, g_tuner, t); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; if (UNSET == dev->tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; /* Update the A/V core */ call_all(dev, tuner, s_tuner, t); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; /* f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; */ f->type = fh->radio ? 
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = dev->freq; call_all(dev, tuner, g_frequency, f); return 0; } static int cx23885_set_freq(struct cx23885_dev *dev, struct v4l2_frequency *f) { struct v4l2_control ctrl; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; if (unlikely(f->tuner != 0)) return -EINVAL; mutex_lock(&dev->lock); dev->freq = f->frequency; /* I need to mute audio here */ ctrl.id = V4L2_CID_AUDIO_MUTE; ctrl.value = 1; cx23885_set_control(dev, &ctrl); call_all(dev, tuner, s_frequency, f); /* When changing channels it is required to reset TVAUDIO */ msleep(100); /* I need to unmute audio here */ ctrl.value = 0; cx23885_set_control(dev, &ctrl); mutex_unlock(&dev->lock); return 0; } static int cx23885_set_freq_via_ops(struct cx23885_dev *dev, struct v4l2_frequency *f) { struct v4l2_control ctrl; struct videobuf_dvb_frontend *vfe; struct dvb_frontend *fe; struct analog_parameters params = { .mode = V4L2_TUNER_ANALOG_TV, .audmode = V4L2_TUNER_MODE_STEREO, .std = dev->tvnorm, .frequency = f->frequency }; mutex_lock(&dev->lock); dev->freq = f->frequency; /* I need to mute audio here */ ctrl.id = V4L2_CID_AUDIO_MUTE; ctrl.value = 1; cx23885_set_control(dev, &ctrl); /* If HVR1850 */ dprintk(1, "%s() frequency=%d tuner=%d std=0x%llx\n", __func__, params.frequency, f->tuner, params.std); vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1); if (!vfe) { mutex_unlock(&dev->lock); return -EINVAL; } fe = vfe->dvb.frontend; if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) fe = &dev->ts1.analog_fe; if (fe && fe->ops.tuner_ops.set_analog_params) { call_all(dev, core, s_std, dev->tvnorm); fe->ops.tuner_ops.set_analog_params(fe, &params); } else printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__); /* When changing channels it is required to reset TVAUDIO */ msleep(100); /* I need to unmute audio here */ ctrl.value = 0; cx23885_set_control(dev, &ctrl); mutex_unlock(&dev->lock); return 0; } int cx23885_set_frequency(struct file *file, void 
*priv, struct v4l2_frequency *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; int ret; switch (dev->board) { case CX23885_BOARD_HAUPPAUGE_HVR1850: ret = cx23885_set_freq_via_ops(dev, f); break; default: ret = cx23885_set_freq(dev, f); } return ret; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { return cx23885_set_frequency(file, priv, f); } /* ----------------------------------------------------------- */ static void cx23885_vid_timeout(unsigned long data) { struct cx23885_dev *dev = (struct cx23885_dev *)data; struct cx23885_dmaqueue *q = &dev->vidq; struct cx23885_buffer *buf; unsigned long flags; spin_lock_irqsave(&dev->slock, flags); while (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue); list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); printk(KERN_ERR "%s: [%p/%d] timeout - dma=0x%08lx\n", dev->name, buf, buf->vb.i, (unsigned long)buf->risc.dma); } cx23885_restart_video_queue(dev, q); spin_unlock_irqrestore(&dev->slock, flags); } int cx23885_video_irq(struct cx23885_dev *dev, u32 status) { u32 mask, count; int handled = 0; mask = cx_read(VID_A_INT_MSK); if (0 == (status & mask)) return handled; cx_write(VID_A_INT_STAT, status); /* risc op code error, fifo overflow or line sync detection error */ if ((status & VID_BC_MSK_OPC_ERR) || (status & VID_BC_MSK_SYNC) || (status & VID_BC_MSK_OF)) { if (status & VID_BC_MSK_OPC_ERR) { dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n", VID_BC_MSK_OPC_ERR); printk(KERN_WARNING "%s: video risc op code error\n", dev->name); cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH01]); } if (status & VID_BC_MSK_SYNC) dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) " "video lines miss-match\n", VID_BC_MSK_SYNC); if (status & VID_BC_MSK_OF) dprintk(7, " (VID_BC_MSK_OF 0x%08x) fifo overflow\n", VID_BC_MSK_OF); } /* Video */ if (status & VID_BC_MSK_RISCI1) { spin_lock(&dev->slock); count = 
cx_read(VID_A_GPCNT); cx23885_video_wakeup(dev, &dev->vidq, count); spin_unlock(&dev->slock); handled++; } if (status & VID_BC_MSK_RISCI2) { dprintk(2, "stopper video\n"); spin_lock(&dev->slock); cx23885_restart_video_queue(dev, &dev->vidq); spin_unlock(&dev->slock); handled++; } /* Allow the VBI framework to process it's payload */ handled += cx23885_vbi_irq(dev, status); return handled; } /* ----------------------------------------------------------- */ /* exported stuff */ static const struct v4l2_file_operations video_fops = { .owner = THIS_MODULE, .open = video_open, .release = video_release, .read = video_read, .poll = video_poll, .mmap = video_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = cx23885_vbi_fmt, .vidioc_try_fmt_vbi_cap = cx23885_vbi_fmt, .vidioc_s_fmt_vbi_cap = cx23885_vbi_fmt, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_s_std = vidioc_s_std, .vidioc_g_std = vidioc_g_std, .vidioc_querystd = vidioc_g_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_log_status = vidioc_log_status, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_g_chip_ident = cx23885_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = cx23885_g_register, .vidioc_s_register = cx23885_s_register, #endif .vidioc_enumaudio = 
vidioc_enum_audinput, .vidioc_g_audio = vidioc_g_audinput, .vidioc_s_audio = vidioc_s_audinput, }; static struct video_device cx23885_vbi_template; static struct video_device cx23885_video_template = { .name = "cx23885-video", .fops = &video_fops, .ioctl_ops = &video_ioctl_ops, .tvnorms = CX23885_NORMS, .current_norm = V4L2_STD_NTSC_M, }; static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = video_open, .release = video_release, .ioctl = video_ioctl2, }; void cx23885_video_unregister(struct cx23885_dev *dev) { dprintk(1, "%s()\n", __func__); cx23885_irq_remove(dev, 0x01); if (dev->vbi_dev) { if (video_is_registered(dev->vbi_dev)) video_unregister_device(dev->vbi_dev); else video_device_release(dev->vbi_dev); dev->vbi_dev = NULL; btcx_riscmem_free(dev->pci, &dev->vbiq.stopper); } if (dev->video_dev) { if (video_is_registered(dev->video_dev)) video_unregister_device(dev->video_dev); else video_device_release(dev->video_dev); dev->video_dev = NULL; btcx_riscmem_free(dev->pci, &dev->vidq.stopper); } if (dev->audio_dev) cx23885_audio_unregister(dev); } int cx23885_video_register(struct cx23885_dev *dev) { int err; dprintk(1, "%s()\n", __func__); spin_lock_init(&dev->slock); /* Initialize VBI template */ memcpy(&cx23885_vbi_template, &cx23885_video_template, sizeof(cx23885_vbi_template)); strcpy(cx23885_vbi_template.name, "cx23885-vbi"); dev->tvnorm = cx23885_video_template.current_norm; /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); INIT_LIST_HEAD(&dev->vidq.queued); dev->vidq.timeout.function = cx23885_vid_timeout; dev->vidq.timeout.data = (unsigned long)dev; init_timer(&dev->vidq.timeout); cx23885_risc_stopper(dev->pci, &dev->vidq.stopper, VID_A_DMA_CTL, 0x11, 0x00); /* init vbi dma queues */ INIT_LIST_HEAD(&dev->vbiq.active); INIT_LIST_HEAD(&dev->vbiq.queued); dev->vbiq.timeout.function = cx23885_vbi_timeout; dev->vbiq.timeout.data = (unsigned long)dev; init_timer(&dev->vbiq.timeout); cx23885_risc_stopper(dev->pci, 
&dev->vbiq.stopper, VID_A_DMA_CTL, 0x22, 0x00); cx23885_irq_add_enable(dev, 0x01); if ((TUNER_ABSENT != dev->tuner_type) && ((dev->tuner_bus == 0) || (dev->tuner_bus == 1))) { struct v4l2_subdev *sd = NULL; if (dev->tuner_addr) sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[dev->tuner_bus].i2c_adap, "tuner", dev->tuner_addr, NULL); else sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[dev->tuner_bus].i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV)); if (sd) { struct tuner_setup tun_setup; memset(&tun_setup, 0, sizeof(tun_setup)); tun_setup.mode_mask = T_ANALOG_TV; tun_setup.type = dev->tuner_type; tun_setup.addr = v4l2_i2c_subdev_addr(sd); tun_setup.tuner_callback = cx23885_tuner_callback; v4l2_subdev_call(sd, tuner, s_type_addr, &tun_setup); if (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) { struct xc2028_ctrl ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64 }; struct v4l2_priv_tun_config cfg = { .tuner = dev->tuner_type, .priv = &ctrl }; v4l2_subdev_call(sd, tuner, s_config, &cfg); } } } /* register Video device */ dev->video_dev = cx23885_vdev_init(dev, dev->pci, &cx23885_video_template, "video"); err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER, video_nr[dev->nr]); if (err < 0) { printk(KERN_INFO "%s: can't register video device\n", dev->name); goto fail_unreg; } printk(KERN_INFO "%s: registered device %s [v4l2]\n", dev->name, video_device_node_name(dev->video_dev)); /* register VBI device */ dev->vbi_dev = cx23885_vdev_init(dev, dev->pci, &cx23885_vbi_template, "vbi"); err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, vbi_nr[dev->nr]); if (err < 0) { printk(KERN_INFO "%s: can't register vbi device\n", dev->name); goto fail_unreg; } printk(KERN_INFO "%s: registered device %s\n", dev->name, video_device_node_name(dev->vbi_dev)); /* Register ALSA audio device */ dev->audio_dev = cx23885_audio_register(dev); /* initial device configuration */ mutex_lock(&dev->lock); cx23885_set_tvnorm(dev, dev->tvnorm); 
init_controls(dev); cx23885_video_mux(dev, 0); cx23885_audio_mux(dev, 0); mutex_unlock(&dev->lock); return 0; fail_unreg: cx23885_video_unregister(dev); return err; }
gpl-2.0
ztemt/NX507J_5.1_kernel
drivers/net/ethernet/intel/igb/e1000_mbx.c
5140
11481
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* PF<->VF mailbox support for the igb driver.  The generic entry points
 * (igb_read_mbx, igb_write_mbx, ...) dispatch through hw->mbx.ops so the
 * same API serves both PF and VF sides; the *_pf functions below implement
 * the PF side against the 82576-class mailbox registers.
 */

#include "e1000_mbx.h"

/**
 *  igb_read_mbx - Reads a message from the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to read
 *
 *  returns SUCCESS if it successfully read message from buffer
 **/
s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	/* limit read to size of mailbox */
	if (size > mbx->size)
		size = mbx->size;

	if (mbx->ops.read)
		ret_val = mbx->ops.read(hw, msg, size, mbx_id);

	return ret_val;
}

/**
 *  igb_write_mbx - Write a message to the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully copied message into the buffer
 **/
s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = 0;

	/* unlike the read path, an oversized message is an error rather
	 * than being truncated
	 */
	if (size > mbx->size)
		ret_val = -E1000_ERR_MBX;

	else if (mbx->ops.write)
		ret_val = mbx->ops.write(hw, msg, size, mbx_id);

	return ret_val;
}

/**
 *  igb_check_for_msg - checks to see if someone sent us mail
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to check
 *
 *  returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	if (mbx->ops.check_for_msg)
		ret_val = mbx->ops.check_for_msg(hw, mbx_id);

	return ret_val;
}

/**
 *  igb_check_for_ack - checks to see if someone sent us ACK
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to check
 *
 *  returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	if (mbx->ops.check_for_ack)
		ret_val = mbx->ops.check_for_ack(hw, mbx_id);

	return ret_val;
}

/**
 *  igb_check_for_rst - checks to see if other side has reset
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to check
 *
 *  returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	if (mbx->ops.check_for_rst)
		ret_val = mbx->ops.check_for_rst(hw, mbx_id);

	return ret_val;
}

/**
 *  igb_poll_for_msg - Wait for message notification
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully received a message notification
 **/
static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	if (!countdown || !mbx->ops.check_for_msg)
		goto out;

	/* busy-poll with udelay between attempts until a message shows
	 * up or the countdown expires
	 */
	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		udelay(mbx->usec_delay);
	}

	/* if we failed, all future posted messages fail until reset */
	if (!countdown)
		mbx->timeout = 0;
out:
	return countdown ? 0 : -E1000_ERR_MBX;
}

/**
 *  igb_poll_for_ack - Wait for message acknowledgement
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully received a message acknowledgement
 **/
static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	if (!countdown || !mbx->ops.check_for_ack)
		goto out;

	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		udelay(mbx->usec_delay);
	}

	/* if we failed, all future posted messages fail until reset */
	if (!countdown)
		mbx->timeout = 0;
out:
	return countdown ? 0 : -E1000_ERR_MBX;
}

/**
 *  igb_read_posted_mbx - Wait for message notification and receive message
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully received a message notification and
 *  copied it into the receive buffer.
 **/
static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
			       u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	if (!mbx->ops.read)
		goto out;

	ret_val = igb_poll_for_msg(hw, mbx_id);

	if (!ret_val)
		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
	return ret_val;
}

/**
 *  igb_write_posted_mbx - Write a message to the mailbox, wait for ack
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully copied message into the buffer and
 *  received an ack to that message within delay * timeout period
 **/
static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
				u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	/* exit if either we can't write or there isn't a defined timeout */
	if (!mbx->ops.write || !mbx->timeout)
		goto out;

	/* send msg */
	ret_val = mbx->ops.write(hw, msg, size, mbx_id);

	/* if msg sent wait until we receive an ack */
	if (!ret_val)
		ret_val = igb_poll_for_ack(hw, mbx_id);
out:
	return ret_val;
}

/**
 *  igb_check_for_bit_pf - helper to test and clear MBVFICR bits
 *  @hw: pointer to the HW structure
 *  @mask: MBVFICR bit(s) to test
 *
 *  returns SUCCESS if any bit in @mask is set in MBVFICR; the set bits
 *  are acknowledged by writing them back (write-1-to-clear).
 **/
static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
{
	u32 mbvficr = rd32(E1000_MBVFICR);
	s32 ret_val = -E1000_ERR_MBX;

	if (mbvficr & mask) {
		ret_val = 0;
		wr32(E1000_MBVFICR, mask);
	}

	return ret_val;
}

/**
 *  igb_check_for_msg_pf - checks to see if the VF has sent mail
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
{
	s32 ret_val = -E1000_ERR_MBX;

	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
		ret_val = 0;
		hw->mbx.stats.reqs++;
	}

	return ret_val;
}

/**
 *  igb_check_for_ack_pf - checks to see if the VF has ACKed
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
{
	s32 ret_val = -E1000_ERR_MBX;

	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
		ret_val = 0;
		hw->mbx.stats.acks++;
	}

	return ret_val;
}

/**
 *  igb_check_for_rst_pf - checks to see if the VF has reset
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
{
	u32 vflre = rd32(E1000_VFLRE);
	s32 ret_val = -E1000_ERR_MBX;

	if (vflre & (1 << vf_number)) {
		ret_val = 0;
		/* VFLRE is write-1-to-clear; acknowledge this VF's bit */
		wr32(E1000_VFLRE, (1 << vf_number));
		hw->mbx.stats.rsts++;
	}

	return ret_val;
}

/**
 *  igb_obtain_mbx_lock_pf - obtain mailbox lock
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  return SUCCESS if we obtained the mailbox lock
 **/
static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
	s32 ret_val = -E1000_ERR_MBX;
	u32 p2v_mailbox;

	/* Take ownership of the buffer */
	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);

	/* reserve mailbox for vf use; read back to confirm the PFU bit
	 * stuck (the hardware rejects it if the VF holds the lock)
	 */
	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
	if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
		ret_val = 0;

	return ret_val;
}

/**
 *  igb_write_mbx_pf - Places a message in the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if it successfully copied message into the buffer
 **/
static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
			    u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	igb_check_for_msg_pf(hw, vf_number);
	igb_check_for_ack_pf(hw, vf_number);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);

	/* Interrupt VF to tell it a message has been sent and release buffer*/
	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out_no_write:
	return ret_val;

}

/**
 *  igb_read_mbx_pf - Read a message from the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @vf_number: the VF index
 *
 *  This function copies a message from the mailbox buffer to the caller's
 *  memory buffer.  The presumption is that the caller knows that there was
 *  a message due to a VF request so no polling for message is needed.
 **/
static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
			   u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_read;

	/* copy the message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);

	/* Acknowledge the message and release buffer */
	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}

/**
 *  igb_init_mbx_params_pf - set initial values for pf mailbox
 *  @hw: pointer to the HW structure
 *
 *  Initializes the hw->mbx struct to correct values for pf mailbox
 *  (note: the PF side never polls, so timeout/usec_delay stay zero).
 */
s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;

	mbx->timeout = 0;
	mbx->usec_delay = 0;

	mbx->size = E1000_VFMAILBOX_SIZE;

	mbx->ops.read = igb_read_mbx_pf;
	mbx->ops.write = igb_write_mbx_pf;
	mbx->ops.read_posted = igb_read_posted_mbx;
	mbx->ops.write_posted = igb_write_posted_mbx;
	mbx->ops.check_for_msg = igb_check_for_msg_pf;
	mbx->ops.check_for_ack = igb_check_for_ack_pf;
	mbx->ops.check_for_rst = igb_check_for_rst_pf;

	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	return 0;
}
gpl-2.0
yank555-lu/N3-CM11-OSRC
arch/x86/boot/early_serial_console.c
11540
3436
/*
 * Minimal 8250/16550 early serial console for the x86 boot stub.
 *
 * Two command-line syntaxes select the port and baud rate:
 *   earlyprintk=serial[,0x3f8|ttyS0|ttyS1][,baud]
 *   console=uart8250,io,0x3f8[,baud]  /  console=uart,io,0x3f8[,baud]
 * console_init() is the single public entry point.
 */
#include "boot.h"

#define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */

#define XMTRDY          0x20

#define DLAB		0x80

#define TXR             0       /*  Transmit register (WRITE) */
#define RXR             0       /*  Receive register  (READ)  */
#define IER             1       /*  Interrupt Enable          */
#define IIR             2       /*  Interrupt ID              */
#define FCR             2       /*  FIFO control              */
#define LCR             3       /*  Line control              */
#define MCR             4       /*  Modem control             */
#define LSR             5       /*  Line Status               */
#define MSR             6       /*  Modem Status              */
#define DLL             0       /*  Divisor Latch Low         */
#define DLH             1       /*  Divisor latch High        */

#define DEFAULT_BAUD 9600

/*
 * Program the UART at I/O base @port for 8n1, no interrupts, no FIFO,
 * at @baud, and record the port in early_serial_base so later output
 * helpers can find it.
 */
static void early_serial_init(int port, int baud)
{
	unsigned char c;
	unsigned divisor;

	outb(0x3, port + LCR);	/* 8n1 */
	outb(0, port + IER);	/* no interrupt */
	outb(0, port + FCR);	/* no fifo */
	outb(0x3, port + MCR);	/* DTR + RTS */

	/* Divisor latch is only reachable with DLAB set in LCR */
	divisor	= 115200 / baud;
	c = inb(port + LCR);
	outb(c | DLAB, port + LCR);
	outb(divisor & 0xff, port + DLL);
	outb((divisor >> 8) & 0xff, port + DLH);
	outb(c & ~DLAB, port + LCR);

	early_serial_base = port;
}

/*
 * Parse "earlyprintk=serial[,port][,baud]" from the kernel command line
 * and initialize the UART if a port was selected.
 */
static void parse_earlyprintk(void)
{
	int baud = DEFAULT_BAUD;
	char arg[32];
	int pos = 0;
	int port = 0;

	if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) {
		char *e;

		if (!strncmp(arg, "serial", 6)) {
			port = DEFAULT_SERIAL_PORT;
			pos += 6;
		}

		if (arg[pos] == ',')
			pos++;

		/*
		 * make sure we have
		 *	"serial,0x3f8,115200"
		 *	"serial,ttyS0,115200"
		 *	"ttyS0,115200"
		 *
		 * NOTE(review): the pos == 7 test means a hex port is only
		 * recognized after the "serial," prefix — confirm that bare
		 * "earlyprintk=0x3f8,..." is intentionally unsupported.
		 */
		if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
			port = simple_strtoull(arg + pos, &e, 16);
			if (port == 0 || arg + pos == e)
				port = DEFAULT_SERIAL_PORT;
			else
				pos = e - arg;
		} else if (!strncmp(arg + pos, "ttyS", 4)) {
			static const int bases[] = { 0x3f8, 0x2f8 };
			int idx = 0;

			/* the branch condition already matched "ttyS" */
			pos += 4;

			/*
			 * Only ttyS0 and ttyS1 are supported; any other
			 * digit falls back to the ttyS0 base.
			 */
			if (arg[pos++] == '1')
				idx = 1;

			port = bases[idx];
		}

		if (arg[pos] == ',')
			pos++;

		baud = simple_strtoull(arg + pos, &e, 0);
		if (baud == 0 || arg + pos == e)
			baud = DEFAULT_BAUD;
	}

	if (port)
		early_serial_init(port, baud);
}

#define BASE_BAUD (1843200/16)

/*
 * Read back the divisor latch that firmware programmed and convert it
 * to a baud rate, so "console=uart8250,io,PORT" without an explicit
 * baud keeps the firmware's speed.
 */
static unsigned int probe_baud(int port)
{
	unsigned char lcr, dll, dlh;
	unsigned int quot;

	lcr = inb(port + LCR);
	outb(lcr | DLAB, port + LCR);
	dll = inb(port + DLL);
	dlh = inb(port + DLH);
	outb(lcr, port + LCR);
	quot = (dlh << 8) | dll;

	return BASE_BAUD / quot;
}

/*
 * Parse "console=uart8250,io,0x3f8[,baud]" (or the "uart,io," spelling)
 * and initialize the UART if a port was selected.
 */
static void parse_console_uart8250(void)
{
	char optstr[64], *options;
	int baud = DEFAULT_BAUD;
	int port = 0;

	/*
	 * console=uart8250,io,0x3f8,115200n8
	 * need to make sure it is last one console !
	 */
	if (cmdline_find_option("console", optstr, sizeof optstr) <= 0)
		return;

	options = optstr;

	if (!strncmp(options, "uart8250,io,", 12))
		port = simple_strtoull(options + 12, &options, 0);
	else if (!strncmp(options, "uart,io,", 8))
		port = simple_strtoull(options + 8, &options, 0);
	else
		return;

	if (options && (options[0] == ','))
		baud = simple_strtoull(options + 1, &options, 0);
	else
		baud = probe_baud(port);

	if (port)
		early_serial_init(port, baud);
}

/*
 * Entry point: earlyprintk= takes precedence; console=uart8250 is only
 * consulted if earlyprintk did not set up a port.
 */
void console_init(void)
{
	parse_earlyprintk();

	if (!early_serial_base)
		parse_console_uart8250();
}
gpl-2.0
CyanideL/android_kernel_samsung_mondrianwifi
arch/powerpc/boot/treeboot-walnut.c
14100
2215
/*
 * Old U-boot compatibility for Walnut
 *
 * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
 *
 * Copyright 2007 IBM Corporation
 *   Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "dcr.h"
#include "4xx.h"
#include "io.h"

BSS_STACK(4096);

/*
 * The Walnut board can boot from either flash or SRAM depending on an
 * FPGA strap (BRDS1 bit 0).  Read the strap and, if set, swap the base
 * addresses of the flash and sram nodes in the device tree so they match
 * the actual chip-select mapping.  Any failure is fatal: without a
 * correct memory map the kernel cannot boot.
 */
static void walnut_flashsel_fixup(void)
{
	void *devp, *sram;
	/* default reg triples: <chip-select, offset, size = 512 KiB> */
	u32 reg_flash[3] = {0x0, 0x0, 0x80000};
	u32 reg_sram[3] = {0x0, 0x0, 0x80000};
	u8 *fpga;
	u8 fpga_brds1 = 0x0;

	devp = finddevice("/plb/ebc/fpga");
	if (!devp)
		fatal("Couldn't locate FPGA node\n\r");

	/* virtual-reg holds the already-mapped FPGA register address */
	if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga))
		fatal("no virtual-reg property\n\r");

	fpga_brds1 = in_8(fpga);

	devp = finddevice("/plb/ebc/flash");
	if (!devp)
		fatal("Couldn't locate flash node\n\r");

	if (getprop(devp, "reg", reg_flash, sizeof(reg_flash)) != sizeof(reg_flash))
		fatal("flash reg property has unexpected size\n\r");

	sram = finddevice("/plb/ebc/sram");
	if (!sram)
		fatal("Couldn't locate sram node\n\r");

	if (getprop(sram, "reg", reg_sram, sizeof(reg_sram)) != sizeof(reg_sram))
		fatal("sram reg property has unexpected size\n\r");

	/* strap set: flash and sram swap 512 KiB windows (XOR the offset) */
	if (fpga_brds1 & 0x1) {
		reg_flash[1] ^= 0x80000;
		reg_sram[1] ^= 0x80000;
	}

	setprop(devp, "reg", reg_flash, sizeof(reg_flash));
	setprop(sram, "reg", reg_sram, sizeof(reg_sram));
}

/* OpenBIOS keeps the board MAC address at this fixed flash location */
#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b

/*
 * Device-tree fixups run before the kernel starts: fill in the memory
 * size, clock frequencies, quiesce the EMAC, fix EBC ranges, select the
 * right flash/sram mapping and patch in the MAC address.
 */
static void walnut_fixups(void)
{
	ibm4xx_sdram_fixup_memsize();
	ibm405gp_fixup_clocks(33330000, 0xa8c000);
	ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
	ibm4xx_fixup_ebc_ranges("/plb/ebc");
	walnut_flashsel_fixup();
	dt_fixup_mac_address_by_alias("ethernet0", (u8 *) WALNUT_OPENBIOS_MAC_OFF);
}

/*
 * Bootwrapper entry point: set up the heap between _end and the 32 MiB
 * RAM ceiling, install the fixup/exit hooks, and bring up the flattened
 * device tree and serial console.
 */
void platform_init(void)
{
	unsigned long end_of_ram = 0x2000000;
	unsigned long avail_ram = end_of_ram - (unsigned long) _end;

	simple_alloc_init(_end, avail_ram, 32, 32);
	platform_ops.fixups = walnut_fixups;
	platform_ops.exit = ibm40x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
gpl-2.0
an0ninja/xbmc
xbmc/windowing/windows/WinSystemWin32DX.cpp
21
5861
/*
 *      Copyright (C) 2005-2013 Team XBMC
 *      http://xbmc.org
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING.  If not, see
 *  <http://www.gnu.org/licenses/>.
 *
 */

#include "WinSystemWin32DX.h"
#include "guilib/gui3d.h"
#include "guilib/GraphicContext.h"
#include "settings/DisplaySettings.h"
#include "settings/Settings.h"
#include "threads/SingleLock.h"
#include "utils/CharsetConverter.h"

#ifdef HAS_DX

// Glue layer between the Win32 windowing code and the DirectX render
// system: forwards window events to CRenderSystemDX and sequences the
// delicate DXGI fullscreen <-> windowed transitions.

CWinSystemWin32DX::CWinSystemWin32DX()
: CRenderSystemDX()
{

}

CWinSystemWin32DX::~CWinSystemWin32DX()
{

}

// Present a rendered frame; also services a pending delayed display
// reset and throttles (~25 fps) when nothing was rendered.
void CWinSystemWin32DX::PresentRender(bool rendered, bool videoLayer)
{
  if (rendered)
    PresentRenderImpl(rendered);
  if (m_delayDispReset && m_dispResetTimer.IsTimePast())
  {
    m_delayDispReset = false;
    CWinSystemWin32::OnDisplayReset();
  }
  if (!rendered)
    Sleep(40);
}

// Windowed DX is used either when the user forces "fake fullscreen"
// or when not in fullscreen at all.
bool CWinSystemWin32DX::UseWindowedDX(bool fullScreen)
{
  return (CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOSCREEN_FAKEFULLSCREEN) || !fullScreen);
}

// Create the Win32 window, then point the render system at it and at
// the monitor the requested resolution lives on.
bool CWinSystemWin32DX::CreateNewWindow(std::string name, bool fullScreen, RESOLUTION_INFO& res, PHANDLE_EVENT_FUNC userFunction)
{
  if(!CWinSystemWin32::CreateNewWindow(name, fullScreen, res, userFunction))
    return false;

  SetFocusWnd(m_hWnd);
  SetDeviceWnd(m_hWnd);
  CRenderSystemDX::m_interlaced = ((res.dwFlags & D3DPRESENTFLAG_INTERLACED) != 0);
  CRenderSystemDX::m_useWindowedDX = UseWindowedDX(fullScreen);
  SetRenderParams(m_nWidth, m_nHeight, fullScreen, res.fRefreshRate);

  const MONITOR_DETAILS* monitor = GetMonitor(res.iScreen);
  if (!monitor)
    return false;

  SetMonitor(monitor->hMonitor);
  return true;
}

// Re-resolve the monitor handle for the current screen (e.g. after a
// display topology change).
void CWinSystemWin32DX::UpdateMonitor()
{
  const MONITOR_DETAILS* monitor = GetMonitor(m_nScreen);
  if (monitor)
    SetMonitor(monitor->hMonitor);
}

// Resize the OS window first, then let the render system rebuild its
// size-dependent resources.
bool CWinSystemWin32DX::ResizeWindow(int newWidth, int newHeight, int newLeft, int newTop)
{
  CWinSystemWin32::ResizeWindow(newWidth, newHeight, newLeft, newTop);
  CRenderSystemDX::OnResize(newWidth, newHeight);

  return true;
}

void CWinSystemWin32DX::OnMove(int x, int y)
{
  CRenderSystemDX::OnMove();
}

// Switch between fullscreen and windowed modes.  The ordering below is
// deliberate and fragile: DXGI must leave true-fullscreen *before* the
// window mode changes, and window resize/move events must be suppressed
// while the transition is in flight.
bool CWinSystemWin32DX::SetFullScreen(bool fullScreen, RESOLUTION_INFO& res, bool blankOtherDisplays)
{
  // When going DX fullscreen -> windowed, we must switch DXGI device to windowed mode first to
  // get it out of fullscreen mode because it restores a former resolution.
  // We then change to the mode we want.
  // In other cases, set the window/mode then swith DXGI mode.
  bool FS2Windowed = !m_useWindowedDX && UseWindowedDX(fullScreen);

  const MONITOR_DETAILS* monitor = GetMonitor(res.iScreen);
  if (!monitor)
    return false;

  SetMonitor(monitor->hMonitor);
  CRenderSystemDX::m_interlaced = ((res.dwFlags & D3DPRESENTFLAG_INTERLACED) != 0);
  CRenderSystemDX::m_useWindowedDX = UseWindowedDX(fullScreen);

  // this needed to prevent resize/move events from DXGI during changing mode
  CWinSystemWin32::m_IsAlteringWindow = true;
  if (FS2Windowed)
    CRenderSystemDX::SetFullScreenInternal();

  if (!m_useWindowedDX)
    SetForegroundWindowInternal(m_hWnd);

  // most 3D content has 23.976fps, so switch for this mode
  if (g_graphicsContext.GetStereoMode() == RENDER_STEREO_MODE_HARDWAREBASED)
    res = CDisplaySettings::GetInstance().GetResolutionInfo(CResolutionUtils::ChooseBestResolution(24.f / 1.001f, res.iWidth, true));

  // so this flags delays call SetFullScreen _after_ resetting render system
  bool delaySetFS = CRenderSystemDX::m_bHWStereoEnabled;
  if (!delaySetFS)
    CWinSystemWin32::SetFullScreen(fullScreen, res, blankOtherDisplays);

  // this needed to prevent resize/move events from DXGI during changing mode
  // NOTE(review): m_IsAlteringWindow is already true at this point (set
  // above) — the second assignment looks redundant; confirm whether
  // CWinSystemWin32::SetFullScreen can clear it in between.
  CWinSystemWin32::m_IsAlteringWindow = true;
  CRenderSystemDX::ResetRenderSystem(res.iWidth, res.iHeight, fullScreen, res.fRefreshRate);

  if (delaySetFS)
  {
    // now resize window and force changing resolution if stereo mode disabled
    if (UseWindowedDX(fullScreen))
      CWinSystemWin32::SetFullScreenEx(fullScreen, res, blankOtherDisplays, !CRenderSystemDX::m_bHWStereoEnabled);
    else
    {
      CRenderSystemDX::SetFullScreenInternal();
      CRenderSystemDX::CreateWindowSizeDependentResources();
    }
  }
  CWinSystemWin32::m_IsAlteringWindow = false;

  return true;
}

// Fetch the current clipboard contents as UTF-8 (empty string when the
// clipboard has no unicode text or cannot be opened).
std::string CWinSystemWin32DX::GetClipboardText(void)
{
  std::wstring unicode_text;
  std::string utf8_text;

  if (OpenClipboard(NULL))
  {
    HGLOBAL hglb = GetClipboardData(CF_UNICODETEXT);
    if (hglb != NULL)
    {
      LPWSTR lpwstr = (LPWSTR) GlobalLock(hglb);
      if (lpwstr != NULL)
      {
        unicode_text = lpwstr;
        GlobalUnlock(hglb);
      }
    }
    CloseClipboard();
  }

  g_charsetConverter.wToUTF8(unicode_text, utf8_text);

  return utf8_text;
}

// On focus change in true fullscreen: leave exclusive mode when losing
// focus (and minimize), re-enter it when gaining focus.
void CWinSystemWin32DX::NotifyAppFocusChange(bool bGaining)
{
  CWinSystemWin32::NotifyAppFocusChange(bGaining);

  // if true fullscreen we need switch render system to/from ff manually like dx9 does
  if (!UseWindowedDX(m_bFullScreen) && CRenderSystemDX::m_bRenderCreated)
  {
    CRenderSystemDX::m_useWindowedDX = !bGaining;
    CRenderSystemDX::SetFullScreenInternal();
    if (bGaining)
      CRenderSystemDX::CreateWindowSizeDependentResources();

    // minimize window on lost focus
    if (!bGaining)
      ShowWindow(m_hWnd, SW_FORCEMINIMIZE);
  }
}

#endif
gpl-2.0
gbenson/binutils-gdb
binutils/rescoff.c
21
22161
/* rescoff.c -- read and write resources in Windows COFF files. Copyright (C) 1997-2015 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support. Rewritten by Kai Tietz, Onevision. This file is part of GNU Binutils. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* This file contains function that read and write Windows resources in COFF files. */ #include "sysdep.h" #include "bfd.h" #include "bucomm.h" #include "libiberty.h" #include "windres.h" #include <assert.h> /* In order to use the address of a resource data entry, we need to get the image base of the file. Right now we extract it from internal BFD information. FIXME. */ #include "coff/internal.h" #include "libcoff.h" /* Information we extract from the file. */ struct coff_file_info { /* File name. */ const char *filename; /* Data read from the file. */ const bfd_byte *data; /* End of data read from file. */ const bfd_byte *data_end; /* Address of the resource section minus the image base of the file. */ rc_uint_type secaddr; }; /* A resource directory table in a COFF file. */ struct __attribute__ ((__packed__)) extern_res_directory { /* Characteristics. */ bfd_byte characteristics[4]; /* Time stamp. */ bfd_byte time[4]; /* Major version number. */ bfd_byte major[2]; /* Minor version number. */ bfd_byte minor[2]; /* Number of named directory entries. 
*/ bfd_byte name_count[2]; /* Number of directory entries with IDs. */ bfd_byte id_count[2]; }; /* A resource directory entry in a COFF file. */ struct extern_res_entry { /* Name or ID. */ bfd_byte name[4]; /* Address of resource entry or subdirectory. */ bfd_byte rva[4]; }; /* A resource data entry in a COFF file. */ struct extern_res_data { /* Address of resource data. This is apparently a file relative address, rather than a section offset. */ bfd_byte rva[4]; /* Size of resource data. */ bfd_byte size[4]; /* Code page. */ bfd_byte codepage[4]; /* Reserved. */ bfd_byte reserved[4]; }; /* Local functions. */ static void overrun (const struct coff_file_info *, const char *); static rc_res_directory *read_coff_res_dir (windres_bfd *, const bfd_byte *, const struct coff_file_info *, const rc_res_id *, int); static rc_res_resource *read_coff_data_entry (windres_bfd *, const bfd_byte *, const struct coff_file_info *, const rc_res_id *); /* Read the resources in a COFF file. */ rc_res_directory * read_coff_rsrc (const char *filename, const char *target) { rc_res_directory *ret; bfd *abfd; windres_bfd wrbfd; char **matching; asection *sec; bfd_size_type size; bfd_byte *data; struct coff_file_info flaginfo; if (filename == NULL) fatal (_("filename required for COFF input")); abfd = bfd_openr (filename, target); if (abfd == NULL) bfd_fatal (filename); if (! bfd_check_format_matches (abfd, bfd_object, &matching)) { bfd_nonfatal (bfd_get_filename (abfd)); if (bfd_get_error () == bfd_error_file_ambiguously_recognized) list_matching_formats (matching); xexit (1); } sec = bfd_get_section_by_name (abfd, ".rsrc"); if (sec == NULL) { fatal (_("%s: no resource section"), filename); } set_windres_bfd (&wrbfd, abfd, sec, WR_KIND_BFD); size = bfd_section_size (abfd, sec); /* PR 17512: file: 1b25ba5d The call to get_file_size here may be expensive but there is no other way to determine if the section size is reasonable. 
*/ if (size > (bfd_size_type) get_file_size (filename)) fatal (_("%s: .rsrc section is bigger than the file!"), filename); data = (bfd_byte *) res_alloc (size); get_windres_bfd_content (&wrbfd, data, 0, size); flaginfo.filename = filename; flaginfo.data = data; flaginfo.data_end = data + size; flaginfo.secaddr = (bfd_get_section_vma (abfd, sec) - pe_data (abfd)->pe_opthdr.ImageBase); /* Now just read in the top level resource directory. Note that we don't free data, since we create resource entries that point into it. If we ever want to free up the resource information we read, this will have to be cleaned up. */ ret = read_coff_res_dir (&wrbfd, data, &flaginfo, (const rc_res_id *) NULL, 0); bfd_close (abfd); return ret; } /* Give an error if we are out of bounds. */ static void overrun (const struct coff_file_info *flaginfo, const char *msg) { fatal (_("%s: %s: address out of bounds"), flaginfo->filename, msg); } /* Read a resource directory. */ static rc_res_directory * read_coff_res_dir (windres_bfd *wrbfd, const bfd_byte *data, const struct coff_file_info *flaginfo, const rc_res_id *type, int level) { const struct extern_res_directory *erd; rc_res_directory *rd; int name_count, id_count, i; rc_res_entry **pp; const struct extern_res_entry *ere; /* PR 17512: file: 09d80f53. Whilst in theory resources can nest to any level, in practice Microsoft only defines 3 levels. Corrupt files however might claim to use more. 
*/ if (level > 4) overrun (flaginfo, _("Resources nest too deep")); if ((size_t) (flaginfo->data_end - data) < sizeof (struct extern_res_directory)) overrun (flaginfo, _("directory")); erd = (const struct extern_res_directory *) data; rd = (rc_res_directory *) res_alloc (sizeof (rc_res_directory)); rd->characteristics = windres_get_32 (wrbfd, erd->characteristics, 4); rd->time = windres_get_32 (wrbfd, erd->time, 4); rd->major = windres_get_16 (wrbfd, erd->major, 2); rd->minor = windres_get_16 (wrbfd, erd->minor, 2); rd->entries = NULL; name_count = windres_get_16 (wrbfd, erd->name_count, 2); id_count = windres_get_16 (wrbfd, erd->id_count, 2); pp = &rd->entries; /* The resource directory entries immediately follow the directory table. */ ere = (const struct extern_res_entry *) (erd + 1); for (i = 0; i < name_count; i++, ere++) { rc_uint_type name, rva; rc_res_entry *re; const bfd_byte *ers; int length, j; if ((const bfd_byte *) ere >= flaginfo->data_end) overrun (flaginfo, _("named directory entry")); name = windres_get_32 (wrbfd, ere->name, 4); rva = windres_get_32 (wrbfd, ere->rva, 4); /* For some reason the high bit in NAME is set. */ name &=~ 0x80000000; if (name > (rc_uint_type) (flaginfo->data_end - flaginfo->data)) overrun (flaginfo, _("directory entry name")); ers = flaginfo->data + name; re = (rc_res_entry *) res_alloc (sizeof *re); re->next = NULL; re->id.named = 1; length = windres_get_16 (wrbfd, ers, 2); re->id.u.n.length = length; re->id.u.n.name = (unichar *) res_alloc (length * sizeof (unichar)); for (j = 0; j < length; j++) { /* PR 17512: file: 05dc4a16. 
*/ if (length < 0 || ers >= (bfd_byte *) ere || ers + j * 2 + 4 >= (bfd_byte *) ere) overrun (flaginfo, _("resource name")); re->id.u.n.name[j] = windres_get_16 (wrbfd, ers + j * 2 + 2, 2); } if (level == 0) type = &re->id; if ((rva & 0x80000000) != 0) { rva &=~ 0x80000000; if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data)) overrun (flaginfo, _("named subdirectory")); re->subdir = 1; re->u.dir = read_coff_res_dir (wrbfd, flaginfo->data + rva, flaginfo, type, level + 1); } else { if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data)) overrun (flaginfo, _("named resource")); re->subdir = 0; re->u.res = read_coff_data_entry (wrbfd, flaginfo->data + rva, flaginfo, type); } *pp = re; pp = &re->next; } for (i = 0; i < id_count; i++, ere++) { unsigned long name, rva; rc_res_entry *re; if ((const bfd_byte *) ere >= flaginfo->data_end) overrun (flaginfo, _("ID directory entry")); name = windres_get_32 (wrbfd, ere->name, 4); rva = windres_get_32 (wrbfd, ere->rva, 4); re = (rc_res_entry *) res_alloc (sizeof *re); re->next = NULL; re->id.named = 0; re->id.u.id = name; if (level == 0) type = &re->id; if ((rva & 0x80000000) != 0) { rva &=~ 0x80000000; if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data)) overrun (flaginfo, _("ID subdirectory")); re->subdir = 1; re->u.dir = read_coff_res_dir (wrbfd, flaginfo->data + rva, flaginfo, type, level + 1); } else { if (rva >= (rc_uint_type) (flaginfo->data_end - flaginfo->data)) overrun (flaginfo, _("ID resource")); re->subdir = 0; re->u.res = read_coff_data_entry (wrbfd, flaginfo->data + rva, flaginfo, type); } *pp = re; pp = &re->next; } return rd; } /* Read a resource data entry. 
*/

/* Read a single COFF resource data entry at DATA and convert it to the
   internal resource representation.  TYPE is the top-level type id that
   was captured while walking the directory tree; it selects how the raw
   bytes are decoded by bin_to_res.  All offsets are bounds-checked
   against FLAGINFO before being dereferenced.  */

static rc_res_resource *
read_coff_data_entry (windres_bfd *wrbfd, const bfd_byte *data,
		      const struct coff_file_info *flaginfo,
		      const rc_res_id *type)
{
  const struct extern_res_data *erd;
  rc_res_resource *r;
  rc_uint_type size, rva;
  const bfd_byte *resdata;

  if (type == NULL)
    fatal (_("resource type unknown"));

  /* Make sure the fixed-size data entry record fits in the section.  */
  if ((size_t) (flaginfo->data_end - data) < sizeof (struct extern_res_data))
    overrun (flaginfo, _("data entry"));

  erd = (const struct extern_res_data *) data;

  size = windres_get_32 (wrbfd, erd->size, 4);
  rva = windres_get_32 (wrbfd, erd->rva, 4);
  /* The data entry RVA is image-relative (unlike the section-relative
     directory offsets), so translate via the section address before
     bounds-checking.  */
  if (rva < flaginfo->secaddr
      || rva - flaginfo->secaddr >= (rc_uint_type) (flaginfo->data_end - flaginfo->data))
    overrun (flaginfo, _("resource data"));

  resdata = flaginfo->data + (rva - flaginfo->secaddr);

  if (size > (rc_uint_type) (flaginfo->data_end - resdata))
    overrun (flaginfo, _("resource data size"));

  r = bin_to_res (wrbfd, *type, resdata, size);

  memset (&r->res_info, 0, sizeof (rc_res_res_info));
  r->coff_info.codepage = windres_get_32 (wrbfd, erd->codepage, 4);
  r->coff_info.reserved = windres_get_32 (wrbfd, erd->reserved, 4);

  return r;
}

/* This structure is used to build a singly linked list of bindata
   structures (chunks of raw output).  */

struct bindata_build
{
  /* The data.  */
  bindata *d;
  /* The last structure we have added to the list.  */
  bindata *last;
  /* The size of the list as a whole.  */
  unsigned long length;
};

/* Like bindata_build, but for resource payloads which are converted to
   binary lazily (only their lengths are recorded up front).  */

struct coff_res_data_build
{
  /* The data.  */
  coff_res_data *d;
  /* The last structure we have added to the list.  */
  coff_res_data *last;
  /* The size of the list as a whole.  */
  unsigned long length;
};

/* This structure keeps track of information as we build the directory
   tree.  */

struct coff_write_info
{
  /* These fields are based on the BFD.  */
  /* The BFD itself.  */
  windres_bfd *wrbfd;
  /* Pointer to section symbol used to build RVA relocs.  */
  asymbol **sympp;

  /* These fields are computed initially, and then not changed.  */
  /* Length of directory tables and entries.  */
  unsigned long dirsize;
  /* Length of directory entry strings.  */
  unsigned long dirstrsize;
  /* Length of resource data entries.  */
  unsigned long dataentsize;

  /* These fields are updated as we add data.  */
  /* Directory tables and entries.  */
  struct bindata_build dirs;
  /* Directory entry strings.  */
  struct bindata_build dirstrs;
  /* Resource data entries.  */
  struct bindata_build dataents;
  /* Actual resource data.  */
  struct coff_res_data_build resources;
  /* Relocations.  */
  arelent **relocs;
  /* Number of relocations.  */
  unsigned int reloc_count;
};

static void coff_bin_sizes (const rc_res_directory *, struct coff_write_info *);
static bfd_byte *coff_alloc (struct bindata_build *, rc_uint_type);
static void coff_to_bin (const rc_res_directory *, struct coff_write_info *);
static void coff_res_to_bin (const rc_res_resource *, struct coff_write_info *);

/* Write resources to a COFF file.  RESOURCES should already be sorted.

   Right now we always create a new file.  Someday we should also offer
   the ability to merge resources into an existing file.  This would
   require doing the basic work of objcopy, just modifying or adding
   the .rsrc section.  */

void
write_coff_file (const char *filename, const char *target,
		 const rc_res_directory *resources)
{
  bfd *abfd;
  asection *sec;
  struct coff_write_info cwi;
  windres_bfd wrbfd;
  bindata *d;
  coff_res_data *rd;
  unsigned long length, offset;

  if (filename == NULL)
    fatal (_("filename required for COFF output"));

  abfd = bfd_openw (filename, target);
  if (abfd == NULL)
    bfd_fatal (filename);

  if (! bfd_set_format (abfd, bfd_object))
    bfd_fatal ("bfd_set_format");

#if defined DLLTOOL_SH
  if (! bfd_set_arch_mach (abfd, bfd_arch_sh, 0))
    bfd_fatal ("bfd_set_arch_mach(sh)");
#elif defined DLLTOOL_MIPS
  if (! bfd_set_arch_mach (abfd, bfd_arch_mips, 0))
    bfd_fatal ("bfd_set_arch_mach(mips)");
#elif defined DLLTOOL_ARM
  if (! bfd_set_arch_mach (abfd, bfd_arch_arm, 0))
    bfd_fatal ("bfd_set_arch_mach(arm)");
#else
  /* FIXME: This is obviously i386 specific.  */
  if (! bfd_set_arch_mach (abfd, bfd_arch_i386, 0))
    bfd_fatal ("bfd_set_arch_mach(i386)");
#endif

  if (! bfd_set_file_flags (abfd, HAS_SYMS | HAS_RELOC))
    bfd_fatal ("bfd_set_file_flags");

  sec = bfd_make_section_with_flags (abfd, ".rsrc",
				     (SEC_HAS_CONTENTS | SEC_ALLOC
				      | SEC_LOAD | SEC_DATA));
  if (sec == NULL)
    bfd_fatal ("bfd_make_section");

  if (! bfd_set_symtab (abfd, sec->symbol_ptr_ptr, 1))
    bfd_fatal ("bfd_set_symtab");

  /* Requiring this is probably a bug in BFD.  */
  sec->output_section = sec;

  /* The order of data in the .rsrc section is
       resource directory tables and entries
       resource directory strings
       resource data entries
       actual resource data

     We build these different types of data in different lists.  */

  set_windres_bfd (&wrbfd, abfd, sec, WR_KIND_BFD);

  cwi.wrbfd = &wrbfd;
  cwi.sympp = sec->symbol_ptr_ptr;
  cwi.dirsize = 0;
  cwi.dirstrsize = 0;
  cwi.dataentsize = 0;
  cwi.dirs.d = NULL;
  cwi.dirs.last = NULL;
  cwi.dirs.length = 0;
  cwi.dirstrs.d = NULL;
  cwi.dirstrs.last = NULL;
  cwi.dirstrs.length = 0;
  cwi.dataents.d = NULL;
  cwi.dataents.last = NULL;
  cwi.dataents.length = 0;
  cwi.resources.d = NULL;
  cwi.resources.last = NULL;
  cwi.resources.length = 0;
  cwi.relocs = NULL;
  cwi.reloc_count = 0;

  /* Work out the sizes of the resource directory entries, so that we
     know the various offsets we will need.  */
  coff_bin_sizes (resources, &cwi);

  /* Force the directory strings to be 64 bit aligned.  Every other
     structure is 64 bit aligned anyhow.  */
  cwi.dirstrsize = (cwi.dirstrsize + 7) & ~7;

  /* Actually convert the resources to binary.  */
  coff_to_bin (resources, &cwi);

  /* Add another few bytes to the directory strings if needed for
     alignment.  */
  if ((cwi.dirstrs.length & 7) != 0)
    {
      rc_uint_type pad = 8 - (cwi.dirstrs.length & 7);
      bfd_byte *ex;

      ex = coff_alloc (& cwi.dirstrs, pad);
      memset (ex, 0, pad);
    }

  /* Make sure that the data we built came out to the same size as we
     calculated initially.  */
  assert (cwi.dirs.length == cwi.dirsize);
  assert (cwi.dirstrs.length == cwi.dirstrsize);
  assert (cwi.dataents.length == cwi.dataentsize);

  length = (cwi.dirsize
	    + cwi.dirstrsize
	    + cwi.dataentsize
	    + cwi.resources.length);

  if (! bfd_set_section_size (abfd, sec, length))
    bfd_fatal ("bfd_set_section_size");

  bfd_set_reloc (abfd, sec, cwi.relocs, cwi.reloc_count);

  /* Write the four regions out in order; OFFSET tracks the running
     section offset across all of them.  */
  offset = 0;
  for (d = cwi.dirs.d; d != NULL; d = d->next)
    {
      if (! bfd_set_section_contents (abfd, sec, d->data, offset, d->length))
	bfd_fatal ("bfd_set_section_contents");
      offset += d->length;
    }
  for (d = cwi.dirstrs.d; d != NULL; d = d->next)
    {
      set_windres_bfd_content (&wrbfd, d->data, offset, d->length);
      offset += d->length;
    }
  for (d = cwi.dataents.d; d != NULL; d = d->next)
    {
      set_windres_bfd_content (&wrbfd, d->data, offset, d->length);
      offset += d->length;
    }
  /* Resource payloads are serialized only now, directly at their final
     offsets.  */
  for (rd = cwi.resources.d; rd != NULL; rd = rd->next)
    {
      res_to_bin (cwi.wrbfd, (rc_uint_type) offset, rd->res);
      offset += rd->length;
    }

  assert (offset == length);

  if (! bfd_close (abfd))
    bfd_fatal ("bfd_close");

  /* We allocated the relocs array using malloc.  */
  free (cwi.relocs);
}

/* Work out the sizes of the various fixed size resource directory
   entries.  This updates fields in CWI.  */

static void
coff_bin_sizes (const rc_res_directory *resdir,
		struct coff_write_info *cwi)
{
  const rc_res_entry *re;

  cwi->dirsize += sizeof (struct extern_res_directory);

  for (re = resdir->entries; re != NULL; re = re->next)
    {
      cwi->dirsize += sizeof (struct extern_res_entry);

      if (re->id.named)
	/* Length-prefixed unicode string: 2 bytes per char plus a
	   2-byte length field.  */
	cwi->dirstrsize += re->id.u.n.length * 2 + 2;

      if (re->subdir)
	coff_bin_sizes (re->u.dir, cwi);
      else
	cwi->dataentsize += sizeof (struct extern_res_data);
    }
}

/* Allocate data for a particular list.  Appends a new SIZE-byte chunk
   to BB and returns a pointer to its (uninitialized) storage.  */

static bfd_byte *
coff_alloc (struct bindata_build *bb, rc_uint_type size)
{
  bindata *d;

  d = (bindata *) reswr_alloc (sizeof (bindata));

  d->next = NULL;
  d->data = (bfd_byte *) reswr_alloc (size);
  d->length = size;

  if (bb->d == NULL)
    bb->d = d;
  else
    bb->last->next = d;
  bb->last = d;
  bb->length += size;

  return d->data;
}

/* Convert the resource directory RESDIR to binary.  */

static void
coff_to_bin (const rc_res_directory *resdir, struct coff_write_info *cwi)
{
  struct extern_res_directory *erd;
  int ci, cn;
  const rc_res_entry *e;
  struct extern_res_entry *ere;

  /* Write out the directory table.  */

  erd = ((struct extern_res_directory *)
	 coff_alloc (&cwi->dirs, sizeof (*erd)));

  windres_put_32 (cwi->wrbfd, erd->characteristics, resdir->characteristics);
  windres_put_32 (cwi->wrbfd, erd->time, resdir->time);
  windres_put_16 (cwi->wrbfd, erd->major, resdir->major);
  windres_put_16 (cwi->wrbfd, erd->minor, resdir->minor);

  ci = 0;
  cn = 0;
  for (e = resdir->entries; e != NULL; e = e->next)
    {
      if (e->id.named)
	++cn;
      else
	++ci;
    }

  windres_put_16 (cwi->wrbfd, erd->name_count, cn);
  windres_put_16 (cwi->wrbfd, erd->id_count, ci);

  /* Write out the data entries.  Note that we allocate space for all
     the entries before writing them out.  That permits a recursive
     call to work correctly when writing out subdirectories.  */

  ere = ((struct extern_res_entry *)
	 coff_alloc (&cwi->dirs, (ci + cn) * sizeof (*ere)));
  for (e = resdir->entries; e != NULL; e = e->next, ere++)
    {
      if (! e->id.named)
	windres_put_32 (cwi->wrbfd, ere->name, e->id.u.id);
      else
	{
	  bfd_byte *str;
	  rc_uint_type i;

	  /* For some reason existing files seem to have the high bit
	     set on the address of the name, although that is not
	     documented.  */
	  windres_put_32 (cwi->wrbfd, ere->name,
			  0x80000000 | (cwi->dirsize + cwi->dirstrs.length));

	  str = coff_alloc (&cwi->dirstrs, e->id.u.n.length * 2 + 2);
	  windres_put_16 (cwi->wrbfd, str, e->id.u.n.length);
	  for (i = 0; i < e->id.u.n.length; i++)
	    windres_put_16 (cwi->wrbfd, str + (i + 1) * sizeof (unichar),
			    e->id.u.n.name[i]);
	}

      if (e->subdir)
	{
	  /* High bit marks the RVA as pointing at a subdirectory
	     rather than a data entry.  */
	  windres_put_32 (cwi->wrbfd, ere->rva,
			  0x80000000 | cwi->dirs.length);
	  coff_to_bin (e->u.dir, cwi);
	}
      else
	{
	  windres_put_32 (cwi->wrbfd, ere->rva,
			  cwi->dirsize
			  + cwi->dirstrsize
			  + cwi->dataents.length);
	  coff_res_to_bin (e->u.res, cwi);
	}
    }
}

/* Convert the resource RES to binary.  */

static void
coff_res_to_bin (const rc_res_resource *res, struct coff_write_info *cwi)
{
  arelent *r;
  struct extern_res_data *erd;
  coff_res_data *d;

  /* For some reason, although every other address is a section
     offset, the address of the resource data itself is an RVA.  That
     means that we need to generate a relocation for it.  We allocate
     the relocs array using malloc so that we can use realloc.  FIXME:
     This relocation handling is correct for the i386, but probably
     not for any other target.  */

  r = (arelent *) reswr_alloc (sizeof (arelent));
  r->sym_ptr_ptr = cwi->sympp;
  r->address = cwi->dirsize + cwi->dirstrsize + cwi->dataents.length;
  r->addend = 0;
  r->howto = bfd_reloc_type_lookup (WR_BFD (cwi->wrbfd), BFD_RELOC_RVA);
  if (r->howto == NULL)
    bfd_fatal (_("can't get BFD_RELOC_RVA relocation type"));

  /* Keep the array NULL-terminated, hence the "+ 2".  */
  cwi->relocs = xrealloc (cwi->relocs,
			  (cwi->reloc_count + 2) * sizeof (arelent *));
  cwi->relocs[cwi->reloc_count] = r;
  cwi->relocs[cwi->reloc_count + 1] = NULL;
  ++cwi->reloc_count;

  erd = (struct extern_res_data *) coff_alloc (&cwi->dataents,
					       sizeof (*erd));

  windres_put_32 (cwi->wrbfd, erd->rva,
		  (cwi->dirsize
		   + cwi->dirstrsize
		   + cwi->dataentsize
		   + cwi->resources.length));
  windres_put_32 (cwi->wrbfd, erd->codepage, res->coff_info.codepage);
  windres_put_32 (cwi->wrbfd, erd->reserved, res->coff_info.reserved);

  d = (coff_res_data *) reswr_alloc (sizeof (coff_res_data));
  /* First pass with a NULL BFD only computes the serialized length;
     the payload is actually written later by write_coff_file.  */
  d->length = res_to_bin (NULL, (rc_uint_type) 0, res);
  d->res = res;
  d->next = NULL;

  if (cwi->resources.d == NULL)
    cwi->resources.d = d;
  else
    cwi->resources.last->next = d;

  cwi->resources.last = d;
  cwi->resources.length += (d->length + 7) & ~7;

  windres_put_32 (cwi->wrbfd, erd->size, d->length);

  /* Force the next resource to have 64 bit alignment.  */
  d->length = (d->length + 7) & ~7;
}
gpl-2.0
DirtyUnicorns/android_kernel_samsung_espresso10
drivers/media/radio/si470x/radio-si470x-dev.c
21
25970
/*
 * Copyright (C) 2011 Samsung Electronics Co, Ltd.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/ioctl.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/wakelock.h>
#ifdef CONFIG_OMAP_PM
#include <plat/omap-pm.h>
static struct pm_qos_request_list pm_qos_dpll_handle;
#endif

#include "radio-si470x.h"
#include "radio-si470x-dev.h"

#define SI4709_DRIVER_NAME	"fmradio"
#define RDS_TIMEOUT		1000

/* Held while the tuner is powered so the system cannot suspend mid-RX. */
struct wake_lock fm_prevent_suspend_lock;

/*
 * Power the chip up: pulse the reset GPIO, enable the device, select RDS
 * verbose mode, and program SYSCONFIG1..3 with the default de-emphasis,
 * spacing, volume and seek thresholds.  Takes the FM wake lock; the lock
 * is released again on any failure path.
 */
static int si470x_dev_powerup(struct si470x_device *radio)
{
	int ret;

	wake_lock_init(&fm_prevent_suspend_lock, WAKE_LOCK_SUSPEND,
			"fm_prevent_suspend");
	wake_lock(&fm_prevent_suspend_lock);

	/* Resetting the device */
	enable_irq(radio->si470x_irq);
	radio->pdata->reset_gpio_on(0);
	msleep(20);
	radio->pdata->reset_gpio_on(1);

	radio->registers[POWERCFG] = POWERCFG_DMUTE | POWERCFG_ENABLE;
	radio->registers[POWERCFG] &= ~POWERCFG_DISABLE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
		goto err;
	} else {
		/* Si4709/09 datasheet: Table 7 (powerup settling time) */
		msleep(110);
	}

	/* RDS verbose mode, seek can wrap the band limits */
	radio->registers[POWERCFG] |= (0x01 << 11) & POWERCFG_RDSM;
	radio->registers[POWERCFG] &= ~POWERCFG_SKMODE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
		goto err;
	}

	/* 50us de-emphasis, seek-complete interrupt, RDS on, GPIO2 int */
	radio->registers[SYSCONFIG1] = (((0x01 << 11) & SYSCONFIG1_DE) |
			((0x01 << 14) & SYSCONFIG1_STCIEN) |
			((0x01 << 12) & SYSCONFIG1_RDS) | (0x01 << 2));
	radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDSIEN;
	ret = si470x_set_register(radio, SYSCONFIG1);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconfig1 reg\n", __func__);
		ret = -1;
		goto err;
	}

	/* 100kHz spacing, max volume, default RSSI seek threshold */
	radio->registers[SYSCONFIG2] |=
			((0x01 << 4) & SYSCONFIG2_SPACE_100KHZ) |
			(0x0F & SYSCONFIG2_VOLUME) | (0x9 << 8);
	ret = si470x_set_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconfig2 reg\n", __func__);
		ret = -1;
		goto err;
	}

	/* Seek SNR threshold 4, seek FM impulse count 4 */
	radio->registers[SYSCONFIG3] |=
			((0x04 << 4) & SYSCONFIG3_SKSNR_MIN4) | 0x04;
	ret = si470x_set_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf3 reg\n", __func__);
		ret = -1;
		goto err;
	}

	return 0;
err:
	wake_unlock(&fm_prevent_suspend_lock);
	wake_lock_destroy(&fm_prevent_suspend_lock);
	return ret;
}

/*
 * Power the chip down, mute it first to avoid a pop, hold the reset GPIO
 * low and release the FM wake lock.
 */
static int si470x_dev_powerdown(struct si470x_device *radio)
{
	int ret;

	msleep(500);	/* To avoid turn off pop noise */

	radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;
	radio->registers[SYSCONFIG1] |= (0x01 << 3) & SYSCONFIG1_GPO_LOW;
	ret = si470x_set_register(radio, SYSCONFIG1);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf1 reg\n", __func__);
		ret = -1;
		goto err;
	}

	/* Setting both ENABLE and DISABLE starts the powerdown sequence */
	radio->registers[POWERCFG] |= (POWERCFG_DISABLE | POWERCFG_ENABLE);
	radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
		goto err;
	}

	/* Resetting the device */
	radio->pdata->reset_gpio_on(0);
	radio->pdata->reset_gpio_on(1);
	radio->pdata->reset_gpio_on(0);
	disable_irq(radio->si470x_irq);

	wake_unlock(&fm_prevent_suspend_lock);
	wake_lock_destroy(&fm_prevent_suspend_lock);
	return 0;
err:
	wake_unlock(&fm_prevent_suspend_lock);
	wake_lock_destroy(&fm_prevent_suspend_lock);
	return ret;
}

/* Select one of the three supported band ranges in SYSCONFIG2. */
static int si470x_dev_band_set(struct si470x_device *radio, int band)
{
	int ret;

	switch (band) {
	case BAND_87500_108000_kHz:
		radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_BAND_875MHZ;
		break;
	case BAND_76000_108000_kHz:
		radio->registers[SYSCONFIG2] |= SYSCONFIG2_BAND_76MHZ;
		break;
	case BAND_76000_90000_kHz:
		radio->registers[SYSCONFIG2] |= SYSCONFIG2_BAND;
		break;
	default:
		ret = -1;
		goto err;
	}

	ret = si470x_set_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf2 reg\n", __func__);
		ret = -1;
		goto err;
	}
	return 0;
err:
	return ret;
}

/* Select the channel spacing (200/100/50 kHz) in SYSCONFIG2. */
static int si470x_dev_ch_spacing_set(struct si470x_device *radio,
		int ch_spacing)
{
	int ret;

	switch (ch_spacing) {
	case CHAN_SPACING_200_kHz:
		radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_SPACE;
		break;
	case CHAN_SPACING_100_kHz:
		radio->registers[SYSCONFIG2] |= SYSCONFIG2_SPACE_100KHZ;
		break;
	case CHAN_SPACING_50_kHz:
		radio->registers[SYSCONFIG2] |= SYSCONFIG2_SPACE_50KHZ;
		break;
	default:
		ret = -1;
		goto err;
	}

	ret = si470x_set_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf2 reg\n", __func__);
		ret = -1;
		goto err;
	}
	return 0;
err:
	return ret;
}

/* Tune directly to FREQUENCY (units as expected by si470x_set_freq). */
static int si470x_dev_chan_select(struct si470x_device *radio, u32 frequency)
{
	int ret;

	ret = si470x_set_freq(radio, frequency);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting the freq\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Program the RSSI seek threshold (SYSCONFIG2 bits 15:8). */
static int si470x_dev_rssi_seek_th_set(struct si470x_device *radio, u8 seek_th)
{
	int ret;

	radio->registers[SYSCONFIG2] &= SYSCONFIG2_BAND_SPA_VOL;
	radio->registers[SYSCONFIG2] |= (seek_th << 8) & SYSCONFIG2_SEEKTH;
	ret = si470x_set_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf2 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Program the seek SNR threshold (SYSCONFIG3 bits 7:4). */
static int si470x_dev_seek_snr_th_set(struct si470x_device *radio, u8 seek_snr)
{
	int ret;

	radio->registers[SYSCONFIG3] &= SYSCONFIG3_SKNR_CLR;
	radio->registers[SYSCONFIG3] |= (seek_snr << 4) & SYSCONFIG3_SKSNR;
	ret = si470x_set_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf2 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Program the seek FM impulse-detect threshold (SYSCONFIG3 bits 3:0). */
static int si470x_dev_seek_fm_id_th_set(struct si470x_device *radio,
		u8 seek_fm_id_th)
{
	int ret;

	radio->registers[SYSCONFIG3] &= SYSCONFIG3_SKCNT_CLR;
	radio->registers[SYSCONFIG3] |= seek_fm_id_th & SYSCONFIG3_SKCNT;
	ret = si470x_set_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf2 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Select the 50us or 75us de-emphasis time constant. */
static int si470x_dev_de_set(struct si470x_device *radio, u8 de_tc)
{
	int ret;

	switch (de_tc) {
	case DE_TIME_CONSTANT_50:
		radio->registers[SYSCONFIG1] |= SYSCONFIG1_DE;
		break;
	case DE_TIME_CONSTANT_75:
		radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_DE;
		break;
	default:
		ret = -1;
		goto err;
	}

	ret = si470x_set_register(radio, SYSCONFIG1);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf1 reg\n", __func__);
		ret = -1;
		goto err;
	}
	return 0;
err:
	return ret;
}

/* Enable the extended (attenuated) volume range. */
static int si470x_dev_volext_enb(struct si470x_device *radio)
{
	int ret;

	radio->registers[SYSCONFIG3] |= SYSCONFIG3_VOLEXT_EN;
	ret = si470x_set_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf3 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Disable the extended volume range. */
static int si470x_dev_volext_disb(struct si470x_device *radio)
{
	int ret;

	radio->registers[SYSCONFIG3] &= ~SYSCONFIG3_VOLEXT_EN;
	ret = si470x_set_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf3 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Set the output volume (SYSCONFIG2 bits 3:0). */
static int si470x_dev_volume_set(struct si470x_device *radio, u8 volume)
{
	int ret;

	radio->registers[SYSCONFIG2] &= SYSCONFIG2_VOLUME_CLR;
	radio->registers[SYSCONFIG2] |= (volume & SYSCONFIG2_VOLUME);
	ret = si470x_set_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconf2 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/*
 * Read the current volume.  Note *volume is only written when the
 * register value is non-zero (preserves the original behaviour).
 */
static int si470x_dev_volume_get(struct si470x_device *radio, u8 *volume)
{
	int ret;

	ret = si470x_get_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting sysconfig2 reg\n", __func__);
		ret = -1;
	}
	if (radio->registers[SYSCONFIG2] & SYSCONFIG2_VOLUME)
		*volume = radio->registers[SYSCONFIG2] & SYSCONFIG2_VOLUME;
	return ret;
}

/* Softmute on (DSMUTE bit is active-low: 0 = softmute enabled). */
static int si470x_dev_dsmute_on(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] &= ~POWERCFG_DSMUTE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Softmute off. */
static int si470x_dev_dsmute_off(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] |= POWERCFG_DSMUTE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Mute on (DMUTE bit is active-low: 0 = muted). */
static int si470x_dev_mute_on(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Mute off. */
static int si470x_dev_mute_off(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] |= POWERCFG_DMUTE;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Force mono reception. */
static int si470x_dev_mono_set(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] |= POWERCFG_MONO;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Allow stereo reception. */
static int si470x_dev_stereo_set(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] &= ~POWERCFG_MONO;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting powercfg reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Enable RDS reception in standard mode with the RDS interrupt. */
static int si470x_dev_rds_enable(struct si470x_device *radio)
{
	int ret;

	radio->registers[POWERCFG] |= (0x00 << 11) & POWERCFG_RDSM;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting RDS mode\n", __func__);
		ret = -1;
		goto err;
	}

	radio->registers[SYSCONFIG1] |= ((0x01 << 12) & SYSCONFIG1_RDS) |
			((0x01 << 15) & SYSCONFIG1_RDSIEN) | (0x01 << 2);
	ret = si470x_set_register(radio, SYSCONFIG1);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconfig1 reg\n", __func__);
		ret = -1;
		goto err;
	}
err:
	return ret;
}

/* Disable RDS reception. */
static int si470x_dev_rds_disable(struct si470x_device *radio)
{
	int ret = 0;

	radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;
	ret = si470x_set_register(radio, SYSCONFIG1);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting sysconfig1 reg\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Thin wrapper around the core frequency read. */
static int si470x_dev_get_freq(struct si470x_device *radio, unsigned int *freq)
{
	return si470x_get_freq(radio, freq);
}

/*
 * Start a hardware seek and wait (interrupt- or poll-based) until it
 * completes or SEEK_TIMEOUT ms pass; the frequency it stopped at is
 * returned in *frequency.  WRAP_AROUND selects band wrapping,
 * SEEK_UPWARD the direction.  Returns -EAGAIN on timeout.
 */
static int si470x_dev_set_seek(struct si470x_device *radio,
		unsigned int wrap_around, unsigned int seek_upward,
		u32 *frequency)
{
	int ret;
	unsigned long timeout;
	bool timed_out = false;
	unsigned int seek_timeout = 5000;
#ifdef CONFIG_OMAP_PM
	static bool pm_qos_request_added;
#endif

#ifdef CONFIG_OMAP_PM
	/* Pin CPU DMA latency during the seek so the tuner IRQ is serviced
	   promptly. */
	if (!pm_qos_request_added) {
		pm_qos_request_added = true;
		pm_qos_add_request(&pm_qos_dpll_handle,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
	}
	pm_qos_update_request(&pm_qos_dpll_handle, 7);
#endif

	/* start seeking */
	radio->registers[POWERCFG] |= POWERCFG_SEEK;
	if (wrap_around == 1)
		radio->registers[POWERCFG] &= ~POWERCFG_SKMODE;
	else
		radio->registers[POWERCFG] |= POWERCFG_SKMODE;
	if (seek_upward == 1)
		radio->registers[POWERCFG] |= POWERCFG_SEEKUP;
	else
		radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP;
	ret = si470x_set_register(radio, POWERCFG);
	if (unlikely(ret < 0))
		goto done;

	/* currently I2C driver only uses interrupt way to seek */
	if (radio->stci_enabled) {
		INIT_COMPLETION(radio->completion);

		/* wait till seek operation has completed */
		ret = wait_for_completion_timeout(&radio->completion,
				msecs_to_jiffies(seek_timeout));
		if (unlikely(!ret))
			timed_out = true;
	} else {
		/* wait till seek operation has completed */
		timeout = jiffies + msecs_to_jiffies(seek_timeout);
		do {
			ret = si470x_get_register(radio, STATUSRSSI);
			if (unlikely(ret < 0))
				goto stop;
			timed_out = time_after(jiffies, timeout);
		} while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
				&& (!timed_out));
	}

	ret = si470x_dev_get_freq(radio, frequency);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting freq\n", __func__);
		ret = -1;
	}

	if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
		pr_warn("seek doesnt complete\n");
	if (radio->registers[STATUSRSSI] & STATUSRSSI_SF)
		pr_warn("seek failed/ band limit reached\n");
	if (timed_out)
		pr_warn("seek timed out\n");

stop:
	/*stop seeking*/
	radio->registers[POWERCFG] &= ~POWERCFG_SEEK;
	ret = si470x_set_register(radio, POWERCFG);
#ifdef CONFIG_OMAP_PM
	pm_qos_update_request(&pm_qos_dpll_handle, -1);
#endif
done:
	if ((ret == 0) && timed_out)
		ret = -EAGAIN;
	return ret;
}

/* Report the currently tuned frequency. */
static int si470x_dev_chan_get(struct si470x_device *radio,
		unsigned int *frequency)
{
	int ret;

	ret = si470x_dev_get_freq(radio, frequency);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting the freq\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Seek upward without band wrapping (full scan from current position). */
int si470x_dev_seek_full(struct si470x_device *radio, u32 *frequency)
{
	int ret;

	ret = si470x_dev_set_seek(radio, 0, 1, frequency);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting the seekfull\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Seek upward with band wrapping. */
static int si470x_dev_seek_up(struct si470x_device *radio, u32 *frequency)
{
	int ret;

	ret = si470x_dev_set_seek(radio, 1, 1, frequency);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting the seekup\n", __func__);
		ret = -1;
	}
	return ret;
}

/* Seek downward with band wrapping. */
static int si470x_dev_seek_down(struct si470x_device *radio, u32 *frequency)
{
	int ret;

	ret = si470x_dev_set_seek(radio, 1, 0, frequency);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while setting seek down\n", __func__);
		ret = -1;
	}
	return ret;
}

/*
 * Report the configured RSSI seek threshold.  *rssi is only written when
 * the field is non-zero (preserves the original behaviour).
 */
static int si470x_dev_rssi_get(struct si470x_device *radio, u32 *rssi)
{
	int ret;

	ret = si470x_get_register(radio, SYSCONFIG2);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting the sysconfig2\n", __func__);
		ret = -1;
	}
	if ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SEEKTH) >> 8)
		*rssi = ((radio->registers[SYSCONFIG2] &
				SYSCONFIG2_SEEKTH) >> 8);
	return ret;
}

/* Report the configured seek SNR threshold. */
static int si470x_dev_sksnr_get(struct si470x_device *radio, u32 *sksnr)
{
	int ret;

	ret = si470x_get_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting the sysconfig2\n", __func__);
		ret = -1;
	}
	if ((radio->registers[SYSCONFIG3] & SYSCONFIG3_SKSNR) >> 4)
		*sksnr = ((radio->registers[SYSCONFIG3] &
				SYSCONFIG3_SKSNR) >> 4);
	return ret;
}

/* Report the configured seek impulse-count threshold. */
static int si470x_dev_skcnt_get(struct si470x_device *radio, u32 *skcnt)
{
	int ret;

	ret = si470x_get_register(radio, SYSCONFIG3);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting the sysconfig2\n", __func__);
		ret = -1;
	}
	if (radio->registers[SYSCONFIG3] & SYSCONFIG3_SKCNT)
		*skcnt = radio->registers[SYSCONFIG3] & SYSCONFIG3_SKCNT;
	return ret;
}

/* Report the AFC-rail flag: 1 when the tuned channel is invalid. */
static int si470x_dev_afcrl_get(struct si470x_device *radio, u8 *afcrl)
{
	int ret;

	ret = si470x_get_register(radio, STATUSRSSI);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting the statusrssi\n", __func__);
		ret = -1;
	}
	if (radio->registers[STATUSRSSI] & STATUSRSSI_AFCRL)
		*afcrl = 1;
	else
		*afcrl = 0;
	return ret;
}

/* Report the live received signal strength. */
static int si470x_dev_cur_rssi_get(struct si470x_device *radio, u32 *cur_rssi)
{
	int ret;

	ret = si470x_get_register(radio, STATUSRSSI);
	if (unlikely(ret < 0)) {
		pr_err("(%s):err while getting the statusrssi\n", __func__);
		ret = -1;
	}
	if (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI)
		*cur_rssi = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI);
	return ret;
}

/*
 * Pop one RDS group (blocks A-D) from the ring buffer into DATA, blocking
 * up to RDS_TIMEOUT ms for data to arrive.  Consumed slots are zeroed and
 * the read index advances with wrap at RDS_BUF_LEN.
 */
static int si470x_dev_rds_get(struct si470x_device *radio,
		struct radio_data *data)
{
	int i, ret = 0;

	mutex_lock(&radio->lock);

	while (radio->wr_index == radio->rd_index) {
		if (wait_event_interruptible_timeout(radio->read_queue,
				radio->wr_index != radio->rd_index,
				msecs_to_jiffies(RDS_TIMEOUT)) <= 0) {
			ret = -EINTR;
			goto done;
		}
	}

	i = 0;
	data->rdsa = radio->rds_data_buff[i++ + 4 * radio->rd_index];
	data->rdsb = radio->rds_data_buff[i++ + 4 * radio->rd_index];
	data->rdsc = radio->rds_data_buff[i++ + 4 * radio->rd_index];
	data->rdsd = radio->rds_data_buff[i++ + 4 * radio->rd_index];

	memset(&radio->rds_data_buff[0 + 4 * radio->rd_index], 0, 8);

	radio->rd_index++;
	if (radio->rd_index >= RDS_BUF_LEN)
		radio->rd_index = 0;
done:
	mutex_unlock(&radio->lock);
	return ret;
}

/* file operations */
static int si470x_dev_open(struct inode *inode, struct file *filp)
{
	return nonseekable_open(inode, filp);
}

/*
 * read(): copy complete 3-byte RDS blocks to userspace.  Blocks unless
 * O_NONBLOCK; requires RDS to be enabled.  Returns bytes copied.
 *
 * Fixes vs. original: RET and BLOCK_COUNT were used uninitialized
 * (undefined behaviour), and the read index was assigned "= 3" instead
 * of advanced "+= 3", so it never moved past the second block nor
 * wrapped.
 */
static ssize_t si470x_dev_read(struct file *filp, char __user *buf,
		size_t count, loff_t *pos)
{
	ssize_t ret = 0;
	unsigned int block_count = 0;
	struct si470x_device *radio = container_of(filp->private_data,
			struct si470x_device, miscdev);

	/* switch on rds reception */
	mutex_lock(&radio->lock);
	if (!(radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)) {
		mutex_unlock(&radio->lock);
		return -1;
	}

	/* block if no new data available */
	while (radio->wr_index == radio->rd_index) {
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EWOULDBLOCK;
			goto done;
		}
		if (wait_event_interruptible(radio->read_queue,
				radio->wr_index != radio->rd_index) < 0) {
			ret = -EINTR;
			goto done;
		}
	}

	/* calculate block count from byte count */
	count /= 3;

	/* copy RDS block out of internal buffer and to user buffer */
	while (block_count < count) {
		if (radio->rd_index == radio->wr_index)
			break;

		/* always transfer rds complete blocks */
		if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
			break;

		/* increment and wrap read pointer */
		radio->rd_index += 3;
		if (radio->rd_index >= radio->buf_size)
			radio->rd_index = 0;

		/* increment counters */
		block_count++;
		buf += 3;
		ret += 3;
	}
done:
	mutex_unlock(&radio->lock);
	return ret;
}

/*
 * poll(): readable when the RDS ring buffer is non-empty.  Requires RDS
 * to be enabled.
 *
 * Fix vs. original: RETVAL was returned uninitialized when no data was
 * pending; it now defaults to 0 (not ready).
 */
static unsigned int si470x_dev_poll(struct file *filp,
		struct poll_table_struct *pts)
{
	struct si470x_device *radio = container_of(filp->private_data,
			struct si470x_device, miscdev);
	int retval = 0;

	/* switch on rds reception */
	mutex_lock(&radio->lock);
	if (!(radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)) {
		mutex_unlock(&radio->lock);
		return -1;
	}
	mutex_unlock(&radio->lock);

	poll_wait(filp, &radio->read_queue, pts);

	if (radio->rd_index != radio->wr_index)
		retval = POLLIN | POLLRDNORM;

	return retval;
}

static int si470x_dev_release(struct inode *inode, struct file *filp)
{
	return 0;
}

/* Run a no-argument device operation and warn on failure. */
#define m_si470x_dev_ioctl_operate(_radio, _ret, _fn)			\
do {									\
	_ret = _fn(_radio);						\
	if (unlikely(_ret < 0))						\
		pr_warn("(%s): operation (%s) failed\n",		\
				__func__, #_fn);			\
} while (0)

/* Copy _var from userspace, then run the operation with it. */
#define m_si470x_dev_ioctl_copy_from_user(_radio, _ret, _argp, _fn, _var)\
do {									\
	if (copy_from_user((void *)&_var, _argp, sizeof(_var))) {	\
		_ret = -EFAULT;						\
	} else {							\
		_ret = _fn(_radio, _var);				\
		if (unlikely(_ret < 0))					\
			pr_warn("(%s): operation (%s) failed\n",	\
					__func__, #_fn);		\
	}								\
} while (0)

/* Run the operation filling _var, then copy it out to userspace. */
#define m_si470x_dev_ioctl_copy_to_user(_radio, _ret, _argp, _fn, _var)	\
do {									\
	_ret = _fn(_radio, &_var);					\
	if (unlikely(_ret < 0))						\
		pr_warn("(%s): operation (%s) failed\n",		\
				__func__, #_fn);			\
	else if (copy_to_user(_argp, (void *)&_var, sizeof(_var)))	\
		_ret = -EFAULT;						\
} while (0)

/*
 * ioctl dispatcher.
 *
 * Fixes vs. original: a missing "default:" case meant an in-range but
 * unhandled command returned an uninitialized RET; and SI470X_IOC_DE_SET
 * called si470x_dev_de_set with uninitialized stack garbage instead of
 * the value supplied by userspace.
 */
static long si470x_dev_ioctl(struct file *filp, unsigned int ioctl_cmd,
		unsigned long arg)
{
	long ret;
	void __user *argp = (void __user *)arg;
	struct si470x_device *radio = container_of(filp->private_data,
			struct si470x_device, miscdev);
	s32 buf_s32;
	u32 buf_u32;
	u8 buf_u8;
	struct radio_data rds;

	if (_IOC_TYPE(ioctl_cmd) != SI470X_IOC_MAGIC) {
		pr_err("(%s): nappropriate ioctl 1 0x%x\n",
				__func__, ioctl_cmd);
		return -ENOTTY;
	}
	if (_IOC_NR(ioctl_cmd) > SI470X_IOC_NR_MAX) {
		pr_err("(%s): nappropriate ioctl 2 0x%x\n",
				__func__, ioctl_cmd);
		return -ENOTTY;
	}

	pr_debug("(%s): valid ioctl 0x%x\n", __func__, ioctl_cmd);

	switch (ioctl_cmd) {
	case SI470X_IOC_POWERUP:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_powerup);
		break;
	case SI470X_IOC_POWERDOWN:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_powerdown);
		break;
	case SI470X_IOC_BAND_SET:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_band_set, buf_s32);
		break;
	case SI470X_IOC_CHAN_SPACING_SET:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_ch_spacing_set, buf_s32);
		break;
	case SI470X_IOC_CHAN_SELECT:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_chan_select, buf_u32);
		break;
	case SI470X_IOC_CHAN_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_chan_get, buf_u32);
		break;
	case SI470X_IOC_SEEK_FULL:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_seek_full, buf_u32);
		break;
	case SI470X_IOC_SEEK_UP:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_seek_up, buf_u32);
		break;
	case SI470X_IOC_SEEK_DOWN:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_seek_down, buf_u32);
		break;
	case SI470X_IOC_RSSI_SEEK_TH_SET:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_rssi_seek_th_set, buf_u8);
		break;
	case SI470X_IOC_SEEK_SNR_SET:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_seek_snr_th_set, buf_u8);
		break;
	case SI470X_IOC_SEEK_CNT_SET:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_seek_fm_id_th_set, buf_u8);
		break;
	case SI470X_IOC_VOLEXT_ENB:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_volext_enb);
		break;
	case SI470X_IOC_VOLEXT_DISB:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_volext_disb);
		break;
	case SI470X_IOC_VOLUME_SET:
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_volume_set, buf_u8);
		break;
	case SI470X_IOC_VOLUME_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_volume_get, buf_u8);
		break;
	case SI470X_IOC_DSMUTE_ON:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_dsmute_on);
		break;
	case SI470X_IOC_DSMUTE_OFF:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_dsmute_off);
		break;
	case SI470X_IOC_MUTE_ON:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_mute_on);
		break;
	case SI470X_IOC_MUTE_OFF:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_mute_off);
		break;
	case SI470X_IOC_MONO_SET:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_mono_set);
		break;
	case SI470X_IOC_STEREO_SET:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_stereo_set);
		break;
	case SI470X_IOC_RDS_ENABLE:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_rds_enable);
		break;
	case SI470X_IOC_RDS_DISABLE:
		m_si470x_dev_ioctl_operate(radio, ret, si470x_dev_rds_disable);
		break;
	case SI470X_IOC_RSSI_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_rssi_get, buf_u32);
		break;
	case SI470X_IOC_SKSNR_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_sksnr_get, buf_u32);
		break;
	case SI470X_IOC_SKCNT_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_skcnt_get, buf_u32);
		break;
	case SI470X_IOC_AFCRL_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_afcrl_get, buf_u8);
		break;
	case SI470X_IOC_STATUS_RSSI_GET:
		m_si470x_dev_ioctl_copy_to_user(radio, ret, argp,
				si470x_dev_cur_rssi_get, buf_u32);
		break;
	case SI470X_IOC_RDS_GET:
		{
			memset(&rds, 0, sizeof(struct radio_data));
			ret = si470x_dev_rds_get(radio, &rds);
			if (unlikely(ret < 0))
				pr_debug("(%s):operation (%s) failed\n",
					__func__, "si470x_dev_rds_get");
			if (copy_to_user(argp, (void *)&rds,
					sizeof(struct radio_data)))
				ret = -EFAULT;
		}
		break;
	case SI470X_IOC_DE_SET:
		/* was: si470x_dev_de_set(radio, buf_u32) with buf_u32
		 * never read from userspace */
		m_si470x_dev_ioctl_copy_from_user(radio, ret, argp,
				si470x_dev_de_set, buf_u32);
		break;
	default:
		pr_err("(%s): unhandled ioctl 0x%x\n", __func__, ioctl_cmd);
		ret = -ENOTTY;
		break;
	}

	return ret;
}

const struct file_operations si470x_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= si470x_dev_open,
	.read		= si470x_dev_read,
	.unlocked_ioctl	= si470x_dev_ioctl,
	.poll		= si470x_dev_poll,
	.release	= si470x_dev_release,
};

/*
 * Register the misc character device, pulse the reset GPIO once and
 * record the platform data and IRQ supplied by the I2C client.
 */
int si470x_dev_make_node(struct si470x_device *radio,
		struct i2c_client *client)
{
	int ret;

	radio->miscdev.minor = MISC_DYNAMIC_MINOR;
	radio->miscdev.name = SI4709_DRIVER_NAME;
	radio->miscdev.fops = &si470x_dev_fops;
	radio->miscdev.parent = &client->dev;

	ret = misc_register(&radio->miscdev);
	if (unlikely(ret < 0)) {
		pr_err("(%s): misc register failed\n", __func__);
		return ret;
	}

	radio->pdata = client->dev.platform_data;
	radio->pdata->reset_gpio_on(0);
	msleep(20);
	radio->pdata->reset_gpio_on(1);

	radio->si470x_irq = client->irq;

	return 0;
}

/* Allocate the zeroed RDS ring buffer (RDS_BUF_LEN groups of 8 bytes). */
int si470x_dev_rdsbuff_init(struct si470x_device *radio)
{
	radio->rds_data_buff = kzalloc(RDS_BUF_LEN * 8, GFP_KERNEL);
	if (!radio->rds_data_buff) {
		pr_err("(%s):Not sufficient memory\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
gpl-2.0
arkzabor/testrepo
src/server/scripts/Commands/cs_lfg.cpp
21
4675
/*
 * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "ScriptMgr.h"
#include "Chat.h"
#include "Language.h"
#include "LFGMgr.h"
#include "Group.h"
#include "Player.h"

/**
 * Sends the LFG status of a single player (queue state, selected dungeons,
 * roles and comment) to the command issuer.
 * A NULL player (e.g. an offline target) is silently ignored.
 */
void GetPlayerInfo(ChatHandler* handler, Player* player)
{
    if (!player)
        return;

    uint64 guid = player->GetGUID();
    LfgDungeonSet dungeons = sLFGMgr->GetSelectedDungeons(guid);

    std::string const& state = sLFGMgr->GetStateString(sLFGMgr->GetState(guid));
    handler->PSendSysMessage(LANG_LFG_PLAYER_INFO, player->GetName().c_str(),
        state.c_str(), uint8(dungeons.size()), sLFGMgr->ConcatenateDungeons(dungeons).c_str(),
        sLFGMgr->GetRolesString(sLFGMgr->GetRoles(guid)).c_str(),
        sLFGMgr->GetComment(guid).c_str());
}

/// GM chat commands for inspecting and administering the LFG system.
class lfg_commandscript : public CommandScript
{
public:
    lfg_commandscript() : CommandScript("lfg_commandscript") { }

    ChatCommand* GetCommands() const
    {
        static ChatCommand lfgCommandTable[] =
        {
            { "player",  SEC_GAMEMASTER,    false, &HandleLfgPlayerInfoCommand, "", NULL },
            { "group",   SEC_GAMEMASTER,    false, &HandleLfgGroupInfoCommand,  "", NULL },
            { "queue",   SEC_GAMEMASTER,    false, &HandleLfgQueueInfoCommand,  "", NULL },
            { "clean",   SEC_ADMINISTRATOR, false, &HandleLfgCleanCommand,      "", NULL },
            { "options", SEC_ADMINISTRATOR, false, &HandleLfgOptionsCommand,    "", NULL },
            { NULL,      SEC_PLAYER,        false, NULL,                        "", NULL }
        };
        static ChatCommand commandTable[] =
        {
            { "lfg", SEC_GAMEMASTER, false, NULL, "", lfgCommandTable },
            { NULL,  SEC_PLAYER,     false, NULL, "", NULL }
        };
        return commandTable;
    }

    /// .lfg player — show the LFG status of the selected/named player.
    static bool HandleLfgPlayerInfoCommand(ChatHandler* handler, char const* args)
    {
        Player* target = NULL;
        std::string playerName;
        if (!handler->extractPlayerTarget((char*)args, &target, NULL, &playerName))
            return false;

        // GetPlayerInfo() tolerates a NULL target (offline character).
        GetPlayerInfo(handler, target);
        return true;
    }

    /// .lfg group — show the LFG status of every member of the target's group.
    static bool HandleLfgGroupInfoCommand(ChatHandler* handler, char const* args)
    {
        Player* target = NULL;
        std::string playerName;
        if (!handler->extractPlayerTarget((char*)args, &target, NULL, &playerName))
            return false;

        // BUGFIX: extractPlayerTarget() can succeed for an offline character,
        // in which case 'target' stays NULL (only guid/name are filled in).
        // The original code dereferenced it unconditionally and crashed.
        if (!target)
        {
            handler->PSendSysMessage(LANG_LFG_NOT_IN_GROUP, playerName.c_str());
            return true;
        }

        Group* grp = target->GetGroup();
        if (!grp)
        {
            handler->PSendSysMessage(LANG_LFG_NOT_IN_GROUP, playerName.c_str());
            return true;
        }

        uint64 guid = grp->GetGUID();
        std::string const& state = sLFGMgr->GetStateString(sLFGMgr->GetState(guid));
        handler->PSendSysMessage(LANG_LFG_GROUP_INFO, grp->isLFGGroup(),
            state.c_str(), sLFGMgr->GetDungeon(guid));

        for (GroupReference* itr = grp->GetFirstMember(); itr != NULL; itr = itr->next())
            GetPlayerInfo(handler, itr->getSource());
        return true;
    }

    /// .lfg options [mask] — show, and optionally set, the global LFG option mask.
    static bool HandleLfgOptionsCommand(ChatHandler* handler, char const* args)
    {
        int32 options = -1;
        if (char* str = strtok((char*)args, " "))
        {
            int32 tmp = atoi(str);
            if (tmp > -1)
                options = tmp;
        }

        if (options != -1)
        {
            sLFGMgr->SetOptions(options);
            handler->PSendSysMessage(LANG_LFG_OPTIONS_CHANGED);
        }
        handler->PSendSysMessage(LANG_LFG_OPTIONS, sLFGMgr->GetOptions());
        return true;
    }

    /// .lfg queue — dump the current contents of the LFG queue.
    static bool HandleLfgQueueInfoCommand(ChatHandler* handler, char const* args)
    {
        handler->SendSysMessage(sLFGMgr->DumpQueueInfo(*args).c_str());
        return true;
    }

    /// .lfg clean — wipe the LFG queue.
    static bool HandleLfgCleanCommand(ChatHandler* handler, char const* /*args*/)
    {
        handler->PSendSysMessage(LANG_LFG_CLEAN);
        sLFGMgr->Clean();
        return true;
    }
};

void AddSC_lfg_commandscript()
{
    new lfg_commandscript();
}
gpl-2.0
sandsmark/vlc-kio
modules/gui/qt4/dialogs/openurl.cpp
21
4209
/*****************************************************************************
 * openurl.cpp: Open a MRL or clipboard content
 *****************************************************************************
 * Copyright © 2009 the VideoLAN team
 * $Id$
 *
 * Authors: Jean-Philippe André <jpeg@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include "dialogs/openurl.hpp"
#include "util/searchlineedit.hpp"

#include <QPushButton>
#include <QDialogButtonBox>
#include <QApplication>
#include <QClipboard>
#include <QMimeData>
#include <QList>
#include <QUrl>
#include <QFile>
#include <QLabel>

#include <assert.h>

/* Builds the "Open URL" dialog: a single line edit plus Play / Enqueue /
 * Cancel buttons.  When _bClipboard is true, showEvent() will try to
 * pre-fill the edit box from the X selection or clipboard.
 * NOTE(review): bShouldEnqueue is only initialized in showEvent(); if
 * shouldEnqueue() were called before the dialog is ever shown its value
 * would be indeterminate — confirm all callers show the dialog first. */
OpenUrlDialog::OpenUrlDialog( intf_thread_t *_p_intf,
                              bool _bClipboard ) :
        QVLCDialog( (QWidget*)_p_intf->p_sys->p_mi, _p_intf ),
        bClipboard( _bClipboard )
{
    setWindowTitle( qtr( "Open URL" ) );
    setWindowRole( "vlc-open-url" );

    /* Buttons */
    QPushButton *but;

    QDialogButtonBox *box = new QDialogButtonBox( this );
    but = box->addButton( qtr( "&Play" ), QDialogButtonBox::AcceptRole );
    CONNECT( but, clicked(), this, play() );

    but = box->addButton( qtr( "&Enqueue" ), QDialogButtonBox::AcceptRole );
    CONNECT( but, clicked(), this, enqueue() );

    /* Cancel goes through the button box's rejected() signal, not the
     * button's own clicked() like Play/Enqueue above. */
    but = box->addButton( qtr( "&Cancel" ) , QDialogButtonBox::RejectRole );
    CONNECT( box, rejected(), this, reject() );

    /* Info label and line edit */
    edit = new ClickLineEdit( qtr( "Enter URL here..." ), this );

    QLabel *info = new QLabel( qtr( "Please enter the URL or path "
                                    "to the media you want to play."),
                               this );

    setToolTip( qtr( "If your clipboard contains a valid URL\n"
                     "or the path to a file on your computer,\n"
                     "it will be automatically selected." ) );

    /* Layout */
    QVBoxLayout *vlay = new QVBoxLayout( this );

    vlay->addWidget( info );
    vlay->addWidget( edit );
    vlay->addWidget( box );
}

/* Accept the dialog, remembering the URL and that it must be enqueued
 * (appended to the playlist) rather than played immediately. */
void OpenUrlDialog::enqueue()
{
    bShouldEnqueue = true;
    lastUrl = edit->text();
    accept();
}

/* Accept the dialog, remembering the URL for immediate playback. */
void OpenUrlDialog::play()
{
    lastUrl = edit->text();
    accept();
}

/* The URL captured when the user pressed Play or Enqueue. */
QString OpenUrlDialog::url() const
{
    return lastUrl;
}

/* True if the user chose Enqueue rather than Play (reset on each show). */
bool OpenUrlDialog::shouldEnqueue() const
{
    return bShouldEnqueue;
}

/** Show Event:
 * When the dialog is shown, try to extract a URL from the clipboard
 * and paste it in the Edit box.
 * showEvent can happen not only on exec() but I think it's cool to
 * actualize the URL on showEvent (eg. change virtual desktop...)
 **/
void OpenUrlDialog::showEvent( QShowEvent *ev )
{
    (void) ev;
    bShouldEnqueue = false;
    edit->setFocus( Qt::OtherFocusReason );

    /* Restore the last URL only if the user cancelled previously;
     * otherwise start from an empty box. */
    if( !lastUrl.isEmpty() && edit->text().isEmpty() )
    {
        /* The text should not have been changed, excepted if the user
        has clicked Cancel before */
        edit->setText( lastUrl );
    }
    else
        edit->clear();

    if( bClipboard )
    {
        QClipboard *clipboard = QApplication::clipboard();
        assert( clipboard != NULL );

        /* Prefer the X11 primary selection; fall back to the regular
         * clipboard if the selection is empty or not URL/path-like. */
        QString txt = clipboard->text( QClipboard::Selection ).trimmed();

        if( txt.isEmpty() || ( !txt.contains("://") && !QFile::exists(txt) ) )
            txt = clipboard->text( QClipboard::Clipboard ).trimmed();

        /* Only paste text that looks like a URL or an existing file. */
        if( txt.contains( "://" ) || QFile::exists( txt ) )
            edit->setText( txt );
    }
}
gpl-2.0
google-code-export/oregoncore
dep/g3dlite/source/GCamera.cpp
21
15429
/** @file GCamera.cpp @author Morgan McGuire, http://graphics.cs.williams.edu @author Jeff Marsceill, 08jcm@williams.edu @created 2005-07-20 @edited 2010-02-22 */ #include "G3D/GCamera.h" #include "G3D/platform.h" #include "G3D/Rect2D.h" #include "G3D/BinaryInput.h" #include "G3D/BinaryOutput.h" #include "G3D/Ray.h" #include "G3D/Matrix4.h" #include "G3D/Any.h" #include "G3D/stringutils.h" namespace G3D { GCamera::GCamera(const Any& any) { any.verifyName("GCamera"); any.verifyType(Any::TABLE); *this = GCamera(); const Any::AnyTable& table = any.table(); Any::AnyTable::Iterator it = table.begin(); while (it.hasMore()) { const std::string& k = toUpper(it->key); if (k == "FOVDIRECTION") { const std::string& v = toUpper(it->value); if (v == "HORIZONTAL") { m_direction = HORIZONTAL; } else if (v == "VERTICAL") { m_direction = VERTICAL; } else { any.verify(false, "fovDirection must be \"HORIZONTAL\" or \"VERTICAL\""); } } else if (k == "COORDINATEFRAME") { m_cframe = it->value; } else if (k == "FOVDEGREES") { m_fieldOfView = toRadians(it->value.number()); } else if (k == "NEARPLANEZ") { m_nearPlaneZ = it->value; } else if (k == "FARPLANEZ") { m_farPlaneZ = it->value; } else if (k == "PIXELOFFSET") { m_pixelOffset = it->value; } else { any.verify(false, std::string("Illegal key in table: ") + it->key); } ++it; } } GCamera::operator Any() const { Any any(Any::TABLE, "GCamera"); any.set("fovDirection", std::string((m_direction == HORIZONTAL) ? 
"HORIZONTAL" : "VERTICAL")); any.set("fovDegrees", toDegrees(m_fieldOfView)); any.set("nearPlaneZ", nearPlaneZ()); any.set("farPlaneZ", farPlaneZ()); any.set("coordinateFrame", coordinateFrame()); any.set("pixelOffset", pixelOffset()); return any; } GCamera::GCamera() { setNearPlaneZ(-0.2f); setFarPlaneZ(-150.0f); setFieldOfView((float)toRadians(90.0f), HORIZONTAL); } GCamera::GCamera(const Matrix4& proj, const CFrame& frame) { float left, right, bottom, top, nearval, farval; proj.getPerspectiveProjectionParameters(left, right, bottom, top, nearval, farval); setNearPlaneZ(-nearval); setFarPlaneZ(-farval); float x = right; // Assume horizontal field of view setFieldOfView(atan2(x, -m_nearPlaneZ) * 2.0f, HORIZONTAL); setCoordinateFrame(frame); } GCamera::~GCamera() { } void GCamera::getCoordinateFrame(CoordinateFrame& c) const { c = m_cframe; } void GCamera::setCoordinateFrame(const CoordinateFrame& c) { m_cframe = c; } void GCamera::setFieldOfView(float angle, FOVDirection dir) { debugAssert((angle < pi()) && (angle > 0)); m_fieldOfView = angle; m_direction = dir; } float GCamera::imagePlaneDepth() const{ return -m_nearPlaneZ; } float GCamera::viewportWidth(const Rect2D& viewport) const { // Compute the side of a square at the near plane based on our field of view float s = 2.0f * -m_nearPlaneZ * tan(m_fieldOfView * 0.5f); if (m_direction == VERTICAL) { s *= viewport.width() / viewport.height(); } return s; } float GCamera::viewportHeight(const Rect2D& viewport) const { // Compute the side of a square at the near plane based on our field of view float s = 2.0f * -m_nearPlaneZ * tan(m_fieldOfView * 0.5f); debugAssert(m_fieldOfView < toRadians(180)); if (m_direction == HORIZONTAL) { s *= viewport.height() / viewport.width(); } return s; } Ray GCamera::worldRay(float x, float y, const Rect2D& viewport) const { int screenWidth = iFloor(viewport.width()); int screenHeight = iFloor(viewport.height()); Vector3 origin = m_cframe.translation; float cx = screenWidth / 2.0f; 
float cy = screenHeight / 2.0f; float vw = viewportWidth(viewport); float vh = viewportHeight(viewport); Vector3 direction = Vector3( (x - cx) * vw / screenWidth, -(y - cy) * vh / screenHeight, m_nearPlaneZ); direction = m_cframe.vectorToWorldSpace(direction); // Normalize the direction (we didn't do it before) direction = direction.direction(); return Ray::fromOriginAndDirection(origin, direction); } void GCamera::getProjectPixelMatrix(const Rect2D& viewport, Matrix4& P) const { getProjectUnitMatrix(viewport, P); float screenWidth = viewport.width(); float screenHeight = viewport.height(); float sx = screenWidth / 2.0; float sy = screenHeight / 2.0; P = Matrix4(sx, 0, 0, sx + viewport.x0() - m_pixelOffset.x, 0, -sy, 0, sy + viewport.y0() + m_pixelOffset.y, 0, 0, 1, 0, 0, 0, 0, 1) * P; } void GCamera::getProjectUnitMatrix(const Rect2D& viewport, Matrix4& P) const { float screenWidth = viewport.width(); float screenHeight = viewport.height(); float r, l, t, b, n, f, x, y; float s = 1.0f; if (m_direction == VERTICAL) { y = -m_nearPlaneZ * tan(m_fieldOfView / 2); x = y * (screenWidth / screenHeight); s = screenHeight; } else { //m_direction == HORIZONTAL x = -m_nearPlaneZ * tan(m_fieldOfView / 2); y = x * (screenHeight / screenWidth); s = screenWidth; } n = -m_nearPlaneZ; f = -m_farPlaneZ; r = x - m_pixelOffset.x/s; l = -x - m_pixelOffset.x/s; t = y + m_pixelOffset.y/s; b = -y + m_pixelOffset.y/s; P = Matrix4::perspectiveProjection(l, r, b, t, n, f); } Vector3 GCamera::projectUnit(const Vector3& point, const Rect2D& viewport) const { Matrix4 M; getProjectUnitMatrix(viewport, M); Vector4 cameraSpacePoint(coordinateFrame().pointToObjectSpace(point), 1.0f); const Vector4& screenSpacePoint = M * cameraSpacePoint; return Vector3(screenSpacePoint.xyz() / screenSpacePoint.w); } Vector3 GCamera::project(const Vector3& point, const Rect2D& viewport) const { // Find the point in the homogeneous cube const Vector3& cube = projectUnit(point, viewport); return 
convertFromUnitToNormal(cube, viewport); } Vector3 GCamera::unprojectUnit(const Vector3& v, const Rect2D& viewport) const { const Vector3& projectedPoint = convertFromUnitToNormal(v, viewport); return unproject(projectedPoint, viewport); } Vector3 GCamera::unproject(const Vector3& v, const Rect2D& viewport) const { const float n = m_nearPlaneZ; const float f = m_farPlaneZ; float z; if (-f >= finf()) { // Infinite far plane z = 1.0f / (((-1.0f / n) * v.z) + 1.0f / n); } else { z = 1.0f / ((((1.0f / f) - (1.0f / n)) * v.z) + 1.0f / n); } const Ray& ray = worldRay(v.x - m_pixelOffset.x, v.y - m_pixelOffset.y, viewport); // Find out where the ray reaches the specified depth. const Vector3& out = ray.origin() + ray.direction() * -z / (ray.direction().dot(m_cframe.lookVector())); return out; } float GCamera::worldToScreenSpaceArea(float area, float z, const Rect2D& viewport) const { (void)viewport; if (z >= 0) { return finf(); } return area * (float)square(imagePlaneDepth() / z); } void GCamera::getClipPlanes( const Rect2D& viewport, Array<Plane>& clip) const { Frustum fr; frustum(viewport, fr); clip.resize(fr.faceArray.size(), DONT_SHRINK_UNDERLYING_ARRAY); for (int f = 0; f < clip.size(); ++f) { clip[f] = fr.faceArray[f].plane; } } GCamera::Frustum GCamera::frustum(const Rect2D& viewport) const { Frustum f; frustum(viewport, f); return f; } void GCamera::frustum(const Rect2D& viewport, Frustum& fr) const { // The volume is the convex hull of the vertices definining the view // frustum and the light source point at infinity. 
const float x = viewportWidth(viewport) / 2; const float y = viewportHeight(viewport) / 2; const float zn = m_nearPlaneZ; const float zf = m_farPlaneZ; float xx, zz, yy; float halfFOV = m_fieldOfView * 0.5f; // This computes the normal, which is based on the complement of the // halfFOV angle, so the equations are "backwards" if (m_direction == VERTICAL) { yy = -cosf(halfFOV); xx = yy * viewport.height() / viewport.width(); zz = -sinf(halfFOV); } else { xx = -cosf(halfFOV); yy = xx * viewport.width() / viewport.height(); zz = -sinf(halfFOV); } // Near face (ccw from UR) fr.vertexPos.append( Vector4( x, y, zn, 1), Vector4(-x, y, zn, 1), Vector4(-x, -y, zn, 1), Vector4( x, -y, zn, 1)); // Far face (ccw from UR, from origin) if (m_farPlaneZ == -finf()) { fr.vertexPos.append(Vector4( x, y, zn, 0), Vector4(-x, y, zn, 0), Vector4(-x, -y, zn, 0), Vector4( x, -y, zn, 0)); } else { // Finite const float s = zf / zn; fr.vertexPos.append(Vector4( x * s, y * s, zf, 1), Vector4(-x * s, y * s, zf, 1), Vector4(-x * s, -y * s, zf, 1), Vector4( x * s, -y * s, zf, 1)); } Frustum::Face face; // Near plane (wind backwards so normal faces into frustum) // Recall that nearPlane, farPlane are positive numbers, so // we need to negate them to produce actual z values. 
face.plane = Plane(Vector3(0,0,-1), Vector3(0,0,m_nearPlaneZ)); face.vertexIndex[0] = 3; face.vertexIndex[1] = 2; face.vertexIndex[2] = 1; face.vertexIndex[3] = 0; fr.faceArray.append(face); // Right plane face.plane = Plane(Vector3(xx, 0, zz), Vector3::zero()); face.vertexIndex[0] = 0; face.vertexIndex[1] = 4; face.vertexIndex[2] = 7; face.vertexIndex[3] = 3; fr.faceArray.append(face); // Left plane face.plane = Plane(Vector3(-fr.faceArray.last().plane.normal().x, 0, fr.faceArray.last().plane.normal().z), Vector3::zero()); face.vertexIndex[0] = 5; face.vertexIndex[1] = 1; face.vertexIndex[2] = 2; face.vertexIndex[3] = 6; fr.faceArray.append(face); // Top plane face.plane = Plane(Vector3(0, yy, zz), Vector3::zero()); face.vertexIndex[0] = 1; face.vertexIndex[1] = 5; face.vertexIndex[2] = 4; face.vertexIndex[3] = 0; fr.faceArray.append(face); // Bottom plane face.plane = Plane(Vector3(0, -fr.faceArray.last().plane.normal().y, fr.faceArray.last().plane.normal().z), Vector3::zero()); face.vertexIndex[0] = 2; face.vertexIndex[1] = 3; face.vertexIndex[2] = 7; face.vertexIndex[3] = 6; fr.faceArray.append(face); // Far plane if (-m_farPlaneZ < finf()) { face.plane = Plane(Vector3(0, 0, 1), Vector3(0, 0, m_farPlaneZ)); face.vertexIndex[0] = 4; face.vertexIndex[1] = 5; face.vertexIndex[2] = 6; face.vertexIndex[3] = 7; fr.faceArray.append(face); } // Transform vertices to world space for (int v = 0; v < fr.vertexPos.size(); ++v) { fr.vertexPos[v] = m_cframe.toWorldSpace(fr.vertexPos[v]); } // Transform planes to world space for (int p = 0; p < fr.faceArray.size(); ++p) { // Since there is no scale factor, we don't have to // worry about the inverse transpose of the normal. 
Vector3 normal; float d; fr.faceArray[p].plane.getEquation(normal, d); Vector3 newNormal = m_cframe.rotation * normal; if (isFinite(d)) { d = (newNormal * -d + m_cframe.translation).dot(newNormal); fr.faceArray[p].plane = Plane(newNormal, newNormal * d); } else { // When d is infinite, we can't multiply 0's by it without // generating NaNs. fr.faceArray[p].plane = Plane::fromEquation(newNormal.x, newNormal.y, newNormal.z, d); } } } void GCamera::getNearViewportCorners (const Rect2D& viewport, Vector3& outUR, Vector3& outUL, Vector3& outLL, Vector3& outLR) const { // Must be kept in sync with getFrustum() const float w = viewportWidth(viewport) / 2.0f; const float h = viewportHeight(viewport) / 2.0f; const float z = nearPlaneZ(); // Compute the points outUR = Vector3( w, h, z); outUL = Vector3(-w, h, z); outLL = Vector3(-w, -h, z); outLR = Vector3( w, -h, z); // Take to world space outUR = m_cframe.pointToWorldSpace(outUR); outUL = m_cframe.pointToWorldSpace(outUL); outLR = m_cframe.pointToWorldSpace(outLR); outLL = m_cframe.pointToWorldSpace(outLL); } void GCamera::getFarViewportCorners( const Rect2D& viewport, Vector3& outUR, Vector3& outUL, Vector3& outLL, Vector3& outLR) const { // Must be kept in sync with getFrustum() const float w = viewportWidth(viewport) * m_farPlaneZ / m_nearPlaneZ; const float h = viewportHeight(viewport) * m_farPlaneZ / m_nearPlaneZ; const float z = m_farPlaneZ; // Compute the points outUR = Vector3( w/2, h/2, z); outUL = Vector3(-w/2, h/2, z); outLL = Vector3(-w/2, -h/2, z); outLR = Vector3( w/2, -h/2, z); // Take to world space outUR = m_cframe.pointToWorldSpace(outUR); outUL = m_cframe.pointToWorldSpace(outUL); outLR = m_cframe.pointToWorldSpace(outLR); outLL = m_cframe.pointToWorldSpace(outLL); } void GCamera::setPosition(const Vector3& t) { m_cframe.translation = t; } void GCamera::lookAt(const Vector3& position, const Vector3& up) { m_cframe.lookAt(position, up); } void GCamera::serialize(BinaryOutput& bo) const { 
bo.writeFloat32(m_fieldOfView); bo.writeFloat32(imagePlaneDepth()); debugAssert(nearPlaneZ() < 0.0f); bo.writeFloat32(nearPlaneZ()); debugAssert(farPlaneZ() < 0.0f); bo.writeFloat32(farPlaneZ()); m_cframe.serialize(bo); bo.writeInt8(m_direction); m_pixelOffset.serialize(bo); } void GCamera::deserialize(BinaryInput& bi) { m_fieldOfView = bi.readFloat32(); m_nearPlaneZ = bi.readFloat32(); debugAssert(m_nearPlaneZ < 0.0f); m_farPlaneZ = bi.readFloat32(); debugAssert(m_farPlaneZ < 0.0f); m_cframe.deserialize(bi); m_direction = (FOVDirection)bi.readInt8(); m_pixelOffset.deserialize(bi); } Vector3 GCamera::convertFromUnitToNormal(const Vector3& in, const Rect2D& viewport) const{ return (in + Vector3(1,1,1)) * 0.5 * Vector3(viewport.width(), -viewport.height(), 1) + Vector3(viewport.x0(), viewport.y1(), 0); } } // namespace
gpl-2.0
robacklin/uclinux-users
net-tools/lib/inet6_gr.c
21
7423
/*
   Modifications:
   1998-07-01 - Arnaldo Carvalho de Melo - GNU gettext instead of catgets,
                snprintf instead of sprintf
 */
#include "config.h"

#if HAVE_AFINET6
#include <asm/types.h>
#include <asm/param.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <arpa/nameser.h>
/* #include <net/route.h> realy broken */
#include <ctype.h>
#include <errno.h>
#include <netdb.h>
#include <resolv.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#ifndef __GLIBC__
#include <netinet6/ipv6_route.h>	/* glibc doesn't have this */
#endif
#include "version.h"
#include "net-support.h"
#include "pathnames.h"
#include "intl.h"
#include "net-features.h"

/* neighbour discovery from linux-2.4.0/include/net/neighbour.h */

#define NUD_INCOMPLETE  0x01
#define NUD_REACHABLE   0x02
#define NUD_STALE       0x04
#define NUD_DELAY       0x08
#define NUD_PROBE       0x10
#define NUD_FAILED      0x20
#define NUD_NOARP       0x40
#define NUD_PERMANENT   0x80
#define NUD_NONE        0x00

#define NTF_PROXY       0x08	/* == ATF_PUBL */
#define NTF_ROUTER      0x80

#define NTF_02          0x02	/* waiting for answer of Alexey -eckes */
#define NTF_04          0x04	/* waiting for answer of Alexey -eckes */
/* */

extern struct aftype inet6_aftype;

/*
 * Print the kernel IPv6 routing table (/proc/net/ipv6_route).
 *
 * ext     - extended-output flag (currently unused here).
 * numeric - currently unused; addresses are always resolved via
 *           inet6_aftype.sprint() with numeric forced on.
 *
 * Returns 0 on success, 1 if the proc file cannot be opened
 * (IPv6 not configured).
 */
int rprint_fib6(int ext, int numeric)
{
    char buff[4096], iface[16], flags[16];
    char addr6[128], naddr6[128];
    struct sockaddr_in6 saddr6, snaddr6;
    int num, iflags, metric, refcnt, use, prefix_len, slen;
    FILE *fp = fopen(_PATH_PROCNET_ROUTE6, "r");

    char addr6p[8][5], saddr6p[8][5], naddr6p[8][5];

    if (!fp) {
	perror(_PATH_PROCNET_ROUTE6);
	printf(_("INET6 (IPv6) not configured in this system.\n"));
	return 1;
    }
    printf(_("Kernel IPv6 routing table\n"));

    printf(_("Destination                                 "
	     "Next Hop                                "
	     "Flags Metric Ref    Use Iface\n"));

    while (fgets(buff, 1023, fp)) {
	/* BUGFIX: the interface name conversion was an unbounded "%s"
	 * into iface[16]; bound it to %15s (IFNAMSIZ-1) so a malformed
	 * proc line cannot overflow the buffer (CERT STR31-C). */
	num = sscanf(buff, "%4s%4s%4s%4s%4s%4s%4s%4s %02x %4s%4s%4s%4s%4s%4s%4s%4s %02x %4s%4s%4s%4s%4s%4s%4s%4s %08x %08x %08x %08x %15s\n",
		     addr6p[0], addr6p[1], addr6p[2], addr6p[3],
		     addr6p[4], addr6p[5], addr6p[6], addr6p[7],
		     &prefix_len,
		     saddr6p[0], saddr6p[1], saddr6p[2], saddr6p[3],
		     saddr6p[4], saddr6p[5], saddr6p[6], saddr6p[7],
		     &slen,
		     naddr6p[0], naddr6p[1], naddr6p[2], naddr6p[3],
		     naddr6p[4], naddr6p[5], naddr6p[6], naddr6p[7],
		     &metric, &use, &refcnt, &iflags, iface);
#if 0
	/* Field-count validation is deliberately disabled upstream. */
	if (num < 23)
	    continue;
#endif
	/* Skip routes that are not usable. */
	if (!(iflags & RTF_UP))
	    continue;

	/* Fetch and resolve the target address. */
	snprintf(addr6, sizeof(addr6), "%s:%s:%s:%s:%s:%s:%s:%s",
		 addr6p[0], addr6p[1], addr6p[2], addr6p[3],
		 addr6p[4], addr6p[5], addr6p[6], addr6p[7]);
	inet6_aftype.input(1, addr6, (struct sockaddr *) &saddr6);
	snprintf(addr6, sizeof(addr6), "%s/%d",
		 inet6_aftype.sprint((struct sockaddr *) &saddr6, 1),
		 prefix_len);

	/* Fetch and resolve the nexthop address. */
	snprintf(naddr6, sizeof(naddr6), "%s:%s:%s:%s:%s:%s:%s:%s",
		 naddr6p[0], naddr6p[1], naddr6p[2], naddr6p[3],
		 naddr6p[4], naddr6p[5], naddr6p[6], naddr6p[7]);
	inet6_aftype.input(1, naddr6, (struct sockaddr *) &snaddr6);
	snprintf(naddr6, sizeof(naddr6), "%s",
		 inet6_aftype.sprint((struct sockaddr *) &snaddr6, 1));

	/* Decode the flags.  "U" (up) is implied by the RTF_UP check. */
	strcpy(flags, "U");
	if (iflags & RTF_GATEWAY)
	    strcat(flags, "G");
	if (iflags & RTF_HOST)
	    strcat(flags, "H");
	if (iflags & RTF_DEFAULT)
	    strcat(flags, "D");
	if (iflags & RTF_ADDRCONF)
	    strcat(flags, "A");
	if (iflags & RTF_CACHE)
	    strcat(flags, "C");

	/* Print the info. */
	printf("%-43s %-39s %-5s %-6d %-2d %7d %-8s\n",
	       addr6, naddr6, flags, metric, refcnt, use, iface);
    }

    (void) fclose(fp);
    return (0);
}

/*
 * Print the kernel IPv6 neighbour cache (/proc/net/ndisc).
 *
 * ext     - when 2, the short output form (no Stale/Delete columns).
 * numeric - passed through to inet6_aftype.sprint() for the neighbour
 *           address.
 *
 * Returns 0 on success, 1 if the proc file cannot be opened.
 */
int rprint_cache6(int ext, int numeric)
{
    char buff[4096], iface[16], flags[16];
    char addr6[128], haddr[20], statestr[20];
    struct sockaddr_in6 saddr6;
    int type, num, refcnt, prefix_len, location, state, gc;
    long tstamp, expire, ndflags, reachable, stale, delete;
    FILE *fp = fopen(_PATH_PROCNET_NDISC, "r");

    char addr6p[8][5], haddrp[6][3];

    if (!fp) {
	ESYSNOT("nd_print", "ND Table");
	return 1;
    }
    printf(_("Kernel IPv6 Neighbour Cache\n"));

    if (ext == 2)
	printf(_("Neighbour                                   "
		 "HW Address        "
		 "Iface    Flags Ref State\n"));
    else
	printf(_("Neighbour                                   "
		 "HW Address        "
		 "Iface    Flags Ref State            Stale(sec) Delete(sec)\n"));

    while (fgets(buff, 1023, fp)) {
	num = sscanf(buff, "%4s%4s%4s%4s%4s%4s%4s%4s %02x %02x %02x %02x %08lx %08lx %08lx %04x %04x %04lx %8s %2s%2s%2s%2s%2s%2s\n",
		     addr6p[0], addr6p[1], addr6p[2], addr6p[3],
		     addr6p[4], addr6p[5], addr6p[6], addr6p[7],
		     &location, &prefix_len, &type, &state, &expire,
		     &tstamp, &reachable, &gc, &refcnt,
		     &ndflags, iface,
		     haddrp[0], haddrp[1], haddrp[2], haddrp[3],
		     haddrp[4], haddrp[5]);

	/* Fetch and resolve the nexthop address. */
	snprintf(addr6, sizeof(addr6), "%s:%s:%s:%s:%s:%s:%s:%s",
		 addr6p[0], addr6p[1], addr6p[2], addr6p[3],
		 addr6p[4], addr6p[5], addr6p[6], addr6p[7]);
	inet6_aftype.input(1, addr6, (struct sockaddr *) &saddr6);
	snprintf(addr6, sizeof(addr6), "%s/%d",
		 inet6_aftype.sprint((struct sockaddr *) &saddr6, numeric),
		 prefix_len);

	/* Fetch the hardware address. */
	snprintf(haddr, sizeof(haddr), "%s:%s:%s:%s:%s:%s",
		 haddrp[0], haddrp[1], haddrp[2],
		 haddrp[3], haddrp[4], haddrp[5]);

	/* Decode the flags. */
	flags[0] = '\0';
	if (ndflags & NTF_ROUTER)
	    strcat(flags, "R");
	if (ndflags & NTF_04)
	    strcat(flags, "x");
	if (ndflags & NTF_02)
	    strcat(flags, "h");
	if (ndflags & NTF_PROXY)
	    strcat(flags, "P");

	/* Decode the state */
	switch (state) {
	case NUD_NONE:
	    strcpy(statestr, "NONE");
	    break;
	case NUD_INCOMPLETE:
	    strcpy(statestr, "INCOMPLETE");
	    break;
	case NUD_REACHABLE:
	    strcpy(statestr, "REACHABLE");
	    break;
	case NUD_STALE:
	    strcpy(statestr, "STALE");
	    break;
	case NUD_DELAY:
	    strcpy(statestr, "DELAY");
	    break;
	case NUD_PROBE:
	    strcpy(statestr, "PROBE");
	    break;
	case NUD_FAILED:
	    strcpy(statestr, "FAILED");
	    break;
	case NUD_NOARP:
	    strcpy(statestr, "NOARP");
	    break;
	case NUD_PERMANENT:
	    strcpy(statestr, "PERM");
	    break;
	default:
	    snprintf(statestr, sizeof(statestr), "UNKNOWN(%02x)", state);
	    break;
	}

	/* Print the info. */
	printf("%-43s %-17s %-8s %-5s %-3d %-16s",
	       addr6, haddr, iface, flags, refcnt, statestr);

	/* Remaining lifetimes, in jiffies relative to the last stamp. */
	stale = 0;
	if (state == NUD_REACHABLE)
	    stale = reachable > tstamp ? reachable - tstamp : 0;
	delete = gc > tstamp ? gc - tstamp : 0;
	if (ext != 2) {
	    printf(" %-9ld ", stale / HZ);
	    if (refcnt)
		printf("   *    ");
	    else
		printf(" %-7ld ", delete / HZ);
	}
	printf("\n");
    }

    (void) fclose(fp);
    return (0);
}

/*
 * Entry point for "route -A inet6" / "netstat -r6": dispatches to the
 * FIB printer and/or the neighbour-cache printer depending on the
 * option flags.  Returns 0 on success or a printer's error code.
 */
int INET6_rprint(int options)
{
    int ext = options & FLAG_EXT;
    int numeric = options & (FLAG_NUM_HOST | FLAG_SYM);
    int rc = E_INTERN;

    if (options & FLAG_FIB)
	if ((rc = rprint_fib6(ext, numeric)))
	    return (rc);

    if (options & FLAG_CACHE)
	if ((rc = rprint_cache6(ext, numeric)))
	    return (rc);

    return (rc);
}

#endif				/* HAVE_AFINET6 */
gpl-2.0
bilalliberty/android_kernel_HTC_ville_evita
drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
21
16078
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/memory_alloc.h> #include <linux/delay.h> #include <mach/msm_subsystem_map.h> #include <mach/peripheral-loader.h> #include "vcd_ddl_utils.h" #include "vcd_ddl.h" #include "vcd_res_tracker_api.h" struct time_data { unsigned int ddl_t1; unsigned int ddl_ttotal; unsigned int ddl_count; }; static struct time_data proc_time[MAX_TIME_DATA]; #define DDL_MSG_TIME(x...) printk(KERN_DEBUG "[VID] " x) static unsigned int vidc_mmu_subsystem[] = { MSM_SUBSYSTEM_VIDEO, MSM_SUBSYSTEM_VIDEO_FWARE}; #ifdef DDL_BUF_LOG static void ddl_print_buffer(struct ddl_context *ddl_context, struct ddl_buf_addr *buf, u32 idx, u8 *str); static void ddl_print_port(struct ddl_context *ddl_context, struct ddl_buf_addr *buf); static void ddl_print_buffer_port(struct ddl_context *ddl_context, struct ddl_buf_addr *buf, u32 idx, u8 *str); #endif void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment) { u32 alloc_size, offset = 0 ; u32 index = 0; struct ddl_context *ddl_context; struct msm_mapped_buffer *mapped_buffer = NULL; unsigned long iova = 0; unsigned long buffer_size = 0; unsigned long *kernel_vaddr = NULL; unsigned long ionflag = 0; unsigned long flags = 0; int ret = 0; ion_phys_addr_t phyaddr = 0; size_t len = 0; int rc = 0; DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz); if (!addr) { DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__); goto bail_out; } ddl_context = ddl_get_context(); res_trk_set_mem_type(addr->mem_type); 
alloc_size = (sz + alignment); if (res_trk_get_enable_ion()) { if (!ddl_context->video_ion_client) ddl_context->video_ion_client = res_trk_get_ion_client(); if (!ddl_context->video_ion_client) { DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n", __func__); goto bail_out; } alloc_size = (alloc_size+4095) & ~4095; addr->alloc_handle = ion_alloc( ddl_context->video_ion_client, alloc_size, SZ_4K, res_trk_get_mem_type(), res_trk_get_ion_flags()); if (IS_ERR_OR_NULL(addr->alloc_handle)) { DDL_MSG_ERROR("%s() :DDL ION alloc failed\n", __func__); goto bail_out; } if (res_trk_check_for_sec_session() || addr->mem_type == DDL_FW_MEM) ionflag = 0 ; else ionflag = ION_FLAG_CACHED; kernel_vaddr = (unsigned long *) ion_map_kernel( ddl_context->video_ion_client, addr->alloc_handle); if (IS_ERR_OR_NULL(kernel_vaddr)) { DDL_MSG_ERROR("%s() :DDL ION map failed\n", __func__); goto free_ion_alloc; } addr->virtual_base_addr = (u8 *) kernel_vaddr; if (res_trk_check_for_sec_session()) { rc = ion_phys(ddl_context->video_ion_client, addr->alloc_handle, &phyaddr, &len); if (rc || !phyaddr) { DDL_MSG_ERROR( "%s():DDL ION client physical failed\n", __func__); goto unmap_ion_alloc; } addr->alloced_phys_addr = phyaddr; } else { ret = ion_map_iommu(ddl_context->video_ion_client, addr->alloc_handle, VIDEO_DOMAIN, VIDEO_MAIN_POOL, SZ_4K, 0, &iova, &buffer_size, 0 , 0); if (ret || !iova) { DDL_MSG_ERROR( "%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n", __func__, ret, iova); goto unmap_ion_alloc; } addr->alloced_phys_addr = (phys_addr_t) iova; msm_ion_do_cache_op(ddl_context->video_ion_client, addr->alloc_handle, addr->virtual_base_addr, sz, ION_IOC_CLEAN_INV_CACHES); } if (!addr->alloced_phys_addr) { DDL_MSG_ERROR("%s():DDL ION client physical failed\n", __func__); goto unmap_ion_alloc; } addr->mapped_buffer = NULL; addr->physical_base_addr = (u8 *) addr->alloced_phys_addr; addr->align_physical_addr = (u8 *) DDL_ALIGN((u32) addr->physical_base_addr, alignment); offset = 
(u32)(addr->align_physical_addr - addr->physical_base_addr); addr->align_virtual_addr = addr->virtual_base_addr + offset; addr->buffer_size = alloc_size; } else { addr->alloced_phys_addr = (phys_addr_t) allocate_contiguous_memory_nomap(alloc_size, res_trk_get_mem_type(), SZ_4K); if (!addr->alloced_phys_addr) { DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n", __func__, alloc_size); goto bail_out; } flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR; if (alignment == DDL_KILO_BYTE(128)) index = 1; else if (alignment > SZ_4K) flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K; addr->mapped_buffer = msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr, alloc_size, flags, &vidc_mmu_subsystem[index], sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int)); if (IS_ERR(addr->mapped_buffer)) { pr_err(" %s() buffer map failed", __func__); goto free_acm_alloc; } mapped_buffer = addr->mapped_buffer; if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) { pr_err("%s() map buffers failed\n", __func__); goto free_map_buffers; } addr->physical_base_addr = (u8 *)mapped_buffer->iova[0]; addr->virtual_base_addr = mapped_buffer->vaddr; addr->align_physical_addr = (u8 *) DDL_ALIGN((u32) addr->physical_base_addr, alignment); offset = (u32)(addr->align_physical_addr - addr->physical_base_addr); addr->align_virtual_addr = addr->virtual_base_addr + offset; addr->buffer_size = sz; } return addr->virtual_base_addr; free_map_buffers: msm_subsystem_unmap_buffer(addr->mapped_buffer); addr->mapped_buffer = NULL; free_acm_alloc: free_contiguous_memory_by_paddr( (unsigned long)addr->alloced_phys_addr); addr->alloced_phys_addr = (phys_addr_t)NULL; return NULL; unmap_ion_alloc: ion_unmap_kernel(ddl_context->video_ion_client, addr->alloc_handle); addr->virtual_base_addr = NULL; addr->alloced_phys_addr = (phys_addr_t)NULL; free_ion_alloc: ion_free(ddl_context->video_ion_client, addr->alloc_handle); addr->alloc_handle = NULL; bail_out: return NULL; } void ddl_pmem_free(struct ddl_buf_addr *addr) { 
struct ddl_context *ddl_context; ddl_context = ddl_get_context(); if (!addr) { pr_err("%s() invalid args\n", __func__); return; } if (ddl_context->video_ion_client) { if (!IS_ERR_OR_NULL(addr->alloc_handle)) { ion_unmap_kernel(ddl_context->video_ion_client, addr->alloc_handle); if (!res_trk_check_for_sec_session()) { ion_unmap_iommu(ddl_context->video_ion_client, addr->alloc_handle, VIDEO_DOMAIN, VIDEO_MAIN_POOL); } ion_free(ddl_context->video_ion_client, addr->alloc_handle); } } else { if (addr->mapped_buffer) msm_subsystem_unmap_buffer(addr->mapped_buffer); if (addr->alloced_phys_addr) free_contiguous_memory_by_paddr( (unsigned long)addr->alloced_phys_addr); } memset(addr, 0, sizeof(struct ddl_buf_addr)); } #ifdef DDL_BUF_LOG static void ddl_print_buffer(struct ddl_context *ddl_context, struct ddl_buf_addr *buf, u32 idx, u8 *str) { struct ddl_buf_addr *base_ram; s32 offset; size_t sz, KB = 0; base_ram = &ddl_context->dram_base_a; offset = (s32) DDL_ADDR_OFFSET(*base_ram, *buf); sz = buf->buffer_size; if (sz > 0) { if (!(sz % 1024)) { sz /= 1024; KB++; if (!(sz % 1024)) { sz /= 1024; KB++; } } } DDL_MSG_LOW("\n%12s [%2d]: 0x%08x [0x%04x], 0x%08x(%d%s), %s", str, idx, (u32) buf->align_physical_addr, (offset > 0) ? offset : 0, buf->buffer_size, sz, ((2 == KB) ? "MB" : (1 == KB) ? "KB" : ""), (((u32) buf->virtual_base_addr) ? 
"Alloc" : "")); } static void ddl_print_port(struct ddl_context *ddl_context, struct ddl_buf_addr *buf) { struct ddl_buf_addr *a = &ddl_context->dram_base_a; struct ddl_buf_addr *b = &ddl_context->dram_base_b; if (!buf->align_physical_addr || !buf->buffer_size) return; if (buf->align_physical_addr >= a->align_physical_addr && buf->align_physical_addr + buf->buffer_size <= a->align_physical_addr + a->buffer_size) DDL_MSG_LOW(" -A [0x%x]-", DDL_ADDR_OFFSET(*a, *buf)); else if (buf->align_physical_addr >= b->align_physical_addr && buf->align_physical_addr + buf->buffer_size <= b->align_physical_addr + b->buffer_size) DDL_MSG_LOW(" -B [0x%x]-", DDL_ADDR_OFFSET(*b, *buf)); else DDL_MSG_LOW(" -?-"); } static void ddl_print_buffer_port(struct ddl_context *ddl_context, struct ddl_buf_addr *buf, u32 idx, u8 *str) { DDL_MSG_LOW("\n"); ddl_print_buffer(ddl_context, buf, idx, str); ddl_print_port(ddl_context, buf); } void ddl_list_buffers(struct ddl_client_context *ddl) { struct ddl_context *ddl_context; u32 i; ddl_context = ddl->ddl_context; DDL_MSG_LOW("\n\n"); DDL_MSG_LOW("\n Buffer : Start [offs], Size \ (Size), Alloc/Port"); DDL_MSG_LOW("\n-------------------------------------------------------\ -------------------------"); ddl_print_buffer(ddl_context, &ddl_context->dram_base_a, 0, "dram_base_a"); ddl_print_buffer(ddl_context, &ddl_context->dram_base_b, 0, "dram_base_b"); if (ddl->codec_data.hdr.decoding) { struct ddl_dec_buffers *dec_bufs = &ddl->codec_data.decoder.hw_bufs; for (i = 0; i < 32; i++) ddl_print_buffer_port(ddl_context, &dec_bufs->h264Mv[i], i, "h264Mv"); ddl_print_buffer_port(ddl_context, &dec_bufs->h264Vert_nb_mv, 0, "h264Vert_nb_mv"); ddl_print_buffer_port(ddl_context, &dec_bufs->h264Nb_ip, 0, "h264Nb_ip"); ddl_print_buffer_port(ddl_context, &dec_bufs->nb_dcac, 0, "nb_dcac"); ddl_print_buffer_port(ddl_context, &dec_bufs->upnb_mv, 0, "upnb_mv"); ddl_print_buffer_port(ddl_context, &dec_bufs->sub_anchor_mv, 0, "sub_anchor_mv"); 
ddl_print_buffer_port(ddl_context, &dec_bufs->overlay_xform, 0, "overlay_xform"); ddl_print_buffer_port(ddl_context, &dec_bufs->bit_plane3, 0, "bit_plane3"); ddl_print_buffer_port(ddl_context, &dec_bufs->bit_plane2, 0, "bit_plane2"); ddl_print_buffer_port(ddl_context, &dec_bufs->bit_plane1, 0, "bit_plane1"); ddl_print_buffer_port(ddl_context, dec_bufs->stx_parser, 0, "stx_parser"); ddl_print_buffer_port(ddl_context, &dec_bufs->desc, 0, "desc"); ddl_print_buffer_port(ddl_context, &dec_bufs->context, 0, "context"); } else { struct ddl_enc_buffers *enc_bufs = &ddl->codec_data.encoder.hw_bufs; for (i = 0; i < 4; i++) ddl_print_buffer_port(ddl_context, &enc_bufs->dpb_y[i], i, "dpb_y"); for (i = 0; i < 4; i++) ddl_print_buffer_port(ddl_context, &enc_bufs->dpb_c[i], i, "dpb_c"); ddl_print_buffer_port(ddl_context, &enc_bufs->mv, 0, "mv"); ddl_print_buffer_port(ddl_context, &enc_bufs->col_zero, 0, "col_zero"); ddl_print_buffer_port(ddl_context, &enc_bufs->md, 0, "md"); ddl_print_buffer_port(ddl_context, &enc_bufs->pred, 0, "pred"); ddl_print_buffer_port(ddl_context, &enc_bufs->nbor_info, 0, "nbor_info"); ddl_print_buffer_port(ddl_context, &enc_bufs->acdc_coef, 0, "acdc_coef"); ddl_print_buffer_port(ddl_context, &enc_bufs->context, 0, "context"); } } #endif u32 ddl_fw_init(struct ddl_buf_addr *dram_base) { u8 *dest_addr; dest_addr = DDL_GET_ALIGNED_VITUAL(*dram_base); DDL_MSG_LOW("FW Addr / FW Size : %x/%d", (u32)vidc_video_codec_fw, vidc_video_codec_fw_size); if (res_trk_check_for_sec_session() && res_trk_is_cp_enabled()) { if (res_trk_enable_footswitch()) { pr_err("Failed to enable footswitch"); return false; } if (res_trk_enable_iommu_clocks()) { res_trk_disable_footswitch(); pr_err("Failed to enable iommu clocks\n"); return false; } dram_base->pil_cookie = pil_get("vidc"); if (res_trk_disable_iommu_clocks()) pr_err("Failed to disable iommu clocks\n"); if (IS_ERR_OR_NULL(dram_base->pil_cookie)) { res_trk_disable_footswitch(); pr_err("pil_get failed\n"); return false; } } 
else { if (vidc_video_codec_fw_size > dram_base->buffer_size || !vidc_video_codec_fw) return false; memcpy(dest_addr, vidc_video_codec_fw, vidc_video_codec_fw_size); } return true; } void ddl_fw_release(struct ddl_buf_addr *dram_base) { void *cookie = dram_base->pil_cookie; if (res_trk_is_cp_enabled() && res_trk_check_for_sec_session()) { res_trk_close_secure_session(); if (IS_ERR_OR_NULL(cookie)) { pr_err("Invalid params"); return; } if (res_trk_enable_footswitch()) { pr_err("Failed to enable footswitch"); return; } if (res_trk_enable_iommu_clocks()) { res_trk_disable_footswitch(); pr_err("Failed to enable iommu clocks\n"); return; } pil_put(cookie); if (res_trk_disable_iommu_clocks()) pr_err("Failed to disable iommu clocks\n"); if (res_trk_disable_footswitch()) pr_err("Failed to disable footswitch\n"); } else { if (res_trk_check_for_sec_session()) res_trk_close_secure_session(); res_trk_release_fw_addr(); } } void ddl_set_core_start_time(const char *func_name, u32 index) { u32 act_time; struct timeval ddl_tv; struct time_data *time_data = &proc_time[index]; do_gettimeofday(&ddl_tv); act_time = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000); if (!time_data->ddl_t1) { time_data->ddl_t1 = act_time; DDL_MSG_LOW("\n%s(): Start Time (%u)", func_name, act_time); } else if (vidc_msg_timing) { DDL_MSG_LOW("\n%s(): Timer already started! 
St(%u) Act(%u)", func_name, time_data->ddl_t1, act_time); } } void ddl_calc_core_proc_time(const char *func_name, u32 index, struct ddl_client_context *ddl) { struct time_data *time_data = &proc_time[index]; struct ddl_decoder_data *decoder = NULL; if (time_data->ddl_t1) { int ddl_t2; struct timeval ddl_tv; do_gettimeofday(&ddl_tv); ddl_t2 = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000); time_data->ddl_ttotal += (ddl_t2 - time_data->ddl_t1); time_data->ddl_count++; if (vidc_msg_timing) { DDL_MSG_TIME("\n%s(): cnt(%u) End Time (%u)" "Diff(%u) Avg(%u)", func_name, time_data->ddl_count, ddl_t2, ddl_t2 - time_data->ddl_t1, time_data->ddl_ttotal/time_data->ddl_count); } if ((index == DEC_OP_TIME) && (time_data->ddl_count > 2) && (time_data->ddl_count < 6)) { decoder = &(ddl->codec_data.decoder); decoder->dec_time_sum = decoder->dec_time_sum + ddl_t2 - time_data->ddl_t1; if (time_data->ddl_count == 5) decoder->avg_dec_time = decoder->dec_time_sum / 3; } time_data->ddl_t1 = 0; } } void ddl_calc_core_proc_time_cnt(const char *func_name, u32 index, u32 count) { struct time_data *time_data = &proc_time[index]; if (time_data->ddl_t1) { int ddl_t2; struct timeval ddl_tv; do_gettimeofday(&ddl_tv); ddl_t2 = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000); time_data->ddl_ttotal += (ddl_t2 - time_data->ddl_t1); time_data->ddl_count += count; DDL_MSG_TIME("\n%s(): cnt(%u) End Time (%u) Diff(%u) Avg(%u)", func_name, time_data->ddl_count, ddl_t2, ddl_t2 - time_data->ddl_t1, time_data->ddl_ttotal/time_data->ddl_count); time_data->ddl_t1 = 0; } } void ddl_update_core_start_time(const char *func_name, u32 index) { u32 act_time; struct timeval ddl_tv; struct time_data *time_data = &proc_time[index]; do_gettimeofday(&ddl_tv); act_time = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000); time_data->ddl_t1 = act_time; DDL_MSG_LOW("\n%s(): Start time updated Act(%u)", func_name, act_time); } void ddl_reset_core_time_variables(u32 index) { proc_time[index].ddl_t1 = 0; 
proc_time[index].ddl_ttotal = 0; proc_time[index].ddl_count = 0; } int ddl_get_core_decode_proc_time(u32 *ddl_handle) { int avg_time = 0; struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; avg_time = ddl_vidc_decode_get_avg_time(ddl); return avg_time; } void ddl_reset_avg_dec_time(u32 *ddl_handle) { struct ddl_client_context *ddl = (struct ddl_client_context *) ddl_handle; ddl_vidc_decode_reset_avg_time(ddl); }
gpl-2.0
markfasheh/btrfs-stuff
arch/arm/plat-samsung/s5p-irq-eint.c
277
5221
/* * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * S5P - IRQ EINT support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/device.h> #include <linux/gpio.h> #include <asm/hardware/vic.h> #include <plat/regs-irqtype.h> #include <mach/map.h> #include <plat/cpu.h> #include <plat/pm.h> #include <plat/gpio-cfg.h> #include <mach/regs-gpio.h> static inline void s5p_irq_eint_mask(struct irq_data *data) { u32 mask; mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq))); mask |= eint_irq_to_bit(data->irq); __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq))); } static void s5p_irq_eint_unmask(struct irq_data *data) { u32 mask; mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq))); mask &= ~(eint_irq_to_bit(data->irq)); __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq))); } static inline void s5p_irq_eint_ack(struct irq_data *data) { __raw_writel(eint_irq_to_bit(data->irq), S5P_EINT_PEND(EINT_REG_NR(data->irq))); } static void s5p_irq_eint_maskack(struct irq_data *data) { /* compiler should in-line these */ s5p_irq_eint_mask(data); s5p_irq_eint_ack(data); } static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type) { int offs = EINT_OFFSET(data->irq); int shift; u32 ctrl, mask; u32 newvalue = 0; switch (type) { case IRQ_TYPE_EDGE_RISING: newvalue = S5P_IRQ_TYPE_EDGE_RISING; break; case IRQ_TYPE_EDGE_FALLING: newvalue = S5P_IRQ_TYPE_EDGE_FALLING; break; case IRQ_TYPE_EDGE_BOTH: newvalue = S5P_IRQ_TYPE_EDGE_BOTH; break; case IRQ_TYPE_LEVEL_LOW: newvalue = S5P_IRQ_TYPE_LEVEL_LOW; break; case IRQ_TYPE_LEVEL_HIGH: newvalue = S5P_IRQ_TYPE_LEVEL_HIGH; break; default: printk(KERN_ERR "No such irq type %d", type); return -EINVAL; } shift = (offs & 0x7) * 4; mask = 0x7 
<< shift; ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq))); ctrl &= ~mask; ctrl |= newvalue << shift; __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq))); if ((0 <= offs) && (offs < 8)) s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE); else if ((8 <= offs) && (offs < 16)) s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE); else if ((16 <= offs) && (offs < 24)) s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE); else if ((24 <= offs) && (offs < 32)) s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE); else printk(KERN_ERR "No such irq number %d", offs); return 0; } static struct irq_chip s5p_irq_eint = { .name = "s5p-eint", .irq_mask = s5p_irq_eint_mask, .irq_unmask = s5p_irq_eint_unmask, .irq_mask_ack = s5p_irq_eint_maskack, .irq_ack = s5p_irq_eint_ack, .irq_set_type = s5p_irq_eint_set_type, #ifdef CONFIG_PM .irq_set_wake = s3c_irqext_wake, #endif }; /* s5p_irq_demux_eint * * This function demuxes the IRQ from the group0 external interrupts, * from EINTs 16 to 31. It is designed to be inlined into the specific * handler s5p_irq_demux_eintX_Y. * * Each EINT pend/mask registers handle eight of them. 
*/ static inline void s5p_irq_demux_eint(unsigned int start) { u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start))); u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start))); unsigned int irq; status &= ~mask; status &= 0xff; while (status) { irq = fls(status) - 1; generic_handle_irq(irq + start); status &= ~(1 << irq); } } static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) { s5p_irq_demux_eint(IRQ_EINT(16)); s5p_irq_demux_eint(IRQ_EINT(24)); } static inline void s5p_irq_vic_eint_mask(struct irq_data *data) { void __iomem *base = irq_data_get_irq_chip_data(data); s5p_irq_eint_mask(data); writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR); } static void s5p_irq_vic_eint_unmask(struct irq_data *data) { void __iomem *base = irq_data_get_irq_chip_data(data); s5p_irq_eint_unmask(data); writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE); } static inline void s5p_irq_vic_eint_ack(struct irq_data *data) { __raw_writel(eint_irq_to_bit(data->irq), S5P_EINT_PEND(EINT_REG_NR(data->irq))); } static void s5p_irq_vic_eint_maskack(struct irq_data *data) { s5p_irq_vic_eint_mask(data); s5p_irq_vic_eint_ack(data); } static struct irq_chip s5p_irq_vic_eint = { .name = "s5p_vic_eint", .irq_mask = s5p_irq_vic_eint_mask, .irq_unmask = s5p_irq_vic_eint_unmask, .irq_mask_ack = s5p_irq_vic_eint_maskack, .irq_ack = s5p_irq_vic_eint_ack, .irq_set_type = s5p_irq_eint_set_type, #ifdef CONFIG_PM .irq_set_wake = s3c_irqext_wake, #endif }; static int __init s5p_init_irq_eint(void) { int irq; for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++) irq_set_chip(irq, &s5p_irq_vic_eint); for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) { irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq); set_irq_flags(irq, IRQF_VALID); } irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31); return 0; } arch_initcall(s5p_init_irq_eint);
gpl-2.0
barakinflorida/samsung-kernel-aries
arch/mips/mm/hugetlbpage.c
533
2095
/*
 * MIPS Huge TLB Page Support for Kernel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 * Copyright 2005, Embedded Alley Solutions, Inc.
 * Matt Porter <mporter@embeddedalley.com>
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Allocate the page-table slot backing a huge page.  On MIPS a huge
 * page is mapped at the PMD level, so walk (allocating as needed) down
 * to the PMD and return it cast to a pte_t pointer; NULL on allocation
 * failure.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
		      unsigned long sz)
{
	pud_t *pud;
	pte_t *huge_pte = NULL;

	pud = pud_alloc(mm, pgd_offset(mm, addr), addr);
	if (pud)
		huge_pte = (pte_t *)pmd_alloc(mm, pud, addr);

	return huge_pte;
}

/*
 * Look up (without allocating) the PMD-level entry for addr.
 * Returns NULL if any intermediate level is not present.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	return (pte_t *)pmd_offset(pud, addr);
}

/* MIPS does not share huge-page PMDs across mms. */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

/*
 * This function checks for proper alignment of input addr and len
 * parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	/* Both the start address and the length must be HPAGE-aligned. */
	if ((addr | len) & ~HPAGE_MASK)
		return -EINVAL;

	return 0;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE);
}

/*
 * Return the struct page for the (small-page-sized) piece of the huge
 * page that covers address.
 */
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page = pte_page(*(pte_t *)pmd);

	if (page)
		page += (address & ~HPAGE_MASK) >> PAGE_SHIFT;

	return page;
}
gpl-2.0
kalltkaffe/zte-kernel
arch/mips/kernel/unaligned.c
533
13587
/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains exception handler for address error exception with the
 * special capability to execute faulting instructions in software.  The
 * handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data to unaligned addresses is a bad practice even on Intel where
 * only the performance is affected.  Much worse is that such code is non-
 * portable.  Due to several programs that die on MIPS due to alignment
 * problems I decided to implement this handler anyway though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this somewhen in the future when the alignment
 * problems with user programs have been fixed.  For programmers this is the
 * right way to go.
 *
 * Fixing address errors is a per process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I discourage the use of the software
 * emulation strongly - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 for disabling software emulation, enabled otherwise.
 *
 * Below a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for(i = 0; i <= 7; i++)
 *         printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in the practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>

/* Stringify helper used to emit PTR-sized entries into __ex_table. */
#define STR(x)  __STR(x)
#define __STR(x)  #x

/* What to do when an unaligned access is seen (debugfs-selectable). */
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;	/* count of emulated accesses */
static u32 unaligned_action;		/* one of UNALIGNED_ACTION_* */
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

/*
 * Decode the faulting instruction at pc and emulate the unaligned
 * load/store against addr, using lwl/lwr-style byte sequences wrapped
 * in __ex_table fixups so a fault inside the emulation is recoverable.
 * On success the saved register state (regs) is updated and the epc is
 * advanced; on failure the appropriate signal is raised.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;

	regs->regs[0] = 0;

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
	 */
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		/* Load two bytes, sign-extending the high one. */
		__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlb\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlb\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		/* lwl/lwr pair assembles the unaligned word. */
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		/* Like lh_op, but zero-extending. */
		__asm__ __volatile__ (
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"dsll\t%0, %0, 32\n\t"
			"dsrl\t%0, %0, 32\n\t"
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0, (%2)\n"
			"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0, 7(%2)\n"
			"2:\tldr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 1(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 0(%2)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 0(%2)\n\t"
			"srl\t$1,%1, 0x8\n"
			"2:\tsb\t$1, 1(%2)\n\t"
			".set\tat\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tswl\t%1,(%2)\n"
			"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%1, 3(%2)\n"
			"2:\tswr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%1,(%2)\n"
			"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%1, 7(%2)\n"
			"2:\tsdr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;

	case lwc2_op:
	case ldc2_op:
	case swc2_op:
	case sdc2_op:
		/*
		 * These are the coprocessor 2 load/stores.  The current
		 * implementations don't use cp2 and cp2 should always be
		 * disabled in c0_status.  So send SIGILL.
                 * (No longer true: The Sony Praystation uses cp2 for
                 * 3D matrix operations.  Dunno if that thingy has a MMU ...)
		 */
		/* fall through to default: SIGILL */
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

/*
 * Address-error exception entry point.  Decides whether to emulate the
 * faulting access (default), signal the task, or dump registers,
 * depending on the per-process TIF_FIXADE flag and the debugfs
 * unaligned_action setting.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	unsigned int __user *pc;
	mm_segment_t seg;

	/*
	 * Did we catch a fault trying to load an instruction?
	 * Or are we running in MIPS16 mode?
	 */
	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
		goto sigbus;

	pc = (unsigned int __user *) exception_epc(regs);
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;
	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so but ugly ...
	 */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}

#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;

/* Expose the emulation counter and action knob under debugfs. */
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
__initcall(debugfs_unaligned);
#endif
gpl-2.0
TeamWin/android_kernel_samsung_zerolteeu
drivers/mtd/nand/mpc5121_nfc.c
2325
20953
/* * Copyright 2004-2008 Freescale Semiconductor, Inc. * Copyright 2009 Semihalf. * * Approved as OSADL project by a majority of OSADL members and funded * by OSADL membership fees in 2009; for details see www.osadl.org. * * Based on original driver from Freescale Semiconductor * written by John Rigby <jrigby@freescale.com> on basis * of drivers/mtd/nand/mxc_nand.c. Reworked and extended * Piotr Ziecik <kosmo@semihalf.com>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. 
*/ #include <linux/module.h> #include <linux/clk.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/mpc5121.h> /* Addresses for NFC MAIN RAM BUFFER areas */ #define NFC_MAIN_AREA(n) ((n) * 0x200) /* Addresses for NFC SPARE BUFFER areas */ #define NFC_SPARE_BUFFERS 8 #define NFC_SPARE_LEN 0x40 #define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN)) /* MPC5121 NFC registers */ #define NFC_BUF_ADDR 0x1E04 #define NFC_FLASH_ADDR 0x1E06 #define NFC_FLASH_CMD 0x1E08 #define NFC_CONFIG 0x1E0A #define NFC_ECC_STATUS1 0x1E0C #define NFC_ECC_STATUS2 0x1E0E #define NFC_SPAS 0x1E10 #define NFC_WRPROT 0x1E12 #define NFC_NF_WRPRST 0x1E18 #define NFC_CONFIG1 0x1E1A #define NFC_CONFIG2 0x1E1C #define NFC_UNLOCKSTART_BLK0 0x1E20 #define NFC_UNLOCKEND_BLK0 0x1E22 #define NFC_UNLOCKSTART_BLK1 0x1E24 #define NFC_UNLOCKEND_BLK1 0x1E26 #define NFC_UNLOCKSTART_BLK2 0x1E28 #define NFC_UNLOCKEND_BLK2 0x1E2A #define NFC_UNLOCKSTART_BLK3 0x1E2C #define NFC_UNLOCKEND_BLK3 0x1E2E /* Bit Definitions: NFC_BUF_ADDR */ #define NFC_RBA_MASK (7 << 0) #define NFC_ACTIVE_CS_SHIFT 5 #define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT) /* Bit Definitions: NFC_CONFIG */ #define NFC_BLS_UNLOCKED (1 << 1) /* Bit Definitions: NFC_CONFIG1 */ #define NFC_ECC_4BIT (1 << 0) #define NFC_FULL_PAGE_DMA (1 << 1) #define NFC_SPARE_ONLY (1 << 2) #define NFC_ECC_ENABLE (1 << 3) #define NFC_INT_MASK (1 << 4) #define NFC_BIG_ENDIAN (1 << 5) #define NFC_RESET (1 << 6) #define NFC_CE (1 << 7) #define NFC_ONE_CYCLE (1 << 8) #define NFC_PPB_32 (0 << 9) #define NFC_PPB_64 (1 << 9) #define NFC_PPB_128 (2 << 9) #define NFC_PPB_256 (3 << 9) #define NFC_PPB_MASK (3 << 9) #define NFC_FULL_PAGE_INT (1 << 11) /* Bit Definitions: NFC_CONFIG2 */ #define NFC_COMMAND (1 << 0) 
#define NFC_ADDRESS (1 << 1) #define NFC_INPUT (1 << 2) #define NFC_OUTPUT (1 << 3) #define NFC_ID (1 << 4) #define NFC_STATUS (1 << 5) #define NFC_CMD_FAIL (1 << 15) #define NFC_INT (1 << 15) /* Bit Definitions: NFC_WRPROT */ #define NFC_WPC_LOCK_TIGHT (1 << 0) #define NFC_WPC_LOCK (1 << 1) #define NFC_WPC_UNLOCK (1 << 2) #define DRV_NAME "mpc5121_nfc" /* Timeouts */ #define NFC_RESET_TIMEOUT 1000 /* 1 ms */ #define NFC_TIMEOUT (HZ / 10) /* 1/10 s */ struct mpc5121_nfc_prv { struct mtd_info mtd; struct nand_chip chip; int irq; void __iomem *regs; struct clk *clk; wait_queue_head_t irq_waitq; uint column; int spareonly; void __iomem *csreg; struct device *dev; }; static void mpc5121_nfc_done(struct mtd_info *mtd); /* Read NFC register */ static inline u16 nfc_read(struct mtd_info *mtd, uint reg) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; return in_be16(prv->regs + reg); } /* Write NFC register */ static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; out_be16(prv->regs + reg, val); } /* Set bits in NFC register */ static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits) { nfc_write(mtd, reg, nfc_read(mtd, reg) | bits); } /* Clear bits in NFC register */ static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits) { nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits); } /* Invoke address cycle */ static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr) { nfc_write(mtd, NFC_FLASH_ADDR, addr); nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS); mpc5121_nfc_done(mtd); } /* Invoke command cycle */ static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd) { nfc_write(mtd, NFC_FLASH_CMD, cmd); nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND); mpc5121_nfc_done(mtd); } /* Send data from NFC buffers to NAND flash */ static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, 
NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_INPUT); mpc5121_nfc_done(mtd); } /* Receive data from NAND flash */ static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT); mpc5121_nfc_done(mtd); } /* Receive ID from NAND flash */ static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_ID); mpc5121_nfc_done(mtd); } /* Receive status from NAND flash */ static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_STATUS); mpc5121_nfc_done(mtd); } /* NFC interrupt handler */ static irqreturn_t mpc5121_nfc_irq(int irq, void *data) { struct mtd_info *mtd = data; struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK); wake_up(&prv->irq_waitq); return IRQ_HANDLED; } /* Wait for operation complete */ static void mpc5121_nfc_done(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; int rv; if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) { nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK); rv = wait_event_timeout(prv->irq_waitq, (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT); if (!rv) dev_warn(prv->dev, "Timeout while waiting for interrupt.\n"); } nfc_clear(mtd, NFC_CONFIG2, NFC_INT); } /* Do address cycle(s) */ static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page) { struct nand_chip *chip = mtd->priv; u32 pagemask = chip->pagemask; if (column != -1) { mpc5121_nfc_send_addr(mtd, column); if (mtd->writesize > 512) mpc5121_nfc_send_addr(mtd, column >> 8); } if (page != -1) { do { mpc5121_nfc_send_addr(mtd, page & 0xFF); page >>= 8; pagemask >>= 8; } while (pagemask); } } /* Control chip select signals */ static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip) { if 
(chip < 0) { nfc_clear(mtd, NFC_CONFIG1, NFC_CE); return; } nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK); nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) & NFC_ACTIVE_CS_MASK); nfc_set(mtd, NFC_CONFIG1, NFC_CE); } /* Init external chip select logic on ADS5121 board */ static int ads5121_chipselect_init(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; struct device_node *dn; dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld"); if (dn) { prv->csreg = of_iomap(dn, 0); of_node_put(dn); if (!prv->csreg) return -ENOMEM; /* CPLD Register 9 controls NAND /CE Lines */ prv->csreg += 9; return 0; } return -EINVAL; } /* Control chips select signal on ADS5121 board */ static void ads5121_select_chip(struct mtd_info *mtd, int chip) { struct nand_chip *nand = mtd->priv; struct mpc5121_nfc_prv *prv = nand->priv; u8 v; v = in_8(prv->csreg); v |= 0x0F; if (chip >= 0) { mpc5121_nfc_select_chip(mtd, 0); v &= ~(1 << chip); } else mpc5121_nfc_select_chip(mtd, -1); out_8(prv->csreg, v); } /* Read NAND Ready/Busy signal */ static int mpc5121_nfc_dev_ready(struct mtd_info *mtd) { /* * NFC handles ready/busy signal internally. Therefore, this function * always returns status as ready. */ return 1; } /* Write command to NAND flash */ static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command, int column, int page) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; prv->column = (column >= 0) ? column : 0; prv->spareonly = 0; switch (command) { case NAND_CMD_PAGEPROG: mpc5121_nfc_send_prog_page(mtd); break; /* * NFC does not support sub-page reads and writes, * so emulate them using full page transfers. 
*/ case NAND_CMD_READ0: column = 0; break; case NAND_CMD_READ1: prv->column += 256; command = NAND_CMD_READ0; column = 0; break; case NAND_CMD_READOOB: prv->spareonly = 1; command = NAND_CMD_READ0; column = 0; break; case NAND_CMD_SEQIN: mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page); column = 0; break; case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: case NAND_CMD_READID: case NAND_CMD_STATUS: break; default: return; } mpc5121_nfc_send_cmd(mtd, command); mpc5121_nfc_addr_cycle(mtd, column, page); switch (command) { case NAND_CMD_READ0: if (mtd->writesize > 512) mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART); mpc5121_nfc_send_read_page(mtd); break; case NAND_CMD_READID: mpc5121_nfc_send_read_id(mtd); break; case NAND_CMD_STATUS: mpc5121_nfc_send_read_status(mtd); if (chip->options & NAND_BUSWIDTH_16) prv->column = 1; else prv->column = 0; break; } } /* Copy data from/to NFC spare buffers. */ static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, u8 *buffer, uint size, int wr) { struct nand_chip *nand = mtd->priv; struct mpc5121_nfc_prv *prv = nand->priv; uint o, s, sbsize, blksize; /* * NAND spare area is available through NFC spare buffers. * The NFC divides spare area into (page_size / 512) chunks. * Each chunk is placed into separate spare memory area, using * first (spare_size / num_of_chunks) bytes of the buffer. * * For NAND device in which the spare area is not divided fully * by the number of chunks, number of used bytes in each spare * buffer is rounded down to the nearest even number of bytes, * and all remaining bytes are added to the last used spare area. * * For more information read section 26.6.10 of MPC5121e * Microcontroller Reference Manual, Rev. 3. 
*/ /* Calculate number of valid bytes in each spare buffer */ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1; while (size) { /* Calculate spare buffer number */ s = offset / sbsize; if (s > NFC_SPARE_BUFFERS - 1) s = NFC_SPARE_BUFFERS - 1; /* * Calculate offset to requested data block in selected spare * buffer and its size. */ o = offset - (s * sbsize); blksize = min(sbsize - o, size); if (wr) memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o, buffer, blksize); else memcpy_fromio(buffer, prv->regs + NFC_SPARE_AREA(s) + o, blksize); buffer += blksize; offset += blksize; size -= blksize; }; } /* Copy data from/to NFC main and spare buffers */ static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len, int wr) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; uint c = prv->column; uint l; /* Handle spare area access */ if (prv->spareonly || c >= mtd->writesize) { /* Calculate offset from beginning of spare area */ if (c >= mtd->writesize) c -= mtd->writesize; prv->column += len; mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); return; } /* * Handle main area access - limit copy length to prevent * crossing main/spare boundary. 
*/ l = min((uint)len, mtd->writesize - c); prv->column += l; if (wr) memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l); else memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); /* Handle crossing main/spare boundary */ if (l != len) { buf += l; len -= l; mpc5121_nfc_buf_copy(mtd, buf, len, wr); } } /* Read data from NFC buffers */ static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len) { mpc5121_nfc_buf_copy(mtd, buf, len, 0); } /* Write data to NFC buffers */ static void mpc5121_nfc_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); } /* Read byte from NFC buffers */ static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) { u8 tmp; mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp)); return tmp; } /* Read word from NFC buffers */ static u16 mpc5121_nfc_read_word(struct mtd_info *mtd) { u16 tmp; mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp)); return tmp; } /* * Read NFC configuration from Reset Config Word * * NFC is configured during reset in basis of information stored * in Reset Config Word. There is no other way to set NAND block * size, spare size and bus width. */ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; struct mpc512x_reset_module *rm; struct device_node *rmnode; uint rcw_pagesize = 0; uint rcw_sparesize = 0; uint rcw_width; uint rcwh; uint romloc, ps; int ret = 0; rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); if (!rmnode) { dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' " "node in device tree!\n"); return -ENODEV; } rm = of_iomap(rmnode, 0); if (!rm) { dev_err(prv->dev, "Error mapping reset module node!\n"); ret = -EBUSY; goto out; } rcwh = in_be32(&rm->rcwhr); /* Bit 6: NFC bus width */ rcw_width = ((rcwh >> 6) & 0x1) ? 
2 : 1; /* Bit 7: NFC Page/Spare size */ ps = (rcwh >> 7) & 0x1; /* Bits [22:21]: ROM Location */ romloc = (rcwh >> 21) & 0x3; /* Decode RCW bits */ switch ((ps << 2) | romloc) { case 0x00: case 0x01: rcw_pagesize = 512; rcw_sparesize = 16; break; case 0x02: case 0x03: rcw_pagesize = 4096; rcw_sparesize = 128; break; case 0x04: case 0x05: rcw_pagesize = 2048; rcw_sparesize = 64; break; case 0x06: case 0x07: rcw_pagesize = 4096; rcw_sparesize = 218; break; } mtd->writesize = rcw_pagesize; mtd->oobsize = rcw_sparesize; if (rcw_width == 2) chip->options |= NAND_BUSWIDTH_16; dev_notice(prv->dev, "Configured for " "%u-bit NAND, page size %u " "with %u spare.\n", rcw_width * 8, rcw_pagesize, rcw_sparesize); iounmap(rm); out: of_node_put(rmnode); return ret; } /* Free driver resources */ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; if (prv->clk) { clk_disable(prv->clk); clk_put(prv->clk); } if (prv->csreg) iounmap(prv->csreg); } static int mpc5121_nfc_probe(struct platform_device *op) { struct device_node *rootnode, *dn = op->dev.of_node; struct device *dev = &op->dev; struct mpc5121_nfc_prv *prv; struct resource res; struct mtd_info *mtd; struct nand_chip *chip; unsigned long regs_paddr, regs_size; const __be32 *chips_no; int resettime = 0; int retval = 0; int rev, len; struct mtd_part_parser_data ppdata; /* * Check SoC revision. This driver supports only NFC * in MPC5121 revision 2 and MPC5123 revision 3. 
*/ rev = (mfspr(SPRN_SVR) >> 4) & 0xF; if ((rev != 2) && (rev != 3)) { dev_err(dev, "SoC revision %u is not supported!\n", rev); return -ENXIO; } prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL); if (!prv) { dev_err(dev, "Memory exhausted!\n"); return -ENOMEM; } mtd = &prv->mtd; chip = &prv->chip; mtd->priv = chip; chip->priv = prv; prv->dev = dev; /* Read NFC configuration from Reset Config Word */ retval = mpc5121_nfc_read_hw_config(mtd); if (retval) { dev_err(dev, "Unable to read NFC config!\n"); return retval; } prv->irq = irq_of_parse_and_map(dn, 0); if (prv->irq == NO_IRQ) { dev_err(dev, "Error mapping IRQ!\n"); return -EINVAL; } retval = of_address_to_resource(dn, 0, &res); if (retval) { dev_err(dev, "Error parsing memory region!\n"); return retval; } chips_no = of_get_property(dn, "chips", &len); if (!chips_no || len != sizeof(*chips_no)) { dev_err(dev, "Invalid/missing 'chips' property!\n"); return -EINVAL; } regs_paddr = res.start; regs_size = resource_size(&res); if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) { dev_err(dev, "Error requesting memory region!\n"); return -EBUSY; } prv->regs = devm_ioremap(dev, regs_paddr, regs_size); if (!prv->regs) { dev_err(dev, "Error mapping memory region!\n"); return -ENOMEM; } mtd->name = "MPC5121 NAND"; ppdata.of_node = dn; chip->dev_ready = mpc5121_nfc_dev_ready; chip->cmdfunc = mpc5121_nfc_command; chip->read_byte = mpc5121_nfc_read_byte; chip->read_word = mpc5121_nfc_read_word; chip->read_buf = mpc5121_nfc_read_buf; chip->write_buf = mpc5121_nfc_write_buf; chip->select_chip = mpc5121_nfc_select_chip; chip->bbt_options = NAND_BBT_USE_FLASH; chip->ecc.mode = NAND_ECC_SOFT; /* Support external chip-select logic on ADS5121 board */ rootnode = of_find_node_by_path("/"); if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) { retval = ads5121_chipselect_init(mtd); if (retval) { dev_err(dev, "Chipselect init error!\n"); of_node_put(rootnode); return retval; } chip->select_chip = 
ads5121_select_chip; } of_node_put(rootnode); /* Enable NFC clock */ prv->clk = clk_get(dev, "nfc_clk"); if (IS_ERR(prv->clk)) { dev_err(dev, "Unable to acquire NFC clock!\n"); retval = PTR_ERR(prv->clk); goto error; } clk_enable(prv->clk); /* Reset NAND Flash controller */ nfc_set(mtd, NFC_CONFIG1, NFC_RESET); while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) { if (resettime++ >= NFC_RESET_TIMEOUT) { dev_err(dev, "Timeout while resetting NFC!\n"); retval = -EINVAL; goto error; } udelay(1); } /* Enable write to NFC memory */ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED); /* Enable write to all NAND pages */ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000); nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF); nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK); /* * Setup NFC: * - Big Endian transfers, * - Interrupt after full page read/write. */ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK | NFC_FULL_PAGE_INT); /* Set spare area size */ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1); init_waitqueue_head(&prv->irq_waitq); retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME, mtd); if (retval) { dev_err(dev, "Error requesting IRQ!\n"); goto error; } /* Detect NAND chips */ if (nand_scan(mtd, be32_to_cpup(chips_no))) { dev_err(dev, "NAND Flash not found !\n"); devm_free_irq(dev, prv->irq, mtd); retval = -ENXIO; goto error; } /* Set erase block size */ switch (mtd->erasesize / mtd->writesize) { case 32: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32); break; case 64: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64); break; case 128: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128); break; case 256: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256); break; default: dev_err(dev, "Unsupported NAND flash!\n"); devm_free_irq(dev, prv->irq, mtd); retval = -ENXIO; goto error; } dev_set_drvdata(dev, mtd); /* Register device in MTD */ retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); if (retval) { dev_err(dev, "Error adding MTD device!\n"); devm_free_irq(dev, prv->irq, mtd); goto error; } return 0; 
error: mpc5121_nfc_free(dev, mtd); return retval; } static int mpc5121_nfc_remove(struct platform_device *op) { struct device *dev = &op->dev; struct mtd_info *mtd = dev_get_drvdata(dev); struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; nand_release(mtd); devm_free_irq(dev, prv->irq, mtd); mpc5121_nfc_free(dev, mtd); return 0; } static struct of_device_id mpc5121_nfc_match[] = { { .compatible = "fsl,mpc5121-nfc", }, {}, }; static struct platform_driver mpc5121_nfc_driver = { .probe = mpc5121_nfc_probe, .remove = mpc5121_nfc_remove, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = mpc5121_nfc_match, }, }; module_platform_driver(mpc5121_nfc_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("MPC5121 NAND MTD driver"); MODULE_LICENSE("GPL");
gpl-2.0
W4TCH0UT/ZZ_angler
drivers/media/i2c/m5mols/m5mols_capture.c
2837
4345
/* * The Capture code for Fujitsu M-5MOLS ISP * * Copyright (C) 2011 Samsung Electronics Co., Ltd. * Author: HeungJun Kim <riverful.kim@samsung.com> * * Copyright (C) 2009 Samsung Electronics Co., Ltd. * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/m5mols.h> #include <media/s5p_fimc.h> #include "m5mols.h" #include "m5mols_reg.h" /** * m5mols_read_rational - I2C read of a rational number * * Read numerator and denominator from registers @addr_num and @addr_den * respectively and return the division result in @val. */ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num, u32 addr_den, u32 *val) { u32 num, den; int ret = m5mols_read_u32(sd, addr_num, &num); if (!ret) ret = m5mols_read_u32(sd, addr_den, &den); if (ret) return ret; *val = den == 0 ? 0 : num / den; return ret; } /** * m5mols_capture_info - Gather captured image information * * For now it gathers only EXIF information and file size. 
*/ static int m5mols_capture_info(struct m5mols_info *info) { struct m5mols_exif *exif = &info->cap.exif; struct v4l2_subdev *sd = &info->sd; int ret; ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU, EXIF_INFO_EXPTIME_DE, &exif->exposure_time); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE, &exif->shutter_speed); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE, &exif->aperture); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE, &exif->brightness); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE, &exif->exposure_bias); if (ret) return ret; ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval); if (ret) return ret; if (!ret) ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main); if (!ret) ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb); if (!ret) info->cap.total = info->cap.main + info->cap.thumb; return ret; } int m5mols_start_capture(struct m5mols_info *info) { unsigned int framesize = info->cap.buf_size - M5MOLS_JPEG_TAGS_SIZE; struct v4l2_subdev *sd = &info->sd; int ret; /* * Synchronize the controls, set the capture frame resolution and color * format. The frame capture is initiated during switching from Monitor * to Capture mode. 
*/ ret = m5mols_set_mode(info, REG_MONITOR); if (!ret) ret = m5mols_restore_controls(info); if (!ret) ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG); if (!ret) ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, info->resolution); if (!ret) ret = m5mols_write(sd, CAPP_JPEG_SIZE_MAX, framesize); if (!ret) ret = m5mols_set_mode(info, REG_CAPTURE); if (!ret) /* Wait until a frame is captured to ISP internal memory */ ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000); if (ret) return ret; /* * Initiate the captured data transfer to a MIPI-CSI receiver. */ ret = m5mols_write(sd, CAPC_SEL_FRAME, 1); if (!ret) ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN); if (!ret) { bool captured = false; unsigned int size; /* Wait for the capture completion interrupt */ ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000); if (!ret) { captured = true; ret = m5mols_capture_info(info); } size = captured ? info->cap.main : 0; v4l2_dbg(1, m5mols_debug, sd, "%s: size: %d, thumb.: %d B\n", __func__, size, info->cap.thumb); v4l2_subdev_notify(sd, S5P_FIMC_TX_END_NOTIFY, &size); } return ret; }
gpl-2.0
Tof37/Kernel-ES209RA-3.0.8
net/ax25/ax25_route.c
3093
11462
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Steven Whitehouse GW7RRM (stevew@acm.org) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de) * Copyright (C) Frederic Rible F1OAT (frible@teaser.fr) */ #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/timer.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/seq_file.h> static ax25_route *ax25_route_list; static DEFINE_RWLOCK(ax25_route_lock); void ax25_rt_device_down(struct net_device *dev) { ax25_route *s, *t, *ax25_rt; write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; while (ax25_rt != NULL) { s = ax25_rt; ax25_rt = ax25_rt->next; if (s->dev == dev) { if (ax25_route_list == s) { ax25_route_list = s->next; kfree(s->digipeat); kfree(s); } else { for (t = ax25_route_list; t != NULL; t = t->next) { if (t->next == s) { t->next = s->next; kfree(s->digipeat); kfree(s); break; } } } } } write_unlock_bh(&ax25_route_lock); } static int __must_check ax25_rt_add(struct ax25_routes_struct *route) { ax25_route *ax25_rt; ax25_dev *ax25_dev; int i; if ((ax25_dev = 
ax25_addr_ax25dev(&route->port_addr)) == NULL) return -EINVAL; if (route->digi_count > AX25_MAX_DIGIS) return -EINVAL; write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; while (ax25_rt != NULL) { if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 && ax25_rt->dev == ax25_dev->dev) { kfree(ax25_rt->digipeat); ax25_rt->digipeat = NULL; if (route->digi_count != 0) { if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; ax25_rt->digipeat->ndigi = route->digi_count; for (i = 0; i < route->digi_count; i++) { ax25_rt->digipeat->repeated[i] = 0; ax25_rt->digipeat->calls[i] = route->digi_addr[i]; } } write_unlock_bh(&ax25_route_lock); return 0; } ax25_rt = ax25_rt->next; } if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); return -ENOMEM; } atomic_set(&ax25_rt->refcount, 1); ax25_rt->callsign = route->dest_addr; ax25_rt->dev = ax25_dev->dev; ax25_rt->digipeat = NULL; ax25_rt->ip_mode = ' '; if (route->digi_count != 0) { if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); kfree(ax25_rt); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; ax25_rt->digipeat->ndigi = route->digi_count; for (i = 0; i < route->digi_count; i++) { ax25_rt->digipeat->repeated[i] = 0; ax25_rt->digipeat->calls[i] = route->digi_addr[i]; } } ax25_rt->next = ax25_route_list; ax25_route_list = ax25_rt; write_unlock_bh(&ax25_route_lock); return 0; } void __ax25_put_route(ax25_route *ax25_rt) { kfree(ax25_rt->digipeat); kfree(ax25_rt); } static int ax25_rt_del(struct ax25_routes_struct *route) { ax25_route *s, *t, *ax25_rt; ax25_dev *ax25_dev; if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL) return -EINVAL; write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; while (ax25_rt != NULL) { s = ax25_rt; ax25_rt = ax25_rt->next; if (s->dev == ax25_dev->dev && 
ax25cmp(&route->dest_addr, &s->callsign) == 0) { if (ax25_route_list == s) { ax25_route_list = s->next; ax25_put_route(s); } else { for (t = ax25_route_list; t != NULL; t = t->next) { if (t->next == s) { t->next = s->next; ax25_put_route(s); break; } } } } } write_unlock_bh(&ax25_route_lock); return 0; } static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option) { ax25_route *ax25_rt; ax25_dev *ax25_dev; int err = 0; if ((ax25_dev = ax25_addr_ax25dev(&rt_option->port_addr)) == NULL) return -EINVAL; write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; while (ax25_rt != NULL) { if (ax25_rt->dev == ax25_dev->dev && ax25cmp(&rt_option->dest_addr, &ax25_rt->callsign) == 0) { switch (rt_option->cmd) { case AX25_SET_RT_IPMODE: switch (rt_option->arg) { case ' ': case 'D': case 'V': ax25_rt->ip_mode = rt_option->arg; break; default: err = -EINVAL; goto out; } break; default: err = -EINVAL; goto out; } } ax25_rt = ax25_rt->next; } out: write_unlock_bh(&ax25_route_lock); return err; } int ax25_rt_ioctl(unsigned int cmd, void __user *arg) { struct ax25_route_opt_struct rt_option; struct ax25_routes_struct route; switch (cmd) { case SIOCADDRT: if (copy_from_user(&route, arg, sizeof(route))) return -EFAULT; return ax25_rt_add(&route); case SIOCDELRT: if (copy_from_user(&route, arg, sizeof(route))) return -EFAULT; return ax25_rt_del(&route); case SIOCAX25OPTRT: if (copy_from_user(&rt_option, arg, sizeof(rt_option))) return -EFAULT; return ax25_rt_opt(&rt_option); default: return -EINVAL; } } #ifdef CONFIG_PROC_FS static void *ax25_rt_seq_start(struct seq_file *seq, loff_t *pos) __acquires(ax25_route_lock) { struct ax25_route *ax25_rt; int i = 1; read_lock(&ax25_route_lock); if (*pos == 0) return SEQ_START_TOKEN; for (ax25_rt = ax25_route_list; ax25_rt != NULL; ax25_rt = ax25_rt->next) { if (i == *pos) return ax25_rt; ++i; } return NULL; } static void *ax25_rt_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return (v == SEQ_START_TOKEN) ? 
ax25_route_list : ((struct ax25_route *) v)->next; } static void ax25_rt_seq_stop(struct seq_file *seq, void *v) __releases(ax25_route_lock) { read_unlock(&ax25_route_lock); } static int ax25_rt_seq_show(struct seq_file *seq, void *v) { char buf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "callsign dev mode digipeaters\n"); else { struct ax25_route *ax25_rt = v; const char *callsign; int i; if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0) callsign = "default"; else callsign = ax2asc(buf, &ax25_rt->callsign); seq_printf(seq, "%-9s %-4s", callsign, ax25_rt->dev ? ax25_rt->dev->name : "???"); switch (ax25_rt->ip_mode) { case 'V': seq_puts(seq, " vc"); break; case 'D': seq_puts(seq, " dg"); break; default: seq_puts(seq, " *"); break; } if (ax25_rt->digipeat != NULL) for (i = 0; i < ax25_rt->digipeat->ndigi; i++) seq_printf(seq, " %s", ax2asc(buf, &ax25_rt->digipeat->calls[i])); seq_puts(seq, "\n"); } return 0; } static const struct seq_operations ax25_rt_seqops = { .start = ax25_rt_seq_start, .next = ax25_rt_seq_next, .stop = ax25_rt_seq_stop, .show = ax25_rt_seq_show, }; static int ax25_rt_info_open(struct inode *inode, struct file *file) { return seq_open(file, &ax25_rt_seqops); } const struct file_operations ax25_route_fops = { .owner = THIS_MODULE, .open = ax25_rt_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* * Find AX.25 route * * Only routes with a reference count of zero can be destroyed. 
*/ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) { ax25_route *ax25_spe_rt = NULL; ax25_route *ax25_def_rt = NULL; ax25_route *ax25_rt; read_lock(&ax25_route_lock); /* * Bind to the physical interface we heard them on, or the default * route if none is found; */ for (ax25_rt = ax25_route_list; ax25_rt != NULL; ax25_rt = ax25_rt->next) { if (dev == NULL) { if (ax25cmp(&ax25_rt->callsign, addr) == 0 && ax25_rt->dev != NULL) ax25_spe_rt = ax25_rt; if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0 && ax25_rt->dev != NULL) ax25_def_rt = ax25_rt; } else { if (ax25cmp(&ax25_rt->callsign, addr) == 0 && ax25_rt->dev == dev) ax25_spe_rt = ax25_rt; if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0 && ax25_rt->dev == dev) ax25_def_rt = ax25_rt; } } ax25_rt = ax25_def_rt; if (ax25_spe_rt != NULL) ax25_rt = ax25_spe_rt; if (ax25_rt != NULL) ax25_hold_route(ax25_rt); read_unlock(&ax25_route_lock); return ax25_rt; } /* * Adjust path: If you specify a default route and want to connect * a target on the digipeater path but w/o having a special route * set before, the path has to be truncated from your target on. */ static inline void ax25_adjust_path(ax25_address *addr, ax25_digi *digipeat) { int k; for (k = 0; k < digipeat->ndigi; k++) { if (ax25cmp(addr, &digipeat->calls[k]) == 0) break; } digipeat->ndigi = k; } /* * Find which interface to use. 
*/ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) { ax25_uid_assoc *user; ax25_route *ax25_rt; int err = 0; if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) return -EHOSTUNREACH; if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { err = -EHOSTUNREACH; goto put; } user = ax25_findbyuid(current_euid()); if (user) { ax25->source_addr = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { err = -EPERM; goto put; } ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr; } if (ax25_rt->digipeat != NULL) { ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi), GFP_ATOMIC); if (ax25->digipeat == NULL) { err = -ENOMEM; goto put; } ax25_adjust_path(addr, ax25->digipeat); } if (ax25->sk != NULL) { bh_lock_sock(ax25->sk); sock_reset_flag(ax25->sk, SOCK_ZAPPED); bh_unlock_sock(ax25->sk); } put: ax25_put_route(ax25_rt); return err; } struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, ax25_address *dest, ax25_digi *digi) { struct sk_buff *skbn; unsigned char *bp; int len; len = digi->ndigi * AX25_ADDR_LEN; if (skb_headroom(skb) < len) { if ((skbn = skb_realloc_headroom(skb, len)) == NULL) { printk(KERN_CRIT "AX.25: ax25_dg_build_path - out of memory\n"); return NULL; } if (skb->sk != NULL) skb_set_owner_w(skbn, skb->sk); kfree_skb(skb); skb = skbn; } bp = skb_push(skb, len); ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS); return skb; } /* * Free all memory associated with routing structures. */ void __exit ax25_rt_free(void) { ax25_route *s, *ax25_rt = ax25_route_list; write_lock_bh(&ax25_route_lock); while (ax25_rt != NULL) { s = ax25_rt; ax25_rt = ax25_rt->next; kfree(s->digipeat); kfree(s); } write_unlock_bh(&ax25_route_lock); }
gpl-2.0
geoffret/litmus-rt
drivers/infiniband/core/fmr_pool.c
4117
14552
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/jhash.h> #include <linux/kthread.h> #include <rdma/ib_fmr_pool.h> #include "core_priv.h" #define PFX "fmr_pool: " enum { IB_FMR_MAX_REMAPS = 32, IB_FMR_HASH_BITS = 8, IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS, IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1 }; /* * If an FMR is not in use, then the list member will point to either * its pool's free_list (if the FMR can be mapped again; that is, * remap_count < pool->max_remaps) or its pool's dirty_list (if the * FMR needs to be unmapped before being remapped). In either of * these cases it is a bug if the ref_count is not 0. In other words, * if ref_count is > 0, then the list member must not be linked into * either free_list or dirty_list. * * The cache_node member is used to link the FMR into a cache bucket * (if caching is enabled). This is independent of the reference * count of the FMR. When a valid FMR is released, its ref_count is * decremented, and if ref_count reaches 0, the FMR is placed in * either free_list or dirty_list as appropriate. However, it is not * removed from the cache and may be "revived" if a call to * ib_fmr_register_physical() occurs before the FMR is remapped. In * this case we just increment the ref_count and remove the FMR from * free_list/dirty_list. * * Before we remap an FMR from free_list, we remove it from the cache * (to prevent another user from obtaining a stale FMR). When an FMR * is released, we add it to the tail of the free list, so that our * cache eviction policy is "least recently used." * * All manipulation of ref_count, list and cache_node is protected by * pool_lock to maintain consistency. 
*/ struct ib_fmr_pool { spinlock_t pool_lock; int pool_size; int max_pages; int max_remaps; int dirty_watermark; int dirty_len; struct list_head free_list; struct list_head dirty_list; struct hlist_head *cache_bucket; void (*flush_function)(struct ib_fmr_pool *pool, void * arg); void *flush_arg; struct task_struct *thread; atomic_t req_ser; atomic_t flush_ser; wait_queue_head_t force_wait; }; static inline u32 ib_fmr_hash(u64 first_page) { return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) & (IB_FMR_HASH_SIZE - 1); } /* Caller must hold pool_lock */ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, u64 *page_list, int page_list_len, u64 io_virtual_address) { struct hlist_head *bucket; struct ib_pool_fmr *fmr; if (!pool->cache_bucket) return NULL; bucket = pool->cache_bucket + ib_fmr_hash(*page_list); hlist_for_each_entry(fmr, bucket, cache_node) if (io_virtual_address == fmr->io_virtual_address && page_list_len == fmr->page_list_len && !memcmp(page_list, fmr->page_list, page_list_len * sizeof *page_list)) return fmr; return NULL; } static void ib_fmr_batch_release(struct ib_fmr_pool *pool) { int ret; struct ib_pool_fmr *fmr; LIST_HEAD(unmap_list); LIST_HEAD(fmr_list); spin_lock_irq(&pool->pool_lock); list_for_each_entry(fmr, &pool->dirty_list, list) { hlist_del_init(&fmr->cache_node); fmr->remap_count = 0; list_add_tail(&fmr->fmr->list, &fmr_list); #ifdef DEBUG if (fmr->ref_count !=0) { printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n", fmr, fmr->ref_count); } #endif } list_splice_init(&pool->dirty_list, &unmap_list); pool->dirty_len = 0; spin_unlock_irq(&pool->pool_lock); if (list_empty(&unmap_list)) { return; } ret = ib_unmap_fmr(&fmr_list); if (ret) printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret); spin_lock_irq(&pool->pool_lock); list_splice(&unmap_list, &pool->free_list); spin_unlock_irq(&pool->pool_lock); } static int ib_fmr_cleanup_thread(void *pool_ptr) { struct ib_fmr_pool *pool = 
pool_ptr; do { if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { ib_fmr_batch_release(pool); atomic_inc(&pool->flush_ser); wake_up_interruptible(&pool->force_wait); if (pool->flush_function) pool->flush_function(pool, pool->flush_arg); } set_current_state(TASK_INTERRUPTIBLE); if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && !kthread_should_stop()) schedule(); __set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); return 0; } /** * ib_create_fmr_pool - Create an FMR pool * @pd:Protection domain for FMRs * @params:FMR pool parameters * * Create a pool of FMRs. Return value is pointer to new pool or * error code if creation failed. */ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, struct ib_fmr_pool_param *params) { struct ib_device *device; struct ib_fmr_pool *pool; struct ib_device_attr *attr; int i; int ret; int max_remaps; if (!params) return ERR_PTR(-EINVAL); device = pd->device; if (!device->alloc_fmr || !device->dealloc_fmr || !device->map_phys_fmr || !device->unmap_fmr) { printk(KERN_INFO PFX "Device %s does not support FMRs\n", device->name); return ERR_PTR(-ENOSYS); } attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) { printk(KERN_WARNING PFX "couldn't allocate device attr struct\n"); return ERR_PTR(-ENOMEM); } ret = ib_query_device(device, attr); if (ret) { printk(KERN_WARNING PFX "couldn't query device: %d\n", ret); kfree(attr); return ERR_PTR(ret); } if (!attr->max_map_per_fmr) max_remaps = IB_FMR_MAX_REMAPS; else max_remaps = attr->max_map_per_fmr; kfree(attr); pool = kmalloc(sizeof *pool, GFP_KERNEL); if (!pool) { printk(KERN_WARNING PFX "couldn't allocate pool struct\n"); return ERR_PTR(-ENOMEM); } pool->cache_bucket = NULL; pool->flush_function = params->flush_function; pool->flush_arg = params->flush_arg; INIT_LIST_HEAD(&pool->free_list); INIT_LIST_HEAD(&pool->dirty_list); if (params->cache) { pool->cache_bucket = kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, GFP_KERNEL); 
if (!pool->cache_bucket) { printk(KERN_WARNING PFX "Failed to allocate cache in pool\n"); ret = -ENOMEM; goto out_free_pool; } for (i = 0; i < IB_FMR_HASH_SIZE; ++i) INIT_HLIST_HEAD(pool->cache_bucket + i); } pool->pool_size = 0; pool->max_pages = params->max_pages_per_fmr; pool->max_remaps = max_remaps; pool->dirty_watermark = params->dirty_watermark; pool->dirty_len = 0; spin_lock_init(&pool->pool_lock); atomic_set(&pool->req_ser, 0); atomic_set(&pool->flush_ser, 0); init_waitqueue_head(&pool->force_wait); pool->thread = kthread_run(ib_fmr_cleanup_thread, pool, "ib_fmr(%s)", device->name); if (IS_ERR(pool->thread)) { printk(KERN_WARNING PFX "couldn't start cleanup thread\n"); ret = PTR_ERR(pool->thread); goto out_free_pool; } { struct ib_pool_fmr *fmr; struct ib_fmr_attr fmr_attr = { .max_pages = params->max_pages_per_fmr, .max_maps = pool->max_remaps, .page_shift = params->page_shift }; int bytes_per_fmr = sizeof *fmr; if (pool->cache_bucket) bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64); for (i = 0; i < params->pool_size; ++i) { fmr = kmalloc(bytes_per_fmr, GFP_KERNEL); if (!fmr) { printk(KERN_WARNING PFX "failed to allocate fmr " "struct for FMR %d\n", i); goto out_fail; } fmr->pool = pool; fmr->remap_count = 0; fmr->ref_count = 0; INIT_HLIST_NODE(&fmr->cache_node); fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr); if (IS_ERR(fmr->fmr)) { printk(KERN_WARNING PFX "fmr_create failed " "for FMR %d\n", i); kfree(fmr); goto out_fail; } list_add_tail(&fmr->list, &pool->free_list); ++pool->pool_size; } } return pool; out_free_pool: kfree(pool->cache_bucket); kfree(pool); return ERR_PTR(ret); out_fail: ib_destroy_fmr_pool(pool); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(ib_create_fmr_pool); /** * ib_destroy_fmr_pool - Free FMR pool * @pool:FMR pool to free * * Destroy an FMR pool and free all associated resources. 
*/ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) { struct ib_pool_fmr *fmr; struct ib_pool_fmr *tmp; LIST_HEAD(fmr_list); int i; kthread_stop(pool->thread); ib_fmr_batch_release(pool); i = 0; list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { if (fmr->remap_count) { INIT_LIST_HEAD(&fmr_list); list_add_tail(&fmr->fmr->list, &fmr_list); ib_unmap_fmr(&fmr_list); } ib_dealloc_fmr(fmr->fmr); list_del(&fmr->list); kfree(fmr); ++i; } if (i < pool->pool_size) printk(KERN_WARNING PFX "pool still has %d regions registered\n", pool->pool_size - i); kfree(pool->cache_bucket); kfree(pool); } EXPORT_SYMBOL(ib_destroy_fmr_pool); /** * ib_flush_fmr_pool - Invalidate all unmapped FMRs * @pool:FMR pool to flush * * Ensure that all unmapped FMRs are fully invalidated. */ int ib_flush_fmr_pool(struct ib_fmr_pool *pool) { int serial; struct ib_pool_fmr *fmr, *next; /* * The free_list holds FMRs that may have been used * but have not been remapped enough times to be dirty. * Put them on the dirty list now so that the cleanup * thread will reap them too. */ spin_lock_irq(&pool->pool_lock); list_for_each_entry_safe(fmr, next, &pool->free_list, list) { if (fmr->remap_count > 0) list_move(&fmr->list, &pool->dirty_list); } spin_unlock_irq(&pool->pool_lock); serial = atomic_inc_return(&pool->req_ser); wake_up_process(pool->thread); if (wait_event_interruptible(pool->force_wait, atomic_read(&pool->flush_ser) - serial >= 0)) return -EINTR; return 0; } EXPORT_SYMBOL(ib_flush_fmr_pool); /** * ib_fmr_pool_map_phys - * @pool:FMR pool to allocate FMR from * @page_list:List of pages to map * @list_len:Number of pages in @page_list * @io_virtual_address:I/O virtual address for new FMR * * Map an FMR from an FMR pool. 
*/ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, u64 io_virtual_address) { struct ib_fmr_pool *pool = pool_handle; struct ib_pool_fmr *fmr; unsigned long flags; int result; if (list_len < 1 || list_len > pool->max_pages) return ERR_PTR(-EINVAL); spin_lock_irqsave(&pool->pool_lock, flags); fmr = ib_fmr_cache_lookup(pool, page_list, list_len, io_virtual_address); if (fmr) { /* found in cache */ ++fmr->ref_count; if (fmr->ref_count == 1) { list_del(&fmr->list); } spin_unlock_irqrestore(&pool->pool_lock, flags); return fmr; } if (list_empty(&pool->free_list)) { spin_unlock_irqrestore(&pool->pool_lock, flags); return ERR_PTR(-EAGAIN); } fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list); list_del(&fmr->list); hlist_del_init(&fmr->cache_node); spin_unlock_irqrestore(&pool->pool_lock, flags); result = ib_map_phys_fmr(fmr->fmr, page_list, list_len, io_virtual_address); if (result) { spin_lock_irqsave(&pool->pool_lock, flags); list_add(&fmr->list, &pool->free_list); spin_unlock_irqrestore(&pool->pool_lock, flags); printk(KERN_WARNING PFX "fmr_map returns %d\n", result); return ERR_PTR(result); } ++fmr->remap_count; fmr->ref_count = 1; if (pool->cache_bucket) { fmr->io_virtual_address = io_virtual_address; fmr->page_list_len = list_len; memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list)); spin_lock_irqsave(&pool->pool_lock, flags); hlist_add_head(&fmr->cache_node, pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); spin_unlock_irqrestore(&pool->pool_lock, flags); } return fmr; } EXPORT_SYMBOL(ib_fmr_pool_map_phys); /** * ib_fmr_pool_unmap - Unmap FMR * @fmr:FMR to unmap * * Unmap an FMR. The FMR mapping may remain valid until the FMR is * reused (or until ib_flush_fmr_pool() is called). 
*/ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) { struct ib_fmr_pool *pool; unsigned long flags; pool = fmr->pool; spin_lock_irqsave(&pool->pool_lock, flags); --fmr->ref_count; if (!fmr->ref_count) { if (fmr->remap_count < pool->max_remaps) { list_add_tail(&fmr->list, &pool->free_list); } else { list_add_tail(&fmr->list, &pool->dirty_list); if (++pool->dirty_len >= pool->dirty_watermark) { atomic_inc(&pool->req_ser); wake_up_process(pool->thread); } } } #ifdef DEBUG if (fmr->ref_count < 0) printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n", fmr, fmr->ref_count); #endif spin_unlock_irqrestore(&pool->pool_lock, flags); return 0; } EXPORT_SYMBOL(ib_fmr_pool_unmap);
gpl-2.0
javelinanddart/android_kernel_htc_pyramid
drivers/usb/mon/mon_bin.c
5141
33685
/* * The USB Monitor, inspired by Dave Harding's USBMon. * * This is a binary format reader. * * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it) * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/export.h> #include <linux/usb.h> #include <linux/poll.h> #include <linux/compat.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "usb_mon.h" /* * Defined by USB 2.0 clause 9.3, table 9.2. */ #define SETUP_LEN 8 /* ioctl macros */ #define MON_IOC_MAGIC 0x92 #define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1) /* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */ #define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats) #define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4) #define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5) #define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get) #define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch) #define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8) /* #9 was MON_IOCT_SETAPI */ #define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get) #ifdef CONFIG_COMPAT #define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32) #define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32) #define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32) #endif /* * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc). * But it's all right. Just use a simple way to make sure the chunk is never * smaller than a page. * * N.B. An application does not know our chunk size. * * Woops, get_zeroed_page() returns a single page. I guess we're stuck with * page-sized chunks for the time being. 
*/ #define CHUNK_SIZE PAGE_SIZE #define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1)) /* * The magic limit was calculated so that it allows the monitoring * application to pick data once in two ticks. This way, another application, * which presumably drives the bus, gets to hog CPU, yet we collect our data. * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an * enormous overhead built into the bus protocol, so we need about 1000 KB. * * This is still too much for most cases, where we just snoop a few * descriptor fetches for enumeration. So, the default is a "reasonable" * amount for systems with HZ=250 and incomplete bus saturation. * * XXX What about multi-megabyte URBs which take minutes to transfer? */ #define BUFF_MAX CHUNK_ALIGN(1200*1024) #define BUFF_DFL CHUNK_ALIGN(300*1024) #define BUFF_MIN CHUNK_ALIGN(8*1024) /* * The per-event API header (2 per URB). * * This structure is seen in userland as defined by the documentation. */ struct mon_bin_hdr { u64 id; /* URB ID - from submission to callback */ unsigned char type; /* Same as in text API; extensible. */ unsigned char xfer_type; /* ISO, Intr, Control, Bulk */ unsigned char epnum; /* Endpoint number and transfer direction */ unsigned char devnum; /* Device address */ unsigned short busnum; /* Bus number */ char flag_setup; char flag_data; s64 ts_sec; /* gettimeofday */ s32 ts_usec; /* gettimeofday */ int status; unsigned int len_urb; /* Length of data (submitted or actual) */ unsigned int len_cap; /* Delivered length */ union { unsigned char setup[SETUP_LEN]; /* Only for Control S-type */ struct iso_rec { int error_count; int numdesc; } iso; } s; int interval; int start_frame; unsigned int xfer_flags; unsigned int ndesc; /* Actual number of ISO descriptors */ }; /* * ISO vector, packed into the head of data stream. * This has to take 16 bytes to make sure that the end of buffer * wrap is not happening in the middle of a descriptor. 
*/ struct mon_bin_isodesc { int iso_status; unsigned int iso_off; unsigned int iso_len; u32 _pad; }; /* per file statistic */ struct mon_bin_stats { u32 queued; u32 dropped; }; struct mon_bin_get { struct mon_bin_hdr __user *hdr; /* Can be 48 bytes or 64. */ void __user *data; size_t alloc; /* Length of data (can be zero) */ }; struct mon_bin_mfetch { u32 __user *offvec; /* Vector of events fetched */ u32 nfetch; /* Number of events to fetch (out: fetched) */ u32 nflush; /* Number of events to flush */ }; #ifdef CONFIG_COMPAT struct mon_bin_get32 { u32 hdr32; u32 data32; u32 alloc32; }; struct mon_bin_mfetch32 { u32 offvec32; u32 nfetch32; u32 nflush32; }; #endif /* Having these two values same prevents wrapping of the mon_bin_hdr */ #define PKT_ALIGN 64 #define PKT_SIZE 64 #define PKT_SZ_API0 48 /* API 0 (2.6.20) size */ #define PKT_SZ_API1 64 /* API 1 size: extra fields */ #define ISODESC_MAX 128 /* Same number as usbfs allows, 2048 bytes. */ /* max number of USB bus supported */ #define MON_BIN_MAX_MINOR 128 /* * The buffer: map of used pages. */ struct mon_pgmap { struct page *pg; unsigned char *ptr; /* XXX just use page_to_virt everywhere? */ }; /* * This gets associated with an open file struct. */ struct mon_reader_bin { /* The buffer: one per open. */ spinlock_t b_lock; /* Protect b_cnt, b_in */ unsigned int b_size; /* Current size of the buffer - bytes */ unsigned int b_cnt; /* Bytes used */ unsigned int b_in, b_out; /* Offsets into buffer - bytes */ unsigned int b_read; /* Amount of read data in curr. pkt. */ struct mon_pgmap *b_vec; /* The map array */ wait_queue_head_t b_wait; /* Wait for data here */ struct mutex fetch_lock; /* Protect b_read, b_out */ int mmap_active; /* A list of these is needed for "bus 0". Some time later. 
*/ struct mon_reader r; /* Stats */ unsigned int cnt_lost; }; static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp, unsigned int offset) { return (struct mon_bin_hdr *) (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); } #define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0) static unsigned char xfer_to_pipe[4] = { PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT }; static struct class *mon_bin_class; static dev_t mon_bin_dev0; static struct cdev mon_bin_cdev; static void mon_buff_area_fill(const struct mon_reader_bin *rp, unsigned int offset, unsigned int size); static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp); static int mon_alloc_buff(struct mon_pgmap *map, int npages); static void mon_free_buff(struct mon_pgmap *map, int npages); /* * This is a "chunked memcpy". It does not manipulate any counters. */ static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this, unsigned int off, const unsigned char *from, unsigned int length) { unsigned int step_len; unsigned char *buf; unsigned int in_page; while (length) { /* * Determine step_len. */ step_len = length; in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); if (in_page < step_len) step_len = in_page; /* * Copy data and advance pointers. */ buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; memcpy(buf, from, step_len); if ((off += step_len) >= this->b_size) off = 0; from += step_len; length -= step_len; } return off; } /* * This is a little worse than the above because it's "chunked copy_to_user". * The return value is an error code, not an offset. */ static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off, char __user *to, int length) { unsigned int step_len; unsigned char *buf; unsigned int in_page; while (length) { /* * Determine step_len. */ step_len = length; in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); if (in_page < step_len) step_len = in_page; /* * Copy data and advance pointers. 
*/ buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; if (copy_to_user(to, buf, step_len)) return -EINVAL; if ((off += step_len) >= this->b_size) off = 0; to += step_len; length -= step_len; } return 0; } /* * Allocate an (aligned) area in the buffer. * This is called under b_lock. * Returns ~0 on failure. */ static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp, unsigned int size) { unsigned int offset; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; offset = rp->b_in; rp->b_cnt += size; if ((rp->b_in += size) >= rp->b_size) rp->b_in -= rp->b_size; return offset; } /* * This is the same thing as mon_buff_area_alloc, only it does not allow * buffers to wrap. This is needed by applications which pass references * into mmap-ed buffers up their stacks (libpcap can do that). * * Currently, we always have the header stuck with the data, although * it is not strictly speaking necessary. * * When a buffer would wrap, we place a filler packet to mark the space. */ static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp, unsigned int size) { unsigned int offset; unsigned int fill_size; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; if (rp->b_in + size > rp->b_size) { /* * This would wrap. Find if we still have space after * skipping to the end of the buffer. If we do, place * a filler packet and allocate a new packet. */ fill_size = rp->b_size - rp->b_in; if (rp->b_cnt + size + fill_size > rp->b_size) return ~0; mon_buff_area_fill(rp, rp->b_in, fill_size); offset = 0; rp->b_in = size; rp->b_cnt += size + fill_size; } else if (rp->b_in + size == rp->b_size) { offset = rp->b_in; rp->b_in = 0; rp->b_cnt += size; } else { offset = rp->b_in; rp->b_in += size; rp->b_cnt += size; } return offset; } /* * Return a few (kilo-)bytes to the head of the buffer. * This is used if a data fetch fails. 
*/ static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size) { /* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */ rp->b_cnt -= size; if (rp->b_in < size) rp->b_in += rp->b_size; rp->b_in -= size; } /* * This has to be called under both b_lock and fetch_lock, because * it accesses both b_cnt and b_out. */ static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size) { size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); rp->b_cnt -= size; if ((rp->b_out += size) >= rp->b_size) rp->b_out -= rp->b_size; } static void mon_buff_area_fill(const struct mon_reader_bin *rp, unsigned int offset, unsigned int size) { struct mon_bin_hdr *ep; ep = MON_OFF2HDR(rp, offset); memset(ep, 0, PKT_SIZE); ep->type = '@'; ep->len_cap = size - PKT_SIZE; } static inline char mon_bin_get_setup(unsigned char *setupb, const struct urb *urb, char ev_type) { if (urb->setup_packet == NULL) return 'Z'; memcpy(setupb, urb->setup_packet, SETUP_LEN); return 0; } static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, unsigned int length, char *flag) { int i; struct scatterlist *sg; unsigned int this_len; *flag = 0; if (urb->num_sgs == 0) { if (urb->transfer_buffer == NULL) { *flag = 'Z'; return length; } mon_copy_to_buff(rp, offset, urb->transfer_buffer, length); length = 0; } else { /* If IOMMU coalescing occurred, we cannot trust sg_page */ if (urb->transfer_flags & URB_DMA_SG_COMBINED) { *flag = 'D'; return length; } /* Copy up to the first non-addressable segment */ for_each_sg(urb->sg, sg, urb->num_sgs, i) { if (length == 0 || PageHighMem(sg_page(sg))) break; this_len = min_t(unsigned int, sg->length, length); offset = mon_copy_to_buff(rp, offset, sg_virt(sg), this_len); length -= this_len; } if (i == 0) *flag = 'D'; } return length; } /* * This is the look-ahead pass in case of 'C Zi', when actual_length cannot * be used to determine the length of the whole contiguous buffer. 
*/ static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp, struct urb *urb, unsigned int ndesc) { struct usb_iso_packet_descriptor *fp; unsigned int length; length = 0; fp = urb->iso_frame_desc; while (ndesc-- != 0) { if (fp->actual_length != 0) { if (fp->offset + fp->actual_length > length) length = fp->offset + fp->actual_length; } fp++; } return length; } static void mon_bin_get_isodesc(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc) { struct mon_bin_isodesc *dp; struct usb_iso_packet_descriptor *fp; fp = urb->iso_frame_desc; while (ndesc-- != 0) { dp = (struct mon_bin_isodesc *) (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); dp->iso_status = fp->status; dp->iso_off = fp->offset; dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length; dp->_pad = 0; if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size) offset = 0; fp++; } } static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, char ev_type, int status) { const struct usb_endpoint_descriptor *epd = &urb->ep->desc; struct timeval ts; unsigned long flags; unsigned int urb_length; unsigned int offset; unsigned int length; unsigned int delta; unsigned int ndesc, lendesc; unsigned char dir; struct mon_bin_hdr *ep; char data_tag = 0; do_gettimeofday(&ts); spin_lock_irqsave(&rp->b_lock, flags); /* * Find the maximum allowable length, then allocate space. */ urb_length = (ev_type == 'S') ? 
urb->transfer_buffer_length : urb->actual_length; length = urb_length; if (usb_endpoint_xfer_isoc(epd)) { if (urb->number_of_packets < 0) { ndesc = 0; } else if (urb->number_of_packets >= ISODESC_MAX) { ndesc = ISODESC_MAX; } else { ndesc = urb->number_of_packets; } if (ev_type == 'C' && usb_urb_dir_in(urb)) length = mon_bin_collate_isodesc(rp, urb, ndesc); } else { ndesc = 0; } lendesc = ndesc*sizeof(struct mon_bin_isodesc); /* not an issue unless there's a subtle bug in a HCD somewhere */ if (length >= urb->transfer_buffer_length) length = urb->transfer_buffer_length; if (length >= rp->b_size/5) length = rp->b_size/5; if (usb_urb_dir_in(urb)) { if (ev_type == 'S') { length = 0; data_tag = '<'; } /* Cannot rely on endpoint number in case of control ep.0 */ dir = USB_DIR_IN; } else { if (ev_type == 'C') { length = 0; data_tag = '>'; } dir = 0; } if (rp->mmap_active) { offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE + lendesc); } else { offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc); } if (offset == ~0) { rp->cnt_lost++; spin_unlock_irqrestore(&rp->b_lock, flags); return; } ep = MON_OFF2HDR(rp, offset); if ((offset += PKT_SIZE) >= rp->b_size) offset = 0; /* * Fill the allocated area. 
*/ memset(ep, 0, PKT_SIZE); ep->type = ev_type; ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)]; ep->epnum = dir | usb_endpoint_num(epd); ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; ep->ts_usec = ts.tv_usec; ep->status = status; ep->len_urb = urb_length; ep->len_cap = length + lendesc; ep->xfer_flags = urb->transfer_flags; if (usb_endpoint_xfer_int(epd)) { ep->interval = urb->interval; } else if (usb_endpoint_xfer_isoc(epd)) { ep->interval = urb->interval; ep->start_frame = urb->start_frame; ep->s.iso.error_count = urb->error_count; ep->s.iso.numdesc = urb->number_of_packets; } if (usb_endpoint_xfer_control(epd) && ev_type == 'S') { ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type); } else { ep->flag_setup = '-'; } if (ndesc != 0) { ep->ndesc = ndesc; mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc); if ((offset += lendesc) >= rp->b_size) offset -= rp->b_size; } if (length != 0) { length = mon_bin_get_data(rp, offset, urb, length, &ep->flag_data); if (length > 0) { delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); ep->len_cap -= length; delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); mon_buff_area_shrink(rp, delta); } } else { ep->flag_data = data_tag; } spin_unlock_irqrestore(&rp->b_lock, flags); wake_up(&rp->b_wait); } static void mon_bin_submit(void *data, struct urb *urb) { struct mon_reader_bin *rp = data; mon_bin_event(rp, urb, 'S', -EINPROGRESS); } static void mon_bin_complete(void *data, struct urb *urb, int status) { struct mon_reader_bin *rp = data; mon_bin_event(rp, urb, 'C', status); } static void mon_bin_error(void *data, struct urb *urb, int error) { struct mon_reader_bin *rp = data; struct timeval ts; unsigned long flags; unsigned int offset; struct mon_bin_hdr *ep; do_gettimeofday(&ts); spin_lock_irqsave(&rp->b_lock, flags); offset = mon_buff_area_alloc(rp, PKT_SIZE); if (offset == ~0) { /* Not incrementing cnt_lost. Just because. 
*/ spin_unlock_irqrestore(&rp->b_lock, flags); return; } ep = MON_OFF2HDR(rp, offset); memset(ep, 0, PKT_SIZE); ep->type = 'E'; ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)]; ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0; ep->epnum |= usb_endpoint_num(&urb->ep->desc); ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; ep->ts_usec = ts.tv_usec; ep->status = error; ep->flag_setup = '-'; ep->flag_data = 'E'; spin_unlock_irqrestore(&rp->b_lock, flags); wake_up(&rp->b_wait); } static int mon_bin_open(struct inode *inode, struct file *file) { struct mon_bus *mbus; struct mon_reader_bin *rp; size_t size; int rc; mutex_lock(&mon_lock); if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) { mutex_unlock(&mon_lock); return -ENODEV; } if (mbus != &mon_bus0 && mbus->u_bus == NULL) { printk(KERN_ERR TAG ": consistency error on open\n"); mutex_unlock(&mon_lock); return -ENODEV; } rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL); if (rp == NULL) { rc = -ENOMEM; goto err_alloc; } spin_lock_init(&rp->b_lock); init_waitqueue_head(&rp->b_wait); mutex_init(&rp->fetch_lock); rp->b_size = BUFF_DFL; size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE); if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) { rc = -ENOMEM; goto err_allocvec; } if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0) goto err_allocbuff; rp->r.m_bus = mbus; rp->r.r_data = rp; rp->r.rnf_submit = mon_bin_submit; rp->r.rnf_error = mon_bin_error; rp->r.rnf_complete = mon_bin_complete; mon_reader_add(mbus, &rp->r); file->private_data = rp; mutex_unlock(&mon_lock); return 0; err_allocbuff: kfree(rp->b_vec); err_allocvec: kfree(rp); err_alloc: mutex_unlock(&mon_lock); return rc; } /* * Extract an event from buffer and copy it to user space. * Wait if there is no event ready. * Returns zero or error. 
*/ static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp, struct mon_bin_hdr __user *hdr, unsigned int hdrbytes, void __user *data, unsigned int nbytes) { unsigned long flags; struct mon_bin_hdr *ep; size_t step_len; unsigned int offset; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } ep = MON_OFF2HDR(rp, rp->b_out); if (copy_to_user(hdr, ep, hdrbytes)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } step_len = min(ep->len_cap, nbytes); if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0; if (copy_from_buf(rp, offset, data, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } spin_lock_irqsave(&rp->b_lock, flags); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; mutex_unlock(&rp->fetch_lock); return 0; } static int mon_bin_release(struct inode *inode, struct file *file) { struct mon_reader_bin *rp = file->private_data; struct mon_bus* mbus = rp->r.m_bus; mutex_lock(&mon_lock); if (mbus->nreaders <= 0) { printk(KERN_ERR TAG ": consistency error on close\n"); mutex_unlock(&mon_lock); return 0; } mon_reader_del(mbus, &rp->r); mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); kfree(rp); mutex_unlock(&mon_lock); return 0; } static ssize_t mon_bin_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct mon_reader_bin *rp = file->private_data; unsigned int hdrbytes = PKT_SZ_API0; unsigned long flags; struct mon_bin_hdr *ep; unsigned int offset; size_t step_len; char *ptr; ssize_t done = 0; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } ep = MON_OFF2HDR(rp, rp->b_out); if (rp->b_read < hdrbytes) { step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read)); ptr = ((char *)ep) + rp->b_read; if (step_len && copy_to_user(buf, ptr, step_len)) { mutex_unlock(&rp->fetch_lock); return 
-EFAULT; } nbytes -= step_len; buf += step_len; rp->b_read += step_len; done += step_len; } if (rp->b_read >= hdrbytes) { step_len = ep->len_cap; step_len -= rp->b_read - hdrbytes; if (step_len > nbytes) step_len = nbytes; offset = rp->b_out + PKT_SIZE; offset += rp->b_read - hdrbytes; if (offset >= rp->b_size) offset -= rp->b_size; if (copy_from_buf(rp, offset, buf, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nbytes -= step_len; buf += step_len; rp->b_read += step_len; done += step_len; } /* * Check if whole packet was read, and if so, jump to the next one. */ if (rp->b_read >= hdrbytes + ep->len_cap) { spin_lock_irqsave(&rp->b_lock, flags); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; } mutex_unlock(&rp->fetch_lock); return done; } /* * Remove at most nevents from chunked buffer. * Returns the number of removed events. */ static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents) { unsigned long flags; struct mon_bin_hdr *ep; int i; mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); for (i = 0; i < nevents; ++i) { if (MON_RING_EMPTY(rp)) break; ep = MON_OFF2HDR(rp, rp->b_out); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); } spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; mutex_unlock(&rp->fetch_lock); return i; } /* * Fetch at most max event offsets into the buffer and put them into vec. * The events are usually freed later with mon_bin_flush. * Return the effective number of events fetched. 
*/ static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp, u32 __user *vec, unsigned int max) { unsigned int cur_out; unsigned int bytes, avail; unsigned int size; unsigned int nevents; struct mon_bin_hdr *ep; unsigned long flags; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } spin_lock_irqsave(&rp->b_lock, flags); avail = rp->b_cnt; spin_unlock_irqrestore(&rp->b_lock, flags); cur_out = rp->b_out; nevents = 0; bytes = 0; while (bytes < avail) { if (nevents >= max) break; ep = MON_OFF2HDR(rp, cur_out); if (put_user(cur_out, &vec[nevents])) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nevents++; size = ep->len_cap + PKT_SIZE; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if ((cur_out += size) >= rp->b_size) cur_out -= rp->b_size; bytes += size; } mutex_unlock(&rp->fetch_lock); return nevents; } /* * Count events. This is almost the same as the above mon_bin_fetch, * only we do not store offsets into user vector, and we have no limit. */ static int mon_bin_queued(struct mon_reader_bin *rp) { unsigned int cur_out; unsigned int bytes, avail; unsigned int size; unsigned int nevents; struct mon_bin_hdr *ep; unsigned long flags; mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); avail = rp->b_cnt; spin_unlock_irqrestore(&rp->b_lock, flags); cur_out = rp->b_out; nevents = 0; bytes = 0; while (bytes < avail) { ep = MON_OFF2HDR(rp, cur_out); nevents++; size = ep->len_cap + PKT_SIZE; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if ((cur_out += size) >= rp->b_size) cur_out -= rp->b_size; bytes += size; } mutex_unlock(&rp->fetch_lock); return nevents; } /* */ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; // struct mon_bus* mbus = rp->r.m_bus; int ret = 0; struct mon_bin_hdr *ep; unsigned long flags; switch (cmd) { case MON_IOCQ_URB_LEN: /* * N.B. 
This only returns the size of data, without the header. */ spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) { ep = MON_OFF2HDR(rp, rp->b_out); ret = ep->len_cap; } spin_unlock_irqrestore(&rp->b_lock, flags); break; case MON_IOCQ_RING_SIZE: ret = rp->b_size; break; case MON_IOCT_RING_SIZE: /* * Changing the buffer size will flush it's contents; the new * buffer is allocated before releasing the old one to be sure * the device will stay functional also in case of memory * pressure. */ { int size; struct mon_pgmap *vec; if (arg < BUFF_MIN || arg > BUFF_MAX) return -EINVAL; size = CHUNK_ALIGN(arg); if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE), GFP_KERNEL)) == NULL) { ret = -ENOMEM; break; } ret = mon_alloc_buff(vec, size/CHUNK_SIZE); if (ret < 0) { kfree(vec); break; } mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); rp->b_vec = vec; rp->b_size = size; rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; rp->cnt_lost = 0; spin_unlock_irqrestore(&rp->b_lock, flags); mutex_unlock(&rp->fetch_lock); } break; case MON_IOCH_MFLUSH: ret = mon_bin_flush(rp, arg); break; case MON_IOCX_GET: case MON_IOCX_GETX: { struct mon_bin_get getb; if (copy_from_user(&getb, (void __user *)arg, sizeof(struct mon_bin_get))) return -EFAULT; if (getb.alloc > 0x10000000) /* Want to cast to u32 */ return -EINVAL; ret = mon_bin_get_event(file, rp, getb.hdr, (cmd == MON_IOCX_GET)? 
PKT_SZ_API0: PKT_SZ_API1, getb.data, (unsigned int)getb.alloc); } break; case MON_IOCX_MFETCH: { struct mon_bin_mfetch mfetch; struct mon_bin_mfetch __user *uptr; uptr = (struct mon_bin_mfetch __user *)arg; if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) return -EFAULT; if (mfetch.nflush) { ret = mon_bin_flush(rp, mfetch.nflush); if (ret < 0) return ret; if (put_user(ret, &uptr->nflush)) return -EFAULT; } ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch); if (ret < 0) return ret; if (put_user(ret, &uptr->nfetch)) return -EFAULT; ret = 0; } break; case MON_IOCG_STATS: { struct mon_bin_stats __user *sp; unsigned int nevents; unsigned int ndropped; spin_lock_irqsave(&rp->b_lock, flags); ndropped = rp->cnt_lost; rp->cnt_lost = 0; spin_unlock_irqrestore(&rp->b_lock, flags); nevents = mon_bin_queued(rp); sp = (struct mon_bin_stats __user *)arg; if (put_user(ndropped, &sp->dropped)) return -EFAULT; if (put_user(nevents, &sp->queued)) return -EFAULT; } break; default: return -ENOTTY; } return ret; } #ifdef CONFIG_COMPAT static long mon_bin_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; int ret; switch (cmd) { case MON_IOCX_GET32: case MON_IOCX_GETX32: { struct mon_bin_get32 getb; if (copy_from_user(&getb, (void __user *)arg, sizeof(struct mon_bin_get32))) return -EFAULT; ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32), (cmd == MON_IOCX_GET32)? 
PKT_SZ_API0: PKT_SZ_API1, compat_ptr(getb.data32), getb.alloc32); if (ret < 0) return ret; } return 0; case MON_IOCX_MFETCH32: { struct mon_bin_mfetch32 mfetch; struct mon_bin_mfetch32 __user *uptr; uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg); if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) return -EFAULT; if (mfetch.nflush32) { ret = mon_bin_flush(rp, mfetch.nflush32); if (ret < 0) return ret; if (put_user(ret, &uptr->nflush32)) return -EFAULT; } ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32), mfetch.nfetch32); if (ret < 0) return ret; if (put_user(ret, &uptr->nfetch32)) return -EFAULT; } return 0; case MON_IOCG_STATS: return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); case MON_IOCQ_URB_LEN: case MON_IOCQ_RING_SIZE: case MON_IOCT_RING_SIZE: case MON_IOCH_MFLUSH: return mon_bin_ioctl(file, cmd, arg); default: ; } return -ENOTTY; } #endif /* CONFIG_COMPAT */ static unsigned int mon_bin_poll(struct file *file, struct poll_table_struct *wait) { struct mon_reader_bin *rp = file->private_data; unsigned int mask = 0; unsigned long flags; if (file->f_mode & FMODE_READ) poll_wait(file, &rp->b_wait, wait); spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) mask |= POLLIN | POLLRDNORM; /* readable */ spin_unlock_irqrestore(&rp->b_lock, flags); return mask; } /* * open and close: just keep track of how many times the device is * mapped, to use the proper memory allocation function. */ static void mon_bin_vma_open(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; rp->mmap_active++; } static void mon_bin_vma_close(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; rp->mmap_active--; } /* * Map ring pages to user space. 
*/ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct mon_reader_bin *rp = vma->vm_private_data; unsigned long offset, chunk_idx; struct page *pageptr; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rp->b_size) return VM_FAULT_SIGBUS; chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); vmf->page = pageptr; return 0; } static const struct vm_operations_struct mon_bin_vm_ops = { .open = mon_bin_vma_open, .close = mon_bin_vma_close, .fault = mon_bin_vma_fault, }; static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) { /* don't do anything here: "fault" will set up page table entries */ vma->vm_ops = &mon_bin_vm_ops; vma->vm_flags |= VM_RESERVED; vma->vm_private_data = filp->private_data; mon_bin_vma_open(vma); return 0; } static const struct file_operations mon_fops_binary = { .owner = THIS_MODULE, .open = mon_bin_open, .llseek = no_llseek, .read = mon_bin_read, /* .write = mon_text_write, */ .poll = mon_bin_poll, .unlocked_ioctl = mon_bin_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mon_bin_compat_ioctl, #endif .release = mon_bin_release, .mmap = mon_bin_mmap, }; static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp) { DECLARE_WAITQUEUE(waita, current); unsigned long flags; add_wait_queue(&rp->b_wait, &waita); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&rp->b_lock, flags); while (MON_RING_EMPTY(rp)) { spin_unlock_irqrestore(&rp->b_lock, flags); if (file->f_flags & O_NONBLOCK) { set_current_state(TASK_RUNNING); remove_wait_queue(&rp->b_wait, &waita); return -EWOULDBLOCK; /* Same as EAGAIN in Linux */ } schedule(); if (signal_pending(current)) { remove_wait_queue(&rp->b_wait, &waita); return -EINTR; } set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&rp->b_lock, flags); } spin_unlock_irqrestore(&rp->b_lock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&rp->b_wait, &waita); return 0; } static int mon_alloc_buff(struct 
mon_pgmap *map, int npages) { int n; unsigned long vaddr; for (n = 0; n < npages; n++) { vaddr = get_zeroed_page(GFP_KERNEL); if (vaddr == 0) { while (n-- != 0) free_page((unsigned long) map[n].ptr); return -ENOMEM; } map[n].ptr = (unsigned char *) vaddr; map[n].pg = virt_to_page((void *) vaddr); } return 0; } static void mon_free_buff(struct mon_pgmap *map, int npages) { int n; for (n = 0; n < npages; n++) free_page((unsigned long) map[n].ptr); } int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus) { struct device *dev; unsigned minor = ubus? ubus->busnum: 0; if (minor >= MON_BIN_MAX_MINOR) return 0; dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL, MKDEV(MAJOR(mon_bin_dev0), minor), NULL, "usbmon%d", minor); if (IS_ERR(dev)) return 0; mbus->classdev = dev; return 1; } void mon_bin_del(struct mon_bus *mbus) { device_destroy(mon_bin_class, mbus->classdev->devt); } int __init mon_bin_init(void) { int rc; mon_bin_class = class_create(THIS_MODULE, "usbmon"); if (IS_ERR(mon_bin_class)) { rc = PTR_ERR(mon_bin_class); goto err_class; } rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon"); if (rc < 0) goto err_dev; cdev_init(&mon_bin_cdev, &mon_fops_binary); mon_bin_cdev.owner = THIS_MODULE; rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR); if (rc < 0) goto err_add; return 0; err_add: unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); err_dev: class_destroy(mon_bin_class); err_class: return rc; } void mon_bin_exit(void) { cdev_del(&mon_bin_cdev); unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); class_destroy(mon_bin_class); }
gpl-2.0
ridon/ridon-kernel-mediatek-sprout
arch/powerpc/mm/pgtable.c
6933
6881
/* * This file contains common routines for dealing with free of page tables * Along with common page table handling code * * Derived from arch/powerpc/mm/tlb_64.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include "mmu_decl.h" static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; } /* We only try to do i/d cache coherency on stuff that looks like * reasonably "normal" PTEs. We currently require a PTE to be present * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that * on userspace PTEs */ static inline int pte_looks_normal(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) == (_PAGE_PRESENT | _PAGE_USER); } struct page * maybe_pte_to_page(pte_t pte) { unsigned long pfn = pte_pfn(pte); struct page *page; if (unlikely(!pfn_valid(pfn))) return NULL; page = pfn_to_page(pfn); if (PageReserved(page)) return NULL; return page; } #if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 /* Server-style MMU handles coherency when hashing if HW exec permission * is supposed per page (currently 64-bit only). 
If not, then, we always * flush the cache for valid PTEs in set_pte. Embedded CPU without HW exec * support falls into the same category. */ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || cpu_has_feature(CPU_FTR_NOEXECUTE))) { struct page *pg = maybe_pte_to_page(pte); if (!pg) return pte; if (!test_bit(PG_arch_1, &pg->flags)) { #ifdef CONFIG_8xx /* On 8xx, cache control instructions (particularly * "dcbst" from flush_dcache_icache) fault as write * operation if there is an unpopulated TLB entry * for the address in question. To workaround that, * we invalidate the TLB here, thus avoiding dcbst * misbehaviour. */ /* 8xx doesn't care about PID, size or ind args */ _tlbil_va(addr, 0, 0, 0); #endif /* CONFIG_8xx */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); } } return pte; } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { return pte; } #else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */ /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. 
*/ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { struct page *pg; /* No exec permission in the first place, move on */ if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte)) return pte; /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) return pte; /* If the page clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) return pte; /* If it's an exec fault, we flush the cache and make it clean */ if (is_exec_fault()) { flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); return pte; } /* Else, we filter out _PAGE_EXEC */ return __pte(pte_val(pte) & ~_PAGE_EXEC); } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { struct page *pg; /* So here, we only care about exec faults, as we use them * to recover lost _PAGE_EXEC and perform I$/D$ coherency * if necessary. Also if _PAGE_EXEC is already set, same deal, * we just bail out */ if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault()) return pte; #ifdef CONFIG_DEBUG_VM /* So this is an exec fault, _PAGE_EXEC is not set. If it was * an error we would have bailed out earlier in do_page_fault() * but let's make sure of it */ if (WARN_ON(!(vma->vm_flags & VM_EXEC))) return pte; #endif /* CONFIG_DEBUG_VM */ /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) goto bail; /* If the page is already clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) goto bail; /* Clean the page and set PG_arch_1 */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); bail: return __pte(pte_val(pte) | _PAGE_EXEC); } #endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */ /* * set_pte stores a linux PTE into the linux page table. 
*/ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { #ifdef CONFIG_DEBUG_VM WARN_ON(pte_present(*ptep)); #endif /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this * is called. */ pte = set_pte_filter(pte, addr); /* Perform the setting of the PTE */ __set_pte_at(mm, addr, ptep, pte, 0); } /* * This is called when relaxing access to a PTE. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have * handled those two for us, we additionally deal with missing execute * permission here on some processors */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed; entry = set_access_flags_filter(entry, vma, dirty); changed = !pte_same(*(ptep), entry); if (changed) { if (!is_vm_hugetlb_page(vma)) assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(ptep, entry); flush_tlb_page_nohash(vma, address); } return changed; } #ifdef CONFIG_DEBUG_VM void assert_pte_locked(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (mm == &init_mm) return; pgd = mm->pgd + pgd_index(addr); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, addr); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, addr); BUG_ON(!pmd_present(*pmd)); assert_spin_locked(pte_lockptr(mm, pmd)); } #endif /* CONFIG_DEBUG_VM */
gpl-2.0
Fusion-Devices/android_kernel_asus_flo
arch/m68k/platform/523x/gpio.c
7445
8765
/* * Coldfire generic GPIO support * * (C) Copyright 2009, Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfgpio.h> static struct mcf_gpio_chip mcf_gpio_chips[] = { { .gpio_chip = { .label = "PIRQ", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .base = 1, .ngpio = 7, }, .pddr = (void __iomem *) MCFEPORT_EPDDR, .podr = (void __iomem *) MCFEPORT_EPDR, .ppdr = (void __iomem *) MCFEPORT_EPPDR, }, { .gpio_chip = { .label = "ADDR", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 13, .ngpio = 3, }, .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, .podr = (void __iomem *) MCFGPIO_PODR_ADDR, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, }, { .gpio_chip = { .label = "DATAH", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 16, .ngpio = 8, }, .pddr = (void __iomem *) MCFGPIO_PDDR_DATAH, .podr = (void __iomem *) MCFGPIO_PODR_DATAH, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, .setr = 
(void __iomem *) MCFGPIO_PPDSDR_DATAH, .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAH, }, { .gpio_chip = { .label = "DATAL", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 24, .ngpio = 8, }, .pddr = (void __iomem *) MCFGPIO_PDDR_DATAL, .podr = (void __iomem *) MCFGPIO_PODR_DATAL, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAL, }, { .gpio_chip = { .label = "BUSCTL", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 32, .ngpio = 8, }, .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, }, { .gpio_chip = { .label = "BS", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 40, .ngpio = 4, }, .pddr = (void __iomem *) MCFGPIO_PDDR_BS, .podr = (void __iomem *) MCFGPIO_PODR_BS, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, }, { .gpio_chip = { .label = "CS", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 49, .ngpio = 7, }, .pddr = (void __iomem *) MCFGPIO_PDDR_CS, .podr = (void __iomem *) MCFGPIO_PODR_CS, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, .setr = (void __iomem *) 
MCFGPIO_PPDSDR_CS, .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, }, { .gpio_chip = { .label = "SDRAM", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 56, .ngpio = 6, }, .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, }, { .gpio_chip = { .label = "FECI2C", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 64, .ngpio = 4, }, .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, }, { .gpio_chip = { .label = "UARTH", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 72, .ngpio = 2, }, .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, .podr = (void __iomem *) MCFGPIO_PODR_UARTH, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, }, { .gpio_chip = { .label = "UARTL", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 80, .ngpio = 8, }, .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, .podr = (void __iomem *) MCFGPIO_PODR_UARTL, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, .setr = (void __iomem *) 
MCFGPIO_PPDSDR_UARTL, .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, }, { .gpio_chip = { .label = "QSPI", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 88, .ngpio = 5, }, .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, .podr = (void __iomem *) MCFGPIO_PODR_QSPI, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, }, { .gpio_chip = { .label = "TIMER", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 96, .ngpio = 8, }, .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, .podr = (void __iomem *) MCFGPIO_PODR_TIMER, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, }, { .gpio_chip = { .label = "ETPU", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 104, .ngpio = 3, }, .pddr = (void __iomem *) MCFGPIO_PDDR_ETPU, .podr = (void __iomem *) MCFGPIO_PODR_ETPU, .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ETPU, .setr = (void __iomem *) MCFGPIO_PPDSDR_ETPU, .clrr = (void __iomem *) MCFGPIO_PCLRR_ETPU, }, }; static int __init mcf_gpio_init(void) { unsigned i = 0; while (i < ARRAY_SIZE(mcf_gpio_chips)) (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]); return 0; } core_initcall(mcf_gpio_init);
gpl-2.0
corcor67/SMPL_M8_SENSE
arch/sh/kernel/cpu/sh2a/clock-sh7201.c
9237
2017
/* * arch/sh/kernel/cpu/sh2a/clock-sh7201.c * * SH7201 support for the clock framework * * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> * * Based on clock-sh4.c * Copyright (C) 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static const int pll1rate[]={1,2,3,4,6,8}; static const int pfc_divisors[]={1,2,3,4,6,8,12}; #define ifc_divisors pfc_divisors static unsigned int pll2_mult; static void master_clk_init(struct clk *clk) { clk->rate = 10000000 * pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; } static struct sh_clk_ops sh7201_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FREQCR) & 0x0007); return clk->parent->rate / pfc_divisors[idx]; } static struct sh_clk_ops sh7201_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long bus_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FREQCR) & 0x0007); return clk->parent->rate / pfc_divisors[idx]; } static struct sh_clk_ops sh7201_bus_clk_ops = { .recalc = bus_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007); return clk->parent->rate / ifc_divisors[idx]; } static struct sh_clk_ops sh7201_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct sh_clk_ops *sh7201_clk_ops[] = { &sh7201_master_clk_ops, &sh7201_module_clk_ops, &sh7201_bus_clk_ops, &sh7201_cpu_clk_ops, }; void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { if (test_mode_pin(MODE_PIN1 | MODE_PIN0)) pll2_mult = 1; else if (test_mode_pin(MODE_PIN1)) pll2_mult = 2; else pll2_mult = 4; if (idx < ARRAY_SIZE(sh7201_clk_ops)) *ops = sh7201_clk_ops[idx]; }
gpl-2.0
LEPT-Development/android_kernel_lge_msm8916-old
arch/powerpc/boot/stdio.c
12053
6980
/* * Copyright (C) Paul Mackerras 1997. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "string.h" #include "stdio.h" #include "ops.h" size_t strnlen(const char * s, size_t count) { const char *sc; for (sc = s; count-- && *sc != '\0'; ++sc) /* nothing */; return sc - s; } extern unsigned int __div64_32(unsigned long long *dividend, unsigned int divisor); /* The unnecessary pointer compare is there * to check for type safety (n must be 64bit) */ # define do_div(n,base) ({ \ unsigned int __base = (base); \ unsigned int __rem; \ (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \ if (((n) >> 32) == 0) { \ __rem = (unsigned int)(n) % __base; \ (n) = (unsigned int)(n) / __base; \ } else \ __rem = __div64_32(&(n), __base); \ __rem; \ }) static int skip_atoi(const char **s) { int i, c; for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) i = i*10 + c - '0'; return i; } #define ZEROPAD 1 /* pad with zero */ #define SIGN 2 /* unsigned/signed long */ #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ #define SPECIAL 32 /* 0x */ #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) { char c,sign,tmp[66]; const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; int i; if (type & LARGE) digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; if (type & LEFT) type &= ~ZEROPAD; if (base < 2 || base > 36) return 0; c = (type & ZEROPAD) ? 
'0' : ' '; sign = 0; if (type & SIGN) { if ((signed long long)num < 0) { sign = '-'; num = - (signed long long)num; size--; } else if (type & PLUS) { sign = '+'; size--; } else if (type & SPACE) { sign = ' '; size--; } } if (type & SPECIAL) { if (base == 16) size -= 2; else if (base == 8) size--; } i = 0; if (num == 0) tmp[i++]='0'; else while (num != 0) { tmp[i++] = digits[do_div(num, base)]; } if (i > precision) precision = i; size -= precision; if (!(type&(ZEROPAD+LEFT))) while(size-->0) *str++ = ' '; if (sign) *str++ = sign; if (type & SPECIAL) { if (base==8) *str++ = '0'; else if (base==16) { *str++ = '0'; *str++ = digits[33]; } } if (!(type & LEFT)) while (size-- > 0) *str++ = c; while (i < precision--) *str++ = '0'; while (i-- > 0) *str++ = tmp[i]; while (size-- > 0) *str++ = ' '; return str; } int vsprintf(char *buf, const char *fmt, va_list args) { int len; unsigned long long num; int i, base; char * str; const char *s; int flags; /* flags to number() */ int field_width; /* width of output field */ int precision; /* min. # of digits for integers; max number of chars for from string */ int qualifier; /* 'h', 'l', or 'L' for integer fields */ /* 'z' support added 23/7/1999 S.H. 
*/ /* 'z' changed to 'Z' --davidm 1/25/99 */ for (str=buf ; *fmt ; ++fmt) { if (*fmt != '%') { *str++ = *fmt; continue; } /* process flags */ flags = 0; repeat: ++fmt; /* this also skips first '%' */ switch (*fmt) { case '-': flags |= LEFT; goto repeat; case '+': flags |= PLUS; goto repeat; case ' ': flags |= SPACE; goto repeat; case '#': flags |= SPECIAL; goto repeat; case '0': flags |= ZEROPAD; goto repeat; } /* get field width */ field_width = -1; if ('0' <= *fmt && *fmt <= '9') field_width = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ field_width = va_arg(args, int); if (field_width < 0) { field_width = -field_width; flags |= LEFT; } } /* get the precision */ precision = -1; if (*fmt == '.') { ++fmt; if ('0' <= *fmt && *fmt <= '9') precision = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ precision = va_arg(args, int); } if (precision < 0) precision = 0; } /* get the conversion qualifier */ qualifier = -1; if (*fmt == 'l' && *(fmt + 1) == 'l') { qualifier = 'q'; fmt += 2; } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') { qualifier = *fmt; ++fmt; } /* default base */ base = 10; switch (*fmt) { case 'c': if (!(flags & LEFT)) while (--field_width > 0) *str++ = ' '; *str++ = (unsigned char) va_arg(args, int); while (--field_width > 0) *str++ = ' '; continue; case 's': s = va_arg(args, char *); if (!s) s = "<NULL>"; len = strnlen(s, precision); if (!(flags & LEFT)) while (len < field_width--) *str++ = ' '; for (i = 0; i < len; ++i) *str++ = *s++; while (len < field_width--) *str++ = ' '; continue; case 'p': if (field_width == -1) { field_width = 2*sizeof(void *); flags |= ZEROPAD; } str = number(str, (unsigned long) va_arg(args, void *), 16, field_width, precision, flags); continue; case 'n': if (qualifier == 'l') { long * ip = va_arg(args, long *); *ip = (str - buf); } else if (qualifier == 'Z') { size_t * ip = va_arg(args, size_t *); *ip = (str - buf); } else { int * ip = 
va_arg(args, int *); *ip = (str - buf); } continue; case '%': *str++ = '%'; continue; /* integer number formats - set up the flags and "break" */ case 'o': base = 8; break; case 'X': flags |= LARGE; case 'x': base = 16; break; case 'd': case 'i': flags |= SIGN; case 'u': break; default: *str++ = '%'; if (*fmt) *str++ = *fmt; else --fmt; continue; } if (qualifier == 'l') { num = va_arg(args, unsigned long); if (flags & SIGN) num = (signed long) num; } else if (qualifier == 'q') { num = va_arg(args, unsigned long long); if (flags & SIGN) num = (signed long long) num; } else if (qualifier == 'Z') { num = va_arg(args, size_t); } else if (qualifier == 'h') { num = (unsigned short) va_arg(args, int); if (flags & SIGN) num = (signed short) num; } else { num = va_arg(args, unsigned int); if (flags & SIGN) num = (signed int) num; } str = number(str, num, base, field_width, precision, flags); } *str = '\0'; return str-buf; } int sprintf(char * buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i=vsprintf(buf,fmt,args); va_end(args); return i; } static char sprint_buf[1024]; int printf(const char *fmt, ...) { va_list args; int n; va_start(args, fmt); n = vsprintf(sprint_buf, fmt, args); va_end(args); if (console_ops.write) console_ops.write(sprint_buf, n); return n; }
gpl-2.0
StelixROM/kernel_lge_msm8974
drivers/tc/tc.c
12053
5224
/* * TURBOchannel bus services. * * Copyright (c) Harald Koerfgen, 1998 * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki * Copyright (c) 2005 James Simmons * * This file is subject to the terms and conditions of the GNU * General Public License. See the file "COPYING" in the main * directory of this archive for more details. */ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/tc.h> #include <linux/types.h> #include <asm/io.h> static struct tc_bus tc_bus = { .name = "TURBOchannel", }; /* * Probing for TURBOchannel modules. */ static void __init tc_bus_add_devices(struct tc_bus *tbus) { resource_size_t slotsize = tbus->info.slot_size << 20; resource_size_t extslotsize = tbus->ext_slot_size; resource_size_t slotaddr; resource_size_t extslotaddr; resource_size_t devsize; void __iomem *module; struct tc_dev *tdev; int i, slot, err; u8 pattern[4]; long offset; for (slot = 0; slot < tbus->num_tcslots; slot++) { slotaddr = tbus->slot_base + slot * slotsize; extslotaddr = tbus->ext_slot_base + slot * extslotsize; module = ioremap_nocache(slotaddr, slotsize); BUG_ON(!module); offset = TC_OLDCARD; err = 0; err |= tc_preadb(pattern + 0, module + offset + TC_PATTERN0); err |= tc_preadb(pattern + 1, module + offset + TC_PATTERN1); err |= tc_preadb(pattern + 2, module + offset + TC_PATTERN2); err |= tc_preadb(pattern + 3, module + offset + TC_PATTERN3); if (err) goto out_err; if (pattern[0] != 0x55 || pattern[1] != 0x00 || pattern[2] != 0xaa || pattern[3] != 0xff) { offset = TC_NEWCARD; err = 0; err |= tc_preadb(pattern + 0, module + offset + TC_PATTERN0); err |= tc_preadb(pattern + 1, module + offset + TC_PATTERN1); err |= tc_preadb(pattern + 2, module + offset + TC_PATTERN2); err |= tc_preadb(pattern + 3, module + offset + TC_PATTERN3); if (err) goto out_err; } if (pattern[0] 
!= 0x55 || pattern[1] != 0x00 || pattern[2] != 0xaa || pattern[3] != 0xff) goto out_err; /* Found a board, allocate it an entry in the list */ tdev = kzalloc(sizeof(*tdev), GFP_KERNEL); if (!tdev) { printk(KERN_ERR "tc%x: unable to allocate tc_dev\n", slot); goto out_err; } dev_set_name(&tdev->dev, "tc%x", slot); tdev->bus = tbus; tdev->dev.parent = &tbus->dev; tdev->dev.bus = &tc_bus_type; tdev->slot = slot; for (i = 0; i < 8; i++) { tdev->firmware[i] = readb(module + offset + TC_FIRM_VER + 4 * i); tdev->vendor[i] = readb(module + offset + TC_VENDOR + 4 * i); tdev->name[i] = readb(module + offset + TC_MODULE + 4 * i); } tdev->firmware[8] = 0; tdev->vendor[8] = 0; tdev->name[8] = 0; pr_info("%s: %s %s %s\n", dev_name(&tdev->dev), tdev->vendor, tdev->name, tdev->firmware); devsize = readb(module + offset + TC_SLOT_SIZE); devsize <<= 22; if (devsize <= slotsize) { tdev->resource.start = slotaddr; tdev->resource.end = slotaddr + devsize - 1; } else if (devsize <= extslotsize) { tdev->resource.start = extslotaddr; tdev->resource.end = extslotaddr + devsize - 1; } else { printk(KERN_ERR "%s: Cannot provide slot space " "(%dMiB required, up to %dMiB supported)\n", dev_name(&tdev->dev), devsize >> 20, max(slotsize, extslotsize) >> 20); kfree(tdev); goto out_err; } tdev->resource.name = tdev->name; tdev->resource.flags = IORESOURCE_MEM; tc_device_get_irq(tdev); device_register(&tdev->dev); list_add_tail(&tdev->node, &tbus->devices); out_err: iounmap(module); } } /* * The main entry. */ static int __init tc_init(void) { /* Initialize the TURBOchannel bus */ if (tc_bus_get_info(&tc_bus)) return 0; INIT_LIST_HEAD(&tc_bus.devices); dev_set_name(&tc_bus.dev, "tc"); device_register(&tc_bus.dev); if (tc_bus.info.slot_size) { unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000; pr_info("tc: TURBOchannel rev. %d at %d.%d MHz " "(with%s parity)\n", tc_bus.info.revision, tc_clock / 10, tc_clock % 10, tc_bus.info.parity ? 
"" : "out"); tc_bus.resource[0].start = tc_bus.slot_base; tc_bus.resource[0].end = tc_bus.slot_base + (tc_bus.info.slot_size << 20) * tc_bus.num_tcslots - 1; tc_bus.resource[0].name = tc_bus.name; tc_bus.resource[0].flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, &tc_bus.resource[0]) < 0) { printk(KERN_ERR "tc: Cannot reserve resource\n"); return 0; } if (tc_bus.ext_slot_size) { tc_bus.resource[1].start = tc_bus.ext_slot_base; tc_bus.resource[1].end = tc_bus.ext_slot_base + tc_bus.ext_slot_size * tc_bus.num_tcslots - 1; tc_bus.resource[1].name = tc_bus.name; tc_bus.resource[1].flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, &tc_bus.resource[1]) < 0) { printk(KERN_ERR "tc: Cannot reserve resource\n"); release_resource(&tc_bus.resource[0]); return 0; } } tc_bus_add_devices(&tc_bus); } return 0; } subsys_initcall(tc_init);
gpl-2.0
bbelos/YP-G1_GB_Kernel
drivers/infiniband/hw/amso1100/c2_mq.c
14101
4622
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "c2.h" #include "c2_mq.h" void *c2_mq_alloc(struct c2_mq *q) { BUG_ON(q->magic != C2_MQ_MAGIC); BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); if (c2_mq_full(q)) { return NULL; } else { #ifdef DEBUG struct c2wr_hdr *m = (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size); #ifdef CCMSGMAGIC BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC)); m->magic = cpu_to_be32(CCWR_MAGIC); #endif return m; #else return q->msg_pool.host + q->priv * q->msg_size; #endif } } void c2_mq_produce(struct c2_mq *q) { BUG_ON(q->magic != C2_MQ_MAGIC); BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); if (!c2_mq_full(q)) { q->priv = (q->priv + 1) % q->q_size; q->hint_count++; /* Update peer's offset. */ __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared); } } void *c2_mq_consume(struct c2_mq *q) { BUG_ON(q->magic != C2_MQ_MAGIC); BUG_ON(q->type != C2_MQ_HOST_TARGET); if (c2_mq_empty(q)) { return NULL; } else { #ifdef DEBUG struct c2wr_hdr *m = (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size); #ifdef CCMSGMAGIC BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC)); #endif return m; #else return q->msg_pool.host + q->priv * q->msg_size; #endif } } void c2_mq_free(struct c2_mq *q) { BUG_ON(q->magic != C2_MQ_MAGIC); BUG_ON(q->type != C2_MQ_HOST_TARGET); if (!c2_mq_empty(q)) { #ifdef CCMSGMAGIC { struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *) (q->msg_pool.adapter + q->priv * q->msg_size); __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic); } #endif q->priv = (q->priv + 1) % q->q_size; /* Update peer's offset. 
*/ __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared); } } void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count) { BUG_ON(q->magic != C2_MQ_MAGIC); BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); while (wqe_count--) { BUG_ON(c2_mq_empty(q)); *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size); } } #if 0 u32 c2_mq_count(struct c2_mq *q) { s32 count; if (q->type == C2_MQ_HOST_TARGET) count = be16_to_cpu(*q->shared) - q->priv; else count = q->priv - be16_to_cpu(*q->shared); if (count < 0) count += q->q_size; return (u32) count; } #endif /* 0 */ void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, u8 __iomem *pool_start, u16 __iomem *peer, u32 type) { BUG_ON(!q->shared); /* This code assumes the byte swapping has already been done! */ q->index = index; q->q_size = q_size; q->msg_size = msg_size; q->msg_pool.adapter = pool_start; q->peer = (struct c2_mq_shared __iomem *) peer; q->magic = C2_MQ_MAGIC; q->type = type; q->priv = 0; q->hint_count = 0; return; } void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, u8 *pool_start, u16 __iomem *peer, u32 type) { BUG_ON(!q->shared); /* This code assumes the byte swapping has already been done! */ q->index = index; q->q_size = q_size; q->msg_size = msg_size; q->msg_pool.host = pool_start; q->peer = (struct c2_mq_shared __iomem *) peer; q->magic = C2_MQ_MAGIC; q->type = type; q->priv = 0; q->hint_count = 0; return; }
gpl-2.0
OptimusG-Dev-Team/lg-kernel
drivers/media/media-device.c
22
10565
/* * Media device * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/ioctl.h> #include <linux/media.h> #include <linux/export.h> #include <media/media-device.h> #include <media/media-devnode.h> #include <media/media-entity.h> /* ----------------------------------------------------------------------------- * Userspace API */ static int media_device_open(struct file *filp) { return 0; } static int media_device_close(struct file *filp) { return 0; } static int media_device_get_info(struct media_device *dev, struct media_device_info __user *__info) { struct media_device_info info; memset(&info, 0, sizeof(info)); strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver)); strlcpy(info.model, dev->model, sizeof(info.model)); strlcpy(info.serial, dev->serial, sizeof(info.serial)); strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info)); info.media_version = MEDIA_API_VERSION; info.hw_revision = dev->hw_revision; info.driver_version = dev->driver_version; return copy_to_user(__info, &info, sizeof(*__info)); } static struct media_entity *find_entity(struct media_device *mdev, u32 id) { struct media_entity *entity; int next = id & MEDIA_ENT_ID_FLAG_NEXT; id &= ~MEDIA_ENT_ID_FLAG_NEXT; 
spin_lock(&mdev->lock); media_device_for_each_entity(entity, mdev) { if ((entity->id == id && !next) || (entity->id > id && next)) { spin_unlock(&mdev->lock); return entity; } } spin_unlock(&mdev->lock); return NULL; } static long media_device_enum_entities(struct media_device *mdev, struct media_entity_desc __user *uent) { struct media_entity *ent; struct media_entity_desc u_ent; if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) return -EFAULT; ent = find_entity(mdev, u_ent.id); if (ent == NULL) return -EINVAL; u_ent.id = ent->id; u_ent.name[0] = '\0'; if (ent->name) strlcpy(u_ent.name, ent->name, sizeof(u_ent.name)); u_ent.type = ent->type; u_ent.revision = ent->revision; u_ent.flags = ent->flags; u_ent.group_id = ent->group_id; u_ent.pads = ent->num_pads; u_ent.links = ent->num_links - ent->num_backlinks; memcpy(&u_ent.raw, &ent->info, sizeof(ent->info)); if (copy_to_user(uent, &u_ent, sizeof(u_ent))) return -EFAULT; return 0; } static void media_device_kpad_to_upad(const struct media_pad *kpad, struct media_pad_desc *upad) { upad->entity = kpad->entity->id; upad->index = kpad->index; upad->flags = kpad->flags; } static long media_device_enum_links(struct media_device *mdev, struct media_links_enum __user *ulinks) { struct media_entity *entity; struct media_links_enum links; if (copy_from_user(&links, ulinks, sizeof(links))) return -EFAULT; entity = find_entity(mdev, links.entity); if (entity == NULL) return -EINVAL; if (links.pads) { unsigned int p; for (p = 0; p < entity->num_pads; p++) { struct media_pad_desc pad = {0}; media_device_kpad_to_upad(&entity->pads[p], &pad); if (copy_to_user(&links.pads[p], &pad, sizeof(pad))) return -EFAULT; } } if (links.links) { struct media_link_desc __user *ulink; unsigned int l; for (l = 0, ulink = links.links; l < entity->num_links; l++) { struct media_link_desc link; /* Ignore backlinks. 
*/ if (entity->links[l].source->entity != entity) continue; media_device_kpad_to_upad(entity->links[l].source, &link.source); media_device_kpad_to_upad(entity->links[l].sink, &link.sink); link.flags = entity->links[l].flags; if (copy_to_user(ulink, &link, sizeof(*ulink))) return -EFAULT; ulink++; } } if (copy_to_user(ulinks, &links, sizeof(*ulinks))) return -EFAULT; return 0; } static long media_device_setup_link(struct media_device *mdev, struct media_link_desc __user *_ulink) { struct media_link *link = NULL; struct media_link_desc ulink; struct media_entity *source; struct media_entity *sink; int ret; if (copy_from_user(&ulink, _ulink, sizeof(ulink))) return -EFAULT; /* Find the source and sink entities and link. */ source = find_entity(mdev, ulink.source.entity); sink = find_entity(mdev, ulink.sink.entity); if (source == NULL || sink == NULL) return -EINVAL; if (ulink.source.index >= source->num_pads || ulink.sink.index >= sink->num_pads) return -EINVAL; link = media_entity_find_link(&source->pads[ulink.source.index], &sink->pads[ulink.sink.index]); if (link == NULL) return -EINVAL; /* Setup the link on both entities. 
*/ ret = __media_entity_setup_link(link, ulink.flags); if (copy_to_user(_ulink, &ulink, sizeof(ulink))) return -EFAULT; return ret; } /* */ int sub_cam_id_for_keep_screen_on = -1; /* */ static long media_device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_devnode *devnode = media_devnode_data(filp); struct media_device *dev = to_media_device(devnode); long ret; /* */ void __user *argp = (void __user *)arg; /* */ switch (cmd) { case MEDIA_IOC_DEVICE_INFO: ret = media_device_get_info(dev, (struct media_device_info __user *)arg); break; case MEDIA_IOC_ENUM_ENTITIES: ret = media_device_enum_entities(dev, (struct media_entity_desc __user *)arg); break; case MEDIA_IOC_ENUM_LINKS: mutex_lock(&dev->graph_mutex); ret = media_device_enum_links(dev, (struct media_links_enum __user *)arg); mutex_unlock(&dev->graph_mutex); break; case MEDIA_IOC_SETUP_LINK: mutex_lock(&dev->graph_mutex); ret = media_device_setup_link(dev, (struct media_link_desc __user *)arg); mutex_unlock(&dev->graph_mutex); break; /* */ case MEDIA_IOC_SUB_CAM_ID: if (copy_from_user(&sub_cam_id_for_keep_screen_on, argp, sizeof(sub_cam_id_for_keep_screen_on))) { ret = -EFAULT; } else { ret = 0; } break; /* */ default: ret = -ENOIOCTLCMD; } return ret; } static const struct media_file_operations media_device_fops = { .owner = THIS_MODULE, .open = media_device_open, .ioctl = media_device_ioctl, .release = media_device_close, }; /* ----------------------------------------------------------------------------- * sysfs */ static ssize_t show_model(struct device *cd, struct device_attribute *attr, char *buf) { struct media_device *mdev = to_media_device(to_media_devnode(cd)); return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model); } static DEVICE_ATTR(model, S_IRUGO, show_model, NULL); /* ----------------------------------------------------------------------------- * Registration/unregistration */ static void media_device_release(struct media_devnode *mdev) { } /** * 
media_device_register - register a media device * @mdev: The media device * * The caller is responsible for initializing the media device before * registration. The following fields must be set: * * - dev must point to the parent device * - model must be filled with the device model name */ int __must_check media_device_register(struct media_device *mdev) { int ret; if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0)) return -EINVAL; mdev->entity_id = 1; INIT_LIST_HEAD(&mdev->entities); spin_lock_init(&mdev->lock); mutex_init(&mdev->graph_mutex); /* Register the device node. */ mdev->devnode.fops = &media_device_fops; mdev->devnode.parent = mdev->dev; mdev->devnode.release = media_device_release; ret = media_devnode_register(&mdev->devnode); if (ret < 0) return ret; ret = device_create_file(&mdev->devnode.dev, &dev_attr_model); if (ret < 0) { media_devnode_unregister(&mdev->devnode); return ret; } return 0; } EXPORT_SYMBOL_GPL(media_device_register); /** * media_device_unregister - unregister a media device * @mdev: The media device * */ void media_device_unregister(struct media_device *mdev) { struct media_entity *entity; struct media_entity *next; list_for_each_entry_safe(entity, next, &mdev->entities, list) media_device_unregister_entity(entity); device_remove_file(&mdev->devnode.dev, &dev_attr_model); media_devnode_unregister(&mdev->devnode); } EXPORT_SYMBOL_GPL(media_device_unregister); /** * media_device_register_entity - Register an entity with a media device * @mdev: The media device * @entity: The entity */ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { /* Warn if we apparently re-register an entity */ WARN_ON(entity->parent != NULL); entity->parent = mdev; spin_lock(&mdev->lock); if (entity->id == 0) entity->id = mdev->entity_id++; else mdev->entity_id = max(entity->id + 1, mdev->entity_id); list_add_tail(&entity->list, &mdev->entities); spin_unlock(&mdev->lock); return 0; } 
EXPORT_SYMBOL_GPL(media_device_register_entity); /** * media_device_unregister_entity - Unregister an entity * @entity: The entity * * If the entity has never been registered this function will return * immediately. */ void media_device_unregister_entity(struct media_entity *entity) { struct media_device *mdev = entity->parent; if (mdev == NULL) return; spin_lock(&mdev->lock); list_del(&entity->list); spin_unlock(&mdev->lock); entity->parent = NULL; } EXPORT_SYMBOL_GPL(media_device_unregister_entity);
gpl-2.0
EZchip/gdb
sim/m32r/semx-switch.c
22
175890
/* Simulator instruction semantics for m32rxf. THIS FILE IS MACHINE GENERATED WITH CGEN. Copyright 1996-2010, 2012 Free Software Foundation, Inc. This file is part of the GNU simulators. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. It is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #ifdef DEFINE_LABELS /* The labels have the case they have because the enum of insn types is all uppercase and in the non-stdc case the insn symbol is built into the enum name. 
*/ static struct { int index; void *label; } labels[] = { { M32RXF_INSN_X_INVALID, && case_sem_INSN_X_INVALID }, { M32RXF_INSN_X_AFTER, && case_sem_INSN_X_AFTER }, { M32RXF_INSN_X_BEFORE, && case_sem_INSN_X_BEFORE }, { M32RXF_INSN_X_CTI_CHAIN, && case_sem_INSN_X_CTI_CHAIN }, { M32RXF_INSN_X_CHAIN, && case_sem_INSN_X_CHAIN }, { M32RXF_INSN_X_BEGIN, && case_sem_INSN_X_BEGIN }, { M32RXF_INSN_ADD, && case_sem_INSN_ADD }, { M32RXF_INSN_ADD3, && case_sem_INSN_ADD3 }, { M32RXF_INSN_AND, && case_sem_INSN_AND }, { M32RXF_INSN_AND3, && case_sem_INSN_AND3 }, { M32RXF_INSN_OR, && case_sem_INSN_OR }, { M32RXF_INSN_OR3, && case_sem_INSN_OR3 }, { M32RXF_INSN_XOR, && case_sem_INSN_XOR }, { M32RXF_INSN_XOR3, && case_sem_INSN_XOR3 }, { M32RXF_INSN_ADDI, && case_sem_INSN_ADDI }, { M32RXF_INSN_ADDV, && case_sem_INSN_ADDV }, { M32RXF_INSN_ADDV3, && case_sem_INSN_ADDV3 }, { M32RXF_INSN_ADDX, && case_sem_INSN_ADDX }, { M32RXF_INSN_BC8, && case_sem_INSN_BC8 }, { M32RXF_INSN_BC24, && case_sem_INSN_BC24 }, { M32RXF_INSN_BEQ, && case_sem_INSN_BEQ }, { M32RXF_INSN_BEQZ, && case_sem_INSN_BEQZ }, { M32RXF_INSN_BGEZ, && case_sem_INSN_BGEZ }, { M32RXF_INSN_BGTZ, && case_sem_INSN_BGTZ }, { M32RXF_INSN_BLEZ, && case_sem_INSN_BLEZ }, { M32RXF_INSN_BLTZ, && case_sem_INSN_BLTZ }, { M32RXF_INSN_BNEZ, && case_sem_INSN_BNEZ }, { M32RXF_INSN_BL8, && case_sem_INSN_BL8 }, { M32RXF_INSN_BL24, && case_sem_INSN_BL24 }, { M32RXF_INSN_BCL8, && case_sem_INSN_BCL8 }, { M32RXF_INSN_BCL24, && case_sem_INSN_BCL24 }, { M32RXF_INSN_BNC8, && case_sem_INSN_BNC8 }, { M32RXF_INSN_BNC24, && case_sem_INSN_BNC24 }, { M32RXF_INSN_BNE, && case_sem_INSN_BNE }, { M32RXF_INSN_BRA8, && case_sem_INSN_BRA8 }, { M32RXF_INSN_BRA24, && case_sem_INSN_BRA24 }, { M32RXF_INSN_BNCL8, && case_sem_INSN_BNCL8 }, { M32RXF_INSN_BNCL24, && case_sem_INSN_BNCL24 }, { M32RXF_INSN_CMP, && case_sem_INSN_CMP }, { M32RXF_INSN_CMPI, && case_sem_INSN_CMPI }, { M32RXF_INSN_CMPU, && case_sem_INSN_CMPU }, { M32RXF_INSN_CMPUI, && case_sem_INSN_CMPUI }, { 
M32RXF_INSN_CMPEQ, && case_sem_INSN_CMPEQ }, { M32RXF_INSN_CMPZ, && case_sem_INSN_CMPZ }, { M32RXF_INSN_DIV, && case_sem_INSN_DIV }, { M32RXF_INSN_DIVU, && case_sem_INSN_DIVU }, { M32RXF_INSN_REM, && case_sem_INSN_REM }, { M32RXF_INSN_REMU, && case_sem_INSN_REMU }, { M32RXF_INSN_DIVH, && case_sem_INSN_DIVH }, { M32RXF_INSN_JC, && case_sem_INSN_JC }, { M32RXF_INSN_JNC, && case_sem_INSN_JNC }, { M32RXF_INSN_JL, && case_sem_INSN_JL }, { M32RXF_INSN_JMP, && case_sem_INSN_JMP }, { M32RXF_INSN_LD, && case_sem_INSN_LD }, { M32RXF_INSN_LD_D, && case_sem_INSN_LD_D }, { M32RXF_INSN_LDB, && case_sem_INSN_LDB }, { M32RXF_INSN_LDB_D, && case_sem_INSN_LDB_D }, { M32RXF_INSN_LDH, && case_sem_INSN_LDH }, { M32RXF_INSN_LDH_D, && case_sem_INSN_LDH_D }, { M32RXF_INSN_LDUB, && case_sem_INSN_LDUB }, { M32RXF_INSN_LDUB_D, && case_sem_INSN_LDUB_D }, { M32RXF_INSN_LDUH, && case_sem_INSN_LDUH }, { M32RXF_INSN_LDUH_D, && case_sem_INSN_LDUH_D }, { M32RXF_INSN_LD_PLUS, && case_sem_INSN_LD_PLUS }, { M32RXF_INSN_LD24, && case_sem_INSN_LD24 }, { M32RXF_INSN_LDI8, && case_sem_INSN_LDI8 }, { M32RXF_INSN_LDI16, && case_sem_INSN_LDI16 }, { M32RXF_INSN_LOCK, && case_sem_INSN_LOCK }, { M32RXF_INSN_MACHI_A, && case_sem_INSN_MACHI_A }, { M32RXF_INSN_MACLO_A, && case_sem_INSN_MACLO_A }, { M32RXF_INSN_MACWHI_A, && case_sem_INSN_MACWHI_A }, { M32RXF_INSN_MACWLO_A, && case_sem_INSN_MACWLO_A }, { M32RXF_INSN_MUL, && case_sem_INSN_MUL }, { M32RXF_INSN_MULHI_A, && case_sem_INSN_MULHI_A }, { M32RXF_INSN_MULLO_A, && case_sem_INSN_MULLO_A }, { M32RXF_INSN_MULWHI_A, && case_sem_INSN_MULWHI_A }, { M32RXF_INSN_MULWLO_A, && case_sem_INSN_MULWLO_A }, { M32RXF_INSN_MV, && case_sem_INSN_MV }, { M32RXF_INSN_MVFACHI_A, && case_sem_INSN_MVFACHI_A }, { M32RXF_INSN_MVFACLO_A, && case_sem_INSN_MVFACLO_A }, { M32RXF_INSN_MVFACMI_A, && case_sem_INSN_MVFACMI_A }, { M32RXF_INSN_MVFC, && case_sem_INSN_MVFC }, { M32RXF_INSN_MVTACHI_A, && case_sem_INSN_MVTACHI_A }, { M32RXF_INSN_MVTACLO_A, && case_sem_INSN_MVTACLO_A }, { 
M32RXF_INSN_MVTC, && case_sem_INSN_MVTC }, { M32RXF_INSN_NEG, && case_sem_INSN_NEG }, { M32RXF_INSN_NOP, && case_sem_INSN_NOP }, { M32RXF_INSN_NOT, && case_sem_INSN_NOT }, { M32RXF_INSN_RAC_DSI, && case_sem_INSN_RAC_DSI }, { M32RXF_INSN_RACH_DSI, && case_sem_INSN_RACH_DSI }, { M32RXF_INSN_RTE, && case_sem_INSN_RTE }, { M32RXF_INSN_SETH, && case_sem_INSN_SETH }, { M32RXF_INSN_SLL, && case_sem_INSN_SLL }, { M32RXF_INSN_SLL3, && case_sem_INSN_SLL3 }, { M32RXF_INSN_SLLI, && case_sem_INSN_SLLI }, { M32RXF_INSN_SRA, && case_sem_INSN_SRA }, { M32RXF_INSN_SRA3, && case_sem_INSN_SRA3 }, { M32RXF_INSN_SRAI, && case_sem_INSN_SRAI }, { M32RXF_INSN_SRL, && case_sem_INSN_SRL }, { M32RXF_INSN_SRL3, && case_sem_INSN_SRL3 }, { M32RXF_INSN_SRLI, && case_sem_INSN_SRLI }, { M32RXF_INSN_ST, && case_sem_INSN_ST }, { M32RXF_INSN_ST_D, && case_sem_INSN_ST_D }, { M32RXF_INSN_STB, && case_sem_INSN_STB }, { M32RXF_INSN_STB_D, && case_sem_INSN_STB_D }, { M32RXF_INSN_STH, && case_sem_INSN_STH }, { M32RXF_INSN_STH_D, && case_sem_INSN_STH_D }, { M32RXF_INSN_ST_PLUS, && case_sem_INSN_ST_PLUS }, { M32RXF_INSN_STH_PLUS, && case_sem_INSN_STH_PLUS }, { M32RXF_INSN_STB_PLUS, && case_sem_INSN_STB_PLUS }, { M32RXF_INSN_ST_MINUS, && case_sem_INSN_ST_MINUS }, { M32RXF_INSN_SUB, && case_sem_INSN_SUB }, { M32RXF_INSN_SUBV, && case_sem_INSN_SUBV }, { M32RXF_INSN_SUBX, && case_sem_INSN_SUBX }, { M32RXF_INSN_TRAP, && case_sem_INSN_TRAP }, { M32RXF_INSN_UNLOCK, && case_sem_INSN_UNLOCK }, { M32RXF_INSN_SATB, && case_sem_INSN_SATB }, { M32RXF_INSN_SATH, && case_sem_INSN_SATH }, { M32RXF_INSN_SAT, && case_sem_INSN_SAT }, { M32RXF_INSN_PCMPBZ, && case_sem_INSN_PCMPBZ }, { M32RXF_INSN_SADD, && case_sem_INSN_SADD }, { M32RXF_INSN_MACWU1, && case_sem_INSN_MACWU1 }, { M32RXF_INSN_MSBLO, && case_sem_INSN_MSBLO }, { M32RXF_INSN_MULWU1, && case_sem_INSN_MULWU1 }, { M32RXF_INSN_MACLH1, && case_sem_INSN_MACLH1 }, { M32RXF_INSN_SC, && case_sem_INSN_SC }, { M32RXF_INSN_SNC, && case_sem_INSN_SNC }, { M32RXF_INSN_CLRPSW, && 
case_sem_INSN_CLRPSW }, { M32RXF_INSN_SETPSW, && case_sem_INSN_SETPSW }, { M32RXF_INSN_BSET, && case_sem_INSN_BSET }, { M32RXF_INSN_BCLR, && case_sem_INSN_BCLR }, { M32RXF_INSN_BTST, && case_sem_INSN_BTST }, { M32RXF_INSN_PAR_ADD, && case_sem_INSN_PAR_ADD }, { M32RXF_INSN_WRITE_ADD, && case_sem_INSN_WRITE_ADD }, { M32RXF_INSN_PAR_AND, && case_sem_INSN_PAR_AND }, { M32RXF_INSN_WRITE_AND, && case_sem_INSN_WRITE_AND }, { M32RXF_INSN_PAR_OR, && case_sem_INSN_PAR_OR }, { M32RXF_INSN_WRITE_OR, && case_sem_INSN_WRITE_OR }, { M32RXF_INSN_PAR_XOR, && case_sem_INSN_PAR_XOR }, { M32RXF_INSN_WRITE_XOR, && case_sem_INSN_WRITE_XOR }, { M32RXF_INSN_PAR_ADDI, && case_sem_INSN_PAR_ADDI }, { M32RXF_INSN_WRITE_ADDI, && case_sem_INSN_WRITE_ADDI }, { M32RXF_INSN_PAR_ADDV, && case_sem_INSN_PAR_ADDV }, { M32RXF_INSN_WRITE_ADDV, && case_sem_INSN_WRITE_ADDV }, { M32RXF_INSN_PAR_ADDX, && case_sem_INSN_PAR_ADDX }, { M32RXF_INSN_WRITE_ADDX, && case_sem_INSN_WRITE_ADDX }, { M32RXF_INSN_PAR_BC8, && case_sem_INSN_PAR_BC8 }, { M32RXF_INSN_WRITE_BC8, && case_sem_INSN_WRITE_BC8 }, { M32RXF_INSN_PAR_BL8, && case_sem_INSN_PAR_BL8 }, { M32RXF_INSN_WRITE_BL8, && case_sem_INSN_WRITE_BL8 }, { M32RXF_INSN_PAR_BCL8, && case_sem_INSN_PAR_BCL8 }, { M32RXF_INSN_WRITE_BCL8, && case_sem_INSN_WRITE_BCL8 }, { M32RXF_INSN_PAR_BNC8, && case_sem_INSN_PAR_BNC8 }, { M32RXF_INSN_WRITE_BNC8, && case_sem_INSN_WRITE_BNC8 }, { M32RXF_INSN_PAR_BRA8, && case_sem_INSN_PAR_BRA8 }, { M32RXF_INSN_WRITE_BRA8, && case_sem_INSN_WRITE_BRA8 }, { M32RXF_INSN_PAR_BNCL8, && case_sem_INSN_PAR_BNCL8 }, { M32RXF_INSN_WRITE_BNCL8, && case_sem_INSN_WRITE_BNCL8 }, { M32RXF_INSN_PAR_CMP, && case_sem_INSN_PAR_CMP }, { M32RXF_INSN_WRITE_CMP, && case_sem_INSN_WRITE_CMP }, { M32RXF_INSN_PAR_CMPU, && case_sem_INSN_PAR_CMPU }, { M32RXF_INSN_WRITE_CMPU, && case_sem_INSN_WRITE_CMPU }, { M32RXF_INSN_PAR_CMPEQ, && case_sem_INSN_PAR_CMPEQ }, { M32RXF_INSN_WRITE_CMPEQ, && case_sem_INSN_WRITE_CMPEQ }, { M32RXF_INSN_PAR_CMPZ, && case_sem_INSN_PAR_CMPZ }, { 
M32RXF_INSN_WRITE_CMPZ, && case_sem_INSN_WRITE_CMPZ }, { M32RXF_INSN_PAR_JC, && case_sem_INSN_PAR_JC }, { M32RXF_INSN_WRITE_JC, && case_sem_INSN_WRITE_JC }, { M32RXF_INSN_PAR_JNC, && case_sem_INSN_PAR_JNC }, { M32RXF_INSN_WRITE_JNC, && case_sem_INSN_WRITE_JNC }, { M32RXF_INSN_PAR_JL, && case_sem_INSN_PAR_JL }, { M32RXF_INSN_WRITE_JL, && case_sem_INSN_WRITE_JL }, { M32RXF_INSN_PAR_JMP, && case_sem_INSN_PAR_JMP }, { M32RXF_INSN_WRITE_JMP, && case_sem_INSN_WRITE_JMP }, { M32RXF_INSN_PAR_LD, && case_sem_INSN_PAR_LD }, { M32RXF_INSN_WRITE_LD, && case_sem_INSN_WRITE_LD }, { M32RXF_INSN_PAR_LDB, && case_sem_INSN_PAR_LDB }, { M32RXF_INSN_WRITE_LDB, && case_sem_INSN_WRITE_LDB }, { M32RXF_INSN_PAR_LDH, && case_sem_INSN_PAR_LDH }, { M32RXF_INSN_WRITE_LDH, && case_sem_INSN_WRITE_LDH }, { M32RXF_INSN_PAR_LDUB, && case_sem_INSN_PAR_LDUB }, { M32RXF_INSN_WRITE_LDUB, && case_sem_INSN_WRITE_LDUB }, { M32RXF_INSN_PAR_LDUH, && case_sem_INSN_PAR_LDUH }, { M32RXF_INSN_WRITE_LDUH, && case_sem_INSN_WRITE_LDUH }, { M32RXF_INSN_PAR_LD_PLUS, && case_sem_INSN_PAR_LD_PLUS }, { M32RXF_INSN_WRITE_LD_PLUS, && case_sem_INSN_WRITE_LD_PLUS }, { M32RXF_INSN_PAR_LDI8, && case_sem_INSN_PAR_LDI8 }, { M32RXF_INSN_WRITE_LDI8, && case_sem_INSN_WRITE_LDI8 }, { M32RXF_INSN_PAR_LOCK, && case_sem_INSN_PAR_LOCK }, { M32RXF_INSN_WRITE_LOCK, && case_sem_INSN_WRITE_LOCK }, { M32RXF_INSN_PAR_MACHI_A, && case_sem_INSN_PAR_MACHI_A }, { M32RXF_INSN_WRITE_MACHI_A, && case_sem_INSN_WRITE_MACHI_A }, { M32RXF_INSN_PAR_MACLO_A, && case_sem_INSN_PAR_MACLO_A }, { M32RXF_INSN_WRITE_MACLO_A, && case_sem_INSN_WRITE_MACLO_A }, { M32RXF_INSN_PAR_MACWHI_A, && case_sem_INSN_PAR_MACWHI_A }, { M32RXF_INSN_WRITE_MACWHI_A, && case_sem_INSN_WRITE_MACWHI_A }, { M32RXF_INSN_PAR_MACWLO_A, && case_sem_INSN_PAR_MACWLO_A }, { M32RXF_INSN_WRITE_MACWLO_A, && case_sem_INSN_WRITE_MACWLO_A }, { M32RXF_INSN_PAR_MUL, && case_sem_INSN_PAR_MUL }, { M32RXF_INSN_WRITE_MUL, && case_sem_INSN_WRITE_MUL }, { M32RXF_INSN_PAR_MULHI_A, && 
case_sem_INSN_PAR_MULHI_A }, { M32RXF_INSN_WRITE_MULHI_A, && case_sem_INSN_WRITE_MULHI_A }, { M32RXF_INSN_PAR_MULLO_A, && case_sem_INSN_PAR_MULLO_A }, { M32RXF_INSN_WRITE_MULLO_A, && case_sem_INSN_WRITE_MULLO_A }, { M32RXF_INSN_PAR_MULWHI_A, && case_sem_INSN_PAR_MULWHI_A }, { M32RXF_INSN_WRITE_MULWHI_A, && case_sem_INSN_WRITE_MULWHI_A }, { M32RXF_INSN_PAR_MULWLO_A, && case_sem_INSN_PAR_MULWLO_A }, { M32RXF_INSN_WRITE_MULWLO_A, && case_sem_INSN_WRITE_MULWLO_A }, { M32RXF_INSN_PAR_MV, && case_sem_INSN_PAR_MV }, { M32RXF_INSN_WRITE_MV, && case_sem_INSN_WRITE_MV }, { M32RXF_INSN_PAR_MVFACHI_A, && case_sem_INSN_PAR_MVFACHI_A }, { M32RXF_INSN_WRITE_MVFACHI_A, && case_sem_INSN_WRITE_MVFACHI_A }, { M32RXF_INSN_PAR_MVFACLO_A, && case_sem_INSN_PAR_MVFACLO_A }, { M32RXF_INSN_WRITE_MVFACLO_A, && case_sem_INSN_WRITE_MVFACLO_A }, { M32RXF_INSN_PAR_MVFACMI_A, && case_sem_INSN_PAR_MVFACMI_A }, { M32RXF_INSN_WRITE_MVFACMI_A, && case_sem_INSN_WRITE_MVFACMI_A }, { M32RXF_INSN_PAR_MVFC, && case_sem_INSN_PAR_MVFC }, { M32RXF_INSN_WRITE_MVFC, && case_sem_INSN_WRITE_MVFC }, { M32RXF_INSN_PAR_MVTACHI_A, && case_sem_INSN_PAR_MVTACHI_A }, { M32RXF_INSN_WRITE_MVTACHI_A, && case_sem_INSN_WRITE_MVTACHI_A }, { M32RXF_INSN_PAR_MVTACLO_A, && case_sem_INSN_PAR_MVTACLO_A }, { M32RXF_INSN_WRITE_MVTACLO_A, && case_sem_INSN_WRITE_MVTACLO_A }, { M32RXF_INSN_PAR_MVTC, && case_sem_INSN_PAR_MVTC }, { M32RXF_INSN_WRITE_MVTC, && case_sem_INSN_WRITE_MVTC }, { M32RXF_INSN_PAR_NEG, && case_sem_INSN_PAR_NEG }, { M32RXF_INSN_WRITE_NEG, && case_sem_INSN_WRITE_NEG }, { M32RXF_INSN_PAR_NOP, && case_sem_INSN_PAR_NOP }, { M32RXF_INSN_WRITE_NOP, && case_sem_INSN_WRITE_NOP }, { M32RXF_INSN_PAR_NOT, && case_sem_INSN_PAR_NOT }, { M32RXF_INSN_WRITE_NOT, && case_sem_INSN_WRITE_NOT }, { M32RXF_INSN_PAR_RAC_DSI, && case_sem_INSN_PAR_RAC_DSI }, { M32RXF_INSN_WRITE_RAC_DSI, && case_sem_INSN_WRITE_RAC_DSI }, { M32RXF_INSN_PAR_RACH_DSI, && case_sem_INSN_PAR_RACH_DSI }, { M32RXF_INSN_WRITE_RACH_DSI, && 
case_sem_INSN_WRITE_RACH_DSI }, { M32RXF_INSN_PAR_RTE, && case_sem_INSN_PAR_RTE }, { M32RXF_INSN_WRITE_RTE, && case_sem_INSN_WRITE_RTE }, { M32RXF_INSN_PAR_SLL, && case_sem_INSN_PAR_SLL }, { M32RXF_INSN_WRITE_SLL, && case_sem_INSN_WRITE_SLL }, { M32RXF_INSN_PAR_SLLI, && case_sem_INSN_PAR_SLLI }, { M32RXF_INSN_WRITE_SLLI, && case_sem_INSN_WRITE_SLLI }, { M32RXF_INSN_PAR_SRA, && case_sem_INSN_PAR_SRA }, { M32RXF_INSN_WRITE_SRA, && case_sem_INSN_WRITE_SRA }, { M32RXF_INSN_PAR_SRAI, && case_sem_INSN_PAR_SRAI }, { M32RXF_INSN_WRITE_SRAI, && case_sem_INSN_WRITE_SRAI }, { M32RXF_INSN_PAR_SRL, && case_sem_INSN_PAR_SRL }, { M32RXF_INSN_WRITE_SRL, && case_sem_INSN_WRITE_SRL }, { M32RXF_INSN_PAR_SRLI, && case_sem_INSN_PAR_SRLI }, { M32RXF_INSN_WRITE_SRLI, && case_sem_INSN_WRITE_SRLI }, { M32RXF_INSN_PAR_ST, && case_sem_INSN_PAR_ST }, { M32RXF_INSN_WRITE_ST, && case_sem_INSN_WRITE_ST }, { M32RXF_INSN_PAR_STB, && case_sem_INSN_PAR_STB }, { M32RXF_INSN_WRITE_STB, && case_sem_INSN_WRITE_STB }, { M32RXF_INSN_PAR_STH, && case_sem_INSN_PAR_STH }, { M32RXF_INSN_WRITE_STH, && case_sem_INSN_WRITE_STH }, { M32RXF_INSN_PAR_ST_PLUS, && case_sem_INSN_PAR_ST_PLUS }, { M32RXF_INSN_WRITE_ST_PLUS, && case_sem_INSN_WRITE_ST_PLUS }, { M32RXF_INSN_PAR_STH_PLUS, && case_sem_INSN_PAR_STH_PLUS }, { M32RXF_INSN_WRITE_STH_PLUS, && case_sem_INSN_WRITE_STH_PLUS }, { M32RXF_INSN_PAR_STB_PLUS, && case_sem_INSN_PAR_STB_PLUS }, { M32RXF_INSN_WRITE_STB_PLUS, && case_sem_INSN_WRITE_STB_PLUS }, { M32RXF_INSN_PAR_ST_MINUS, && case_sem_INSN_PAR_ST_MINUS }, { M32RXF_INSN_WRITE_ST_MINUS, && case_sem_INSN_WRITE_ST_MINUS }, { M32RXF_INSN_PAR_SUB, && case_sem_INSN_PAR_SUB }, { M32RXF_INSN_WRITE_SUB, && case_sem_INSN_WRITE_SUB }, { M32RXF_INSN_PAR_SUBV, && case_sem_INSN_PAR_SUBV }, { M32RXF_INSN_WRITE_SUBV, && case_sem_INSN_WRITE_SUBV }, { M32RXF_INSN_PAR_SUBX, && case_sem_INSN_PAR_SUBX }, { M32RXF_INSN_WRITE_SUBX, && case_sem_INSN_WRITE_SUBX }, { M32RXF_INSN_PAR_TRAP, && case_sem_INSN_PAR_TRAP }, { 
M32RXF_INSN_WRITE_TRAP, && case_sem_INSN_WRITE_TRAP }, { M32RXF_INSN_PAR_UNLOCK, && case_sem_INSN_PAR_UNLOCK }, { M32RXF_INSN_WRITE_UNLOCK, && case_sem_INSN_WRITE_UNLOCK }, { M32RXF_INSN_PAR_PCMPBZ, && case_sem_INSN_PAR_PCMPBZ }, { M32RXF_INSN_WRITE_PCMPBZ, && case_sem_INSN_WRITE_PCMPBZ }, { M32RXF_INSN_PAR_SADD, && case_sem_INSN_PAR_SADD }, { M32RXF_INSN_WRITE_SADD, && case_sem_INSN_WRITE_SADD }, { M32RXF_INSN_PAR_MACWU1, && case_sem_INSN_PAR_MACWU1 }, { M32RXF_INSN_WRITE_MACWU1, && case_sem_INSN_WRITE_MACWU1 }, { M32RXF_INSN_PAR_MSBLO, && case_sem_INSN_PAR_MSBLO }, { M32RXF_INSN_WRITE_MSBLO, && case_sem_INSN_WRITE_MSBLO }, { M32RXF_INSN_PAR_MULWU1, && case_sem_INSN_PAR_MULWU1 }, { M32RXF_INSN_WRITE_MULWU1, && case_sem_INSN_WRITE_MULWU1 }, { M32RXF_INSN_PAR_MACLH1, && case_sem_INSN_PAR_MACLH1 }, { M32RXF_INSN_WRITE_MACLH1, && case_sem_INSN_WRITE_MACLH1 }, { M32RXF_INSN_PAR_SC, && case_sem_INSN_PAR_SC }, { M32RXF_INSN_WRITE_SC, && case_sem_INSN_WRITE_SC }, { M32RXF_INSN_PAR_SNC, && case_sem_INSN_PAR_SNC }, { M32RXF_INSN_WRITE_SNC, && case_sem_INSN_WRITE_SNC }, { M32RXF_INSN_PAR_CLRPSW, && case_sem_INSN_PAR_CLRPSW }, { M32RXF_INSN_WRITE_CLRPSW, && case_sem_INSN_WRITE_CLRPSW }, { M32RXF_INSN_PAR_SETPSW, && case_sem_INSN_PAR_SETPSW }, { M32RXF_INSN_WRITE_SETPSW, && case_sem_INSN_WRITE_SETPSW }, { M32RXF_INSN_PAR_BTST, && case_sem_INSN_PAR_BTST }, { M32RXF_INSN_WRITE_BTST, && case_sem_INSN_WRITE_BTST }, { 0, 0 } }; int i; for (i = 0; labels[i].label != 0; ++i) { #if FAST_P CPU_IDESC (current_cpu) [labels[i].index].sem_fast_lab = labels[i].label; #else CPU_IDESC (current_cpu) [labels[i].index].sem_full_lab = labels[i].label; #endif } #undef DEFINE_LABELS #endif /* DEFINE_LABELS */ #ifdef DEFINE_SWITCH /* If hyper-fast [well not unnecessarily slow] execution is selected, turn off frills like tracing and profiling. */ /* FIXME: A better way would be to have TRACE_RESULT check for something that can cause it to be optimized out. 
Another way would be to emit special handlers into the instruction "stream". */ #if FAST_P #undef TRACE_RESULT #define TRACE_RESULT(cpu, abuf, name, type, val) #endif #undef GET_ATTR #define GET_ATTR(cpu, num, attr) CGEN_ATTR_VALUE (NULL, abuf->idesc->attrs, CGEN_INSN_##attr) { #if WITH_SCACHE_PBB /* Branch to next handler without going around main loop. */ #define NEXT(vpc) goto * SEM_ARGBUF (vpc) -> semantic.sem_case SWITCH (sem, SEM_ARGBUF (vpc) -> semantic.sem_case) #else /* ! WITH_SCACHE_PBB */ #define NEXT(vpc) BREAK (sem) #ifdef __GNUC__ #if FAST_P SWITCH (sem, SEM_ARGBUF (sc) -> idesc->sem_fast_lab) #else SWITCH (sem, SEM_ARGBUF (sc) -> idesc->sem_full_lab) #endif #else SWITCH (sem, SEM_ARGBUF (sc) -> idesc->num) #endif #endif /* ! WITH_SCACHE_PBB */ { CASE (sem, INSN_X_INVALID) : /* --invalid-- */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); { /* Update the recorded pc in the cpu state struct. Only necessary for WITH_SCACHE case, but to avoid the conditional compilation .... */ SET_H_PC (pc); /* Virtual insns have zero size. Overwrite vpc with address of next insn using the default-insn-bitsize spec. When executing insns in parallel we may want to queue the fault and continue execution. 
*/ vpc = SEM_NEXT_VPC (sem_arg, pc, 4); vpc = sim_engine_invalid_insn (current_cpu, pc, vpc); } #undef FLD } NEXT (vpc); CASE (sem, INSN_X_AFTER) : /* --after-- */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); { #if WITH_SCACHE_PBB_M32RXF m32rxf_pbb_after (current_cpu, sem_arg); #endif } #undef FLD } NEXT (vpc); CASE (sem, INSN_X_BEFORE) : /* --before-- */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); { #if WITH_SCACHE_PBB_M32RXF m32rxf_pbb_before (current_cpu, sem_arg); #endif } #undef FLD } NEXT (vpc); CASE (sem, INSN_X_CTI_CHAIN) : /* --cti-chain-- */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); { #if WITH_SCACHE_PBB_M32RXF #ifdef DEFINE_SWITCH vpc = m32rxf_pbb_cti_chain (current_cpu, sem_arg, pbb_br_type, pbb_br_npc); BREAK (sem); #else /* FIXME: Allow provision of explicit ifmt spec in insn spec. 
*/ vpc = m32rxf_pbb_cti_chain (current_cpu, sem_arg, CPU_PBB_BR_TYPE (current_cpu), CPU_PBB_BR_NPC (current_cpu)); #endif #endif } #undef FLD } NEXT (vpc); CASE (sem, INSN_X_CHAIN) : /* --chain-- */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); { #if WITH_SCACHE_PBB_M32RXF vpc = m32rxf_pbb_chain (current_cpu, sem_arg); #ifdef DEFINE_SWITCH BREAK (sem); #endif #endif } #undef FLD } NEXT (vpc); CASE (sem, INSN_X_BEGIN) : /* --begin-- */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); { #if WITH_SCACHE_PBB_M32RXF #if defined DEFINE_SWITCH || defined FAST_P /* In the switch case FAST_P is a constant, allowing several optimizations in any called inline functions. */ vpc = m32rxf_pbb_begin (current_cpu, FAST_P); #else #if 0 /* cgen engine can't handle dynamic fast/full switching yet. 
*/ vpc = m32rxf_pbb_begin (current_cpu, STATE_RUN_FAST_P (CPU_STATE (current_cpu))); #else vpc = m32rxf_pbb_begin (current_cpu, 0); #endif #endif #endif } #undef FLD } NEXT (vpc); CASE (sem, INSN_ADD) : /* add $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ADDSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_ADD3) : /* add3 $dr,$sr,$hash$slo16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = ADDSI (* FLD (i_sr), FLD (f_simm16)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_AND) : /* and $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ANDSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_AND3) : /* and3 $dr,$sr,$uimm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_and3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = ANDSI (* FLD (i_sr), FLD (f_uimm16)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_OR) : /* or $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; 
IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ORSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_OR3) : /* or3 $dr,$sr,$hash$ulo16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_and3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = ORSI (* FLD (i_sr), FLD (f_uimm16)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_XOR) : /* xor $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = XORSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_XOR3) : /* xor3 $dr,$sr,$uimm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_and3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = XORSI (* FLD (i_sr), FLD (f_uimm16)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_ADDI) : /* addi $dr,$simm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_addi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ADDSI (* FLD (i_dr), FLD (f_simm8)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_ADDV) : /* addv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) 
abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = ADDSI (* FLD (i_dr), * FLD (i_sr)); temp1 = ADDOFSI (* FLD (i_dr), * FLD (i_sr), 0); { SI opval = temp0; * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_ADDV3) : /* addv3 $dr,$sr,$simm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI temp0;BI temp1; temp0 = ADDSI (* FLD (i_sr), FLD (f_simm16)); temp1 = ADDOFSI (* FLD (i_sr), FLD (f_simm16), 0); { SI opval = temp0; * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_ADDX) : /* addx $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = ADDCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); temp1 = ADDCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); { SI opval = temp0; * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_BC8) : /* bc.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (CPU (h_cond)) { { USI opval = FLD (i_disp8); 
SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BC24) : /* bc.l $disp24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (CPU (h_cond)) { { USI opval = FLD (i_disp24); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BEQ) : /* beq $src1,$src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (EQSI (* FLD (i_src1), * FLD (i_src2))) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BEQZ) : /* beqz $src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (EQSI (* FLD (i_src2), 0)) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BGEZ) : /* bgez $src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) 
abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (GESI (* FLD (i_src2), 0)) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BGTZ) : /* bgtz $src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (GTSI (* FLD (i_src2), 0)) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BLEZ) : /* blez $src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (LESI (* FLD (i_src2), 0)) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BLTZ) : /* bltz $src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (LTSI (* FLD (i_src2), 0)) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI 
(vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BNEZ) : /* bnez $src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_src2), 0)) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BL8) : /* bl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { SI opval = ADDSI (ANDSI (pc, -4), 4); CPU (h_gr[((UINT) 14)]) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp8); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BL24) : /* bl.l $disp24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { { SI opval = ADDSI (pc, 4); CPU (h_gr[((UINT) 14)]) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp24); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BCL8) : /* bcl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC 
(sem_arg, pc, 2); if (CPU (h_cond)) { { { SI opval = ADDSI (ANDSI (pc, -4), 4); CPU (h_gr[((UINT) 14)]) = opval; written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp8); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BCL24) : /* bcl.l $disp24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (CPU (h_cond)) { { { SI opval = ADDSI (pc, 4); CPU (h_gr[((UINT) 14)]) = opval; written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp24); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BNC8) : /* bnc.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (NOTBI (CPU (h_cond))) { { USI opval = FLD (i_disp8); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BNC24) : /* bnc.l $disp24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NOTBI (CPU (h_cond))) { { USI opval = FLD (i_disp24); SEM_BRANCH_VIA_CACHE (current_cpu, 
sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BNE) : /* bne $src1,$src2,$disp16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_beq.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_src1), * FLD (i_src2))) { { USI opval = FLD (i_disp16); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BRA8) : /* bra.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = FLD (i_disp8); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BRA24) : /* bra.l $disp24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { USI opval = FLD (i_disp24); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BNCL8) : /* bncl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (NOTBI (CPU (h_cond))) { { { SI opval = ADDSI (ANDSI (pc, -4), 4); CPU 
(h_gr[((UINT) 14)]) = opval; written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp8); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_BNCL24) : /* bncl.l $disp24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NOTBI (CPU (h_cond))) { { { SI opval = ADDSI (pc, 4); CPU (h_gr[((UINT) 14)]) = opval; written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp24); SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc); written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_CMP) : /* cmp $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = LTSI (* FLD (i_src1), * FLD (i_src2)); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_CMPI) : /* cmpi $src2,$simm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_d.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { BI opval = LTSI (* FLD (i_src2), FLD (f_simm16)); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_CMPU) : /* cmpu $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) 
abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = LTUSI (* FLD (i_src1), * FLD (i_src2)); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_CMPUI) : /* cmpui $src2,$simm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_d.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { BI opval = LTUSI (* FLD (i_src2), FLD (f_simm16)); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_CMPEQ) : /* cmpeq $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = EQSI (* FLD (i_src1), * FLD (i_src2)); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_CMPZ) : /* cmpz $src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = EQSI (* FLD (i_src2), 0); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_DIV) : /* div $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_sr), 0)) { { SI opval = DIVSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } abuf->written = written; #undef FLD } NEXT (vpc); CASE (sem, 
INSN_DIVU) : /* divu $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_sr), 0)) { { SI opval = UDIVSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } abuf->written = written; #undef FLD } NEXT (vpc); CASE (sem, INSN_REM) : /* rem $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_sr), 0)) { { SI opval = MODSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } abuf->written = written; #undef FLD } NEXT (vpc); CASE (sem, INSN_REMU) : /* remu $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_sr), 0)) { { SI opval = UMODSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } abuf->written = written; #undef FLD } NEXT (vpc); CASE (sem, INSN_DIVH) : /* divh $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); if (NESI (* FLD (i_sr), 0)) { { SI opval = DIVSI (EXTHISI (TRUNCSIHI (* FLD (i_dr))), * FLD (i_sr)); * FLD (i_dr) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } abuf->written = written; #undef FLD } NEXT (vpc); CASE (sem, INSN_JC) : /* jc $sr */ { SEM_ARG sem_arg = 
SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (CPU (h_cond)) { { USI opval = ANDSI (* FLD (i_sr), -4); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_JNC) : /* jnc $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (NOTBI (CPU (h_cond))) { { USI opval = ANDSI (* FLD (i_sr), -4); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc); written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_JL) : /* jl $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;USI temp1; temp0 = ADDSI (ANDSI (pc, -4), 4); temp1 = ANDSI (* FLD (i_sr), -4); { SI opval = temp0; CPU (h_gr[((UINT) 14)]) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = temp1; SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_JMP) : /* jmp $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = ANDSI (* FLD (i_sr), -4); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, 
vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_LD) : /* ld $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LD_D) : /* ld $dr,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = GETMEMSI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDB) : /* ldb $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = EXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDB_D) : /* ldb $dr,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = EXTQISI (GETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDH) : /* ldh $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) 
abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = EXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDH_D) : /* ldh $dr,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = EXTHISI (GETMEMHI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDUB) : /* ldub $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ZEXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDUB_D) : /* ldub $dr,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = ZEXTQISI (GETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDUH) : /* lduh $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ZEXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, 
"gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDUH_D) : /* lduh $dr,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = ZEXTHISI (GETMEMHI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LD_PLUS) : /* ld $dr,@$sr+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;SI temp1; temp0 = GETMEMSI (current_cpu, pc, * FLD (i_sr)); temp1 = ADDSI (* FLD (i_sr), 4); { SI opval = temp0; * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { SI opval = temp1; * FLD (i_sr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_LD24) : /* ld24 $dr,$uimm24 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld24.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = FLD (i_uimm24); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDI8) : /* ldi8 $dr,$simm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_addi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = FLD (f_simm8); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LDI16) : /* ldi16 $dr,$hash$slo16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); 
#define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = FLD (f_simm16); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_LOCK) : /* lock $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { BI opval = 1; CPU (h_lock) = opval; TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval); } { SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_MACHI_A) : /* machi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))))), 8), 8); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MACLO_A) : /* maclo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))))), 8), 8); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MACWHI_A) : /* macwhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = 
SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))))); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MACWLO_A) : /* macwlo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))))); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MUL) : /* mul $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = MULSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MULHI_A) : /* mulhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))), 16), 16); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MULLO_A) : /* mullo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) 
abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 16), 16); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MULWHI_A) : /* mulwhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MULWLO_A) : /* mulwlo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))); SET_H_ACCUMS (FLD (f_acc), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MV) : /* mv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = * FLD (i_sr); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVFACHI_A) : /* mvfachi $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvfachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = TRUNCDISI (SRADI 
(GET_H_ACCUMS (FLD (f_accs)), 32)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVFACLO_A) : /* mvfaclo $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvfachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = TRUNCDISI (GET_H_ACCUMS (FLD (f_accs))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVFACMI_A) : /* mvfacmi $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvfachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 16)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVFC) : /* mvfc $dr,$scr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = GET_H_CR (FLD (f_r2)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVTACHI_A) : /* mvtachi $src1,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvtachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0, 0xffffffff)), SLLDI (EXTSIDI (* FLD (i_src1)), 32)); SET_H_ACCUMS (FLD (f_accs), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVTACLO_A) : /* mvtaclo $src1,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); 
ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvtachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0xffffffff, 0)), ZEXTSIDI (* FLD (i_src1))); SET_H_ACCUMS (FLD (f_accs), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MVTC) : /* mvtc $sr,$dcr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = * FLD (i_sr); SET_H_CR (FLD (f_r1), opval); TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_NEG) : /* neg $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = NEGSI (* FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_NOP) : /* nop */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); PROFILE_COUNT_FILLNOPS (current_cpu, abuf->addr); #undef FLD } NEXT (vpc); CASE (sem, INSN_NOT) : /* not $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = INVSI (* FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_RAC_DSI) : /* rac $accd,$accs,$imm1 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); 
ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_rac_dsi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI tmp_tmp1; tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1)); tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 32768)); { DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0xffff0000))) ? (MAKEDI (32767, 0xffff0000)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? (MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0xffff0000))); SET_H_ACCUMS (FLD (f_accd), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_RACH_DSI) : /* rach $accd,$accs,$imm1 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_rac_dsi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI tmp_tmp1; tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1)); tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 0x80000000)); { DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0))) ? (MAKEDI (32767, 0)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? 
(MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0))); SET_H_ACCUMS (FLD (f_accd), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_RTE) : /* rte */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { USI opval = ANDSI (GET_H_CR (((UINT) 6)), -4); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } { USI opval = GET_H_CR (((UINT) 14)); SET_H_CR (((UINT) 6), opval); TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } { UQI opval = CPU (h_bpsw); SET_H_PSW (opval); TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval); } { UQI opval = CPU (h_bbpsw); CPU (h_bpsw) = opval; TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval); } } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_SETH) : /* seth $dr,$hash$hi16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_seth.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = SLLSI (FLD (f_hi16), 16); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SLL) : /* sll $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SLLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SLL3) : /* sll3 $dr,$sr,$simm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written 
= 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = SLLSI (* FLD (i_sr), ANDSI (FLD (f_simm16), 31)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SLLI) : /* slli $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_slli.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SLLSI (* FLD (i_dr), FLD (f_uimm5)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SRA) : /* sra $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRASI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SRA3) : /* sra3 $dr,$sr,$simm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = SRASI (* FLD (i_sr), ANDSI (FLD (f_simm16), 31)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SRAI) : /* srai $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_slli.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRASI (* FLD (i_dr), FLD (f_uimm5)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SRL) : /* srl $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = 
SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SRL3) : /* srl3 $dr,$sr,$simm16 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add3.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = SRLSI (* FLD (i_sr), ANDSI (FLD (f_simm16), 31)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SRLI) : /* srli $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_slli.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRLSI (* FLD (i_dr), FLD (f_uimm5)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_ST) : /* st $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = * FLD (i_src1); SETMEMSI (current_cpu, pc, * FLD (i_src2), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_ST_D) : /* st $src1,@($slo16,$src2) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_d.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = * FLD (i_src1); SETMEMSI (current_cpu, pc, ADDSI (* FLD (i_src2), FLD (f_simm16)), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } 
#undef FLD } NEXT (vpc); CASE (sem, INSN_STB) : /* stb $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { QI opval = * FLD (i_src1); SETMEMQI (current_cpu, pc, * FLD (i_src2), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_STB_D) : /* stb $src1,@($slo16,$src2) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_d.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { QI opval = * FLD (i_src1); SETMEMQI (current_cpu, pc, ADDSI (* FLD (i_src2), FLD (f_simm16)), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_STH) : /* sth $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { HI opval = * FLD (i_src1); SETMEMHI (current_cpu, pc, * FLD (i_src2), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_STH_D) : /* sth $src1,@($slo16,$src2) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_d.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { HI opval = * FLD (i_src1); SETMEMHI (current_cpu, pc, ADDSI (* FLD (i_src2), FLD (f_simm16)), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_ST_PLUS) : /* st $src1,@+$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = 
abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = ADDSI (* FLD (i_src2), 4); { SI opval = * FLD (i_src1); SETMEMSI (current_cpu, pc, tmp_new_src2, opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = tmp_new_src2; * FLD (i_src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_STH_PLUS) : /* sth $src1,@$src2+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = * FLD (i_src2); { HI opval = * FLD (i_src1); SETMEMHI (current_cpu, pc, tmp_new_src2, opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = ADDSI (tmp_new_src2, 2); * FLD (i_src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_STB_PLUS) : /* stb $src1,@$src2+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = * FLD (i_src2); { QI opval = * FLD (i_src1); SETMEMQI (current_cpu, pc, tmp_new_src2, opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = ADDSI (tmp_new_src2, 1); * FLD (i_src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_ST_MINUS) : /* st $src1,@-$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = SUBSI (* FLD (i_src2), 4); { SI opval = * FLD (i_src1); SETMEMSI (current_cpu, pc, tmp_new_src2, opval); TRACE_RESULT (current_cpu, 
abuf, "memory", 'x', opval); } { SI opval = tmp_new_src2; * FLD (i_src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_SUB) : /* sub $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SUBSI (* FLD (i_dr), * FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SUBV) : /* subv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = SUBSI (* FLD (i_dr), * FLD (i_sr)); temp1 = SUBOFSI (* FLD (i_dr), * FLD (i_sr), 0); { SI opval = temp0; * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_SUBX) : /* subx $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = SUBCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); temp1 = SUBCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); { SI opval = temp0; * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef FLD } NEXT (vpc); CASE (sem, INSN_TRAP) : /* trap $uimm4 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_trap.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT 
vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { USI opval = GET_H_CR (((UINT) 6)); SET_H_CR (((UINT) 14), opval); TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } { USI opval = ADDSI (pc, 4); SET_H_CR (((UINT) 6), opval); TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } { UQI opval = CPU (h_bpsw); CPU (h_bbpsw) = opval; TRACE_RESULT (current_cpu, abuf, "bbpsw", 'x', opval); } { UQI opval = GET_H_PSW (); CPU (h_bpsw) = opval; TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval); } { UQI opval = ANDQI (GET_H_PSW (), 128); SET_H_PSW (opval); TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval); } { SI opval = m32r_trap (current_cpu, pc, FLD (f_uimm4)); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } SEM_BRANCH_FINI (vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_UNLOCK) : /* unlock $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { if (CPU (h_lock)) { { SI opval = * FLD (i_src1); SETMEMSI (current_cpu, pc, * FLD (i_src2), opval); written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } } { BI opval = 0; CPU (h_lock) = opval; TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval); } } abuf->written = written; #undef FLD } NEXT (vpc); CASE (sem, INSN_SATB) : /* satb $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = (GESI (* FLD (i_sr), 127)) ? (127) : (LESI (* FLD (i_sr), -128)) ? 
(-128) : (* FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SATH) : /* sath $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = (GESI (* FLD (i_sr), 32767)) ? (32767) : (LESI (* FLD (i_sr), -32768)) ? (-32768) : (* FLD (i_sr)); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SAT) : /* sat $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { SI opval = ((CPU (h_cond)) ? (((LTSI (* FLD (i_sr), 0)) ? (2147483647) : (0x80000000))) : (* FLD (i_sr))); * FLD (i_dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_PCMPBZ) : /* pcmpbz $src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = (EQSI (ANDSI (* FLD (i_src2), 255), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 65280), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 16711680), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 0xff000000), 0)) ? 
(1) : (0); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SADD) : /* sadd */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ADDDI (SRADI (GET_H_ACCUMS (((UINT) 1)), 16), GET_H_ACCUMS (((UINT) 0))); SET_H_ACCUMS (((UINT) 0), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MACWU1) : /* macwu1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535)))), 8), 8); SET_H_ACCUMS (((UINT) 1), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MSBLO) : /* msblo $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (SUBDI (GET_H_ACCUM (), SRADI (SLLDI (MULDI (EXTHIDI (TRUNCSIHI (* FLD (i_src1))), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 32), 16)), 8), 8); SET_H_ACCUM (opval); TRACE_RESULT (current_cpu, abuf, "accum", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MULWU1) : /* mulwu1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535))), 16), 
16); SET_H_ACCUMS (((UINT) 1), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_MACLH1) : /* maclh1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), SLLDI (EXTSIDI (MULSI (EXTHISI (TRUNCSIHI (* FLD (i_src1))), SRASI (* FLD (i_src2), 16))), 16)), 8), 8); SET_H_ACCUMS (((UINT) 1), opval); TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SC) : /* sc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (ZEXTBISI (CPU (h_cond))) SEM_SKIP_INSN (current_cpu, sem_arg, vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_SNC) : /* snc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (ZEXTBISI (NOTBI (CPU (h_cond)))) SEM_SKIP_INSN (current_cpu, sem_arg, vpc); #undef FLD } NEXT (vpc); CASE (sem, INSN_CLRPSW) : /* clrpsw $uimm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_clrpsw.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = ANDSI (GET_H_CR (((UINT) 0)), ORSI (ZEXTQISI (INVQI (FLD (f_uimm8))), 65280)); SET_H_CR (((UINT) 0), opval); TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_SETPSW) : /* setpsw $uimm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_clrpsw.f int UNUSED 
written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = FLD (f_uimm8); SET_H_CR (((UINT) 0), opval); TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_BSET) : /* bset $uimm3,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bset.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { QI opval = ORQI (GETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))), SLLQI (1, SUBSI (7, FLD (f_uimm3)))); SETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_BCLR) : /* bclr $uimm3,@($slo16,$sr) */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bset.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 4); { QI opval = ANDQI (GETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))), INVQI (SLLQI (1, SUBSI (7, FLD (f_uimm3))))); SETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)), opval); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_BTST) : /* btst $uimm3,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bset.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = ANDQI (SRLQI (* FLD (i_sr), SUBSI (7, FLD (f_uimm3))), 1); CPU (h_cond) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_ADD) : /* add $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED 
pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ADDSI (* FLD (i_dr), * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ADD) : /* add $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_AND) : /* and $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ANDSI (* FLD (i_dr), * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_AND) : /* and $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_OR) : /* or $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ORSI (* FLD (i_dr), * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_OR) : /* or $dr,$sr */ { 
SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_XOR) : /* xor $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = XORSI (* FLD (i_dr), * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_XOR) : /* xor $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_ADDI) : /* addi $dr,$simm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_addi.f #define OPRND(f) par_exec->operands.sfmt_addi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ADDSI (* FLD (i_dr), FLD (f_simm8)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ADDI) : /* addi $dr,$simm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_addi.f #define OPRND(f) par_exec->operands.sfmt_addi.f int UNUSED written = abuf->written; IADDR UNUSED pc = 
abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_ADDV) : /* addv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addv.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = ADDSI (* FLD (i_dr), * FLD (i_sr)); temp1 = ADDOFSI (* FLD (i_dr), * FLD (i_sr), 0); { SI opval = temp0; OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ADDV) : /* addv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addv.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_ADDX) : /* addx $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addx.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = ADDCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); temp1 = ADDCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); { SI opval = temp0; OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ADDX) : /* addx $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf 
= SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addx.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BC8) : /* bc.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bc8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (CPU (h_cond)) { { USI opval = FLD (i_disp8); OPRND (pc) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BC8) : /* bc.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bc8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); if (written & (1 << 2)) { SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); } SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BL8) : /* bl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { SI opval = ADDSI (ANDSI (pc, -4), 4); OPRND (h_gr_SI_14) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp8); OPRND (pc) = opval; TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BL8) : /* bl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG 
(vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bl8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_SI_14); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BCL8) : /* bcl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bcl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (CPU (h_cond)) { { { SI opval = ADDSI (ANDSI (pc, -4), 4); OPRND (h_gr_SI_14) = opval; written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp8); OPRND (pc) = opval; written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } } abuf->written = written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BCL8) : /* bcl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bcl8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); if (written & (1 << 3)) { CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_SI_14); } if (written & (1 << 4)) { SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); } SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BNC8) : /* bnc.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bc8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if 
(NOTBI (CPU (h_cond))) { { USI opval = FLD (i_disp8); OPRND (pc) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BNC8) : /* bnc.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bc8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); if (written & (1 << 2)) { SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); } SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BRA8) : /* bra.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bra8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = FLD (i_disp8); OPRND (pc) = opval; TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BRA8) : /* bra.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bra8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BNCL8) : /* bncl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bcl8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (NOTBI (CPU (h_cond))) { { { SI opval 
= ADDSI (ANDSI (pc, -4), 4); OPRND (h_gr_SI_14) = opval; written |= (1 << 3); TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = FLD (i_disp8); OPRND (pc) = opval; written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } } abuf->written = written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BNCL8) : /* bncl.s $disp8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bl8.f #define OPRND(f) par_exec->operands.sfmt_bcl8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); if (written & (1 << 3)) { CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_SI_14); } if (written & (1 << 4)) { SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); } SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_CMP) : /* cmp $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmp.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = LTSI (* FLD (i_src1), * FLD (i_src2)); OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_CMP) : /* cmp $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmp.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_CMPU) : /* cmpu $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) 
par_exec->operands.sfmt_cmp.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = LTUSI (* FLD (i_src1), * FLD (i_src2)); OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_CMPU) : /* cmpu $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmp.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_CMPEQ) : /* cmpeq $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmp.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = EQSI (* FLD (i_src1), * FLD (i_src2)); OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_CMPEQ) : /* cmpeq $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmp.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_CMPZ) : /* cmpz $src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmpz.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = EQSI (* FLD (i_src2), 0); OPRND (condbit) = opval; 
TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_CMPZ) : /* cmpz $src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmpz.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_JC) : /* jc $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jc.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (CPU (h_cond)) { { USI opval = ANDSI (* FLD (i_sr), -4); OPRND (pc) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_JC) : /* jc $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jc.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); if (written & (1 << 2)) { SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); } SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_JNC) : /* jnc $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jc.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (NOTBI (CPU (h_cond))) { { USI opval = ANDSI (* FLD (i_sr), -4); OPRND (pc) = opval; written |= (1 << 2); TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } abuf->written = 
written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_JNC) : /* jnc $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jc.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); if (written & (1 << 2)) { SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); } SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_JL) : /* jl $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jl.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;USI temp1; temp0 = ADDSI (ANDSI (pc, -4), 4); temp1 = ANDSI (* FLD (i_sr), -4); { SI opval = temp0; OPRND (h_gr_SI_14) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { USI opval = temp1; OPRND (pc) = opval; TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_JL) : /* jl $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jl.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_SI_14); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_JMP) : /* jmp $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jmp.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = 
ANDSI (* FLD (i_sr), -4); OPRND (pc) = opval; TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_JMP) : /* jmp $sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_jl.f #define OPRND(f) par_exec->operands.sfmt_jmp.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LD) : /* ld $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ld.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LD) : /* ld $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ld.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LDB) : /* ldb $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldb.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = EXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr))); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, 
INSN_WRITE_LDB) : /* ldb $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldb.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LDH) : /* ldh $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldh.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = EXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr))); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LDH) : /* ldh $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldh.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LDUB) : /* ldub $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldb.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ZEXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr))); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LDUB) : /* ldub $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) 
par_exec->operands.sfmt_ldb.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LDUH) : /* lduh $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldh.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = ZEXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr))); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LDUH) : /* lduh $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ldh.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LD_PLUS) : /* ld $dr,@$sr+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ld_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;SI temp1; temp0 = GETMEMSI (current_cpu, pc, * FLD (i_sr)); temp1 = ADDSI (* FLD (i_sr), 4); { SI opval = temp0; OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { SI opval = temp1; OPRND (sr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LD_PLUS) : /* ld $dr,@$sr+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_ld_plus.f int 
UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); * FLD (i_sr) = OPRND (sr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LDI8) : /* ldi8 $dr,$simm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_addi.f #define OPRND(f) par_exec->operands.sfmt_ldi8.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = FLD (f_simm8); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LDI8) : /* ldi8 $dr,$simm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_addi.f #define OPRND(f) par_exec->operands.sfmt_ldi8.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_LOCK) : /* lock $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_lock.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { BI opval = 1; OPRND (h_lock_BI) = opval; TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval); } { SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_LOCK) : /* lock $dr,@$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_lock.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = 
OPRND (dr); CPU (h_lock) = OPRND (h_lock_BI); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MACHI_A) : /* machi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))))), 8), 8); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MACHI_A) : /* machi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MACLO_A) : /* maclo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))))), 8), 8); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MACLO_A) : /* maclo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) 
par_exec->operands.sfmt_machi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MACWHI_A) : /* macwhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))))); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MACWHI_A) : /* macwhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MACWLO_A) : /* macwlo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))))); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MACWLO_A) : /* macwlo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) 
abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_machi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MUL) : /* mul $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = MULSI (* FLD (i_dr), * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MUL) : /* mul $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MULHI_A) : /* mulhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))), 16), 16); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MULHI_A) : /* mulhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = 
abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MULLO_A) : /* mullo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 16), 16); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MULLO_A) : /* mullo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MULWHI_A) : /* mulwhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MULWHI_A) : /* mulwhi $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = 
abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MULWLO_A) : /* mulwlo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))); OPRND (acc) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MULWLO_A) : /* mulwlo $src1,$src2,$acc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_machi_a.f #define OPRND(f) par_exec->operands.sfmt_mulhi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_acc), OPRND (acc)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MV) : /* mv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mv.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = * FLD (i_sr); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MV) : /* mv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mv.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, 
INSN_PAR_MVFACHI_A) : /* mvfachi $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvfachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 32)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVFACHI_A) : /* mvfachi $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_mvfachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MVFACLO_A) : /* mvfaclo $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvfachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = TRUNCDISI (GET_H_ACCUMS (FLD (f_accs))); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVFACLO_A) : /* mvfaclo $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_mvfachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MVFACMI_A) : /* mvfacmi $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) 
abuf->fields.sfmt_mvfachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 16)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVFACMI_A) : /* mvfacmi $dr,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_mvfachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MVFC) : /* mvfc $dr,$scr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mvfc.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = GET_H_CR (FLD (f_r2)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVFC) : /* mvfc $dr,$scr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mvfc.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MVTACHI_A) : /* mvtachi $src1,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvtachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ORDI 
(ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0, 0xffffffff)), SLLDI (EXTSIDI (* FLD (i_src1)), 32)); OPRND (accs) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVTACHI_A) : /* mvtachi $src1,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_mvtachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_accs), OPRND (accs)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MVTACLO_A) : /* mvtaclo $src1,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_mvtachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0xffffffff, 0)), ZEXTSIDI (* FLD (i_src1))); OPRND (accs) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVTACLO_A) : /* mvtaclo $src1,$accs */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_mvtachi_a.f #define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_accs), OPRND (accs)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MVTC) : /* mvtc $sr,$dcr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mvtc.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = * FLD 
(i_sr); OPRND (dcr) = opval; TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MVTC) : /* mvtc $sr,$dcr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mvtc.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_CR (FLD (f_r1), OPRND (dcr)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_NEG) : /* neg $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mv.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = NEGSI (* FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_NEG) : /* neg $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mv.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_NOP) : /* nop */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_nop.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); PROFILE_COUNT_FILLNOPS (current_cpu, abuf->addr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_NOP) : /* nop */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) 
par_exec->operands.sfmt_nop.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_NOT) : /* not $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mv.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = INVSI (* FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_NOT) : /* not $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_ld_plus.f #define OPRND(f) par_exec->operands.sfmt_mv.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_RAC_DSI) : /* rac $accd,$accs,$imm1 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_rac_dsi.f #define OPRND(f) par_exec->operands.sfmt_rac_dsi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI tmp_tmp1; tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1)); tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 32768)); { DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0xffff0000))) ? (MAKEDI (32767, 0xffff0000)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? 
(MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0xffff0000))); OPRND (accd) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_RAC_DSI) : /* rac $accd,$accs,$imm1 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_rac_dsi.f #define OPRND(f) par_exec->operands.sfmt_rac_dsi.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_accd), OPRND (accd)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_RACH_DSI) : /* rach $accd,$accs,$imm1 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_rac_dsi.f #define OPRND(f) par_exec->operands.sfmt_rac_dsi.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI tmp_tmp1; tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1)); tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 0x80000000)); { DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0))) ? (MAKEDI (32767, 0)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? 
(MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0))); OPRND (accd) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_RACH_DSI) : /* rach $accd,$accs,$imm1 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_rac_dsi.f #define OPRND(f) par_exec->operands.sfmt_rac_dsi.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (FLD (f_accd), OPRND (accd)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_RTE) : /* rte */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_rte.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { USI opval = ANDSI (GET_H_CR (((UINT) 6)), -4); OPRND (pc) = opval; TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } { USI opval = GET_H_CR (((UINT) 14)); OPRND (h_cr_USI_6) = opval; TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } { UQI opval = CPU (h_bpsw); OPRND (h_psw_UQI) = opval; TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval); } { UQI opval = CPU (h_bbpsw); OPRND (h_bpsw_UQI) = opval; TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_RTE) : /* rte */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_rte.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_bpsw) = OPRND (h_bpsw_UQI); SET_H_CR (((UINT) 6), OPRND (h_cr_USI_6)); SET_H_PSW (OPRND (h_psw_UQI)); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); SEM_BRANCH_FINI (vpc); #undef OPRND 
#undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SLL) : /* sll $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SLLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SLL) : /* sll $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SLLI) : /* slli $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_slli.f #define OPRND(f) par_exec->operands.sfmt_slli.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SLLSI (* FLD (i_dr), FLD (f_uimm5)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SLLI) : /* slli $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_slli.f #define OPRND(f) par_exec->operands.sfmt_slli.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SRA) : /* sra $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) 
par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRASI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SRA) : /* sra $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SRAI) : /* srai $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_slli.f #define OPRND(f) par_exec->operands.sfmt_slli.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRASI (* FLD (i_dr), FLD (f_uimm5)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SRAI) : /* srai $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_slli.f #define OPRND(f) par_exec->operands.sfmt_slli.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SRL) : /* srl $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, 
"gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SRL) : /* srl $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SRLI) : /* srli $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_slli.f #define OPRND(f) par_exec->operands.sfmt_slli.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SRLSI (* FLD (i_dr), FLD (f_uimm5)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SRLI) : /* srli $dr,$uimm5 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_slli.f #define OPRND(f) par_exec->operands.sfmt_slli.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_ST) : /* st $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_st.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = * FLD (i_src1); OPRND (h_memory_SI_src2_idx) = * FLD (i_src2); OPRND (h_memory_SI_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ST) : /* st $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF 
(sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_st.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMSI (current_cpu, pc, OPRND (h_memory_SI_src2_idx), OPRND (h_memory_SI_src2)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_STB) : /* stb $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_stb.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { QI opval = * FLD (i_src1); OPRND (h_memory_QI_src2_idx) = * FLD (i_src2); OPRND (h_memory_QI_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_STB) : /* stb $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_stb.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMQI (current_cpu, pc, OPRND (h_memory_QI_src2_idx), OPRND (h_memory_QI_src2)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_STH) : /* sth $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_sth.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { HI opval = * FLD (i_src1); OPRND (h_memory_HI_src2_idx) = * FLD (i_src2); OPRND (h_memory_HI_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_STH) : /* sth $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF 
(sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_sth.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMHI (current_cpu, pc, OPRND (h_memory_HI_src2_idx), OPRND (h_memory_HI_src2)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_ST_PLUS) : /* st $src1,@+$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = ADDSI (* FLD (i_src2), 4); { SI opval = * FLD (i_src1); OPRND (h_memory_SI_new_src2_idx) = tmp_new_src2; OPRND (h_memory_SI_new_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = tmp_new_src2; OPRND (src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ST_PLUS) : /* st $src1,@+$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_st_plus.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMSI (current_cpu, pc, OPRND (h_memory_SI_new_src2_idx), OPRND (h_memory_SI_new_src2)); * FLD (i_src2) = OPRND (src2); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_STH_PLUS) : /* sth $src1,@$src2+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_sth_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = * FLD (i_src2); { HI opval = * FLD (i_src1); OPRND (h_memory_HI_new_src2_idx) = 
tmp_new_src2; OPRND (h_memory_HI_new_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = ADDSI (tmp_new_src2, 2); OPRND (src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_STH_PLUS) : /* sth $src1,@$src2+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_sth_plus.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMHI (current_cpu, pc, OPRND (h_memory_HI_new_src2_idx), OPRND (h_memory_HI_new_src2)); * FLD (i_src2) = OPRND (src2); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_STB_PLUS) : /* stb $src1,@$src2+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_stb_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = * FLD (i_src2); { QI opval = * FLD (i_src1); OPRND (h_memory_QI_new_src2_idx) = tmp_new_src2; OPRND (h_memory_QI_new_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = ADDSI (tmp_new_src2, 1); OPRND (src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_STB_PLUS) : /* stb $src1,@$src2+ */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_stb_plus.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMQI (current_cpu, pc, OPRND (h_memory_QI_new_src2_idx), OPRND (h_memory_QI_new_src2)); * FLD (i_src2) = OPRND (src2); #undef OPRND #undef FLD } 
NEXT (vpc); CASE (sem, INSN_PAR_ST_MINUS) : /* st $src1,@-$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_st_plus.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI tmp_new_src2; tmp_new_src2 = SUBSI (* FLD (i_src2), 4); { SI opval = * FLD (i_src1); OPRND (h_memory_SI_new_src2_idx) = tmp_new_src2; OPRND (h_memory_SI_new_src2) = opval; TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } { SI opval = tmp_new_src2; OPRND (src2) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_ST_MINUS) : /* st $src1,@-$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_st_plus.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SETMEMSI (current_cpu, pc, OPRND (h_memory_SI_new_src2_idx), OPRND (h_memory_SI_new_src2)); * FLD (i_src2) = OPRND (src2); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SUB) : /* sub $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI opval = SUBSI (* FLD (i_dr), * FLD (i_sr)); OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SUB) : /* sub $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_add.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; 
vpc = SEM_NEXT_VPC (sem_arg, pc, 0); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SUBV) : /* subv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addv.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = SUBSI (* FLD (i_dr), * FLD (i_sr)); temp1 = SUBOFSI (* FLD (i_dr), * FLD (i_sr), 0); { SI opval = temp0; OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SUBV) : /* subv $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addv.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SUBX) : /* subx $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addx.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { SI temp0;BI temp1; temp0 = SUBCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); temp1 = SUBCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond)); { SI opval = temp0; OPRND (dr) = opval; TRACE_RESULT (current_cpu, abuf, "gr", 'x', opval); } { BI opval = temp1; OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SUBX) : /* subx $dr,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = 
SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_add.f #define OPRND(f) par_exec->operands.sfmt_addx.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); * FLD (i_dr) = OPRND (dr); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_TRAP) : /* trap $uimm4 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_trap.f #define OPRND(f) par_exec->operands.sfmt_trap.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { { USI opval = GET_H_CR (((UINT) 6)); OPRND (h_cr_USI_14) = opval; TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } { USI opval = ADDSI (pc, 4); OPRND (h_cr_USI_6) = opval; TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } { UQI opval = CPU (h_bpsw); OPRND (h_bbpsw_UQI) = opval; TRACE_RESULT (current_cpu, abuf, "bbpsw", 'x', opval); } { UQI opval = GET_H_PSW (); OPRND (h_bpsw_UQI) = opval; TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval); } { UQI opval = ANDQI (GET_H_PSW (), 128); OPRND (h_psw_UQI) = opval; TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval); } { SI opval = m32r_trap (current_cpu, pc, FLD (f_uimm4)); OPRND (pc) = opval; TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval); } } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_TRAP) : /* trap $uimm4 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_trap.f #define OPRND(f) par_exec->operands.sfmt_trap.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; SEM_BRANCH_INIT vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_bbpsw) = OPRND (h_bbpsw_UQI); CPU (h_bpsw) = OPRND (h_bpsw_UQI); SET_H_CR (((UINT) 14), OPRND (h_cr_USI_14)); SET_H_CR (((UINT) 6), OPRND (h_cr_USI_6)); SET_H_PSW (OPRND (h_psw_UQI)); SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc); 
SEM_BRANCH_FINI (vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_UNLOCK) : /* unlock $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_unlock.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { if (CPU (h_lock)) { { SI opval = * FLD (i_src1); OPRND (h_memory_SI_src2_idx) = * FLD (i_src2); OPRND (h_memory_SI_src2) = opval; written |= (1 << 4); TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval); } } { BI opval = 0; OPRND (h_lock_BI) = opval; TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval); } } abuf->written = written; #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_UNLOCK) : /* unlock $src1,@$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_unlock.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_lock) = OPRND (h_lock_BI); if (written & (1 << 4)) { SETMEMSI (current_cpu, pc, OPRND (h_memory_SI_src2_idx), OPRND (h_memory_SI_src2)); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_PCMPBZ) : /* pcmpbz $src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmpz.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = (EQSI (ANDSI (* FLD (i_src2), 255), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 65280), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 16711680), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 0xff000000), 0)) ? 
(1) : (0); OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_PCMPBZ) : /* pcmpbz $src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_cmpz.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SADD) : /* sadd */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_sadd.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = ADDDI (SRADI (GET_H_ACCUMS (((UINT) 1)), 16), GET_H_ACCUMS (((UINT) 0))); OPRND (h_accums_DI_0) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SADD) : /* sadd */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_sadd.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (((UINT) 0), OPRND (h_accums_DI_0)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MACWU1) : /* macwu1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_macwu1.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535)))), 8), 8); OPRND (h_accums_DI_1) = opval; TRACE_RESULT 
(current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MACWU1) : /* macwu1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_macwu1.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (((UINT) 1), OPRND (h_accums_DI_1)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MSBLO) : /* msblo $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_msblo.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (SUBDI (GET_H_ACCUM (), SRADI (SLLDI (MULDI (EXTHIDI (TRUNCSIHI (* FLD (i_src1))), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 32), 16)), 8), 8); OPRND (accum) = opval; TRACE_RESULT (current_cpu, abuf, "accum", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MSBLO) : /* msblo $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_msblo.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUM (OPRND (accum)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MULWU1) : /* mulwu1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_mulwu1.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535))), 16), 16); OPRND 
(h_accums_DI_1) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MULWU1) : /* mulwu1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_mulwu1.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (((UINT) 1), OPRND (h_accums_DI_1)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_MACLH1) : /* maclh1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_macwu1.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), SLLDI (EXTSIDI (MULSI (EXTHISI (TRUNCSIHI (* FLD (i_src1))), SRASI (* FLD (i_src2), 16))), 16)), 8), 8); OPRND (h_accums_DI_1) = opval; TRACE_RESULT (current_cpu, abuf, "accums", 'D', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_MACLH1) : /* maclh1 $src1,$src2 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_st_plus.f #define OPRND(f) par_exec->operands.sfmt_macwu1.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_ACCUMS (((UINT) 1), OPRND (h_accums_DI_1)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SC) : /* sc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_sc.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (ZEXTBISI (CPU (h_cond))) SEM_SKIP_INSN (current_cpu, sem_arg, vpc); #undef 
OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SC) : /* sc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_sc.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SNC) : /* snc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_sc.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); if (ZEXTBISI (NOTBI (CPU (h_cond)))) SEM_SKIP_INSN (current_cpu, sem_arg, vpc); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SNC) : /* snc */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_empty.f #define OPRND(f) par_exec->operands.sfmt_sc.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_CLRPSW) : /* clrpsw $uimm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_clrpsw.f #define OPRND(f) par_exec->operands.sfmt_clrpsw.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = ANDSI (GET_H_CR (((UINT) 0)), ORSI (ZEXTQISI (INVQI (FLD (f_uimm8))), 65280)); OPRND (h_cr_USI_0) = opval; TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_CLRPSW) : /* clrpsw $uimm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_clrpsw.f #define OPRND(f) par_exec->operands.sfmt_clrpsw.f int UNUSED written = abuf->written; 
IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_CR (((UINT) 0), OPRND (h_cr_USI_0)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_SETPSW) : /* setpsw $uimm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_clrpsw.f #define OPRND(f) par_exec->operands.sfmt_setpsw.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { USI opval = FLD (f_uimm8); OPRND (h_cr_USI_0) = opval; TRACE_RESULT (current_cpu, abuf, "cr", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_SETPSW) : /* setpsw $uimm8 */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_clrpsw.f #define OPRND(f) par_exec->operands.sfmt_setpsw.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); SET_H_CR (((UINT) 0), OPRND (h_cr_USI_0)); #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_PAR_BTST) : /* btst $uimm3,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); ARGBUF *abuf = SEM_ARGBUF (sem_arg); #define FLD(f) abuf->fields.sfmt_bset.f #define OPRND(f) par_exec->operands.sfmt_btst.f int UNUSED written = 0; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 2); { BI opval = ANDQI (SRLQI (* FLD (i_sr), SUBSI (7, FLD (f_uimm3))), 1); OPRND (condbit) = opval; TRACE_RESULT (current_cpu, abuf, "cond", 'x', opval); } #undef OPRND #undef FLD } NEXT (vpc); CASE (sem, INSN_WRITE_BTST) : /* btst $uimm3,$sr */ { SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc); const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf; #define FLD(f) abuf->fields.sfmt_bset.f #define OPRND(f) par_exec->operands.sfmt_btst.f int UNUSED written = abuf->written; IADDR UNUSED pc = abuf->addr; vpc = SEM_NEXT_VPC (sem_arg, pc, 0); CPU (h_cond) = OPRND (condbit); #undef OPRND #undef FLD } NEXT (vpc); } ENDSWITCH (sem) /* End of semantic 
switch. */ /* At this point `vpc' contains the next insn to execute. */ } #undef DEFINE_SWITCH #endif /* DEFINE_SWITCH */
gpl-2.0
Forvater/Jedi_Outcast
CODE-mp/botlib/be_aas_entity.cpp
22
12512
/***************************************************************************** * name: be_aas_entity.c * * desc: AAS entities * * $Archive: /MissionPack/code/botlib/be_aas_entity.c $ * $Author: Zaphod $ * $Revision: 11 $ * $Modtime: 11/22/00 8:50a $ * $Date: 11/22/00 8:55a $ * *****************************************************************************/ #include "../game/q_shared.h" #include "l_memory.h" #include "l_script.h" #include "l_precomp.h" #include "l_struct.h" #include "l_utils.h" #include "l_log.h" #include "aasfile.h" #include "../game/botlib.h" #include "../game/be_aas.h" #include "be_aas_funcs.h" #include "be_interface.h" #include "be_aas_def.h" #define MASK_SOLID CONTENTS_PLAYERCLIP //FIXME: these might change enum { ET_GENERAL, ET_PLAYER, ET_ITEM, ET_MISSILE, ET_MOVER }; //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_UpdateEntity(int entnum, bot_entitystate_t *state) { int relink; aas_entity_t *ent; vec3_t absmins, absmaxs; if (!aasworld.loaded) { botimport.Print(PRT_MESSAGE, "AAS_UpdateEntity: not loaded\n"); return BLERR_NOAASFILE; } //end if ent = &aasworld.entities[entnum]; if (!state) { //unlink the entity AAS_UnlinkFromAreas(ent->areas); //unlink the entity from the BSP leaves AAS_UnlinkFromBSPLeaves(ent->leaves); // ent->areas = NULL; // ent->leaves = NULL; return BLERR_NOERROR; } ent->i.update_time = AAS_Time() - ent->i.ltime; ent->i.type = state->type; ent->i.flags = state->flags; ent->i.ltime = AAS_Time(); VectorCopy(ent->i.origin, ent->i.lastvisorigin); VectorCopy(state->old_origin, ent->i.old_origin); ent->i.solid = state->solid; ent->i.groundent = state->groundent; ent->i.modelindex = state->modelindex; ent->i.modelindex2 = state->modelindex2; ent->i.frame = state->frame; ent->i.event = state->event; ent->i.eventParm = state->eventParm; ent->i.powerups = 
state->powerups; ent->i.weapon = state->weapon; ent->i.legsAnim = state->legsAnim; ent->i.torsoAnim = state->torsoAnim; //number of the entity ent->i.number = entnum; //updated so set valid flag ent->i.valid = qtrue; //link everything the first frame if (aasworld.numframes == 1) relink = qtrue; else relink = qfalse; // if (ent->i.solid == SOLID_BSP) { //if the angles of the model changed if (!VectorCompare(state->angles, ent->i.angles)) { VectorCopy(state->angles, ent->i.angles); relink = qtrue; } //end if //get the mins and maxs of the model //FIXME: rotate mins and maxs AAS_BSPModelMinsMaxsOrigin(ent->i.modelindex, ent->i.angles, ent->i.mins, ent->i.maxs, NULL); } //end if else if (ent->i.solid == SOLID_BBOX) { //if the bounding box size changed if (!VectorCompare(state->mins, ent->i.mins) || !VectorCompare(state->maxs, ent->i.maxs)) { VectorCopy(state->mins, ent->i.mins); VectorCopy(state->maxs, ent->i.maxs); relink = qtrue; } //end if VectorCopy(state->angles, ent->i.angles); } //end if //if the origin changed if (!VectorCompare(state->origin, ent->i.origin)) { VectorCopy(state->origin, ent->i.origin); relink = qtrue; } //end if //if the entity should be relinked if (relink) { //don't link the world model if (entnum != ENTITYNUM_WORLD) { //absolute mins and maxs VectorAdd(ent->i.mins, ent->i.origin, absmins); VectorAdd(ent->i.maxs, ent->i.origin, absmaxs); //unlink the entity AAS_UnlinkFromAreas(ent->areas); //relink the entity to the AAS areas (use the larges bbox) ent->areas = AAS_LinkEntityClientBBox(absmins, absmaxs, entnum, PRESENCE_NORMAL); //unlink the entity from the BSP leaves AAS_UnlinkFromBSPLeaves(ent->leaves); //link the entity to the world BSP tree ent->leaves = AAS_BSPLinkEntity(absmins, absmaxs, entnum, 0); } //end if } //end if return BLERR_NOERROR; } //end of the function AAS_UpdateEntity //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - 
//=========================================================================== void AAS_EntityInfo(int entnum, aas_entityinfo_t *info) { if (!aasworld.initialized) { botimport.Print(PRT_FATAL, "AAS_EntityInfo: aasworld not initialized\n"); Com_Memset(info, 0, sizeof(aas_entityinfo_t)); return; } //end if if (entnum < 0 || entnum >= aasworld.maxentities) { botimport.Print(PRT_FATAL, "AAS_EntityInfo: entnum %d out of range\n", entnum); Com_Memset(info, 0, sizeof(aas_entityinfo_t)); return; } //end if Com_Memcpy(info, &aasworld.entities[entnum].i, sizeof(aas_entityinfo_t)); } //end of the function AAS_EntityInfo //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_EntityOrigin(int entnum, vec3_t origin) { if (entnum < 0 || entnum >= aasworld.maxentities) { botimport.Print(PRT_FATAL, "AAS_EntityOrigin: entnum %d out of range\n", entnum); VectorClear(origin); return; } //end if VectorCopy(aasworld.entities[entnum].i.origin, origin); } //end of the function AAS_EntityOrigin //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_EntityModelindex(int entnum) { if (entnum < 0 || entnum >= aasworld.maxentities) { botimport.Print(PRT_FATAL, "AAS_EntityModelindex: entnum %d out of range\n", entnum); return 0; } //end if return aasworld.entities[entnum].i.modelindex; } //end of the function AAS_EntityModelindex //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_EntityType(int entnum) { if (!aasworld.initialized) return 0; if (entnum < 0 || entnum >= aasworld.maxentities) { 
botimport.Print(PRT_FATAL, "AAS_EntityType: entnum %d out of range\n", entnum); return 0; } //end if return aasworld.entities[entnum].i.type; } //end of the AAS_EntityType //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_EntityModelNum(int entnum) { if (!aasworld.initialized) return 0; if (entnum < 0 || entnum >= aasworld.maxentities) { botimport.Print(PRT_FATAL, "AAS_EntityModelNum: entnum %d out of range\n", entnum); return 0; } //end if return aasworld.entities[entnum].i.modelindex; } //end of the function AAS_EntityModelNum //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_OriginOfMoverWithModelNum(int modelnum, vec3_t origin) { int i; aas_entity_t *ent; for (i = 0; i < aasworld.maxentities; i++) { ent = &aasworld.entities[i]; if (ent->i.type == ET_MOVER) { if (ent->i.modelindex == modelnum) { VectorCopy(ent->i.origin, origin); return qtrue; } //end if } //end if } //end for return qfalse; } //end of the function AAS_OriginOfMoverWithModelNum //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_EntitySize(int entnum, vec3_t mins, vec3_t maxs) { aas_entity_t *ent; if (!aasworld.initialized) return; if (entnum < 0 || entnum >= aasworld.maxentities) { botimport.Print(PRT_FATAL, "AAS_EntitySize: entnum %d out of range\n", entnum); return; } //end if ent = &aasworld.entities[entnum]; VectorCopy(ent->i.mins, mins); VectorCopy(ent->i.maxs, maxs); } //end of the function AAS_EntitySize //=========================================================================== // // 
Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_EntityBSPData(int entnum, bsp_entdata_t *entdata) { aas_entity_t *ent; ent = &aasworld.entities[entnum]; VectorCopy(ent->i.origin, entdata->origin); VectorCopy(ent->i.angles, entdata->angles); VectorAdd(ent->i.origin, ent->i.mins, entdata->absmins); VectorAdd(ent->i.origin, ent->i.maxs, entdata->absmaxs); entdata->solid = ent->i.solid; entdata->modelnum = ent->i.modelindex - 1; } //end of the function AAS_EntityBSPData //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_ResetEntityLinks(void) { int i; for (i = 0; i < aasworld.maxentities; i++) { aasworld.entities[i].areas = NULL; aasworld.entities[i].leaves = NULL; } //end for } //end of the function AAS_ResetEntityLinks //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_InvalidateEntities(void) { int i; for (i = 0; i < aasworld.maxentities; i++) { aasworld.entities[i].i.valid = qfalse; aasworld.entities[i].i.number = i; } //end for } //end of the function AAS_InvalidateEntities //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_UnlinkInvalidEntities(void) { int i; aas_entity_t *ent; for (i = 0; i < aasworld.maxentities; i++) { ent = &aasworld.entities[i]; if (!ent->i.valid) { AAS_UnlinkFromAreas( ent->areas ); ent->areas = NULL; AAS_UnlinkFromBSPLeaves( ent->leaves ); ent->leaves = NULL; } //end for } //end for } //end of the function AAS_UnlinkInvalidEntities 
//=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_NearestEntity(vec3_t origin, int modelindex) { int i, bestentnum; float dist, bestdist; aas_entity_t *ent; vec3_t dir; bestentnum = 0; bestdist = 99999; for (i = 0; i < aasworld.maxentities; i++) { ent = &aasworld.entities[i]; if (ent->i.modelindex != modelindex) continue; VectorSubtract(ent->i.origin, origin, dir); if (abs(dir[0]) < 40) { if (abs(dir[1]) < 40) { dist = VectorLength(dir); if (dist < bestdist) { bestdist = dist; bestentnum = i; } //end if } //end if } //end if } //end for return bestentnum; } //end of the function AAS_NearestEntity //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_BestReachableEntityArea(int entnum) { aas_entity_t *ent; ent = &aasworld.entities[entnum]; return AAS_BestReachableLinkArea(ent->areas); } //end of the function AAS_BestReachableEntityArea //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_NextEntity(int entnum) { if (!aasworld.loaded) return 0; if (entnum < 0) entnum = -1; while(++entnum < aasworld.maxentities) { if (aasworld.entities[entnum].i.valid) return entnum; } //end while return 0; } //end of the function AAS_NextEntity
gpl-2.0
tsoliman/scummvm
engines/titanic/game/chicken_cooler.cpp
22
2211
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include "titanic/game/chicken_cooler.h" #include "titanic/carry/chicken.h" namespace Titanic { BEGIN_MESSAGE_MAP(CChickenCooler, CGameObject) ON_MESSAGE(EnterRoomMsg) ON_MESSAGE(EnterViewMsg) END_MESSAGE_MAP() void CChickenCooler::save(SimpleFile *file, int indent) { file->writeNumberLine(1, indent); file->writeNumberLine(_newTemperature, indent); file->writeNumberLine(_triggerOnRoomEntry, indent); CGameObject::save(file, indent); } void CChickenCooler::load(SimpleFile *file) { file->readNumber(); _newTemperature = file->readNumber(); _triggerOnRoomEntry = file->readNumber(); CGameObject::load(file); } bool CChickenCooler::EnterRoomMsg(CEnterRoomMsg *msg) { if (_triggerOnRoomEntry) { CGameObject *obj = getMailManFirstObject(); if (!obj) { if (CChicken::_temperature > _newTemperature) CChicken::_temperature = _newTemperature; } } return true; } bool CChickenCooler::EnterViewMsg(CEnterViewMsg *msg) { if (!_triggerOnRoomEntry) { for (CGameObject *obj = getMailManFirstObject(); obj; obj = getNextMail(obj)) { if (obj->isEquals("Chicken")) return true; } if 
(CChicken::_temperature > _newTemperature) CChicken::_temperature = _newTemperature; } return true; } } // End of namespace Titanic
gpl-2.0
sigma-random/asuswrt-merlin
release/src-rt-6.x.4708/bcmcrypto/random.c
22
4050
/* * random.c * Copyright (C) 2014, Broadcom Corporation * All Rights Reserved. * * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; * the contents of this file may not be disclosed to third parties, copied * or duplicated in any form, in whole or in part, without the prior * written permission of Broadcom Corporation. * * $Id: random.c 241182 2011-02-17 21:50:03Z $ */ #include <stdio.h> #if defined(__linux__) #include <stdlib.h> #include <unistd.h> #include <string.h> #include <signal.h> #include <errno.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> #include <netinet/in.h> #include <net/if.h> #include <fcntl.h> #include <linux/if_packet.h> #elif (defined(__ECOS) || defined(TARGETOS_nucleus)) #include <stdlib.h> #elif WIN32 #include <stdio.h> #include <windows.h> #include <Wincrypt.h> #endif /* __linux__ */ #include <assert.h> #include <typedefs.h> #include <bcmcrypto/bn.h> #if defined(__linux__) void linux_random(uint8 *rand, int len); #elif WIN32 void windows_random(uint8 *rand, int len); #elif (defined(__ECOS) || defined(TARGETOS_nucleus)) void generic_random(uint8* rand, int len); #elif defined(TARGETOS_symbian) void generic_random(uint8* rand, int len); #endif /* __linux__ */ void RAND_bytes(unsigned char *buf, int num) { #if defined(__linux__) linux_random(buf, num); #elif WIN32 windows_random(buf, num); #elif (defined(__ECOS) || defined(TARGETOS_nucleus)) generic_random(buf, num); #elif defined(TARGETOS_symbian) generic_random(buf, num); #endif /* __linux__ */ } #if defined(__linux__) void RAND_linux_init() { BN_register_RAND(linux_random); } #ifndef RANDOM_READ_TRY_MAX #define RANDOM_READ_TRY_MAX 10 #endif void linux_random(uint8 *rand, int len) { static int dev_random_fd = -1; int status; int i; if (dev_random_fd == -1) dev_random_fd = open("/dev/urandom", O_RDONLY|O_NONBLOCK); assert(dev_random_fd != -1); for (i = 0; i < RANDOM_READ_TRY_MAX; i++) { status = read(dev_random_fd, rand, len); if (status == -1) { 
if (errno == EINTR) continue; assert(status != -1); } return; } assert(i != RANDOM_READ_TRY_MAX); } #elif __ECOS void RAND_ecos_init() { BN_register_RAND(generic_random); } #elif WIN32 void RAND_windows_init() { BN_register_RAND(windows_random); } void windows_random(uint8 *rand, int len) { /* Declare and initialize variables */ HCRYPTPROV hCryptProv = NULL; LPCSTR UserName = "{56E9D11F-76B8-42fa-8645-76980E4E8648}"; /* Attempt to acquire a context and a key container. The context will use the default CSP for the RSA_FULL provider type. DwFlags is set to 0 to attempt to open an existing key container. */ if (CryptAcquireContext(&hCryptProv, UserName, NULL, PROV_RSA_FULL, 0)) { /* do nothing */ } else { /* An error occurred in acquiring the context. This could mean that the key container requested does not exist. In this case, the function can be called again to attempt to create a new key container. Error codes are defined in winerror.h. */ if (GetLastError() == NTE_BAD_KEYSET) { if (!CryptAcquireContext(&hCryptProv, UserName, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) { printf("Could not create a new key container.\n"); } } else { printf("A cryptographic service handle could not be acquired.\n"); } } if (hCryptProv) { /* Generate a random initialization vector. */ if (!CryptGenRandom(hCryptProv, len, rand)) { printf("Error during CryptGenRandom.\n"); } if (!CryptReleaseContext(hCryptProv, 0)) printf("Failed CryptReleaseContext\n"); } return; } #elif TARGETOS_nucleus void RAND_generic_init() { BN_register_RAND(generic_random); } #elif TARGETOS_symbian void RAND_generic_init() { BN_register_RAND(generic_random); } #endif /* __linux__ */ #if (defined(__ECOS) || defined(TARGETOS_nucleus) || defined(TARGETOS_symbian)) void generic_random(uint8 * random, int len) { int tlen = len; while (tlen--) { *random = (uint8)rand(); *random++; } return; } #endif
gpl-2.0
PJayB/jk2src
code/jpeg-6/jcprepct.cpp
22
13237
/* * jcprepct.c * * Copyright (C) 1994, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains the compression preprocessing controller. * This controller manages the color conversion, downsampling, * and edge expansion steps. * * Most of the complexity here is associated with buffering input rows * as required by the downsampler. See the comments at the head of * jcsample.c for the downsampler's needs. */ // leave this as first line for PCH reasons... // #include "../server/exe_headers.h" #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" /* At present, jcsample.c can request context rows only for smoothing. * In the future, we might also need context rows for CCIR601 sampling * or other more-complex downsampling procedures. The code to support * context rows should be compiled only if needed. */ #ifdef INPUT_SMOOTHING_SUPPORTED #define CONTEXT_ROWS_SUPPORTED #endif /* * For the simple (no-context-row) case, we just need to buffer one * row group's worth of pixels for the downsampling step. At the bottom of * the image, we pad to a full row group by replicating the last pixel row. * The downsampler's last output row is then replicated if needed to pad * out to a full iMCU row. * * When providing context rows, we must buffer three row groups' worth of * pixels. Three row groups are physically allocated, but the row pointer * arrays are made five row groups high, with the extra pointers above and * below "wrapping around" to point to the last and first real row groups. * This allows the downsampler to access the proper context rows. * At the top and bottom of the image, we create dummy context rows by * copying the first or last real pixel row. This copying could be avoided * by pointer hacking as is done in jdmainct.c, but it doesn't seem worth the * trouble on the compression side. 
*/ /* Private buffer controller object */ typedef struct { struct jpeg_c_prep_controller pub; /* public fields */ /* Downsampling input buffer. This buffer holds color-converted data * until we have enough to do a downsample step. */ JSAMPARRAY color_buf[MAX_COMPONENTS]; JDIMENSION rows_to_go; /* counts rows remaining in source image */ int next_buf_row; /* index of next row to store in color_buf */ #ifdef CONTEXT_ROWS_SUPPORTED /* only needed for context case */ int this_row_group; /* starting row index of group to process */ int next_buf_stop; /* downsample when we reach this index */ #endif } my_prep_controller; typedef my_prep_controller * my_prep_ptr; /* * Initialize for a processing pass. */ METHODDEF void start_pass_prep (j_compress_ptr cinfo, J_BUF_MODE pass_mode) { my_prep_ptr prep = (my_prep_ptr) cinfo->prep; if (pass_mode != JBUF_PASS_THRU) ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); /* Initialize total-height counter for detecting bottom of image */ prep->rows_to_go = cinfo->image_height; /* Mark the conversion buffer empty */ prep->next_buf_row = 0; #ifdef CONTEXT_ROWS_SUPPORTED /* Preset additional state variables for context mode. * These aren't used in non-context mode, so we needn't test which mode. */ prep->this_row_group = 0; /* Set next_buf_stop to stop after two row groups have been read in. */ prep->next_buf_stop = 2 * cinfo->max_v_samp_factor; #endif } /* * Expand an image vertically from height input_rows to height output_rows, * by duplicating the bottom row. */ LOCAL void expand_bottom_edge (JSAMPARRAY image_data, JDIMENSION num_cols, int input_rows, int output_rows) { register int row; for (row = input_rows; row < output_rows; row++) { jcopy_sample_rows(image_data, input_rows-1, image_data, row, 1, num_cols); } } /* * Process some data in the simple no-context case. * * Preprocessor output data is counted in "row groups". A row group * is defined to be v_samp_factor sample rows of each component. 
* Downsampling will produce this much data from each max_v_samp_factor * input rows. */ METHODDEF void pre_process_data (j_compress_ptr cinfo, JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, JDIMENSION in_rows_avail, JSAMPIMAGE output_buf, JDIMENSION *out_row_group_ctr, JDIMENSION out_row_groups_avail) { my_prep_ptr prep = (my_prep_ptr) cinfo->prep; int numrows, ci; JDIMENSION inrows; jpeg_component_info * compptr; while (*in_row_ctr < in_rows_avail && *out_row_group_ctr < out_row_groups_avail) { /* Do color conversion to fill the conversion buffer. */ inrows = in_rows_avail - *in_row_ctr; numrows = cinfo->max_v_samp_factor - prep->next_buf_row; numrows = (int) MIN((JDIMENSION) numrows, inrows); (*cinfo->cconvert->color_convert) (cinfo, input_buf + *in_row_ctr, prep->color_buf, (JDIMENSION) prep->next_buf_row, numrows); *in_row_ctr += numrows; prep->next_buf_row += numrows; prep->rows_to_go -= numrows; /* If at bottom of image, pad to fill the conversion buffer. */ if (prep->rows_to_go == 0 && prep->next_buf_row < cinfo->max_v_samp_factor) { for (ci = 0; ci < cinfo->num_components; ci++) { expand_bottom_edge(prep->color_buf[ci], cinfo->image_width, prep->next_buf_row, cinfo->max_v_samp_factor); } prep->next_buf_row = cinfo->max_v_samp_factor; } /* If we've filled the conversion buffer, empty it. */ if (prep->next_buf_row == cinfo->max_v_samp_factor) { (*cinfo->downsample->downsample) (cinfo, prep->color_buf, (JDIMENSION) 0, output_buf, *out_row_group_ctr); prep->next_buf_row = 0; (*out_row_group_ctr)++; } /* If at bottom of image, pad the output to a full iMCU height. * Note we assume the caller is providing a one-iMCU-height output buffer! 
*/ if (prep->rows_to_go == 0 && *out_row_group_ctr < out_row_groups_avail) { for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { expand_bottom_edge(output_buf[ci], compptr->width_in_blocks * DCTSIZE, (int) (*out_row_group_ctr * compptr->v_samp_factor), (int) (out_row_groups_avail * compptr->v_samp_factor)); } *out_row_group_ctr = out_row_groups_avail; break; /* can exit outer loop without test */ } } } #ifdef CONTEXT_ROWS_SUPPORTED /* * Process some data in the context case. */ METHODDEF void pre_process_context (j_compress_ptr cinfo, JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, JDIMENSION in_rows_avail, JSAMPIMAGE output_buf, JDIMENSION *out_row_group_ctr, JDIMENSION out_row_groups_avail) { my_prep_ptr prep = (my_prep_ptr) cinfo->prep; int numrows, ci; int buf_height = cinfo->max_v_samp_factor * 3; JDIMENSION inrows; jpeg_component_info * compptr; while (*out_row_group_ctr < out_row_groups_avail) { if (*in_row_ctr < in_rows_avail) { /* Do color conversion to fill the conversion buffer. */ inrows = in_rows_avail - *in_row_ctr; numrows = prep->next_buf_stop - prep->next_buf_row; numrows = (int) MIN((JDIMENSION) numrows, inrows); (*cinfo->cconvert->color_convert) (cinfo, input_buf + *in_row_ctr, prep->color_buf, (JDIMENSION) prep->next_buf_row, numrows); /* Pad at top of image, if first time through */ if (prep->rows_to_go == cinfo->image_height) { for (ci = 0; ci < cinfo->num_components; ci++) { int row; for (row = 1; row <= cinfo->max_v_samp_factor; row++) { jcopy_sample_rows(prep->color_buf[ci], 0, prep->color_buf[ci], -row, 1, cinfo->image_width); } } } *in_row_ctr += numrows; prep->next_buf_row += numrows; prep->rows_to_go -= numrows; } else { /* Return for more data, unless we are at the bottom of the image. */ if (prep->rows_to_go != 0) break; } /* If at bottom of image, pad to fill the conversion buffer. 
*/ if (prep->rows_to_go == 0 && prep->next_buf_row < prep->next_buf_stop) { for (ci = 0; ci < cinfo->num_components; ci++) { expand_bottom_edge(prep->color_buf[ci], cinfo->image_width, prep->next_buf_row, prep->next_buf_stop); } prep->next_buf_row = prep->next_buf_stop; } /* If we've gotten enough data, downsample a row group. */ if (prep->next_buf_row == prep->next_buf_stop) { (*cinfo->downsample->downsample) (cinfo, prep->color_buf, (JDIMENSION) prep->this_row_group, output_buf, *out_row_group_ctr); (*out_row_group_ctr)++; /* Advance pointers with wraparound as necessary. */ prep->this_row_group += cinfo->max_v_samp_factor; if (prep->this_row_group >= buf_height) prep->this_row_group = 0; if (prep->next_buf_row >= buf_height) prep->next_buf_row = 0; prep->next_buf_stop = prep->next_buf_row + cinfo->max_v_samp_factor; } /* If at bottom of image, pad the output to a full iMCU height. * Note we assume the caller is providing a one-iMCU-height output buffer! */ if (prep->rows_to_go == 0 && *out_row_group_ctr < out_row_groups_avail) { for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { expand_bottom_edge(output_buf[ci], compptr->width_in_blocks * DCTSIZE, (int) (*out_row_group_ctr * compptr->v_samp_factor), (int) (out_row_groups_avail * compptr->v_samp_factor)); } *out_row_group_ctr = out_row_groups_avail; break; /* can exit outer loop without test */ } } } /* * Create the wrapped-around downsampling input buffer needed for context mode. */ LOCAL void create_context_buffer (j_compress_ptr cinfo) { my_prep_ptr prep = (my_prep_ptr) cinfo->prep; int rgroup_height = cinfo->max_v_samp_factor; int ci, i; jpeg_component_info * compptr; JSAMPARRAY true_buffer, fake_buffer; /* Grab enough space for fake row pointers for all the components; * we need five row groups' worth of pointers for each component. 
*/ fake_buffer = (JSAMPARRAY) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, (cinfo->num_components * 5 * rgroup_height) * SIZEOF(JSAMPROW)); for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { /* Allocate the actual buffer space (3 row groups) for this component. * We make the buffer wide enough to allow the downsampler to edge-expand * horizontally within the buffer, if it so chooses. */ true_buffer = (*cinfo->mem->alloc_sarray) ((j_common_ptr) cinfo, JPOOL_IMAGE, (JDIMENSION) (((long) compptr->width_in_blocks * DCTSIZE * cinfo->max_h_samp_factor) / compptr->h_samp_factor), (JDIMENSION) (3 * rgroup_height)); /* Copy true buffer row pointers into the middle of the fake row array */ MEMCOPY(fake_buffer + rgroup_height, true_buffer, 3 * rgroup_height * SIZEOF(JSAMPROW)); /* Fill in the above and below wraparound pointers */ for (i = 0; i < rgroup_height; i++) { fake_buffer[i] = true_buffer[2 * rgroup_height + i]; fake_buffer[4 * rgroup_height + i] = true_buffer[i]; } prep->color_buf[ci] = fake_buffer + rgroup_height; fake_buffer += 5 * rgroup_height; /* point to space for next component */ } } #endif /* CONTEXT_ROWS_SUPPORTED */ /* * Initialize preprocessing controller. */ GLOBAL void jinit_c_prep_controller (j_compress_ptr cinfo, boolean need_full_buffer) { my_prep_ptr prep; int ci; jpeg_component_info * compptr; if (need_full_buffer) /* safety check */ ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); prep = (my_prep_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, SIZEOF(my_prep_controller)); cinfo->prep = (struct jpeg_c_prep_controller *) prep; prep->pub.start_pass = start_pass_prep; /* Allocate the color conversion buffer. * We make the buffer wide enough to allow the downsampler to edge-expand * horizontally within the buffer, if it so chooses. 
*/ if (cinfo->downsample->need_context_rows) { /* Set up to provide context rows */ #ifdef CONTEXT_ROWS_SUPPORTED prep->pub.pre_process_data = pre_process_context; create_context_buffer(cinfo); #else ERREXIT(cinfo, JERR_NOT_COMPILED); #endif } else { /* No context, just make it tall enough for one row group */ prep->pub.pre_process_data = pre_process_data; for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { prep->color_buf[ci] = (*cinfo->mem->alloc_sarray) ((j_common_ptr) cinfo, JPOOL_IMAGE, (JDIMENSION) (((long) compptr->width_in_blocks * DCTSIZE * cinfo->max_h_samp_factor) / compptr->h_samp_factor), (JDIMENSION) cinfo->max_v_samp_factor); } } }
gpl-2.0
mambomark/linux-systemsim
drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
534
2871
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/option.h> #include <subdev/bios.h> #include <subdev/bios/init.h> #include <subdev/vga.h> #include "priv.h" int _nouveau_devinit_fini(struct nouveau_object *object, bool suspend) { struct nouveau_devinit *devinit = (void *)object; /* force full reinit on resume */ if (suspend) devinit->post = true; /* unlock the extended vga crtc regs */ nv_lockvgac(devinit, false); return nouveau_subdev_fini(&devinit->base, suspend); } int _nouveau_devinit_init(struct nouveau_object *object) { struct nouveau_devinit_impl *impl = (void *)object->oclass; struct nouveau_devinit *devinit = (void *)object; int ret; ret = nouveau_subdev_init(&devinit->base); if (ret) return ret; ret = nvbios_init(&devinit->base, devinit->post); if (ret) return ret; if (impl->disable) nv_device(devinit)->disable_mask |= impl->disable(devinit); return 0; } void _nouveau_devinit_dtor(struct nouveau_object *object) { struct nouveau_devinit *devinit = (void *)object; /* lock crtc regs */ nv_lockvgac(devinit, true); nouveau_subdev_destroy(&devinit->base); } int nouveau_devinit_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, int size, void **pobject) { struct nouveau_devinit_impl *impl = (void *)oclass; struct nouveau_device *device = nv_device(parent); struct nouveau_devinit *devinit; int ret; ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT", "init", size, pobject); devinit = *pobject; if (ret) return ret; devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false); devinit->meminit = impl->meminit; devinit->pll_set = impl->pll_set; devinit->mmio = impl->mmio; return 0; }
gpl-2.0
tapash/linux
arch/x86/crypto/sha-mb/sha1_mb.c
534
26212
/* * Multi buffer SHA1 algorithm Glue Code * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/cryptohash.h> #include <linux/types.h> #include <linux/list.h> #include <crypto/scatterwalk.h> #include <crypto/sha.h> #include <crypto/mcryptd.h> #include <crypto/crypto_wq.h> #include <asm/byteorder.h> #include <linux/hardirq.h> #include <asm/fpu/api.h> #include "sha_mb_ctx.h" #define FLUSH_INTERVAL 1000 /* in usec */ static struct mcryptd_alg_state sha1_mb_alg_state; struct sha1_mb_ctx { struct mcryptd_ahash *mcryptd_tfm; }; static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) { struct shash_desc *desc; desc = container_of((void *) hash_ctx, struct shash_desc, __ctx); return container_of(desc, struct mcryptd_hash_request_ctx, desc); } static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) { return container_of((void *) ctx, struct ahash_request, __ctx); } static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, struct shash_desc *desc) { rctx->flag = HASH_UPDATE; } static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state); static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state, struct job_sha1 *job); static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state); static asmlinkage struct job_sha1* 
(*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state); inline void sha1_init_digest(uint32_t *digest) { static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; memcpy(digest, initial_digest, sizeof(initial_digest)); } inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], uint32_t total_len) { uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1); memset(&padblock[i], 0, SHA1_BLOCK_SIZE); padblock[i] = 0x80; i += ((SHA1_BLOCK_SIZE - 1) & (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1))) + 1 + SHA1_PADLENGTHFIELD_SIZE; #if SHA1_PADLENGTHFIELD_SIZE == 16 *((uint64_t *) &padblock[i - 16]) = 0; #endif *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); /* Number of extra blocks to hash */ return i >> SHA1_LOG2_BLOCK_SIZE; } static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx) { while (ctx) { if (ctx->status & HASH_CTX_STS_COMPLETE) { /* Clear PROCESSING bit */ ctx->status = HASH_CTX_STS_COMPLETE; return ctx; } /* * If the extra blocks are empty, begin hashing what remains * in the user's buffer. */ if (ctx->partial_block_buffer_length == 0 && ctx->incoming_buffer_length) { const void *buffer = ctx->incoming_buffer; uint32_t len = ctx->incoming_buffer_length; uint32_t copy_len; /* * Only entire blocks can be hashed. * Copy remainder to extra blocks buffer. 
*/ copy_len = len & (SHA1_BLOCK_SIZE-1); if (copy_len) { len -= copy_len; memcpy(ctx->partial_block_buffer, ((const char *) buffer + len), copy_len); ctx->partial_block_buffer_length = copy_len; } ctx->incoming_buffer_length = 0; /* len should be a multiple of the block size now */ assert((len % SHA1_BLOCK_SIZE) == 0); /* Set len to the number of blocks to be hashed */ len >>= SHA1_LOG2_BLOCK_SIZE; if (len) { ctx->job.buffer = (uint8_t *) buffer; ctx->job.len = len; ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); continue; } } /* * If the extra blocks are not empty, then we are * either on the last block(s) or we need more * user input before continuing. */ if (ctx->status & HASH_CTX_STS_LAST) { uint8_t *buf = ctx->partial_block_buffer; uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length); ctx->status = (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_COMPLETE); ctx->job.buffer = buf; ctx->job.len = (uint32_t) n_extra_blocks; ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); continue; } ctx->status = HASH_CTX_STS_IDLE; return ctx; } return NULL; } static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr) { /* * If get_comp_job returns NULL, there are no jobs complete. * If get_comp_job returns a job, verify that it is safe to return to the user. * If it is not ready, resubmit the job to finish processing. * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing. 
*/ struct sha1_hash_ctx *ctx; ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr); return sha1_ctx_mgr_resubmit(mgr, ctx); } static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr) { sha1_job_mgr_init(&mgr->mgr); } static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx, const void *buffer, uint32_t len, int flags) { if (flags & (~HASH_ENTIRE)) { /* User should not pass anything other than FIRST, UPDATE, or LAST */ ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; return ctx; } if (ctx->status & HASH_CTX_STS_PROCESSING) { /* Cannot submit to a currently processing job. */ ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; return ctx; } if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { /* Cannot update a finished job. */ ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; return ctx; } if (flags & HASH_FIRST) { /* Init digest */ sha1_init_digest(ctx->job.result_digest); /* Reset byte counter */ ctx->total_length = 0; /* Clear extra blocks */ ctx->partial_block_buffer_length = 0; } /* If we made it here, there were no errors during this call to submit */ ctx->error = HASH_CTX_ERROR_NONE; /* Store buffer ptr info from user */ ctx->incoming_buffer = buffer; ctx->incoming_buffer_length = len; /* Store the user's request flags and mark this ctx as currently being processed. */ ctx->status = (flags & HASH_LAST) ? (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : HASH_CTX_STS_PROCESSING; /* Advance byte counter */ ctx->total_length += len; /* * If there is anything currently buffered in the extra blocks, * append to it until it contains a whole block. * Or if the user's buffer contains less than a whole block, * append as much as possible to the extra block. 
*/ if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) { /* Compute how many bytes to copy from user buffer into extra block */ uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length; if (len < copy_len) copy_len = len; if (copy_len) { /* Copy and update relevant pointers and counters */ memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length], buffer, copy_len); ctx->partial_block_buffer_length += copy_len; ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len); ctx->incoming_buffer_length = len - copy_len; } /* The extra block should never contain more than 1 block here */ assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE); /* If the extra block buffer contains exactly 1 block, it can be hashed. */ if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) { ctx->partial_block_buffer_length = 0; ctx->job.buffer = ctx->partial_block_buffer; ctx->job.len = 1; ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); } } return sha1_ctx_mgr_resubmit(mgr, ctx); } static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr) { struct sha1_hash_ctx *ctx; while (1) { ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr); /* If flush returned 0, there are no more jobs in flight. */ if (!ctx) return NULL; /* * If flush returned a job, resubmit the job to finish processing. */ ctx = sha1_ctx_mgr_resubmit(mgr, ctx); /* * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. * Otherwise, all jobs currently being managed by the sha1_ctx_mgr * still need processing. Loop. 
*/ if (ctx) return ctx; } } static int sha1_mb_init(struct shash_desc *desc) { struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); hash_ctx_init(sctx); sctx->job.result_digest[0] = SHA1_H0; sctx->job.result_digest[1] = SHA1_H1; sctx->job.result_digest[2] = SHA1_H2; sctx->job.result_digest[3] = SHA1_H3; sctx->job.result_digest[4] = SHA1_H4; sctx->total_length = 0; sctx->partial_block_buffer_length = 0; sctx->status = HASH_CTX_STS_IDLE; return 0; } static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) { int i; struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); __be32 *dst = (__be32 *) rctx->out; for (i = 0; i < 5; ++i) dst[i] = cpu_to_be32(sctx->job.result_digest[i]); return 0; } static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, struct mcryptd_alg_cstate *cstate, bool flush) { int flag = HASH_UPDATE; int nbytes, err = 0; struct mcryptd_hash_request_ctx *rctx = *ret_rctx; struct sha1_hash_ctx *sha_ctx; /* more work ? */ while (!(rctx->flag & HASH_DONE)) { nbytes = crypto_ahash_walk_done(&rctx->walk, 0); if (nbytes < 0) { err = nbytes; goto out; } /* check if the walk is done */ if (crypto_ahash_walk_last(&rctx->walk)) { rctx->flag |= HASH_DONE; if (rctx->flag & HASH_FINAL) flag |= HASH_LAST; } sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc); kernel_fpu_begin(); sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); if (!sha_ctx) { if (flush) sha_ctx = sha1_ctx_mgr_flush(cstate->mgr); } kernel_fpu_end(); if (sha_ctx) rctx = cast_hash_to_mcryptd_ctx(sha_ctx); else { rctx = NULL; goto out; } } /* copy the results */ if (rctx->flag & HASH_FINAL) sha1_mb_set_results(rctx); out: *ret_rctx = rctx; return err; } static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, struct mcryptd_alg_cstate *cstate, int err) { struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); struct sha1_hash_ctx *sha_ctx; struct mcryptd_hash_request_ctx *req_ctx; int ret; /* remove from work list */ 
spin_lock(&cstate->work_lock); list_del(&rctx->waiter); spin_unlock(&cstate->work_lock); if (irqs_disabled()) rctx->complete(&req->base, err); else { local_bh_disable(); rctx->complete(&req->base, err); local_bh_enable(); } /* check to see if there are other jobs that are done */ sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); while (sha_ctx) { req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); ret = sha_finish_walk(&req_ctx, cstate, false); if (req_ctx) { spin_lock(&cstate->work_lock); list_del(&req_ctx->waiter); spin_unlock(&cstate->work_lock); req = cast_mcryptd_ctx_to_req(req_ctx); if (irqs_disabled()) rctx->complete(&req->base, ret); else { local_bh_disable(); rctx->complete(&req->base, ret); local_bh_enable(); } } sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); } return 0; } static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx, struct mcryptd_alg_cstate *cstate) { unsigned long next_flush; unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); /* initialize tag */ rctx->tag.arrival = jiffies; /* tag the arrival time */ rctx->tag.seq_num = cstate->next_seq_num++; next_flush = rctx->tag.arrival + delay; rctx->tag.expire = next_flush; spin_lock(&cstate->work_lock); list_add_tail(&rctx->waiter, &cstate->work_list); spin_unlock(&cstate->work_lock); mcryptd_arm_flusher(cstate, delay); } static int sha1_mb_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct mcryptd_hash_request_ctx *rctx = container_of(desc, struct mcryptd_hash_request_ctx, desc); struct mcryptd_alg_cstate *cstate = this_cpu_ptr(sha1_mb_alg_state.alg_cstate); struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); struct sha1_hash_ctx *sha_ctx; int ret = 0, nbytes; /* sanity check */ if (rctx->tag.cpu != smp_processor_id()) { pr_err("mcryptd error: cpu clash\n"); goto done; } /* need to init context */ req_ctx_init(rctx, desc); nbytes = crypto_ahash_walk_first(req, &rctx->walk); if (nbytes < 0) { ret = nbytes; goto done; } if 
(crypto_ahash_walk_last(&rctx->walk)) rctx->flag |= HASH_DONE; /* submit */ sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); sha1_mb_add_list(rctx, cstate); kernel_fpu_begin(); sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE); kernel_fpu_end(); /* check if anything is returned */ if (!sha_ctx) return -EINPROGRESS; if (sha_ctx->error) { ret = sha_ctx->error; rctx = cast_hash_to_mcryptd_ctx(sha_ctx); goto done; } rctx = cast_hash_to_mcryptd_ctx(sha_ctx); ret = sha_finish_walk(&rctx, cstate, false); if (!rctx) return -EINPROGRESS; done: sha_complete_job(rctx, cstate, ret); return ret; } static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct mcryptd_hash_request_ctx *rctx = container_of(desc, struct mcryptd_hash_request_ctx, desc); struct mcryptd_alg_cstate *cstate = this_cpu_ptr(sha1_mb_alg_state.alg_cstate); struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); struct sha1_hash_ctx *sha_ctx; int ret = 0, flag = HASH_UPDATE, nbytes; /* sanity check */ if (rctx->tag.cpu != smp_processor_id()) { pr_err("mcryptd error: cpu clash\n"); goto done; } /* need to init context */ req_ctx_init(rctx, desc); nbytes = crypto_ahash_walk_first(req, &rctx->walk); if (nbytes < 0) { ret = nbytes; goto done; } if (crypto_ahash_walk_last(&rctx->walk)) { rctx->flag |= HASH_DONE; flag = HASH_LAST; } rctx->out = out; /* submit */ rctx->flag |= HASH_FINAL; sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); sha1_mb_add_list(rctx, cstate); kernel_fpu_begin(); sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); kernel_fpu_end(); /* check if anything is returned */ if (!sha_ctx) return -EINPROGRESS; if (sha_ctx->error) { ret = sha_ctx->error; goto done; } rctx = cast_hash_to_mcryptd_ctx(sha_ctx); ret = sha_finish_walk(&rctx, cstate, false); if (!rctx) return -EINPROGRESS; done: sha_complete_job(rctx, cstate, ret); return ret; } static int sha1_mb_final(struct 
shash_desc *desc, u8 *out) { struct mcryptd_hash_request_ctx *rctx = container_of(desc, struct mcryptd_hash_request_ctx, desc); struct mcryptd_alg_cstate *cstate = this_cpu_ptr(sha1_mb_alg_state.alg_cstate); struct sha1_hash_ctx *sha_ctx; int ret = 0; u8 data; /* sanity check */ if (rctx->tag.cpu != smp_processor_id()) { pr_err("mcryptd error: cpu clash\n"); goto done; } /* need to init context */ req_ctx_init(rctx, desc); rctx->out = out; rctx->flag |= HASH_DONE | HASH_FINAL; sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); /* flag HASH_FINAL and 0 data size */ sha1_mb_add_list(rctx, cstate); kernel_fpu_begin(); sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST); kernel_fpu_end(); /* check if anything is returned */ if (!sha_ctx) return -EINPROGRESS; if (sha_ctx->error) { ret = sha_ctx->error; rctx = cast_hash_to_mcryptd_ctx(sha_ctx); goto done; } rctx = cast_hash_to_mcryptd_ctx(sha_ctx); ret = sha_finish_walk(&rctx, cstate, false); if (!rctx) return -EINPROGRESS; done: sha_complete_job(rctx, cstate, ret); return ret; } static int sha1_mb_export(struct shash_desc *desc, void *out) { struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha1_mb_import(struct shash_desc *desc, const void *in) { struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg sha1_mb_shash_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_mb_init, .update = sha1_mb_update, .final = sha1_mb_final, .finup = sha1_mb_finup, .export = sha1_mb_export, .import = sha1_mb_import, .descsize = sizeof(struct sha1_hash_ctx), .statesize = sizeof(struct sha1_hash_ctx), .base = { .cra_name = "__sha1-mb", .cra_driver_name = "__intel_sha1-mb", .cra_priority = 100, /* * use ASYNC flag as some buffers in multi-buffer * algo may not have completed before hashing thread sleep */ .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL, 
.cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list), } }; static int sha1_mb_async_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *mcryptd_req = ahash_request_ctx(req); struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; memcpy(mcryptd_req, req, sizeof(*req)); ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); return crypto_ahash_init(mcryptd_req); } static int sha1_mb_async_update(struct ahash_request *req) { struct ahash_request *mcryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; memcpy(mcryptd_req, req, sizeof(*req)); ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); return crypto_ahash_update(mcryptd_req); } static int sha1_mb_async_finup(struct ahash_request *req) { struct ahash_request *mcryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; memcpy(mcryptd_req, req, sizeof(*req)); ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); return crypto_ahash_finup(mcryptd_req); } static int sha1_mb_async_final(struct ahash_request *req) { struct ahash_request *mcryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; memcpy(mcryptd_req, req, sizeof(*req)); ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); return crypto_ahash_final(mcryptd_req); } static int sha1_mb_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *mcryptd_req = 
ahash_request_ctx(req); struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; memcpy(mcryptd_req, req, sizeof(*req)); ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); return crypto_ahash_digest(mcryptd_req); } static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) { struct mcryptd_ahash *mcryptd_tfm; struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); struct mcryptd_hash_ctx *mctx; mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL); if (IS_ERR(mcryptd_tfm)) return PTR_ERR(mcryptd_tfm); mctx = crypto_ahash_ctx(&mcryptd_tfm->base); mctx->alg_state = &sha1_mb_alg_state; ctx->mcryptd_tfm = mcryptd_tfm; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct ahash_request) + crypto_ahash_reqsize(&mcryptd_tfm->base)); return 0; } static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) { struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); mcryptd_free_ahash(ctx->mcryptd_tfm); } static struct ahash_alg sha1_mb_async_alg = { .init = sha1_mb_async_init, .update = sha1_mb_async_update, .final = sha1_mb_async_final, .finup = sha1_mb_async_finup, .digest = sha1_mb_async_digest, .halg = { .digestsize = SHA1_DIGEST_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1_mb", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_type = &crypto_ahash_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list), .cra_init = sha1_mb_async_init_tfm, .cra_exit = sha1_mb_async_exit_tfm, .cra_ctxsize = sizeof(struct sha1_mb_ctx), .cra_alignmask = 0, }, }, }; static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate) { struct mcryptd_hash_request_ctx *rctx; unsigned long cur_time; unsigned long next_flush = 0; struct sha1_hash_ctx *sha_ctx; cur_time = jiffies; while (!list_empty(&cstate->work_list)) { rctx = list_entry(cstate->work_list.next, struct mcryptd_hash_request_ctx, waiter); if (time_before(cur_time, 
rctx->tag.expire)) break; kernel_fpu_begin(); sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr); kernel_fpu_end(); if (!sha_ctx) { pr_err("sha1_mb error: nothing got flushed for non-empty list\n"); break; } rctx = cast_hash_to_mcryptd_ctx(sha_ctx); sha_finish_walk(&rctx, cstate, true); sha_complete_job(rctx, cstate, 0); } if (!list_empty(&cstate->work_list)) { rctx = list_entry(cstate->work_list.next, struct mcryptd_hash_request_ctx, waiter); /* get the hash context and then flush time */ next_flush = rctx->tag.expire; mcryptd_arm_flusher(cstate, get_delay(next_flush)); } return next_flush; } static int __init sha1_mb_mod_init(void) { int cpu; int err; struct mcryptd_alg_cstate *cpu_state; /* check for dependent cpu features */ if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_BMI2)) return -ENODEV; /* initialize multibuffer structures */ sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate); sha1_job_mgr_init = sha1_mb_mgr_init_avx2; sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2; sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2; sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2; if (!sha1_mb_alg_state.alg_cstate) return -ENOMEM; for_each_possible_cpu(cpu) { cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); cpu_state->next_flush = 0; cpu_state->next_seq_num = 0; cpu_state->flusher_engaged = false; INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); cpu_state->cpu = cpu; cpu_state->alg_state = &sha1_mb_alg_state; cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL); if (!cpu_state->mgr) goto err2; sha1_ctx_mgr_init(cpu_state->mgr); INIT_LIST_HEAD(&cpu_state->work_list); spin_lock_init(&cpu_state->work_lock); } sha1_mb_alg_state.flusher = &sha1_mb_flusher; err = crypto_register_shash(&sha1_mb_shash_alg); if (err) goto err2; err = crypto_register_ahash(&sha1_mb_async_alg); if (err) goto err1; return 0; err1: crypto_unregister_shash(&sha1_mb_shash_alg); err2: for_each_possible_cpu(cpu) { 
cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); kfree(cpu_state->mgr); } free_percpu(sha1_mb_alg_state.alg_cstate); return -ENODEV; } static void __exit sha1_mb_mod_fini(void) { int cpu; struct mcryptd_alg_cstate *cpu_state; crypto_unregister_ahash(&sha1_mb_async_alg); crypto_unregister_shash(&sha1_mb_shash_alg); for_each_possible_cpu(cpu) { cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); kfree(cpu_state->mgr); } free_percpu(sha1_mb_alg_state.alg_cstate); } module_init(sha1_mb_mod_init); module_exit(sha1_mb_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated"); MODULE_ALIAS_CRYPTO("sha1");
gpl-2.0
zjgeer/linux
drivers/acpi/acpica/exmisc.c
534
20161
/****************************************************************************** * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes * *****************************************************************************/ /* * Copyright (C) 2000 - 2015, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #include "amlresrc.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exmisc") /******************************************************************************* * * FUNCTION: acpi_ex_get_object_reference * * PARAMETERS: obj_desc - Create a reference to this object * return_desc - Where to store the reference * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Obtain and return a "reference" to the target object * Common code for the ref_of_op and the cond_ref_of_op. 
* ******************************************************************************/ acpi_status acpi_ex_get_object_reference(union acpi_operand_object *obj_desc, union acpi_operand_object **return_desc, struct acpi_walk_state *walk_state) { union acpi_operand_object *reference_obj; union acpi_operand_object *referenced_obj; ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc); *return_desc = NULL; switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) { case ACPI_DESC_TYPE_OPERAND: if (obj_desc->common.type != ACPI_TYPE_LOCAL_REFERENCE) { return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } /* * Must be a reference to a Local or Arg */ switch (obj_desc->reference.class) { case ACPI_REFCLASS_LOCAL: case ACPI_REFCLASS_ARG: case ACPI_REFCLASS_DEBUG: /* The referenced object is the pseudo-node for the local/arg */ referenced_obj = obj_desc->reference.object; break; default: ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X", obj_desc->reference.class)); return_ACPI_STATUS(AE_AML_INTERNAL); } break; case ACPI_DESC_TYPE_NAMED: /* * A named reference that has already been resolved to a Node */ referenced_obj = obj_desc; break; default: ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X", ACPI_GET_DESCRIPTOR_TYPE(obj_desc))); return_ACPI_STATUS(AE_TYPE); } /* Create a new reference object */ reference_obj = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE); if (!reference_obj) { return_ACPI_STATUS(AE_NO_MEMORY); } reference_obj->reference.class = ACPI_REFCLASS_REFOF; reference_obj->reference.object = referenced_obj; *return_desc = reference_obj; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Object %p Type [%s], returning Reference %p\n", obj_desc, acpi_ut_get_object_type_name(obj_desc), *return_desc)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_concat_template * * PARAMETERS: operand0 - First source object * operand1 - Second source object * actual_return_desc - Where to place the return object * 
walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Concatenate two resource templates * ******************************************************************************/ acpi_status acpi_ex_concat_template(union acpi_operand_object *operand0, union acpi_operand_object *operand1, union acpi_operand_object **actual_return_desc, struct acpi_walk_state *walk_state) { acpi_status status; union acpi_operand_object *return_desc; u8 *new_buf; u8 *end_tag; acpi_size length0; acpi_size length1; acpi_size new_length; ACPI_FUNCTION_TRACE(ex_concat_template); /* * Find the end_tag descriptor in each resource template. * Note1: returned pointers point TO the end_tag, not past it. * Note2: zero-length buffers are allowed; treated like one end_tag */ /* Get the length of the first resource template */ status = acpi_ut_get_resource_end_tag(operand0, &end_tag); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer); /* Get the length of the second resource template */ status = acpi_ut_get_resource_end_tag(operand1, &end_tag); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer); /* Combine both lengths, minimum size will be 2 for end_tag */ new_length = length0 + length1 + sizeof(struct aml_resource_end_tag); /* Create a new buffer object for the result (with one end_tag) */ return_desc = acpi_ut_create_buffer_object(new_length); if (!return_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* * Copy the templates to the new buffer, 0 first, then 1 follows. One * end_tag descriptor is copied from Operand1. 
*/ new_buf = return_desc->buffer.pointer; memcpy(new_buf, operand0->buffer.pointer, length0); memcpy(new_buf + length0, operand1->buffer.pointer, length1); /* Insert end_tag and set the checksum to zero, means "ignore checksum" */ new_buf[new_length - 1] = 0; new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1; /* Return the completed resource template */ *actual_return_desc = return_desc; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_do_concatenate * * PARAMETERS: operand0 - First source object * operand1 - Second source object * actual_return_desc - Where to place the return object * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Concatenate two objects OF THE SAME TYPE. * ******************************************************************************/ acpi_status acpi_ex_do_concatenate(union acpi_operand_object *operand0, union acpi_operand_object *operand1, union acpi_operand_object **actual_return_desc, struct acpi_walk_state *walk_state) { union acpi_operand_object *local_operand1 = operand1; union acpi_operand_object *return_desc; char *new_buf; acpi_status status; ACPI_FUNCTION_TRACE(ex_do_concatenate); /* * Convert the second operand if necessary. The first operand * determines the type of the second operand, (See the Data Types * section of the ACPI specification.) Both object types are * guaranteed to be either Integer/String/Buffer by the operand * resolution mechanism. 
*/ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(operand1, &local_operand1, 16); break; case ACPI_TYPE_STRING: status = acpi_ex_convert_to_string(operand1, &local_operand1, ACPI_IMPLICIT_CONVERT_HEX); break; case ACPI_TYPE_BUFFER: status = acpi_ex_convert_to_buffer(operand1, &local_operand1); break; default: ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X", operand0->common.type)); status = AE_AML_INTERNAL; } if (ACPI_FAILURE(status)) { goto cleanup; } /* * Both operands are now known to be the same object type * (Both are Integer, String, or Buffer), and we can now perform the * concatenation. */ /* * There are three cases to handle: * * 1) Two Integers concatenated to produce a new Buffer * 2) Two Strings concatenated to produce a new String * 3) Two Buffers concatenated to produce a new Buffer */ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: /* Result of two Integers is a Buffer */ /* Need enough buffer space for two integers */ return_desc = acpi_ut_create_buffer_object((acpi_size) ACPI_MUL_2 (acpi_gbl_integer_byte_width)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } new_buf = (char *)return_desc->buffer.pointer; /* Copy the first integer, LSB first */ memcpy(new_buf, &operand0->integer.value, acpi_gbl_integer_byte_width); /* Copy the second integer (LSB first) after the first */ memcpy(new_buf + acpi_gbl_integer_byte_width, &local_operand1->integer.value, acpi_gbl_integer_byte_width); break; case ACPI_TYPE_STRING: /* Result of two Strings is a String */ return_desc = acpi_ut_create_string_object(((acpi_size) operand0->string. 
length + local_operand1-> string.length)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } new_buf = return_desc->string.pointer; /* Concatenate the strings */ strcpy(new_buf, operand0->string.pointer); strcpy(new_buf + operand0->string.length, local_operand1->string.pointer); break; case ACPI_TYPE_BUFFER: /* Result of two Buffers is a Buffer */ return_desc = acpi_ut_create_buffer_object(((acpi_size) operand0->buffer. length + local_operand1-> buffer.length)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } new_buf = (char *)return_desc->buffer.pointer; /* Concatenate the buffers */ memcpy(new_buf, operand0->buffer.pointer, operand0->buffer.length); memcpy(new_buf + operand0->buffer.length, local_operand1->buffer.pointer, local_operand1->buffer.length); break; default: /* Invalid object type, should not happen here */ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X", operand0->common.type)); status = AE_AML_INTERNAL; goto cleanup; } *actual_return_desc = return_desc; cleanup: if (local_operand1 != operand1) { acpi_ut_remove_reference(local_operand1); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_do_math_op * * PARAMETERS: opcode - AML opcode * integer0 - Integer operand #0 * integer1 - Integer operand #1 * * RETURN: Integer result of the operation * * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the * math functions here is to prevent a lot of pointer dereferencing * to obtain the operands. 
* ******************************************************************************/ u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1) { ACPI_FUNCTION_ENTRY(); switch (opcode) { case AML_ADD_OP: /* Add (Integer0, Integer1, Result) */ return (integer0 + integer1); case AML_BIT_AND_OP: /* And (Integer0, Integer1, Result) */ return (integer0 & integer1); case AML_BIT_NAND_OP: /* NAnd (Integer0, Integer1, Result) */ return (~(integer0 & integer1)); case AML_BIT_OR_OP: /* Or (Integer0, Integer1, Result) */ return (integer0 | integer1); case AML_BIT_NOR_OP: /* NOr (Integer0, Integer1, Result) */ return (~(integer0 | integer1)); case AML_BIT_XOR_OP: /* XOr (Integer0, Integer1, Result) */ return (integer0 ^ integer1); case AML_MULTIPLY_OP: /* Multiply (Integer0, Integer1, Result) */ return (integer0 * integer1); case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */ /* * We need to check if the shiftcount is larger than the integer bit * width since the behavior of this is not well-defined in the C language. */ if (integer1 >= acpi_gbl_integer_bit_width) { return (0); } return (integer0 << integer1); case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */ /* * We need to check if the shiftcount is larger than the integer bit * width since the behavior of this is not well-defined in the C language. */ if (integer1 >= acpi_gbl_integer_bit_width) { return (0); } return (integer0 >> integer1); case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */ return (integer0 - integer1); default: return (0); } } /******************************************************************************* * * FUNCTION: acpi_ex_do_logical_numeric_op * * PARAMETERS: opcode - AML opcode * integer0 - Integer operand #0 * integer1 - Integer operand #1 * logical_result - TRUE/FALSE result of the operation * * RETURN: Status * * DESCRIPTION: Execute a logical "Numeric" AML opcode. 
For these Numeric * operators (LAnd and LOr), both operands must be integers. * * Note: cleanest machine code seems to be produced by the code * below, rather than using statements of the form: * Result = (Integer0 && Integer1); * ******************************************************************************/ acpi_status acpi_ex_do_logical_numeric_op(u16 opcode, u64 integer0, u64 integer1, u8 *logical_result) { acpi_status status = AE_OK; u8 local_result = FALSE; ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op); switch (opcode) { case AML_LAND_OP: /* LAnd (Integer0, Integer1) */ if (integer0 && integer1) { local_result = TRUE; } break; case AML_LOR_OP: /* LOr (Integer0, Integer1) */ if (integer0 || integer1) { local_result = TRUE; } break; default: status = AE_AML_INTERNAL; break; } /* Return the logical result and status */ *logical_result = local_result; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_do_logical_op * * PARAMETERS: opcode - AML opcode * operand0 - operand #0 * operand1 - operand #1 * logical_result - TRUE/FALSE result of the operation * * RETURN: Status * * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the * functions here is to prevent a lot of pointer dereferencing * to obtain the operands and to simplify the generation of the * logical value. For the Numeric operators (LAnd and LOr), both * operands must be integers. For the other logical operators, * operands can be any combination of Integer/String/Buffer. The * first operand determines the type to which the second operand * will be converted. 
* * Note: cleanest machine code seems to be produced by the code * below, rather than using statements of the form: * Result = (Operand0 == Operand1); * ******************************************************************************/ acpi_status acpi_ex_do_logical_op(u16 opcode, union acpi_operand_object *operand0, union acpi_operand_object *operand1, u8 * logical_result) { union acpi_operand_object *local_operand1 = operand1; u64 integer0; u64 integer1; u32 length0; u32 length1; acpi_status status = AE_OK; u8 local_result = FALSE; int compare; ACPI_FUNCTION_TRACE(ex_do_logical_op); /* * Convert the second operand if necessary. The first operand * determines the type of the second operand, (See the Data Types * section of the ACPI 3.0+ specification.) Both object types are * guaranteed to be either Integer/String/Buffer by the operand * resolution mechanism. */ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(operand1, &local_operand1, 16); break; case ACPI_TYPE_STRING: status = acpi_ex_convert_to_string(operand1, &local_operand1, ACPI_IMPLICIT_CONVERT_HEX); break; case ACPI_TYPE_BUFFER: status = acpi_ex_convert_to_buffer(operand1, &local_operand1); break; default: status = AE_AML_INTERNAL; break; } if (ACPI_FAILURE(status)) { goto cleanup; } /* * Two cases: 1) Both Integers, 2) Both Strings or Buffers */ if (operand0->common.type == ACPI_TYPE_INTEGER) { /* * 1) Both operands are of type integer * Note: local_operand1 may have changed above */ integer0 = operand0->integer.value; integer1 = local_operand1->integer.value; switch (opcode) { case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */ if (integer0 == integer1) { local_result = TRUE; } break; case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */ if (integer0 > integer1) { local_result = TRUE; } break; case AML_LLESS_OP: /* LLess (Operand0, Operand1) */ if (integer0 < integer1) { local_result = TRUE; } break; default: status = AE_AML_INTERNAL; break; } } else { /* 
* 2) Both operands are Strings or both are Buffers * Note: Code below takes advantage of common Buffer/String * object fields. local_operand1 may have changed above. Use * memcmp to handle nulls in buffers. */ length0 = operand0->buffer.length; length1 = local_operand1->buffer.length; /* Lexicographic compare: compare the data bytes */ compare = memcmp(operand0->buffer.pointer, local_operand1->buffer.pointer, (length0 > length1) ? length1 : length0); switch (opcode) { case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */ /* Length and all bytes must be equal */ if ((length0 == length1) && (compare == 0)) { /* Length and all bytes match ==> TRUE */ local_result = TRUE; } break; case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */ if (compare > 0) { local_result = TRUE; goto cleanup; /* TRUE */ } if (compare < 0) { goto cleanup; /* FALSE */ } /* Bytes match (to shortest length), compare lengths */ if (length0 > length1) { local_result = TRUE; } break; case AML_LLESS_OP: /* LLess (Operand0, Operand1) */ if (compare > 0) { goto cleanup; /* FALSE */ } if (compare < 0) { local_result = TRUE; goto cleanup; /* TRUE */ } /* Bytes match (to shortest length), compare lengths */ if (length0 < length1) { local_result = TRUE; } break; default: status = AE_AML_INTERNAL; break; } } cleanup: /* New object was created if implicit conversion performed - delete */ if (local_operand1 != operand1) { acpi_ut_remove_reference(local_operand1); } /* Return the logical result and status */ *logical_result = local_result; return_ACPI_STATUS(status); }
gpl-2.0
chrbayer/linux-sunxi
drivers/usb/host/ehci-omap.c
1814
8689
/* * ehci-omap.c - driver for USBHOST on OMAP3/4 processors * * Bus Glue for the EHCI controllers in OMAP3/4 * Tested on several OMAP3 boards, and OMAP4 Pandaboard * * Copyright (C) 2007-2013 Texas Instruments, Inc. * Author: Vikram Pandita <vikram.pandita@ti.com> * Author: Anand Gadiyar <gadiyar@ti.com> * Author: Keshava Munegowda <keshava_mgowda@ti.com> * Author: Roger Quadros <rogerq@ti.com> * * Copyright (C) 2009 Nokia Corporation * Contact: Felipe Balbi <felipe.balbi@nokia.com> * * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/usb/ulpi.h> #include <linux/pm_runtime.h> #include <linux/gpio.h> #include <linux/clk.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/of.h> #include <linux/dma-mapping.h> #include "ehci.h" #include <linux/platform_data/usb-omap.h> /* EHCI Register Set */ #define EHCI_INSNREG04 (0xA0) #define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5) #define EHCI_INSNREG05_ULPI (0xA4) #define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31 #define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24 #define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22 #define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16 #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 #define DRIVER_DESC "OMAP-EHCI Host Controller driver" static const char hcd_name[] = "ehci-omap"; /*-------------------------------------------------------------------------*/ struct omap_hcd { struct usb_phy *phy[OMAP3_HS_USB_PORTS]; /* one PHY for each port */ int nports; }; static inline void ehci_write(void __iomem *base, u32 reg, u32 val) { __raw_writel(val, base + reg); } static inline u32 ehci_read(void __iomem *base, u32 reg) { return __raw_readl(base + reg); } /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ static struct hc_driver __read_mostly ehci_omap_hc_driver; static const struct ehci_driver_overrides ehci_omap_overrides __initdata = { .extra_priv_size = sizeof(struct omap_hcd), }; /** * ehci_hcd_omap_probe - initialize TI-based HCDs * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug 
entry's driver_data. */ static int ehci_hcd_omap_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev); struct resource *res; struct usb_hcd *hcd; void __iomem *regs; int ret; int irq; int i; struct omap_hcd *omap; if (usb_disabled()) return -ENODEV; if (!dev->parent) { dev_err(dev, "Missing parent device\n"); return -ENODEV; } /* For DT boot, get platform data from parent. i.e. usbhshost */ if (dev->of_node) { pdata = dev_get_platdata(dev->parent); dev->platform_data = pdata; } if (!pdata) { dev_err(dev, "Missing platform data\n"); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "EHCI irq failed\n"); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); /* * Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. 
*/ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; ret = -ENODEV; hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, dev_name(dev)); if (!hcd) { dev_err(dev, "Failed to create HCD\n"); return -ENOMEM; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = regs; hcd_to_ehci(hcd)->caps = regs; omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv; omap->nports = pdata->nports; platform_set_drvdata(pdev, hcd); /* get the PHY devices if needed */ for (i = 0 ; i < omap->nports ; i++) { struct usb_phy *phy; /* get the PHY device */ if (dev->of_node) phy = devm_usb_get_phy_by_phandle(dev, "phys", i); else phy = devm_usb_get_phy_dev(dev, i); if (IS_ERR(phy)) { /* Don't bail out if PHY is not absolutely necessary */ if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) continue; ret = PTR_ERR(phy); dev_err(dev, "Can't get PHY device for port %d: %d\n", i, ret); goto err_phy; } omap->phy[i] = phy; if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) { usb_phy_init(omap->phy[i]); /* bring PHY out of suspend */ usb_phy_set_suspend(omap->phy[i], 0); } } pm_runtime_enable(dev); pm_runtime_get_sync(dev); /* * An undocumented "feature" in the OMAP3 EHCI controller, * causes suspended ports to be taken out of suspend when * the USBCMD.Run/Stop bit is cleared (for example when * we do ehci_bus_suspend). * This breaks suspend-resume if the root-hub is allowed * to suspend. Writing 1 to this undocumented register bit * disables this feature and restores normal behavior. */ ehci_write(regs, EHCI_INSNREG04, EHCI_INSNREG04_DISABLE_UNSUSPEND); ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) { dev_err(dev, "failed to add hcd with err %d\n", ret); goto err_pm_runtime; } device_wakeup_enable(hcd->self.controller); /* * Bring PHYs out of reset for non PHY modes. * Even though HSIC mode is a PHY-less mode, the reset * line exists between the chips and can be modelled * as a PHY device for reset control. 
*/ for (i = 0; i < omap->nports; i++) { if (!omap->phy[i] || pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) continue; usb_phy_init(omap->phy[i]); /* bring PHY out of suspend */ usb_phy_set_suspend(omap->phy[i], 0); } return 0; err_pm_runtime: pm_runtime_put_sync(dev); err_phy: for (i = 0; i < omap->nports; i++) { if (omap->phy[i]) usb_phy_shutdown(omap->phy[i]); } usb_put_hcd(hcd); return ret; } /** * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs * @pdev: USB Host Controller being removed * * Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. */ static int ehci_hcd_omap_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd = dev_get_drvdata(dev); struct omap_hcd *omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv; int i; usb_remove_hcd(hcd); for (i = 0; i < omap->nports; i++) { if (omap->phy[i]) usb_phy_shutdown(omap->phy[i]); } usb_put_hcd(hcd); pm_runtime_put_sync(dev); pm_runtime_disable(dev); return 0; } static const struct of_device_id omap_ehci_dt_ids[] = { { .compatible = "ti,ehci-omap" }, { } }; MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids); static struct platform_driver ehci_hcd_omap_driver = { .probe = ehci_hcd_omap_probe, .remove = ehci_hcd_omap_remove, .shutdown = usb_hcd_platform_shutdown, /*.suspend = ehci_hcd_omap_suspend, */ /*.resume = ehci_hcd_omap_resume, */ .driver = { .name = hcd_name, .of_match_table = omap_ehci_dt_ids, } }; /*-------------------------------------------------------------------------*/ static int __init ehci_omap_init(void) { if (usb_disabled()) return -ENODEV; pr_info("%s: " DRIVER_DESC "\n", hcd_name); ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides); return platform_driver_register(&ehci_hcd_omap_driver); } module_init(ehci_omap_init); static void __exit ehci_omap_cleanup(void) { platform_driver_unregister(&ehci_hcd_omap_driver); } 
module_exit(ehci_omap_cleanup); MODULE_ALIAS("platform:ehci-omap"); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>"); MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_samsung_espresso10
drivers/net/phy/mdio-gpio.c
1814
6442
/* * GPIO based MDIO bitbang driver. * Supports OpenFirmware. * * Copyright (c) 2008 CSE Semaphore Belgium. * by Laurent Pinchart <laurentp@cse-semaphore.com> * * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt> * * Based on earlier work by * * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/mdio-gpio.h> #ifdef CONFIG_OF_GPIO #include <linux/of_gpio.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #endif struct mdio_gpio_info { struct mdiobb_ctrl ctrl; int mdc, mdio; }; static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); if (dir) gpio_direction_output(bitbang->mdio, 1); else gpio_direction_input(bitbang->mdio); } static int mdio_get(struct mdiobb_ctrl *ctrl) { struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); return gpio_get_value(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) { struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); gpio_set_value(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) { struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); gpio_set_value(bitbang->mdc, what); } static struct mdiobb_ops mdio_gpio_ops = { .owner = THIS_MODULE, .set_mdc = mdc_set, .set_mdio_dir = mdio_dir, .set_mdio_data = mdio_set, .get_mdio_data = mdio_get, }; static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, struct 
mdio_gpio_platform_data *pdata, int bus_id) { struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; int i; bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); if (!bitbang) goto out; bitbang->ctrl.ops = &mdio_gpio_ops; bitbang->mdc = pdata->mdc; bitbang->mdio = pdata->mdio; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) goto out_free_bitbang; new_bus->name = "GPIO Bitbanged MDIO", new_bus->phy_mask = pdata->phy_mask; new_bus->irq = pdata->irqs; new_bus->parent = dev; if (new_bus->phy_mask == ~0) goto out_free_bus; for (i = 0; i < PHY_MAX_ADDR; i++) if (!new_bus->irq[i]) new_bus->irq[i] = PHY_POLL; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", bus_id); if (gpio_request(bitbang->mdc, "mdc")) goto out_free_bus; if (gpio_request(bitbang->mdio, "mdio")) goto out_free_mdc; gpio_direction_output(bitbang->mdc, 0); dev_set_drvdata(dev, new_bus); return new_bus; out_free_mdc: gpio_free(bitbang->mdc); out_free_bus: free_mdio_bitbang(new_bus); out_free_bitbang: kfree(bitbang); out: return NULL; } static void mdio_gpio_bus_deinit(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); struct mdio_gpio_info *bitbang = bus->priv; dev_set_drvdata(dev, NULL); gpio_free(bitbang->mdio); gpio_free(bitbang->mdc); free_mdio_bitbang(bus); kfree(bitbang); } static void __devexit mdio_gpio_bus_destroy(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); mdiobus_unregister(bus); mdio_gpio_bus_deinit(dev); } static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; struct mii_bus *new_bus; int ret; if (!pdata) return -ENODEV; new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); if (!new_bus) return -ENODEV; ret = mdiobus_register(new_bus); if (ret) mdio_gpio_bus_deinit(&pdev->dev); return ret; } static int __devexit mdio_gpio_remove(struct platform_device *pdev) { mdio_gpio_bus_destroy(&pdev->dev); return 0; } #ifdef CONFIG_OF_GPIO static int __devinit mdio_ofgpio_probe(struct 
platform_device *ofdev) { struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; int ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; ret = of_get_gpio(ofdev->dev.of_node, 0); if (ret < 0) goto out_free; pdata->mdc = ret; ret = of_get_gpio(ofdev->dev.of_node, 1); if (ret < 0) goto out_free; pdata->mdio = ret; new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); if (!new_bus) goto out_free; ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) mdio_gpio_bus_deinit(&ofdev->dev); return ret; out_free: kfree(pdata); return -ENODEV; } static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev) { mdio_gpio_bus_destroy(&ofdev->dev); kfree(ofdev->dev.platform_data); return 0; } static struct of_device_id mdio_ofgpio_match[] = { { .compatible = "virtual,mdio-gpio", }, {}, }; MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); static struct platform_driver mdio_ofgpio_driver = { .driver = { .name = "mdio-ofgpio", .owner = THIS_MODULE, .of_match_table = mdio_ofgpio_match, }, .probe = mdio_ofgpio_probe, .remove = __devexit_p(mdio_ofgpio_remove), }; static inline int __init mdio_ofgpio_init(void) { return platform_driver_register(&mdio_ofgpio_driver); } static inline void __exit mdio_ofgpio_exit(void) { platform_driver_unregister(&mdio_ofgpio_driver); } #else static inline int __init mdio_ofgpio_init(void) { return 0; } static inline void __exit mdio_ofgpio_exit(void) { } #endif /* CONFIG_OF_GPIO */ static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, .remove = __devexit_p(mdio_gpio_remove), .driver = { .name = "mdio-gpio", .owner = THIS_MODULE, }, }; static int __init mdio_gpio_init(void) { int ret; ret = mdio_ofgpio_init(); if (ret) return ret; ret = platform_driver_register(&mdio_gpio_driver); if (ret) mdio_ofgpio_exit(); return ret; } module_init(mdio_gpio_init); static void __exit mdio_gpio_exit(void) { platform_driver_unregister(&mdio_gpio_driver); mdio_ofgpio_exit(); } 
module_exit(mdio_gpio_exit); MODULE_ALIAS("platform:mdio-gpio"); MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic driver for MDIO bus emulation using GPIO");
gpl-2.0
LiquidSmooth-Devices/android_kernel_samsung_klimtwifi
drivers/input/touchscreen/eeti_ts.c
2070
8061
/* * Touch Screen driver for EETI's I2C connected touch screen panels * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * See EETI's software guide for the protocol specification: * http://home.eeti.com.tw/web20/eg/guide.htm * * Based on migor_ts.c * Copyright (c) 2008 Magnus Damm * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com> * * This file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/timer.h> #include <linux/gpio.h> #include <linux/input/eeti_ts.h> #include <linux/slab.h> static bool flip_x; module_param(flip_x, bool, 0644); MODULE_PARM_DESC(flip_x, "flip x coordinate"); static bool flip_y; module_param(flip_y, bool, 0644); MODULE_PARM_DESC(flip_y, "flip y coordinate"); struct eeti_ts_priv { struct i2c_client *client; struct input_dev *input; struct work_struct work; struct mutex mutex; int irq_gpio, irq, irq_active_high; }; #define EETI_TS_BITDEPTH (11) #define EETI_MAXVAL ((1 << (EETI_TS_BITDEPTH + 1)) - 1) #define REPORT_BIT_PRESSED (1 << 0) #define REPORT_BIT_AD0 (1 << 1) #define REPORT_BIT_AD1 (1 << 2) #define REPORT_BIT_HAS_PRESSURE (1 << 6) #define REPORT_RES_BITS(v) (((v) >> 1) + EETI_TS_BITDEPTH) static inline int eeti_ts_irq_active(struct 
eeti_ts_priv *priv) { return gpio_get_value(priv->irq_gpio) == priv->irq_active_high; } static void eeti_ts_read(struct work_struct *work) { char buf[6]; unsigned int x, y, res, pressed, to = 100; struct eeti_ts_priv *priv = container_of(work, struct eeti_ts_priv, work); mutex_lock(&priv->mutex); while (eeti_ts_irq_active(priv) && --to) i2c_master_recv(priv->client, buf, sizeof(buf)); if (!to) { dev_err(&priv->client->dev, "unable to clear IRQ - line stuck?\n"); goto out; } /* drop non-report packets */ if (!(buf[0] & 0x80)) goto out; pressed = buf[0] & REPORT_BIT_PRESSED; res = REPORT_RES_BITS(buf[0] & (REPORT_BIT_AD0 | REPORT_BIT_AD1)); x = buf[2] | (buf[1] << 8); y = buf[4] | (buf[3] << 8); /* fix the range to 11 bits */ x >>= res - EETI_TS_BITDEPTH; y >>= res - EETI_TS_BITDEPTH; if (flip_x) x = EETI_MAXVAL - x; if (flip_y) y = EETI_MAXVAL - y; if (buf[0] & REPORT_BIT_HAS_PRESSURE) input_report_abs(priv->input, ABS_PRESSURE, buf[5]); input_report_abs(priv->input, ABS_X, x); input_report_abs(priv->input, ABS_Y, y); input_report_key(priv->input, BTN_TOUCH, !!pressed); input_sync(priv->input); out: mutex_unlock(&priv->mutex); } static irqreturn_t eeti_ts_isr(int irq, void *dev_id) { struct eeti_ts_priv *priv = dev_id; /* postpone I2C transactions as we are atomic */ schedule_work(&priv->work); return IRQ_HANDLED; } static void eeti_ts_start(struct eeti_ts_priv *priv) { enable_irq(priv->irq); /* Read the events once to arm the IRQ */ eeti_ts_read(&priv->work); } static void eeti_ts_stop(struct eeti_ts_priv *priv) { disable_irq(priv->irq); cancel_work_sync(&priv->work); } static int eeti_ts_open(struct input_dev *dev) { struct eeti_ts_priv *priv = input_get_drvdata(dev); eeti_ts_start(priv); return 0; } static void eeti_ts_close(struct input_dev *dev) { struct eeti_ts_priv *priv = input_get_drvdata(dev); eeti_ts_stop(priv); } static int __devinit eeti_ts_probe(struct i2c_client *client, const struct i2c_device_id *idp) { struct eeti_ts_platform_data *pdata = 
client->dev.platform_data; struct eeti_ts_priv *priv; struct input_dev *input; unsigned int irq_flags; int err = -ENOMEM; /* * In contrast to what's described in the datasheet, there seems * to be no way of probing the presence of that device using I2C * commands. So we need to blindly believe it is there, and wait * for interrupts to occur. */ priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(&client->dev, "failed to allocate driver data\n"); goto err0; } mutex_init(&priv->mutex); input = input_allocate_device(); if (!input) { dev_err(&client->dev, "Failed to allocate input device.\n"); goto err1; } input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input, ABS_X, 0, EETI_MAXVAL, 0, 0); input_set_abs_params(input, ABS_Y, 0, EETI_MAXVAL, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, 0xff, 0, 0); input->name = client->name; input->id.bustype = BUS_I2C; input->dev.parent = &client->dev; input->open = eeti_ts_open; input->close = eeti_ts_close; priv->client = client; priv->input = input; priv->irq_gpio = pdata->irq_gpio; priv->irq = gpio_to_irq(pdata->irq_gpio); err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name); if (err < 0) goto err1; if (pdata) priv->irq_active_high = pdata->irq_active_high; irq_flags = priv->irq_active_high ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; INIT_WORK(&priv->work, eeti_ts_read); i2c_set_clientdata(client, priv); input_set_drvdata(input, priv); err = input_register_device(input); if (err) goto err2; err = request_irq(priv->irq, eeti_ts_isr, irq_flags, client->name, priv); if (err) { dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); goto err3; } /* * Disable the device for now. It will be enabled once the * input device is opened. 
*/ eeti_ts_stop(priv); device_init_wakeup(&client->dev, 0); return 0; err3: input_unregister_device(input); input = NULL; /* so we dont try to free it below */ err2: gpio_free(pdata->irq_gpio); err1: input_free_device(input); kfree(priv); err0: return err; } static int __devexit eeti_ts_remove(struct i2c_client *client) { struct eeti_ts_priv *priv = i2c_get_clientdata(client); free_irq(priv->irq, priv); /* * eeti_ts_stop() leaves IRQ disabled. We need to re-enable it * so that device still works if we reload the driver. */ enable_irq(priv->irq); input_unregister_device(priv->input); kfree(priv); return 0; } #ifdef CONFIG_PM static int eeti_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct eeti_ts_priv *priv = i2c_get_clientdata(client); struct input_dev *input_dev = priv->input; mutex_lock(&input_dev->mutex); if (input_dev->users) eeti_ts_stop(priv); mutex_unlock(&input_dev->mutex); if (device_may_wakeup(&client->dev)) enable_irq_wake(priv->irq); return 0; } static int eeti_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct eeti_ts_priv *priv = i2c_get_clientdata(client); struct input_dev *input_dev = priv->input; if (device_may_wakeup(&client->dev)) disable_irq_wake(priv->irq); mutex_lock(&input_dev->mutex); if (input_dev->users) eeti_ts_start(priv); mutex_unlock(&input_dev->mutex); return 0; } static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume); #endif static const struct i2c_device_id eeti_ts_id[] = { { "eeti_ts", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, eeti_ts_id); static struct i2c_driver eeti_ts_driver = { .driver = { .name = "eeti_ts", #ifdef CONFIG_PM .pm = &eeti_ts_pm, #endif }, .probe = eeti_ts_probe, .remove = __devexit_p(eeti_ts_remove), .id_table = eeti_ts_id, }; module_i2c_driver(eeti_ts_driver); MODULE_DESCRIPTION("EETI Touchscreen driver"); MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_LICENSE("GPL");
gpl-2.0
yajnab/android_kernel_samsung_baffin
arch/arm/mach-s3c64xx/mach-anw6410.c
2070
5968
/* linux/arch/arm/mach-s3c64xx/mach-anw6410.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * Copyright 2009 Kwangwoo Lee * Kwangwoo Lee <kwangwoo.lee@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/dm9000.h> #include <video/platform_lcd.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/regs-fb.h> #include <mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <plat/regs-serial.h> #include <plat/iic.h> #include <plat/fb.h> #include <mach/s3c6410.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <mach/regs-gpio.h> #include <mach/regs-modem.h> /* DM9000 */ #define ANW6410_PA_DM9000 (0x18000000) /* A hardware buffer to control external devices is mapped at 0x30000000. * It can not be read. So current status must be kept in anw6410_extdev_status. */ #define ANW6410_VA_EXTDEV S3C_ADDR(0x02000000) #define ANW6410_PA_EXTDEV (0x30000000) #define ANW6410_EN_DM9000 (1<<11) #define ANW6410_EN_LCD (1<<14) static __u32 anw6410_extdev_status; static struct s3c2410_uartcfg anw6410_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, [1] = { .hwport = 1, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, }; /* framebuffer and LCD setup. 
*/ static void __init anw6410_lcd_mode_set(void) { u32 tmp; /* set the LCD type */ tmp = __raw_readl(S3C64XX_SPCON); tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK; tmp |= S3C64XX_SPCON_LCD_SEL_RGB; __raw_writel(tmp, S3C64XX_SPCON); /* remove the LCD bypass */ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON); tmp &= ~MIFPCON_LCD_BYPASS; __raw_writel(tmp, S3C64XX_MODEM_MIFPCON); } /* GPF1 = LCD panel power * GPF4 = LCD backlight control */ static void anw6410_lcd_power_set(struct plat_lcd_data *pd, unsigned int power) { if (power) { anw6410_extdev_status |= (ANW6410_EN_LCD << 16); __raw_writel(anw6410_extdev_status, ANW6410_VA_EXTDEV); gpio_direction_output(S3C64XX_GPF(1), 1); gpio_direction_output(S3C64XX_GPF(4), 1); } else { anw6410_extdev_status &= ~(ANW6410_EN_LCD << 16); __raw_writel(anw6410_extdev_status, ANW6410_VA_EXTDEV); gpio_direction_output(S3C64XX_GPF(1), 0); gpio_direction_output(S3C64XX_GPF(4), 0); } } static struct plat_lcd_data anw6410_lcd_power_data = { .set_power = anw6410_lcd_power_set, }; static struct platform_device anw6410_lcd_powerdev = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &anw6410_lcd_power_data, }; static struct s3c_fb_pd_win anw6410_fb_win0 = { /* this is to ensure we use win0 */ .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 16, }; /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &anw6410_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }; /* DM9000AEP 10/100 ethernet controller */ static void __init anw6410_dm9000_enable(void) { anw6410_extdev_status |= (ANW6410_EN_DM9000 << 16); __raw_writel(anw6410_extdev_status, ANW6410_VA_EXTDEV); } static struct resource 
anw6410_dm9000_resource[] = { [0] = { .start = ANW6410_PA_DM9000, .end = ANW6410_PA_DM9000 + 3, .flags = IORESOURCE_MEM, }, [1] = { .start = ANW6410_PA_DM9000 + 4, .end = ANW6410_PA_DM9000 + 4 + 500, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_EINT(15), .end = IRQ_EINT(15), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH, }, }; static struct dm9000_plat_data anw6410_dm9000_pdata = { .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), /* dev_addr can be set to provide hwaddr. */ }; static struct platform_device anw6410_device_eth = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(anw6410_dm9000_resource), .resource = anw6410_dm9000_resource, .dev = { .platform_data = &anw6410_dm9000_pdata, }, }; static struct map_desc anw6410_iodesc[] __initdata = { { .virtual = (unsigned long)ANW6410_VA_EXTDEV, .pfn = __phys_to_pfn(ANW6410_PA_EXTDEV), .length = SZ_64K, .type = MT_DEVICE, }, }; static struct platform_device *anw6410_devices[] __initdata = { &s3c_device_fb, &anw6410_lcd_powerdev, &anw6410_device_eth, }; static void __init anw6410_map_io(void) { s3c64xx_init_io(anw6410_iodesc, ARRAY_SIZE(anw6410_iodesc)); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(anw6410_uartcfgs, ARRAY_SIZE(anw6410_uartcfgs)); anw6410_lcd_mode_set(); } static void __init anw6410_machine_init(void) { s3c_fb_set_platdata(&anw6410_lcd_pdata); gpio_request(S3C64XX_GPF(1), "panel power"); gpio_request(S3C64XX_GPF(4), "LCD backlight"); anw6410_dm9000_enable(); platform_add_devices(anw6410_devices, ARRAY_SIZE(anw6410_devices)); } MACHINE_START(ANW6410, "A&W6410") /* Maintainer: Kwangwoo Lee <kwangwoo.lee@gmail.com> */ .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = anw6410_map_io, .init_machine = anw6410_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
AnguisCaptor/PwnKernel_Shamu_M
arch/sparc/kernel/traps_64.c
2070
80075
/* arch/sparc64/kernel/traps.c * * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net) * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com) */ /* * I like traps on v9, :)))) */ #include <linux/module.h> #include <linux/sched.h> #include <linux/linkage.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/kdebug.h> #include <linux/ftrace.h> #include <linux/reboot.h> #include <linux/gfp.h> #include <asm/smp.h> #include <asm/delay.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/uaccess.h> #include <asm/fpumacro.h> #include <asm/lsu.h> #include <asm/dcu.h> #include <asm/estate.h> #include <asm/chafsr.h> #include <asm/sfafsr.h> #include <asm/psrcompat.h> #include <asm/processor.h> #include <asm/timer.h> #include <asm/head.h> #include <asm/prom.h> #include <asm/memctrl.h> #include <asm/cacheflush.h> #include "entry.h" #include "kstack.h" /* When an irrecoverable trap occurs at tl > 0, the trap entry * code logs the trap state registers at every level in the trap * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout * is as follows: */ struct tl1_traplog { struct { unsigned long tstate; unsigned long tpc; unsigned long tnpc; unsigned long tt; } trapstack[4]; unsigned long tl; }; static void dump_tl1_traplog(struct tl1_traplog *p) { int i, limit; printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, " "dumping track stack.\n", p->tl); limit = (tlb_type == hypervisor) ? 
2 : 4; for (i = 0; i < limit; i++) { printk(KERN_EMERG "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " "TNPC[%016lx] TT[%lx]\n", i + 1, p->trapstack[i].tstate, p->trapstack[i].tpc, p->trapstack[i].tnpc, p->trapstack[i].tt); printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); } } void bad_trap(struct pt_regs *regs, long lvl) { char buffer[32]; siginfo_t info; if (notify_die(DIE_TRAP, "bad trap", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; if (lvl < 0x100) { sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl); die_if_kernel(buffer, regs); } lvl -= 0x100; if (regs->tstate & TSTATE_PRIV) { sprintf(buffer, "Kernel bad sw trap %lx", lvl); die_if_kernel(buffer, regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLTRP; info.si_addr = (void __user *)regs->tpc; info.si_trapno = lvl; force_sig_info(SIGILL, &info, current); } void bad_trap_tl1(struct pt_regs *regs, long lvl) { char buffer[32]; if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sprintf (buffer, "Bad trap %lx at tl>0", lvl); die_if_kernel (buffer, regs); } #ifdef CONFIG_DEBUG_BUGVERBOSE void do_BUG(const char *file, int line) { bust_spinlocks(1); printk("kernel BUG at %s:%d!\n", file, line); } EXPORT_SYMBOL(do_BUG); #endif static DEFINE_SPINLOCK(dimm_handler_lock); static dimm_printer_t dimm_handler; static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen) { unsigned long flags; int ret = -ENODEV; spin_lock_irqsave(&dimm_handler_lock, flags); if (dimm_handler) { ret = dimm_handler(synd_code, paddr, buf, buflen); } else if (tlb_type == spitfire) { if (prom_getunumber(synd_code, paddr, buf, buflen) == -1) ret = -EINVAL; else ret = 0; } else ret = -ENODEV; spin_unlock_irqrestore(&dimm_handler_lock, flags); return ret; } int register_dimm_printer(dimm_printer_t func) { unsigned 
long flags; int ret = 0; spin_lock_irqsave(&dimm_handler_lock, flags); if (!dimm_handler) dimm_handler = func; else ret = -EEXIST; spin_unlock_irqrestore(&dimm_handler_lock, flags); return ret; } EXPORT_SYMBOL_GPL(register_dimm_printer); void unregister_dimm_printer(dimm_printer_t func) { unsigned long flags; spin_lock_irqsave(&dimm_handler_lock, flags); if (dimm_handler == func) dimm_handler = NULL; spin_unlock_irqrestore(&dimm_handler_lock, flags); } EXPORT_SYMBOL_GPL(unregister_dimm_printer); void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { printk("spitfire_insn_access_exception: SFSR[%016lx] " "SFAR[%016lx], going.\n", sfsr, sfar); die_if_kernel("Iax", regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); spitfire_insn_access_exception(regs, sfsr, sfar); } void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { unsigned short type = (type_ctx >> 16); unsigned short ctx = (type_ctx & 0xffff); siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { printk("sun4v_insn_access_exception: ADDR[%016lx] " "CTX[%04x] TYPE[%04x], going.\n", addr, ctx, type); die_if_kernel("Iax", regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; 
regs->tnpc &= 0xffffffff; } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *) addr; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sun4v_insn_access_exception(regs, addr, type_ctx); } void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { /* Ouch, somebody is trying VM hole tricks on us... */ #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", regs->tpc, entry->fixup); #endif regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } /* Shit... 
*/ printk("spitfire_data_access_exception: SFSR[%016lx] " "SFAR[%016lx], going.\n", sfsr, sfar); die_if_kernel("Dax", regs); } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); spitfire_data_access_exception(regs, sfsr, sfar); } void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { unsigned short type = (type_ctx >> 16); unsigned short ctx = (type_ctx & 0xffff); siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { /* Ouch, somebody is trying VM hole tricks on us... 
*/ #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", regs->tpc, entry->fixup); #endif regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } printk("sun4v_data_access_exception: ADDR[%016lx] " "CTX[%04x] TYPE[%04x], going.\n", addr, ctx, type); die_if_kernel("Dax", regs); } if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; info.si_addr = (void __user *) addr; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); } void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) return; dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sun4v_data_access_exception(regs, addr, type_ctx); } #ifdef CONFIG_PCI #include "pci_impl.h" #endif /* When access exceptions happen, we must do this. */ static void spitfire_clean_and_reenable_l1_caches(void) { unsigned long va; if (tlb_type != spitfire) BUG(); /* Clean 'em. */ for (va = 0; va < (PAGE_SIZE << 1); va += 32) { spitfire_put_icache_tag(va, 0x0); spitfire_put_dcache_tag(va, 0x0); } /* Re-enable in LSU. 
*/ __asm__ __volatile__("flush %%g6\n\t" "membar #Sync\n\t" "stxa %0, [%%g0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), "i" (ASI_LSU_CONTROL) : "memory"); } static void spitfire_enable_estate_errors(void) { __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (ESTATE_ERR_ALL), "i" (ASI_ESTATE_ERROR_EN)); } static char ecc_syndrome_table[] = { 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49, 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a, 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48, 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c, 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48, 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29, 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b, 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48, 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48, 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e, 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b, 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48, 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36, 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48, 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48, 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b, 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32, 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48, 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b, 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48, 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b, 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49, 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48, 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b, 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48, 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b, 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b, 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a }; static 
char *syndrome_unknown = "<Unknown>"; static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit) { unsigned short scode; char memmod_str[64], *p; if (udbl & bit) { scode = ecc_syndrome_table[udbl & 0xff]; if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0) p = syndrome_unknown; else p = memmod_str; printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] " "Memory Module \"%s\"\n", smp_processor_id(), scode, p); } if (udbh & bit) { scode = ecc_syndrome_table[udbh & 0xff]; if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0) p = syndrome_unknown; else p = memmod_str; printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] " "Memory Module \"%s\"\n", smp_processor_id(), scode, p); } } static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs) { printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n", smp_processor_id(), afsr, afar, udbl, udbh, tl1); spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE); /* We always log it, even if someone is listening for this * trap. */ notify_die(DIE_TRAP, "Correctable ECC Error", regs, 0, TRAP_TYPE_CEE, SIGTRAP); /* The Correctable ECC Error trap does not disable I/D caches. So * we only have to restore the ESTATE Error Enable register. */ spitfire_enable_estate_errors(); } static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs) { siginfo_t info; printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] " "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n", smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1); /* XXX add more human friendly logging of the error status * XXX as is implemented for cheetah */ spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE); /* We always log it, even if someone is listening for this * trap. 
*/ notify_die(DIE_TRAP, "Uncorrectable Error", regs, 0, tt, SIGTRAP); if (regs->tstate & TSTATE_PRIV) { if (tl1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("UE", regs); } /* XXX need more intelligent processing here, such as is implemented * XXX for cheetah errors, in fact if the E-cache still holds the * XXX line with bad parity this will loop */ spitfire_clean_and_reenable_l1_caches(); spitfire_enable_estate_errors(); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_OBJERR; info.si_addr = (void *)0; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar) { unsigned long afsr, tt, udbh, udbl; int tl1; afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT; tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT; tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0; udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT; udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT; #ifdef CONFIG_PCI if (tt == TRAP_TYPE_DAE && pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { spitfire_clean_and_reenable_l1_caches(); spitfire_enable_estate_errors(); pci_poke_faulted = 1; regs->tnpc = regs->tpc + 4; return; } #endif if (afsr & SFAFSR_UE) spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs); if (tt == TRAP_TYPE_CEE) { /* Handle the case where we took a CEE trap, but ACK'd * only the UE state in the UDB error registers. 
*/ if (afsr & SFAFSR_UE) { if (udbh & UDBE_CE) { __asm__ __volatile__( "stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (udbh & UDBE_CE), "r" (0x0), "i" (ASI_UDB_ERROR_W)); } if (udbl & UDBE_CE) { __asm__ __volatile__( "stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (udbl & UDBE_CE), "r" (0x18), "i" (ASI_UDB_ERROR_W)); } } spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs); } } int cheetah_pcache_forced_on; void cheetah_enable_pcache(void) { unsigned long dcr; printk("CHEETAH: Enabling P-Cache on cpu %d.\n", smp_processor_id()); __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (dcr) : "i" (ASI_DCU_CONTROL_REG)); dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL); __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (dcr), "i" (ASI_DCU_CONTROL_REG)); } /* Cheetah error trap handling. */ static unsigned long ecache_flush_physbase; static unsigned long ecache_flush_linesize; static unsigned long ecache_flush_size; /* This table is ordered in priority of errors and matches the * AFAR overwrite policy as well. 
 */
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) for this error class */
	const char *name;	/* human readable description */
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";

static struct afsr_error_table __cheetah_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};

static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";

static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};

static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";

static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO, JPAFSR_JETO_msg },
	{ JPAFSR_SCE, JPAFSR_SCE_msg },
	{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ JPAFSR_OM, JPAFSR_OM_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ JPAFSR_ETP, JPAFSR_ETP_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ JPAFSR_UMS, JPAFSR_UMS_msg },
	{ JPAFSR_RUE, JPAFSR_RUE_msg },
	{ JPAFSR_RCE, JPAFSR_RCE_msg },
	{ JPAFSR_BP, JPAFSR_BP_msg },
	{ JPAFSR_WBP, JPAFSR_WBP_msg },
	{ JPAFSR_FRC, JPAFSR_FRC_msg },
	{ JPAFSR_FRU, JPAFSR_FRU_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};

/* Selected at boot in cheetah_ecache_flush_init() based on the cpu
 * implementation found in %ver.
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* Error-log scoreboard: two cheetah_err_info slots per cpu (one for
 * TL0 traps, one for TL>0 traps).
 */
struct cheetah_err_info *cheetah_error_log;

/* Return this cpu's error log slot for the given AFSR value, or NULL
 * if the scoreboard has not been allocated yet.  The second per-cpu
 * slot is used when CHAFSR_TL1 is set in @afsr.
 */
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}

/* Trap table slots and replacement trap vectors patched in below. */
extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

/* Boot-time setup: size the E-cache flush area, allocate the error
 * log scoreboard, pick the AFSR decode table for this cpu type, and
 * patch the error trap vectors into the trap table.
 */
void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	/* Pick the AFSR decode table matching this cpu implementation. */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

/* Displace the entire E-cache by reading the flush area sized to
 * twice the largest E-cache (set up above).
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc %0, %4, %0\n\t"
			     " bne,pt %%xcc, 1b\n\t"
			     " ldxa [%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

/* Displace a single E-cache line by reading its two alias addresses
 * within the flush area.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

/* Flush the I-cache via __cheetah_flush_icache() with the DCU_IC bit
 * toggled around the operation (see comment above about coherency).
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

/* Invalidate every D-cache line by clearing its tag. */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};

/* Return the highest priority error condition mentioned.
 */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	/* The decode table is priority ordered, so the first matching
	 * mask wins.  Returns 0 when no table entry matches.
	 */
	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

/* Map an AFSR error bit to its human readable description, "???" when
 * the bit is not in the decode table.
 */
static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

/* Dump everything known about a cheetah error trap: AFSR/AFAR decode,
 * syndrome/DIMM lookup, and the D/I/E-cache snapshots captured by the
 * trap handler into *info.  Severity is KERN_WARNING when recoverable,
 * KERN_CRIT otherwise.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)

	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report any remaining lower-priority error bits, one by one. */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

/* Read and clear the AFSR.  When any tracked error bits are set,
 * record AFSR/AFAR into *logp (if non-NULL) and return non-zero.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Writing the bits back clears them. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

/* Fast-ECC error trap handler: flush the E-cache, snapshot the logged
 * error state, re-enable the I/D caches and error reporting, then log
 * and panic if the error is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if the problem was intermittent, 1 if the retry cleared
 * it, 2 if the error persists.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address.
 */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

/* Correctable ECC error trap handler: snapshot the logged error,
 * attempt a repair via cheetah_fix_ce() for main-memory CEs, flush
 * caches as dictated by the AFSR bits, re-enable reporting, and log.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* A lone EDC/CPC error only needs its line flushed;
		 * anything more gets a full E-cache flush.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence.
*/ if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { cheetah_flush_icache(); cheetah_flush_dcache(); /* Re-enable I-cache/D-cache */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_DC | DCU_IC) : "g1"); /* Re-enable error reporting */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_ESTATE_ERROR_EN), "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) : "g1"); (void) cheetah_recheck_errors(NULL); pci_poke_faulted = 1; regs->tpc += 4; regs->tnpc = regs->tpc + 4; return; } #endif p = cheetah_get_error_log(afsr); if (!p) { prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n", afsr, afar); prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n", smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate); prom_halt(); } /* Grab snapshot of logged error. */ memcpy(&local_snapshot, p, sizeof(local_snapshot)); /* If the current trap snapshot does not match what the * trap handler passed along into our args, big trouble. * In such a case, mark the local copy as invalid. * * Else, it matches and we mark the afsr in the non-local * copy as invalid so we may log new error traps there. 
*/ if (p->afsr != afsr || p->afar != afar) local_snapshot.afsr = CHAFSR_INVALID; else p->afsr = CHAFSR_INVALID; is_memory = cheetah_check_main_memory(afar); { int flush_all, flush_line; flush_all = flush_line = 0; if ((afsr & CHAFSR_EDU) != 0UL) { if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU) flush_line = 1; else flush_all = 1; } else if ((afsr & CHAFSR_BERR) != 0UL) { if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR) flush_line = 1; else flush_all = 1; } cheetah_flush_icache(); cheetah_flush_dcache(); /* Re-enable I/D caches */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC | DCU_DC) : "g1"); if (flush_all) cheetah_flush_ecache(); else if (flush_line) cheetah_flush_ecache_line(afar); } /* Re-enable error reporting */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_ESTATE_ERROR_EN), "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN) : "g1"); /* Decide if we can continue after handling this trap and * logging the error. */ recoverable = 1; if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP)) recoverable = 0; /* Re-check AFSR/AFAR. What we are looking for here is whether a new * error was logged while we had error reporting traps disabled. */ if (cheetah_recheck_errors(&local_snapshot)) { unsigned long new_afsr = local_snapshot.afsr; /* If we got a new asynchronous error, die... */ if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU | CHAFSR_WDU | CHAFSR_CPU | CHAFSR_IVU | CHAFSR_UE | CHAFSR_BERR | CHAFSR_TO)) recoverable = 0; } /* Log errors. */ cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable); /* "Recoverable" here means we try to yank the page from ever * being newly used again. This depends upon a few things: * 1) Must be main memory, and AFAR must be valid. * 2) If we trapped from user, OK. 
* 3) Else, if we trapped from kernel we must find exception * table entry (ie. we have to have been accessing user * space). * * If AFAR is not in main memory, or we trapped from kernel * and cannot find an exception table entry, it is unacceptable * to try and continue. */ if (recoverable && is_memory) { if ((regs->tstate & TSTATE_PRIV) == 0UL) { /* OK, usermode access. */ recoverable = 1; } else { const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { /* OK, kernel access to userspace. */ recoverable = 1; } else { /* BAD, privileged state is corrupted. */ recoverable = 0; } if (recoverable) { if (pfn_valid(afar >> PAGE_SHIFT)) get_page(pfn_to_page(afar >> PAGE_SHIFT)); else recoverable = 0; /* Only perform fixup if we still have a * recoverable condition. */ if (recoverable) { regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; } } } } else { recoverable = 0; } if (!recoverable) panic("Irrecoverable deferred error trap.\n"); } /* Handle a D/I cache parity error trap. TYPE is encoded as: * * Bit0: 0=dcache,1=icache * Bit1: 0=recoverable,1=unrecoverable * * The hardware has disabled both the I-cache and D-cache in * the %dcr register. */ void cheetah_plus_parity_error(int type, struct pt_regs *regs) { if (type & 0x1) __cheetah_flush_icache(); else cheetah_plus_zap_dcache_parity(); cheetah_flush_dcache(); /* Re-enable I-cache/D-cache */ __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t" "or %%g1, %1, %%g1\n\t" "stxa %%g1, [%%g0] %0\n\t" "membar #Sync" : /* no outputs */ : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_DC | DCU_IC) : "g1"); if (type & 0x2) { printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n", smp_processor_id(), (type & 0x1) ? 'I' : 'D', regs->tpc); printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); panic("Irrecoverable Cheetah+ parity error."); } printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n", smp_processor_id(), (type & 0x1) ? 
'I' : 'D', regs->tpc); printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); } struct sun4v_error_entry { /* Unique error handle */ /*0x00*/u64 err_handle; /* %stick value at the time of the error */ /*0x08*/u64 err_stick; /*0x10*/u8 reserved_1[3]; /* Error type */ /*0x13*/u8 err_type; #define SUN4V_ERR_TYPE_UNDEFINED 0 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3 #define SUN4V_ERR_TYPE_SHUTDOWN_RQST 4 #define SUN4V_ERR_TYPE_DUMP_CORE 5 #define SUN4V_ERR_TYPE_SP_STATE_CHANGE 6 #define SUN4V_ERR_TYPE_NUM 7 /* Error attributes */ /*0x14*/u32 err_attrs; #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002 #define SUN4V_ERR_ATTRS_PIO 0x00000004 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010 #define SUN4V_ERR_ATTRS_SHUTDOWN_RQST 0x00000020 #define SUN4V_ERR_ATTRS_ASR 0x00000040 #define SUN4V_ERR_ATTRS_ASI 0x00000080 #define SUN4V_ERR_ATTRS_PRIV_REG 0x00000100 #define SUN4V_ERR_ATTRS_SPSTATE_MSK 0x00000600 #define SUN4V_ERR_ATTRS_SPSTATE_SHFT 9 #define SUN4V_ERR_ATTRS_MODE_MSK 0x03000000 #define SUN4V_ERR_ATTRS_MODE_SHFT 24 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000 #define SUN4V_ERR_SPSTATE_FAULTED 0 #define SUN4V_ERR_SPSTATE_AVAILABLE 1 #define SUN4V_ERR_SPSTATE_NOT_PRESENT 2 #define SUN4V_ERR_MODE_USER 1 #define SUN4V_ERR_MODE_PRIV 2 /* Real address of the memory region or PIO transaction */ /*0x18*/u64 err_raddr; /* Size of the operation triggering the error, in bytes */ /*0x20*/u32 err_size; /* ID of the CPU */ /*0x24*/u16 err_cpu; /* Grace periof for shutdown, in seconds */ /*0x26*/u16 err_secs; /* Value of the %asi register */ /*0x28*/u8 err_asi; /*0x29*/u8 reserved_2; /* Value of the ASR register number */ /*0x2a*/u16 err_asr; #define SUN4V_ERR_ASR_VALID 0x8000 /*0x2c*/u32 reserved_3; /*0x30*/u64 reserved_4; /*0x38*/u64 reserved_5; }; static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); static 
atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); static const char *sun4v_err_type_to_str(u8 type) { static const char *types[SUN4V_ERR_TYPE_NUM] = { "undefined", "uncorrected resumable", "precise nonresumable", "deferred nonresumable", "shutdown request", "dump core", "SP state change", }; if (type < SUN4V_ERR_TYPE_NUM) return types[type]; return "unknown"; } static void sun4v_emit_err_attr_strings(u32 attrs) { static const char *attr_names[] = { "processor", "memory", "PIO", "int-registers", "fpu-registers", "shutdown-request", "ASR", "ASI", "priv-reg", }; static const char *sp_states[] = { "sp-faulted", "sp-available", "sp-not-present", "sp-state-reserved", }; static const char *modes[] = { "mode-reserved0", "user", "priv", "mode-reserved1", }; u32 sp_state, mode; int i; for (i = 0; i < ARRAY_SIZE(attr_names); i++) { if (attrs & (1U << i)) { const char *s = attr_names[i]; pr_cont("%s ", s); } } sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >> SUN4V_ERR_ATTRS_SPSTATE_SHFT); pr_cont("%s ", sp_states[sp_state]); mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >> SUN4V_ERR_ATTRS_MODE_SHFT); pr_cont("%s ", modes[mode]); if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) pr_cont("res-queue-full "); } /* When the report contains a real-address of "-1" it means that the * hardware did not provide the address. So we compute the effective * address of the load or store instruction at regs->tpc and report * that. Usually when this happens it's a PIO and in such a case we * are using physical addresses with bypass ASIs anyways, so what we * report here is exactly what we want. 
*/ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs) { unsigned int insn; u64 addr; if (!(regs->tstate & TSTATE_PRIV)) return; insn = *(unsigned int *) regs->tpc; addr = compute_effective_address(regs, insn, 0); printk("%s: insn effective address [0x%016llx]\n", pfx, addr); } static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) { u64 *raw_ptr = (u64 *) ent; u32 attrs; int cnt; printk("%s: Reporting on cpu %d\n", pfx, cpu); printk("%s: TPC [0x%016lx] <%pS>\n", pfx, regs->tpc, (void *) regs->tpc); printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n", pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]); printk("%s: %016llx:%016llx:%016llx:%016llx]\n", pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]); printk("%s: handle [0x%016llx] stick [0x%016llx]\n", pfx, ent->err_handle, ent->err_stick); printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type)); attrs = ent->err_attrs; printk("%s: attrs [0x%08x] < ", pfx, attrs); sun4v_emit_err_attr_strings(attrs); pr_cont(">\n"); /* Various fields in the error report are only valid if * certain attribute bits are set. 
*/ if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_PIO | SUN4V_ERR_ATTRS_ASI)) { printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr); if (ent->err_raddr == ~(u64)0) sun4v_report_real_raddr(pfx, regs); } if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI)) printk("%s: size [0x%x]\n", pfx, ent->err_size); if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR | SUN4V_ERR_ATTRS_INT_REGISTERS | SUN4V_ERR_ATTRS_FPU_REGISTERS | SUN4V_ERR_ATTRS_PRIV_REG)) printk("%s: cpu[%u]\n", pfx, ent->err_cpu); if (attrs & SUN4V_ERR_ATTRS_ASI) printk("%s: asi [0x%02x]\n", pfx, ent->err_asi); if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS | SUN4V_ERR_ATTRS_FPU_REGISTERS | SUN4V_ERR_ATTRS_PRIV_REG)) && (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0) printk("%s: reg [0x%04x]\n", pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID); show_regs(regs); if ((cnt = atomic_read(ocnt)) != 0) { atomic_set(ocnt, 0); wmb(); printk("%s: Queue overflowed %d times.\n", pfx, cnt); } } /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event and clear the first word of the entry. */ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) { struct sun4v_error_entry *ent, local_copy; struct trap_per_cpu *tb; unsigned long paddr; int cpu; cpu = get_cpu(); tb = &trap_block[cpu]; paddr = tb->resum_kernel_buf_pa + offset; ent = __va(paddr); memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); /* We have a local copy now, so release the entry. */ ent->err_handle = 0; wmb(); put_cpu(); if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) { /* We should really take the seconds field of * the error report and use it for the shutdown * invocation, but for now do the same thing we * do for a DS shutdown request. 
*/ pr_info("Shutdown request, %u seconds...\n", local_copy.err_secs); orderly_poweroff(true); return; } sun4v_log_error(regs, &local_copy, cpu, KERN_ERR "RESUMABLE ERROR", &sun4v_resum_oflow_cnt); } /* If we try to printk() we'll probably make matters worse, by trying * to retake locks this cpu already holds or causing more errors. So * just bump a counter, and we'll report these counter bumps above. */ void sun4v_resum_overflow(struct pt_regs *regs) { atomic_inc(&sun4v_resum_oflow_cnt); } /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event, clear the first word of the entry, and die. */ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) { struct sun4v_error_entry *ent, local_copy; struct trap_per_cpu *tb; unsigned long paddr; int cpu; cpu = get_cpu(); tb = &trap_block[cpu]; paddr = tb->nonresum_kernel_buf_pa + offset; ent = __va(paddr); memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); /* We have a local copy now, so release the entry. */ ent->err_handle = 0; wmb(); put_cpu(); #ifdef CONFIG_PCI /* Check for the special PCI poke sequence. */ if (pci_poke_in_progress && pci_poke_cpu == cpu) { pci_poke_faulted = 1; regs->tpc += 4; regs->tnpc = regs->tpc + 4; return; } #endif sun4v_log_error(regs, &local_copy, cpu, KERN_EMERG "NON-RESUMABLE ERROR", &sun4v_nonresum_oflow_cnt); panic("Non-resumable error."); } /* If we try to printk() we'll probably make matters worse, by trying * to retake locks this cpu already holds or causing more errors. So * just bump a counter, and we'll report these counter bumps above. */ void sun4v_nonresum_overflow(struct pt_regs *regs) { /* XXX Actually even this can make not that much sense. Perhaps * XXX we should just pull the plug and panic directly from here? 
*/ atomic_inc(&sun4v_nonresum_oflow_cnt); } unsigned long sun4v_err_itlb_vaddr; unsigned long sun4v_err_itlb_ctx; unsigned long sun4v_err_itlb_pte; unsigned long sun4v_err_itlb_error; void sun4v_itlb_error_report(struct pt_regs *regs, int tl) { if (tl > 1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, sun4v_err_itlb_pte, sun4v_err_itlb_error); prom_halt(); } unsigned long sun4v_err_dtlb_vaddr; unsigned long sun4v_err_dtlb_ctx; unsigned long sun4v_err_dtlb_pte; unsigned long sun4v_err_dtlb_error; void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) { if (tl > 1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, sun4v_err_dtlb_pte, sun4v_err_dtlb_error); prom_halt(); } void hypervisor_tlbop_error(unsigned long err, unsigned long op) { printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n", err, op); } void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) { printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", err, op); } void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { regs->tpc = regs->tnpc; regs->tnpc += 4; } else { unsigned long fsr = current_thread_info()->xfsr[0]; siginfo_t info; if 
(test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; info.si_code = __SI_FAULT; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; else if (fsr & 0x08) info.si_code = FPE_FLTOVF; else if (fsr & 0x04) info.si_code = FPE_FLTUND; else if (fsr & 0x02) info.si_code = FPE_FLTDIV; else if (fsr & 0x01) info.si_code = FPE_FLTRES; } force_sig_info(SIGFPE, &info, current); } } void do_fpieee(struct pt_regs *regs) { if (notify_die(DIE_TRAP, "fpu exception ieee", regs, 0, 0x24, SIGFPE) == NOTIFY_STOP) return; do_fpe_common(regs); } extern int do_mathemu(struct pt_regs *, struct fpustate *, bool); void do_fpother(struct pt_regs *regs) { struct fpustate *f = FPUSTATE; int ret = 0; if (notify_die(DIE_TRAP, "fpu exception other", regs, 0, 0x25, SIGFPE) == NOTIFY_STOP) return; switch ((current_thread_info()->xfsr[0] & 0x1c000)) { case (2 << 14): /* unfinished_FPop */ case (3 << 14): /* unimplemented_FPop */ ret = do_mathemu(regs, f, false); break; } if (ret) return; do_fpe_common(regs); } void do_tof(struct pt_regs *regs) { siginfo_t info; if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs, 0, 0x26, SIGEMT) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) die_if_kernel("Penguin overflow trap from kernel mode", regs); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGEMT; info.si_errno = 0; info.si_code = EMT_TAGOVF; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGEMT, &info, current); } void do_div0(struct pt_regs *regs) { siginfo_t info; if (notify_die(DIE_TRAP, "integer division by zero", regs, 0, 0x28, SIGFPE) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) die_if_kernel("TL0: Kernel divide by zero.", regs); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo 
= SIGFPE; info.si_errno = 0; info.si_code = FPE_INTDIV; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGFPE, &info, current); } static void instruction_dump(unsigned int *pc) { int i; if ((((unsigned long) pc) & 3)) return; printk("Instruction DUMP:"); for (i = -3; i < 6; i++) printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>'); printk("\n"); } static void user_instruction_dump(unsigned int __user *pc) { int i; unsigned int buf[9]; if ((((unsigned long) pc) & 3)) return; if (copy_from_user(buf, pc - 3, sizeof(buf))) return; printk("Instruction DUMP:"); for (i = 0; i < 9; i++) printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>'); printk("\n"); } void show_stack(struct task_struct *tsk, unsigned long *_ksp) { unsigned long fp, ksp; struct thread_info *tp; int count = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER int graph = 0; #endif ksp = (unsigned long) _ksp; if (!tsk) tsk = current; tp = task_thread_info(tsk); if (ksp == 0UL) { if (tsk == current) asm("mov %%fp, %0" : "=r" (ksp)); else ksp = tp->ksp; } if (tp == current_thread_info()) flushw_all(); fp = ksp + STACK_BIAS; printk("Call Trace:\n"); do { struct sparc_stackf *sf; struct pt_regs *regs; unsigned long pc; if (!kstack_valid(tp, fp)) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); if (kstack_is_trap_frame(tp, regs)) { if (!(regs->tstate & TSTATE_PRIV)) break; pc = regs->tpc; fp = regs->u_regs[UREG_I6] + STACK_BIAS; } else { pc = sf->callers_pc; fp = (unsigned long)sf->fp + STACK_BIAS; } printk(" [%016lx] %pS\n", pc, (void *) pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { int index = tsk->curr_ret_stack; if (tsk->ret_stack && index >= graph) { pc = tsk->ret_stack[index - graph].ret; printk(" [%016lx] %pS\n", pc, (void *) pc); graph++; } } #endif } while (++count < 16); } static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; if (!fp) return NULL; return (struct 
reg_window *) (fp + STACK_BIAS); } void die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; int count = 0; /* Amuse the user. */ printk( " \\|/ ____ \\|/\n" " \"@'/ .. \\`@\"\n" " /_| \\__/ |_\\\n" " \\__U_/\n"); printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); __asm__ __volatile__("flushw"); show_regs(regs); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); if (regs->tstate & TSTATE_PRIV) { struct thread_info *tp = current_thread_info(); struct reg_window *rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); /* Stop the back trace when we hit userland or we * find some badly aligned kernel stack. */ while (rw && count++ < 30 && kstack_valid(tp, (unsigned long) rw)) { printk("Caller[%016lx]: %pS\n", rw->ins[7], (void *) rw->ins[7]); rw = kernel_stack_up(rw); } instruction_dump ((unsigned int *) regs->tpc); } else { if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } user_instruction_dump ((unsigned int __user *) regs->tpc); } if (regs->tstate & TSTATE_PRIV) do_exit(SIGKILL); do_exit(SIGSEGV); } EXPORT_SYMBOL(die_if_kernel); #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) extern int handle_popc(u32 insn, struct pt_regs *regs); extern int handle_ldf_stq(u32 insn, struct pt_regs *regs); void do_illegal_instruction(struct pt_regs *regs) { unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; siginfo_t info; if (notify_die(DIE_TRAP, "illegal instruction", regs, 0, 0x10, SIGILL) == NOTIFY_STOP) return; if (tstate & TSTATE_PRIV) die_if_kernel("Kernel illegal instruction", regs); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { if (handle_popc(insn, regs)) return; } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { if 
(handle_ldf_stq(insn, regs)) return; } else if (tlb_type == hypervisor) { if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { if (!vis_emul(regs, insn)) return; } else { struct fpustate *f = FPUSTATE; /* On UltraSPARC T2 and later, FPU insns which * are not implemented in HW signal an illegal * instruction trap and do not set the FP Trap * Trap in the %fsr to unimplemented_FPop. */ if (do_mathemu(regs, f, true)) return; } } } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLOPC; info.si_addr = (void __user *)pc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); } extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) return; if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *) addr; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void do_privop(struct pt_regs *regs) { siginfo_t info; if (notify_die(DIE_TRAP, "privileged operation", regs, 0, 0x11, SIGILL) == NOTIFY_STOP) return; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_PRVOPC; info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; 
force_sig_info(SIGILL, &info, current); } void do_privact(struct pt_regs *regs) { do_privop(regs); } /* Trap level 1 stuff or other traps we should never see... */ void do_cee(struct pt_regs *regs) { die_if_kernel("TL0: Cache Error Exception", regs); } void do_cee_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Cache Error Exception", regs); } void do_dae_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Data Access Exception", regs); } void do_iae_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Instruction Access Exception", regs); } void do_div0_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: DIV0 Exception", regs); } void do_fpdis_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Disabled", regs); } void do_fpieee_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU IEEE Exception", regs); } void do_fpother_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Other Exception", regs); } void do_ill_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Illegal Instruction Exception", regs); } void do_irq_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: IRQ Exception", regs); } void do_lddfmna_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: LDDF Exception", regs); } void do_stdfmna_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: STDF Exception", regs); } void do_paw(struct pt_regs *regs) { die_if_kernel("TL0: Phys Watchpoint Exception", regs); } void do_paw_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct 
tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Phys Watchpoint Exception", regs); } void do_vaw(struct pt_regs *regs) { die_if_kernel("TL0: Virt Watchpoint Exception", regs); } void do_vaw_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Virt Watchpoint Exception", regs); } void do_tof_tl1(struct pt_regs *regs) { dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Tag Overflow Exception", regs); } void do_getpsr(struct pt_regs *regs) { regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate); regs->tpc = regs->tnpc; regs->tnpc += 4; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } } struct trap_per_cpu trap_block[NR_CPUS]; EXPORT_SYMBOL(trap_block); /* This can get invoked before sched_init() so play it super safe * and use hard_smp_processor_id(). */ void notrace init_cur_cpu_trap(struct thread_info *t) { int cpu = hard_smp_processor_id(); struct trap_per_cpu *p = &trap_block[cpu]; p->thread = t; p->pgd_paddr = 0; } extern void thread_info_offsets_are_bolixed_dave(void); extern void trap_per_cpu_offsets_are_bolixed_dave(void); extern void tsb_config_offsets_are_bolixed_dave(void); /* Only invoked on boot processor. */ void __init trap_init(void) { /* Compile time sanity check. 
*/ BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || TI_FLAGS != offsetof(struct thread_info, flags) || TI_CPU != offsetof(struct thread_info, cpu) || TI_FPSAVED != offsetof(struct thread_info, fpsaved) || TI_KSP != offsetof(struct thread_info, ksp) || TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) || TI_KREGS != offsetof(struct thread_info, kregs) || TI_UTRAPS != offsetof(struct thread_info, utraps) || TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) || TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || TI_GSR != offsetof(struct thread_info, gsr) || TI_XFSR != offsetof(struct thread_info, xfsr) || TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_NEW_CHILD != offsetof(struct thread_info, new_child) || TI_CURRENT_DS != offsetof(struct thread_info, current_ds) || TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) || TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || TI_FPREGS != offsetof(struct thread_info, fpregs) || (TI_FPREGS & (64 - 1))); BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || (TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr)) || (TRAP_PER_CPU_CPU_MONDO_PA != offsetof(struct trap_per_cpu, cpu_mondo_pa)) || (TRAP_PER_CPU_DEV_MONDO_PA != offsetof(struct trap_per_cpu, dev_mondo_pa)) || (TRAP_PER_CPU_RESUM_MONDO_PA != offsetof(struct trap_per_cpu, resum_mondo_pa)) || (TRAP_PER_CPU_RESUM_KBUF_PA != offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || (TRAP_PER_CPU_NONRESUM_MONDO_PA != offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || (TRAP_PER_CPU_NONRESUM_KBUF_PA != offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || (TRAP_PER_CPU_FAULT_INFO != offsetof(struct trap_per_cpu, fault_info)) || (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || 
(TRAP_PER_CPU_CPU_LIST_PA != offsetof(struct trap_per_cpu, cpu_list_pa)) || (TRAP_PER_CPU_TSB_HUGE != offsetof(struct trap_per_cpu, tsb_huge)) || (TRAP_PER_CPU_TSB_HUGE_TEMP != offsetof(struct trap_per_cpu, tsb_huge_temp)) || (TRAP_PER_CPU_IRQ_WORKLIST_PA != offsetof(struct trap_per_cpu, irq_worklist_pa)) || (TRAP_PER_CPU_CPU_MONDO_QMASK != offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || (TRAP_PER_CPU_DEV_MONDO_QMASK != offsetof(struct trap_per_cpu, dev_mondo_qmask)) || (TRAP_PER_CPU_RESUM_QMASK != offsetof(struct trap_per_cpu, resum_qmask)) || (TRAP_PER_CPU_NONRESUM_QMASK != offsetof(struct trap_per_cpu, nonresum_qmask)) || (TRAP_PER_CPU_PER_CPU_BASE != offsetof(struct trap_per_cpu, __per_cpu_base))); BUILD_BUG_ON((TSB_CONFIG_TSB != offsetof(struct tsb_config, tsb)) || (TSB_CONFIG_RSS_LIMIT != offsetof(struct tsb_config, tsb_rss_limit)) || (TSB_CONFIG_NENTRIES != offsetof(struct tsb_config, tsb_nentries)) || (TSB_CONFIG_REG_VAL != offsetof(struct tsb_config, tsb_reg_val)) || (TSB_CONFIG_MAP_VADDR != offsetof(struct tsb_config, tsb_map_vaddr)) || (TSB_CONFIG_MAP_PTE != offsetof(struct tsb_config, tsb_map_pte))); /* Attach to the address space of init_task. On SMP we * do this in smp.c:smp_callin for other cpus. */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; }
gpl-2.0
garwynn/L900_MA7_Kernel
arch/powerpc/mm/pgtable.c
2582
6859
/* * This file contains common routines for dealing with free of page tables * Along with common page table handling code * * Derived from arch/powerpc/mm/tlb_64.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include "mmu_decl.h" static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; } /* We only try to do i/d cache coherency on stuff that looks like * reasonably "normal" PTEs. We currently require a PTE to be present * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that * on userspace PTEs */ static inline int pte_looks_normal(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) == (_PAGE_PRESENT | _PAGE_USER); } struct page * maybe_pte_to_page(pte_t pte) { unsigned long pfn = pte_pfn(pte); struct page *page; if (unlikely(!pfn_valid(pfn))) return NULL; page = pfn_to_page(pfn); if (PageReserved(page)) return NULL; return page; } #if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 /* Server-style MMU handles coherency when hashing if HW exec permission * is supposed per page (currently 64-bit only). If not, then, we always * flush the cache for valid PTEs in set_pte. 
Embedded CPU without HW exec * support falls into the same category. */ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || cpu_has_feature(CPU_FTR_NOEXECUTE))) { struct page *pg = maybe_pte_to_page(pte); if (!pg) return pte; if (!test_bit(PG_arch_1, &pg->flags)) { #ifdef CONFIG_8xx /* On 8xx, cache control instructions (particularly * "dcbst" from flush_dcache_icache) fault as write * operation if there is an unpopulated TLB entry * for the address in question. To workaround that, * we invalidate the TLB here, thus avoiding dcbst * misbehaviour. */ /* 8xx doesn't care about PID, size or ind args */ _tlbil_va(addr, 0, 0, 0); #endif /* CONFIG_8xx */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); } } return pte; } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { return pte; } #else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */ /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. 
*/ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { struct page *pg; /* No exec permission in the first place, move on */ if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte)) return pte; /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) return pte; /* If the page clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) return pte; /* If it's an exec fault, we flush the cache and make it clean */ if (is_exec_fault()) { flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); return pte; } /* Else, we filter out _PAGE_EXEC */ return __pte(pte_val(pte) & ~_PAGE_EXEC); } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { struct page *pg; /* So here, we only care about exec faults, as we use them * to recover lost _PAGE_EXEC and perform I$/D$ coherency * if necessary. Also if _PAGE_EXEC is already set, same deal, * we just bail out */ if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault()) return pte; #ifdef CONFIG_DEBUG_VM /* So this is an exec fault, _PAGE_EXEC is not set. If it was * an error we would have bailed out earlier in do_page_fault() * but let's make sure of it */ if (WARN_ON(!(vma->vm_flags & VM_EXEC))) return pte; #endif /* CONFIG_DEBUG_VM */ /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) goto bail; /* If the page is already clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) goto bail; /* Clean the page and set PG_arch_1 */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); bail: return __pte(pte_val(pte) | _PAGE_EXEC); } #endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */ /* * set_pte stores a linux PTE into the linux page table. 
*/ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { #ifdef CONFIG_DEBUG_VM WARN_ON(pte_present(*ptep)); #endif /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this * is called. */ pte = set_pte_filter(pte, addr); /* Perform the setting of the PTE */ __set_pte_at(mm, addr, ptep, pte, 0); } /* * This is called when relaxing access to a PTE. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have * handled those two for us, we additionally deal with missing execute * permission here on some processors */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed; entry = set_access_flags_filter(entry, vma, dirty); changed = !pte_same(*(ptep), entry); if (changed) { if (!(vma->vm_flags & VM_HUGETLB)) assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(ptep, entry); flush_tlb_page_nohash(vma, address); } return changed; } #ifdef CONFIG_DEBUG_VM void assert_pte_locked(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (mm == &init_mm) return; pgd = mm->pgd + pgd_index(addr); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, addr); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, addr); BUG_ON(!pmd_present(*pmd)); assert_spin_locked(pte_lockptr(mm, pmd)); } #endif /* CONFIG_DEBUG_VM */
gpl-2.0
andip71/boeffla-kernel-samsung-s3
arch/blackfin/kernel/bfin_dma_5xx.c
2838
14121
/* * bfin_dma_5xx.c - Blackfin DMA implementation * * Copyright 2004-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/param.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include <asm/dma.h> #include <asm/uaccess.h> #include <asm/early_printk.h> /* * To make sure we work around 05000119 - we always check DMA_DONE bit, * never the DMA_RUN bit */ struct dma_channel dma_ch[MAX_DMA_CHANNELS]; EXPORT_SYMBOL(dma_ch); static int __init blackfin_dma_init(void) { int i; printk(KERN_INFO "Blackfin DMA Controller\n"); #if ANOMALY_05000480 bfin_write_DMAC_TC_PER(0x0111); #endif for (i = 0; i < MAX_DMA_CHANNELS; i++) { atomic_set(&dma_ch[i].chan_status, 0); dma_ch[i].regs = dma_io_base_addr[i]; } /* Mark MEMDMA Channel 0 as requested since we're using it internally */ request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy"); request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy"); #if defined(CONFIG_DEB_DMA_URGENT) bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE() | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT); #endif return 0; } arch_initcall(blackfin_dma_init); #ifdef CONFIG_PROC_FS static int proc_dma_show(struct seq_file *m, void *v) { int i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) if (dma_channel_active(i)) seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id); return 0; } static int proc_dma_open(struct inode *inode, struct file *file) { return single_open(file, proc_dma_show, NULL); } static const struct file_operations proc_dma_operations = { .open = proc_dma_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_dma_init(void) { return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL; } late_initcall(proc_dma_init); #endif static void set_dma_peripheral_map(unsigned int 
channel, const char *device_id) { #ifdef CONFIG_BF54x unsigned int per_map; switch (channel) { case CH_UART2_RX: per_map = 0xC << 12; break; case CH_UART2_TX: per_map = 0xD << 12; break; case CH_UART3_RX: per_map = 0xE << 12; break; case CH_UART3_TX: per_map = 0xF << 12; break; default: return; } if (strncmp(device_id, "BFIN_UART", 9) == 0) dma_ch[channel].regs->peripheral_map = per_map; #endif } /** * request_dma - request a DMA channel * * Request the specific DMA channel from the system if it's available. */ int request_dma(unsigned int channel, const char *device_id) { pr_debug("request_dma() : BEGIN\n"); if (device_id == NULL) printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); #if defined(CONFIG_BF561) && ANOMALY_05000182 if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) { if (get_cclk() > 500000000) { printk(KERN_WARNING "Request IMDMA failed due to ANOMALY 05000182\n"); return -EFAULT; } } #endif if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { pr_debug("DMA CHANNEL IN USE\n"); return -EBUSY; } set_dma_peripheral_map(channel, device_id); dma_ch[channel].device_id = device_id; dma_ch[channel].irq = 0; /* This is to be enabled by putting a restriction - * you have to request DMA, before doing any operations on * descriptor/channel */ pr_debug("request_dma() : END\n"); return 0; } EXPORT_SYMBOL(request_dma); int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data) { int ret; unsigned int irq; BUG_ON(channel >= MAX_DMA_CHANNELS || !callback || !atomic_read(&dma_ch[channel].chan_status)); irq = channel2irq(channel); ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data); if (ret) return ret; dma_ch[channel].irq = irq; dma_ch[channel].data = data; return 0; } EXPORT_SYMBOL(set_dma_callback); /** * clear_dma_buffer - clear DMA fifos for specified channel * * Set the Buffer Clear bit in the Configuration register of specific DMA * channel. 
This will stop the descriptor based DMA operation. */ static void clear_dma_buffer(unsigned int channel) { dma_ch[channel].regs->cfg |= RESTART; SSYNC(); dma_ch[channel].regs->cfg &= ~RESTART; } void free_dma(unsigned int channel) { pr_debug("freedma() : BEGIN\n"); BUG_ON(channel >= MAX_DMA_CHANNELS || !atomic_read(&dma_ch[channel].chan_status)); /* Halt the DMA */ disable_dma(channel); clear_dma_buffer(channel); if (dma_ch[channel].irq) free_irq(dma_ch[channel].irq, dma_ch[channel].data); /* Clear the DMA Variable in the Channel */ atomic_set(&dma_ch[channel].chan_status, 0); pr_debug("freedma() : END\n"); } EXPORT_SYMBOL(free_dma); #ifdef CONFIG_PM # ifndef MAX_DMA_SUSPEND_CHANNELS # define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS # endif int blackfin_dma_suspend(void) { int i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) { if (dma_ch[i].regs->cfg & DMAEN) { printk(KERN_ERR "DMA Channel %d failed to suspend\n", i); return -EBUSY; } if (i < MAX_DMA_SUSPEND_CHANNELS) dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map; } return 0; } void blackfin_dma_resume(void) { int i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) { dma_ch[i].regs->cfg = 0; if (i < MAX_DMA_SUSPEND_CHANNELS) dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map; } } #endif /** * blackfin_dma_early_init - minimal DMA init * * Setup a few DMA registers so we can safely do DMA transfers early on in * the kernel booting process. Really this just means using dma_memcpy(). 
*/ void __init blackfin_dma_early_init(void) { early_shadow_stamp(); bfin_write_MDMA_S0_CONFIG(0); bfin_write_MDMA_S1_CONFIG(0); } void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size) { unsigned long dst = (unsigned long)pdst; unsigned long src = (unsigned long)psrc; struct dma_register *dst_ch, *src_ch; early_shadow_stamp(); /* We assume that everything is 4 byte aligned, so include * a basic sanity check */ BUG_ON(dst % 4); BUG_ON(src % 4); BUG_ON(size % 4); src_ch = 0; /* Find an avalible memDMA channel */ while (1) { if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) { dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR; src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR; } else { dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR; src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR; } if (!bfin_read16(&src_ch->cfg)) break; else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) { bfin_write16(&src_ch->cfg, 0); break; } } /* Force a sync in case a previous config reset on this channel * occurred. This is needed so subsequent writes to DMA registers * are not spuriously lost/corrupted. 
*/ __builtin_bfin_ssync(); /* Destination */ bfin_write32(&dst_ch->start_addr, dst); bfin_write16(&dst_ch->x_count, size >> 2); bfin_write16(&dst_ch->x_modify, 1 << 2); bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR); /* Source */ bfin_write32(&src_ch->start_addr, src); bfin_write16(&src_ch->x_count, size >> 2); bfin_write16(&src_ch->x_modify, 1 << 2); bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR); /* Enable */ bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32); bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32); /* Since we are atomic now, don't use the workaround ssync */ __builtin_bfin_ssync(); } void __init early_dma_memcpy_done(void) { early_shadow_stamp(); while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) || (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE))) continue; bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR); /* * Now that DMA is done, we would normally flush cache, but * i/d cache isn't running this early, so we don't bother, * and just clear out the DMA channel for next time */ bfin_write_MDMA_S0_CONFIG(0); bfin_write_MDMA_S1_CONFIG(0); bfin_write_MDMA_D0_CONFIG(0); bfin_write_MDMA_D1_CONFIG(0); __builtin_bfin_ssync(); } /** * __dma_memcpy - program the MDMA registers * * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs * while programming registers so that everything is fully configured. Wait * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE * check will make sure we don't clobber any existing transfer. */ static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf) { static DEFINE_SPINLOCK(mdma_lock); unsigned long flags; spin_lock_irqsave(&mdma_lock, flags); /* Force a sync in case a previous config reset on this channel * occurred. This is needed so subsequent writes to DMA registers * are not spuriously lost/corrupted. 
Do it under irq lock and * without the anomaly version (because we are atomic already). */ __builtin_bfin_ssync(); if (bfin_read_MDMA_S0_CONFIG()) while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) continue; if (conf & DMA2D) { /* For larger bit sizes, we've already divided down cnt so it * is no longer a multiple of 64k. So we have to break down * the limit here so it is a multiple of the incoming size. * There is no limitation here in terms of total size other * than the hardware though as the bits lost in the shift are * made up by MODIFY (== we can hit the whole address space). * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4 */ u32 shift = abs(dmod) >> 1; size_t ycnt = cnt >> (16 - shift); cnt = 1 << (16 - shift); bfin_write_MDMA_D0_Y_COUNT(ycnt); bfin_write_MDMA_S0_Y_COUNT(ycnt); bfin_write_MDMA_D0_Y_MODIFY(dmod); bfin_write_MDMA_S0_Y_MODIFY(smod); } bfin_write_MDMA_D0_START_ADDR(daddr); bfin_write_MDMA_D0_X_COUNT(cnt); bfin_write_MDMA_D0_X_MODIFY(dmod); bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_S0_START_ADDR(saddr); bfin_write_MDMA_S0_X_COUNT(cnt); bfin_write_MDMA_S0_X_MODIFY(smod); bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_S0_CONFIG(DMAEN | conf); bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf); spin_unlock_irqrestore(&mdma_lock, flags); SSYNC(); while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) if (bfin_read_MDMA_S0_CONFIG()) continue; else return; bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_S0_CONFIG(0); bfin_write_MDMA_D0_CONFIG(0); } /** * _dma_memcpy - translate C memcpy settings into MDMA settings * * Handle all the high level steps before we touch the MDMA registers. So * handle direction, tweaking of sizes, and formatting of addresses. 
*/ static void *_dma_memcpy(void *pdst, const void *psrc, size_t size) { u32 conf, shift; s16 mod; unsigned long dst = (unsigned long)pdst; unsigned long src = (unsigned long)psrc; if (size == 0) return NULL; if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) { conf = WDSIZE_32; shift = 2; } else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) { conf = WDSIZE_16; shift = 1; } else { conf = WDSIZE_8; shift = 0; } /* If the two memory regions have a chance of overlapping, make * sure the memcpy still works as expected. Do this by having the * copy run backwards instead. */ mod = 1 << shift; if (src < dst) { mod *= -1; dst += size + mod; src += size + mod; } size >>= shift; if (size > 0x10000) conf |= DMA2D; __dma_memcpy(dst, mod, src, mod, size, conf); return pdst; } /** * dma_memcpy - DMA memcpy under mutex lock * * Do not check arguments before starting the DMA memcpy. Break the transfer * up into two pieces. The first transfer is in multiples of 64k and the * second transfer is the piece smaller than 64k. */ void *dma_memcpy(void *pdst, const void *psrc, size_t size) { unsigned long dst = (unsigned long)pdst; unsigned long src = (unsigned long)psrc; if (bfin_addr_dcacheable(src)) blackfin_dcache_flush_range(src, src + size); if (bfin_addr_dcacheable(dst)) blackfin_dcache_invalidate_range(dst, dst + size); return dma_memcpy_nocache(pdst, psrc, size); } EXPORT_SYMBOL(dma_memcpy); /** * dma_memcpy_nocache - DMA memcpy under mutex lock * - No cache flush/invalidate * * Do not check arguments before starting the DMA memcpy. Break the transfer * up into two pieces. The first transfer is in multiples of 64k and the * second transfer is the piece smaller than 64k. 
*/ void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size) { size_t bulk, rest; bulk = size & ~0xffff; rest = size - bulk; if (bulk) _dma_memcpy(pdst, psrc, bulk); _dma_memcpy(pdst + bulk, psrc + bulk, rest); return pdst; } EXPORT_SYMBOL(dma_memcpy_nocache); /** * safe_dma_memcpy - DMA memcpy w/argument checking * * Verify arguments are safe before heading to dma_memcpy(). */ void *safe_dma_memcpy(void *dst, const void *src, size_t size) { if (!access_ok(VERIFY_WRITE, dst, size)) return NULL; if (!access_ok(VERIFY_READ, src, size)) return NULL; return dma_memcpy(dst, src, size); } EXPORT_SYMBOL(safe_dma_memcpy); static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len, u16 size, u16 dma_size) { blackfin_dcache_flush_range(buf, buf + len * size); __dma_memcpy(addr, 0, buf, size, len, dma_size); } static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len, u16 size, u16 dma_size) { blackfin_dcache_invalidate_range(buf, buf + len * size); __dma_memcpy(buf, size, addr, 0, len, dma_size); } #define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \ void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \ { \ _dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \ } \ EXPORT_SYMBOL(dma_##io##s##bwl) MAKE_DMA_IO(out, b, 1, 8, const); MAKE_DMA_IO(in, b, 1, 8, ); MAKE_DMA_IO(out, w, 2, 16, const); MAKE_DMA_IO(in, w, 2, 16, ); MAKE_DMA_IO(out, l, 4, 32, const); MAKE_DMA_IO(in, l, 4, 32, );
gpl-2.0
simone201/neak-gs3-jb
drivers/ata/pata_rb532_cf.c
3094
5560
/* * A low-level PATA driver to handle a Compact Flash connected on the * Mikrotik's RouterBoard 532 board. * * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org> * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org> * * This file was based on: drivers/ata/pata_ixp4xx_cf.c * Copyright (C) 2006-07 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * Also was based on the driver for Linux 2.4.xx published by Mikrotik for * their RouterBoard 1xx and 5xx series devices. The original Mikrotik code * seems not to have a license. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/libata.h> #include <scsi/scsi_host.h> #include <asm/gpio.h> #define DRV_NAME "pata-rb532-cf" #define DRV_VERSION "0.1.0" #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" #define RB500_CF_MAXPORTS 1 #define RB500_CF_IO_DELAY 400 #define RB500_CF_REG_BASE 0x0800 #define RB500_CF_REG_ERR 0x080D #define RB500_CF_REG_CTRL 0x080E /* 32bit buffered data register offset */ #define RB500_CF_REG_DBUF32 0x0C00 struct rb532_cf_info { void __iomem *iobase; unsigned int gpio_line; unsigned int irq; }; /* ------------------------------------------------------------------------ */ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance) { struct ata_host *ah = dev_instance; struct rb532_cf_info *info = ah->private_data; if (gpio_get_value(info->gpio_line)) { irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); ata_sff_interrupt(info->irq, dev_instance); } else { irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); } return IRQ_HANDLED; } static struct ata_port_operations rb532_pata_port_ops = { .inherits = &ata_sff_port_ops, 
.sff_data_xfer = ata_sff_data_xfer32, }; /* ------------------------------------------------------------------------ */ static struct scsi_host_template rb532_pata_sht = { ATA_PIO_SHT(DRV_NAME), }; /* ------------------------------------------------------------------------ */ static void rb532_pata_setup_ports(struct ata_host *ah) { struct rb532_cf_info *info = ah->private_data; struct ata_port *ap; ap = ah->ports[0]; ap->ops = &rb532_pata_port_ops; ap->pio_mask = ATA_PIO4; ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE; ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; ata_sff_std_ports(&ap->ioaddr); ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32; ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR; } static __devinit int rb532_pata_driver_probe(struct platform_device *pdev) { int irq; int gpio; struct resource *res; struct ata_host *ah; struct rb532_cf_info *info; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no IOMEM resource found\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(&pdev->dev, "no IRQ resource found\n"); return -ENOENT; } gpio = irq_to_gpio(irq); if (gpio < 0) { dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); return -ENOENT; } ret = gpio_request(gpio, DRV_NAME); if (ret) { dev_err(&pdev->dev, "GPIO request failed\n"); return ret; } /* allocate host */ ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS); if (!ah) return -ENOMEM; platform_set_drvdata(pdev, ah); info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; ah->private_data = info; info->gpio_line = gpio; info->irq = irq; info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!info->iobase) return -ENOMEM; ret = gpio_direction_input(gpio); if (ret) { dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n", ret); goto err_free_gpio; } 
rb532_pata_setup_ports(ah); ret = ata_host_activate(ah, irq, rb532_pata_irq_handler, IRQF_TRIGGER_LOW, &rb532_pata_sht); if (ret) goto err_free_gpio; return 0; err_free_gpio: gpio_free(gpio); return ret; } static __devexit int rb532_pata_driver_remove(struct platform_device *pdev) { struct ata_host *ah = platform_get_drvdata(pdev); struct rb532_cf_info *info = ah->private_data; ata_host_detach(ah); gpio_free(info->gpio_line); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:" DRV_NAME); static struct platform_driver rb532_pata_platform_driver = { .probe = rb532_pata_driver_probe, .remove = __devexit_p(rb532_pata_driver_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; /* ------------------------------------------------------------------------ */ #define DRV_INFO DRV_DESC " version " DRV_VERSION static int __init rb532_pata_module_init(void) { printk(KERN_INFO DRV_INFO "\n"); return platform_driver_register(&rb532_pata_platform_driver); } static void __exit rb532_pata_module_exit(void) { platform_driver_unregister(&rb532_pata_platform_driver); } MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION(DRV_DESC); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); module_init(rb532_pata_module_init); module_exit(rb532_pata_module_exit);
gpl-2.0
kprkpr/kernel-e400
drivers/sh/intc/userimask.c
3094
2104
/* * Support for hardware-assisted userspace interrupt masking. * * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #define pr_fmt(fmt) "intc: " fmt #include <linux/errno.h> #include <linux/sysdev.h> #include <linux/init.h> #include <linux/io.h> #include <asm/sizes.h> #include "internals.h" static void __iomem *uimask; static ssize_t show_intc_userimask(struct sysdev_class *cls, struct sysdev_class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf); } static ssize_t store_intc_userimask(struct sysdev_class *cls, struct sysdev_class_attribute *attr, const char *buf, size_t count) { unsigned long level; level = simple_strtoul(buf, NULL, 10); /* * Minimal acceptable IRQ levels are in the 2 - 16 range, but * these are chomped so as to not interfere with normal IRQs. * * Level 1 is a special case on some CPUs in that it's not * directly settable, but given that USERIMASK cuts off below a * certain level, we don't care about this limitation here. * Level 0 on the other hand equates to user masking disabled. * * We use the default priority level as a cut off so that only * special case opt-in IRQs can be mangled. 
*/ if (level >= intc_get_dfl_prio_level()) return -EINVAL; __raw_writel(0xa5 << 24 | level << 4, uimask); return count; } static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR, show_intc_userimask, store_intc_userimask); static int __init userimask_sysdev_init(void) { if (unlikely(!uimask)) return -ENXIO; return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask); } late_initcall(userimask_sysdev_init); int register_intc_userimask(unsigned long addr) { if (unlikely(uimask)) return -EBUSY; uimask = ioremap_nocache(addr, SZ_4K); if (unlikely(!uimask)) return -ENOMEM; pr_info("userimask support registered for levels 0 -> %d\n", intc_get_dfl_prio_level() - 1); return 0; }
gpl-2.0
Metallice/GTab2-Kernel-TW
drivers/i2c/busses/i2c-hydra.c
4118
4379
/* i2c Support for the Apple `Hydra' Mac I/O Copyright (c) 1999-2004 Geert Uytterhoeven <geert@linux-m68k.org> Based on i2c Support for Via Technologies 82C586B South Bridge Copyright (c) 1998, 1999 Kyösti Mälkki <kmalkki@cc.hut.fi> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/init.h> #include <linux/io.h> #include <asm/hydra.h> #define HYDRA_CPD_PD0 0x00000001 /* CachePD lines */ #define HYDRA_CPD_PD1 0x00000002 #define HYDRA_CPD_PD2 0x00000004 #define HYDRA_CPD_PD3 0x00000008 #define HYDRA_SCLK HYDRA_CPD_PD0 #define HYDRA_SDAT HYDRA_CPD_PD1 #define HYDRA_SCLK_OE 0x00000010 #define HYDRA_SDAT_OE 0x00000020 static inline void pdregw(void *data, u32 val) { struct Hydra *hydra = (struct Hydra *)data; writel(val, &hydra->CachePD); } static inline u32 pdregr(void *data) { struct Hydra *hydra = (struct Hydra *)data; return readl(&hydra->CachePD); } static void hydra_bit_setscl(void *data, int state) { u32 val = pdregr(data); if (state) val &= ~HYDRA_SCLK_OE; else { val &= ~HYDRA_SCLK; val |= HYDRA_SCLK_OE; } pdregw(data, val); } static void hydra_bit_setsda(void *data, int state) { u32 val = pdregr(data); if (state) val &= ~HYDRA_SDAT_OE; else { val &= ~HYDRA_SDAT; val |= HYDRA_SDAT_OE; } 
pdregw(data, val); } static int hydra_bit_getscl(void *data) { return (pdregr(data) & HYDRA_SCLK) != 0; } static int hydra_bit_getsda(void *data) { return (pdregr(data) & HYDRA_SDAT) != 0; } /* ------------------------------------------------------------------------ */ static struct i2c_algo_bit_data hydra_bit_data = { .setsda = hydra_bit_setsda, .setscl = hydra_bit_setscl, .getsda = hydra_bit_getsda, .getscl = hydra_bit_getscl, .udelay = 5, .timeout = HZ }; static struct i2c_adapter hydra_adap = { .owner = THIS_MODULE, .name = "Hydra i2c", .algo_data = &hydra_bit_data, }; static const struct pci_device_id hydra_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) }, { 0, } }; MODULE_DEVICE_TABLE (pci, hydra_ids); static int __devinit hydra_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned long base = pci_resource_start(dev, 0); int res; if (!request_mem_region(base+offsetof(struct Hydra, CachePD), 4, hydra_adap.name)) return -EBUSY; hydra_bit_data.data = pci_ioremap_bar(dev, 0); if (hydra_bit_data.data == NULL) { release_mem_region(base+offsetof(struct Hydra, CachePD), 4); return -ENODEV; } pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */ hydra_adap.dev.parent = &dev->dev; res = i2c_bit_add_bus(&hydra_adap); if (res < 0) { iounmap(hydra_bit_data.data); release_mem_region(base+offsetof(struct Hydra, CachePD), 4); return res; } return 0; } static void __devexit hydra_remove(struct pci_dev *dev) { pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */ i2c_del_adapter(&hydra_adap); iounmap(hydra_bit_data.data); release_mem_region(pci_resource_start(dev, 0)+ offsetof(struct Hydra, CachePD), 4); } static struct pci_driver hydra_driver = { .name = "hydra_smbus", .id_table = hydra_ids, .probe = hydra_probe, .remove = __devexit_p(hydra_remove), }; static int __init i2c_hydra_init(void) { return pci_register_driver(&hydra_driver); } static void __exit i2c_hydra_exit(void) { pci_unregister_driver(&hydra_driver); } 
MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>"); MODULE_DESCRIPTION("i2c for Apple Hydra Mac I/O"); MODULE_LICENSE("GPL"); module_init(i2c_hydra_init); module_exit(i2c_hydra_exit);
gpl-2.0
eaglerazor/android_kernel_samsung_apollo
drivers/i2c/busses/i2c-powermac.c
4886
8630
/* i2c Support for Apple SMU Controller Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp. <benh@kernel.crashing.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/pmac_low_i2c.h> MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("I2C driver for Apple PowerMac"); MODULE_LICENSE("GPL"); /* * SMBUS-type transfer entrypoint */ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data* data) { struct pmac_i2c_bus *bus = i2c_get_adapdata(adap); int rc = 0; int read = (read_write == I2C_SMBUS_READ); int addrdir = (addr << 1) | read; int mode, subsize, len; u32 subaddr; u8 *buf; u8 local[2]; if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) { mode = pmac_i2c_mode_std; subsize = 0; subaddr = 0; } else { mode = read ? 
pmac_i2c_mode_combined : pmac_i2c_mode_stdsub; subsize = 1; subaddr = command; } switch (size) { case I2C_SMBUS_QUICK: buf = NULL; len = 0; break; case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: buf = &data->byte; len = 1; break; case I2C_SMBUS_WORD_DATA: if (!read) { local[0] = data->word & 0xff; local[1] = (data->word >> 8) & 0xff; } buf = local; len = 2; break; /* Note that these are broken vs. the expected smbus API where * on reads, the length is actually returned from the function, * but I think the current API makes no sense and I don't want * any driver that I haven't verified for correctness to go * anywhere near a pmac i2c bus anyway ... * * I'm also not completely sure what kind of phases to do between * the actual command and the data (what I am _supposed_ to do that * is). For now, I assume writes are a single stream and reads have * a repeat start/addr phase (but not stop in between) */ case I2C_SMBUS_BLOCK_DATA: buf = data->block; len = data->block[0] + 1; break; case I2C_SMBUS_I2C_BLOCK_DATA: buf = &data->block[1]; len = data->block[0]; break; default: return -EINVAL; } rc = pmac_i2c_open(bus, 0); if (rc) { dev_err(&adap->dev, "Failed to open I2C, err %d\n", rc); return rc; } rc = pmac_i2c_setmode(bus, mode); if (rc) { dev_err(&adap->dev, "Failed to set I2C mode %d, err %d\n", mode, rc); goto bail; } rc = pmac_i2c_xfer(bus, addrdir, subsize, subaddr, buf, len); if (rc) { if (rc == -ENXIO) dev_dbg(&adap->dev, "I2C transfer at 0x%02x failed, size %d, " "err %d\n", addrdir >> 1, size, rc); else dev_err(&adap->dev, "I2C transfer at 0x%02x failed, size %d, " "err %d\n", addrdir >> 1, size, rc); goto bail; } if (size == I2C_SMBUS_WORD_DATA && read) { data->word = ((u16)local[1]) << 8; data->word |= local[0]; } bail: pmac_i2c_close(bus); return rc; } /* * Generic i2c master transfer entrypoint. This driver only support single * messages (for "lame i2c" transfers). 
Anything else should use the smbus * entry point */ static int i2c_powermac_master_xfer( struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct pmac_i2c_bus *bus = i2c_get_adapdata(adap); int rc = 0; int read; int addrdir; if (num != 1) { dev_err(&adap->dev, "Multi-message I2C transactions not supported\n"); return -EOPNOTSUPP; } if (msgs->flags & I2C_M_TEN) return -EINVAL; read = (msgs->flags & I2C_M_RD) != 0; addrdir = (msgs->addr << 1) | read; rc = pmac_i2c_open(bus, 0); if (rc) { dev_err(&adap->dev, "Failed to open I2C, err %d\n", rc); return rc; } rc = pmac_i2c_setmode(bus, pmac_i2c_mode_std); if (rc) { dev_err(&adap->dev, "Failed to set I2C mode %d, err %d\n", pmac_i2c_mode_std, rc); goto bail; } rc = pmac_i2c_xfer(bus, addrdir, 0, 0, msgs->buf, msgs->len); if (rc < 0) { if (rc == -ENXIO) dev_dbg(&adap->dev, "I2C %s 0x%02x failed, err %d\n", addrdir & 1 ? "read from" : "write to", addrdir >> 1, rc); else dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n", addrdir & 1 ? "read from" : "write to", addrdir >> 1, rc); } bail: pmac_i2c_close(bus); return rc < 0 ? rc : 1; } static u32 i2c_powermac_func(struct i2c_adapter * adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_I2C; } /* For now, we only handle smbus */ static const struct i2c_algorithm i2c_powermac_algorithm = { .smbus_xfer = i2c_powermac_smbus_xfer, .master_xfer = i2c_powermac_master_xfer, .functionality = i2c_powermac_func, }; static int __devexit i2c_powermac_remove(struct platform_device *dev) { struct i2c_adapter *adapter = platform_get_drvdata(dev); int rc; rc = i2c_del_adapter(adapter); /* We aren't that prepared to deal with this... 
*/ if (rc) printk(KERN_WARNING "i2c-powermac.c: Failed to remove bus %s !\n", adapter->name); platform_set_drvdata(dev, NULL); memset(adapter, 0, sizeof(*adapter)); return 0; } static int __devinit i2c_powermac_probe(struct platform_device *dev) { struct pmac_i2c_bus *bus = dev->dev.platform_data; struct device_node *parent = NULL; struct i2c_adapter *adapter; const char *basename; int rc; if (bus == NULL) return -EINVAL; adapter = pmac_i2c_get_adapter(bus); /* Ok, now we need to make up a name for the interface that will * match what we used to do in the past, that is basically the * controller's parent device node for keywest. PMU didn't have a * naming convention and SMU has a different one */ switch(pmac_i2c_get_type(bus)) { case pmac_i2c_bus_keywest: parent = of_get_parent(pmac_i2c_get_controller(bus)); if (parent == NULL) return -EINVAL; basename = parent->name; break; case pmac_i2c_bus_pmu: basename = "pmu"; break; case pmac_i2c_bus_smu: /* This is not what we used to do but I'm fixing drivers at * the same time as this change */ basename = "smu"; break; default: return -EINVAL; } snprintf(adapter->name, sizeof(adapter->name), "%s %d", basename, pmac_i2c_get_channel(bus)); of_node_put(parent); platform_set_drvdata(dev, adapter); adapter->algo = &i2c_powermac_algorithm; i2c_set_adapdata(adapter, bus); adapter->dev.parent = &dev->dev; rc = i2c_add_adapter(adapter); if (rc) { printk(KERN_ERR "i2c-powermac: Adapter %s registration " "failed\n", adapter->name); memset(adapter, 0, sizeof(*adapter)); } printk(KERN_INFO "PowerMac i2c bus %s registered\n", adapter->name); if (!strncmp(basename, "uni-n", 5)) { struct device_node *np; const u32 *prop; struct i2c_board_info info; /* Instantiate I2C motion sensor if present */ np = of_find_node_by_name(NULL, "accelerometer"); if (np && of_device_is_compatible(np, "AAPL,accelerometer_1") && (prop = of_get_property(np, "reg", NULL))) { int i2c_bus; const char *tmp_bus; /* look for bus either using "reg" or by path */ 
tmp_bus = strstr(np->full_name, "/i2c-bus@"); if (tmp_bus) i2c_bus = *(tmp_bus + 9) - '0'; else i2c_bus = ((*prop) >> 8) & 0x0f; if (pmac_i2c_get_channel(bus) == i2c_bus) { memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = ((*prop) & 0xff) >> 1; strlcpy(info.type, "ams", I2C_NAME_SIZE); i2c_new_device(adapter, &info); } } } return rc; } static struct platform_driver i2c_powermac_driver = { .probe = i2c_powermac_probe, .remove = __devexit_p(i2c_powermac_remove), .driver = { .name = "i2c-powermac", .bus = &platform_bus_type, }, }; module_platform_driver(i2c_powermac_driver); MODULE_ALIAS("platform:i2c-powermac");
gpl-2.0
schqiushui/kernel_kk442_sense_dlx
drivers/mtd/devices/docecc.c
5398
15902
/*
 * ECC algorithm for M-systems disk on chip. We use the excellent Reed
 * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the
 * GNU GPL License. The rest is simply to convert the disk on chip
 * syndrome into a standard syndome.
 *
 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
 * Copyright (C) 2000 Netgem S.A.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/doc2000.h>

#define DEBUG_ECC 0
/* need to undef it (from asm/termbits.h) */
#undef B0

/* Reed-Solomon code parameters: GF(2^10), 4 check symbols per block */
#define MM 10 /* Symbol size in bits */
#define KK (1023-4) /* Number of data symbols per block */
#define B0 510 /* First root of generator polynomial, alpha form */
#define PRIM 1 /* power of alpha used to generate roots of generator poly */
#define	NN ((1 << MM) - 1)

typedef unsigned short dtype;

/* 1+x^3+x^10 */
static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 };

/* This defines the type used to store an element of the Galois Field
 * used by the code. Make sure this is something larger than a char if
 * if anything larger than GF(256) is used.
 *
 * Note: unsigned char will work up to GF(256) but int seems to run
 * faster on the Pentium.
 */
typedef int gf;

/* No legal value in index form represents zero, so
 * we need a special value for this purpose
 */
#define A0	(NN)

/* Compute x % NN, where NN is 2**MM - 1,
 * without a slow divide
 */
static inline gf modnn(int x)
{
	while (x >= NN) {
		x -= NN;
		x = (x >> MM) + (x & NN);
	}
	return x;
}

/* Zero out the first n entries of array a */
#define	CLEAR(a,n) {\
	int ci;\
	for(ci=(n)-1;ci >=0;ci--)\
		(a)[ci] = 0;\
	}

/* Copy n entries of b into a */
#define	COPY(a,b,n) {\
	int ci;\
	for(ci=(n)-1;ci >=0;ci--)\
		(a)[ci] = (b)[ci];\
	}

/* As COPY, iterating from the top so &b[1] may overlap b (shift-down) */
#define	COPYDOWN(a,b,n) {\
	int ci;\
	for(ci=(n)-1;ci >=0;ci--)\
		(a)[ci] = (b)[ci];\
	}

#define Ldec 1

/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m]
   lookup tables:  index->polynomial form   alpha_to[] contains j=alpha**i;
                   polynomial form -> index form  index_of[j=alpha**i] = i
   alpha=2 is the primitive element of GF(2**m)
   HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows:
        Let @ represent the primitive element commonly called "alpha" that
   is the root of the primitive polynomial p(x). Then in GF(2^m), for any
   0 <= i <= 2^m-2,
        @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
   where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation
   of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for
   example the polynomial representation of @^5 would be given by the binary
   representation of the integer "alpha_to[5]".
                   Similarly, index_of[] can be used as follows:
        As above, let @ represent the primitive element of GF(2^m) that is
   the root of the primitive polynomial p(x). In order to find the power
   of @ (alpha) that has the polynomial representation
        a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
   we consider the integer "i" whose binary representation with a(0) being LSB
   and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
   "index_of[i]". Now, @^index_of[i] is that element whose polynomial
    representation is (a(0),a(1),a(2),...,a(m-1)).
   NOTE:
        The element alpha_to[2^m-1] = 0 always signifying that the
   representation of "@^infinity" = 0 is (0,0,0,...,0).
        Similarly, the element index_of[0] = A0 always signifying
   that the power of alpha which has the polynomial representation
   (0,0,...,0) is "infinity".

*/
static void generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
{
	register int i, mask;

	mask = 1;
	Alpha_to[MM] = 0;
	for (i = 0; i < MM; i++) {
		Alpha_to[i] = mask;
		Index_of[Alpha_to[i]] = i;
		/* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */
		if (Pp[i] != 0)
			Alpha_to[MM] ^= mask;	/* Bit-wise EXOR operation */
		mask <<= 1;	/* single left-shift */
	}
	Index_of[Alpha_to[MM]] = MM;
	/*
	 * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by
	 * poly-repr of @^i shifted left one-bit and accounting for any @^MM
	 * term that may occur when poly-repr of @^i is shifted.
	 */
	mask >>= 1;
	for (i = MM + 1; i < NN; i++) {
		if (Alpha_to[i - 1] >= mask)
			Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1);
		else
			Alpha_to[i] = Alpha_to[i - 1] << 1;
		Index_of[Alpha_to[i]] = i;
	}
	Index_of[0] = A0;
	Alpha_to[NN] = 0;
}

/*
 * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content
 * of the feedback shift register after having processed the data and
 * the ECC.
 *
 * Return number of symbols corrected, or -1 if codeword is illegal
 * or uncorrectable. If eras_pos is non-null, the detected error locations
 * are written back. NOTE! This array must be at least NN-KK elements long.
 * The corrected data are written in eras_val[]. They must be xor with the data
 * to retrieve the correct data : data[erase_pos[i]] ^= erase_val[i] .
 *
 * First "no_eras" erasures are declared by the calling program. Then, the
 * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
 * If the number of channel errors is not greater than "t_after_eras" the
 * transmitted codeword will be recovered. Details of algorithm can be found
 * in R. Blahut's "Theory ... of Error-Correcting Codes".
 *
 * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure
 * will result. The decoder *could* check for this condition, but it would involve
 * extra time on every decoding operation.
 */
static int eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
		       gf bb[NN - KK + 1], gf eras_val[NN-KK],
		       int eras_pos[NN-KK], int no_eras)
{
	int deg_lambda, el, deg_omega;
	int i, j, r,k;
	gf u,q,tmp,num1,num2,den,discr_r;
	gf lambda[NN-KK + 1], s[NN-KK + 1];	/* Err+Eras Locator poly
						 * and syndrome poly */
	gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1];
	gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK];
	int syn_error, count;

	/* Quick check: all-zero remainder means no errors at all */
	syn_error = 0;
	for(i=0;i<NN-KK;i++)
		syn_error |= bb[i];

	if (!syn_error) {
		/* if remainder is zero, data[] is a codeword and there are no
		 * errors to correct. So return data[] unmodified
		 */
		count = 0;
		goto finish;
	}

	/* Convert the shift-register contents into proper syndromes s[1..NN-KK] */
	for(i=1;i<=NN-KK;i++){
		s[i] = bb[0];
	}
	for(j=1;j<NN-KK;j++){
		if(bb[j] == 0)
			continue;
		tmp = Index_of[bb[j]];

		for(i=1;i<=NN-KK;i++)
			s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
	}

	/* undo the feedback register implicit multiplication and convert
	   syndromes to index form */
	for(i=1;i<=NN-KK;i++) {
		tmp = Index_of[s[i]];
		if (tmp != A0)
			tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
		s[i] = tmp;
	}

	CLEAR(&lambda[1],NN-KK);
	lambda[0] = 1;

	if (no_eras > 0) {
		/* Init lambda to be the erasure locator polynomial */
		lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])];
		for (i = 1; i < no_eras; i++) {
			u = modnn(PRIM*eras_pos[i]);
			for (j = i+1; j > 0; j--) {
				tmp = Index_of[lambda[j - 1]];
				if(tmp != A0)
					lambda[j] ^= Alpha_to[modnn(u + tmp)];
			}
		}
#if DEBUG_ECC >= 1
		/* Test code that verifies the erasure locator polynomial just constructed
		   Needed only for decoder debugging. */

		/* find roots of the erasure location polynomial */
		for(i=1;i<=no_eras;i++)
			reg[i] = Index_of[lambda[i]];
		count = 0;
		for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
			q = 1;
			for (j = 1; j <= no_eras; j++)
				if (reg[j] != A0) {
					reg[j] = modnn(reg[j] + j);
					q ^= Alpha_to[reg[j]];
				}
			if (q != 0)
				continue;
			/* store root and error location number indices */
			root[count] = i;
			loc[count] = k;
			count++;
		}
		if (count != no_eras) {
			printf("\n lambda(x) is WRONG\n");
			count = -1;
			goto finish;
		}
#if DEBUG_ECC >= 2
		printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
		for (i = 0; i < count; i++)
			printf("%d ", loc[i]);
		printf("\n");
#endif
#endif
	}
	for(i=0;i<NN-KK+1;i++)
		b[i] = Index_of[lambda[i]];

	/*
	 * Begin Berlekamp-Massey algorithm to determine error+erasure
	 * locator polynomial
	 */
	r = no_eras;
	el = no_eras;
	while (++r <= NN-KK) {	/* r is the step number */
		/* Compute discrepancy at the r-th step in poly-form */
		discr_r = 0;
		for (i = 0; i < r; i++){
			if ((lambda[i] != 0) && (s[r - i] != A0)) {
				discr_r ^= Alpha_to[modnn(Index_of[lambda[i]] + s[r - i])];
			}
		}
		discr_r = Index_of[discr_r];	/* Index form */
		if (discr_r == A0) {
			/* 2 lines below: B(x) <-- x*B(x) */
			COPYDOWN(&b[1],b,NN-KK);
			b[0] = A0;
		} else {
			/* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
			t[0] = lambda[0];
			for (i = 0 ; i < NN-KK; i++) {
				if(b[i] != A0)
					t[i+1] = lambda[i+1] ^ Alpha_to[modnn(discr_r + b[i])];
				else
					t[i+1] = lambda[i+1];
			}
			if (2 * el <= r + no_eras - 1) {
				el = r + no_eras - el;
				/*
				 * 2 lines below: B(x) <-- inv(discr_r) *
				 * lambda(x)
				 */
				for (i = 0; i <= NN-KK; i++)
					b[i] = (lambda[i] == 0) ? A0 : modnn(Index_of[lambda[i]] - discr_r + NN);
			} else {
				/* 2 lines below: B(x) <-- x*B(x) */
				COPYDOWN(&b[1],b,NN-KK);
				b[0] = A0;
			}
			COPY(lambda,t,NN-KK+1);
		}
	}

	/* Convert lambda to index form and compute deg(lambda(x)) */
	deg_lambda = 0;
	for(i=0;i<NN-KK+1;i++){
		lambda[i] = Index_of[lambda[i]];
		if(lambda[i] != A0)
			deg_lambda = i;
	}
	/*
	 * Find roots of the error+erasure locator polynomial by Chien
	 * Search
	 */
	COPY(&reg[1],&lambda[1],NN-KK);
	count = 0;		/* Number of roots of lambda(x) */
	for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
		q = 1;
		for (j = deg_lambda; j > 0; j--){
			if (reg[j] != A0) {
				reg[j] = modnn(reg[j] + j);
				q ^= Alpha_to[reg[j]];
			}
		}
		if (q != 0)
			continue;
		/* store root (index-form) and error location number */
		root[count] = i;
		loc[count] = k;
		/* If we've already found max possible roots,
		 * abort the search to save time
		 */
		if(++count == deg_lambda)
			break;
	}
	if (deg_lambda != count) {
		/*
		 * deg(lambda) unequal to number of roots => uncorrectable
		 * error detected
		 */
		count = -1;
		goto finish;
	}
	/*
	 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
	 * x**(NN-KK)). in index form. Also find deg(omega).
	 */
	deg_omega = 0;
	for (i = 0; i < NN-KK;i++){
		tmp = 0;
		j = (deg_lambda < i) ? deg_lambda : i;
		for(;j >= 0; j--){
			if ((s[i + 1 - j] != A0) && (lambda[j] != A0))
				tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])];
		}
		if(tmp != 0)
			deg_omega = i;
		omega[i] = Index_of[tmp];
	}
	omega[NN-KK] = A0;

	/*
	 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
	 * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
	 */
	for (j = count-1; j >=0; j--) {
		num1 = 0;
		for (i = deg_omega; i >= 0; i--) {
			if (omega[i] != A0)
				num1 ^= Alpha_to[modnn(omega[i] + i * root[j])];
		}
		num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
		den = 0;

		/* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
		for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
			if(lambda[i+1] != A0)
				den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
		}
		if (den == 0) {
#if DEBUG_ECC >= 1
			printf("\n ERROR: denominator = 0\n");
#endif
			/* Convert to dual- basis */
			count = -1;
			goto finish;
		}
		/* Apply error to data */
		if (num1 != 0) {
			eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])];
		} else {
			eras_val[j] = 0;
		}
	}
 finish:
	/* Export the found error locations to the caller */
	for(i=0;i<count;i++)
		eras_pos[i] = loc[i];
	return count;
}

/***************************************************************************/
/* The DOC specific code begins here */

#define SECTOR_SIZE 512
/* The sector bytes are packed into NB_DATA MM bits words */
#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)

/*
 * Correct the errors in 'sector[]' by using 'ecc1[]' which is the
 * content of the feedback shift register applyied to the sector and
 * the ECC. Return the number of errors corrected (and correct them in
 * sector), or -1 if error
 */
int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
{
	int parity, i, nb_errors;
	gf bb[NN - KK + 1];
	gf error_val[NN-KK];
	int error_pos[NN-KK], pos, bitpos, index, val;
	dtype *Alpha_to, *Index_of;

	/* init log and exp tables here to save memory. However, it is slower */
	Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
	if (!Alpha_to)
		return -1;

	Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
	if (!Index_of) {
		kfree(Alpha_to);
		return -1;
	}

	generate_gf(Alpha_to, Index_of);

	parity = ecc1[1];

	/* Unpack the 4 ten-bit syndrome words from the 6 hardware ECC bytes */
	bb[0] = (ecc1[4] & 0xff) | ((ecc1[5] & 0x03) << 8);
	bb[1] = ((ecc1[5] & 0xfc) >> 2) | ((ecc1[2] & 0x0f) << 6);
	bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
	bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);

	nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
				error_val, error_pos, 0);
	if (nb_errors <= 0)
		goto the_end;

	/* correct the errors */
	for(i=0;i<nb_errors;i++) {
		pos = error_pos[i];
		if (pos >= NB_DATA && pos < KK) {
			nb_errors = -1;
			goto the_end;
		}
		if (pos < NB_DATA) {
			/* extract bit position (MSB first) */
			pos = 10 * (NB_DATA - 1 - pos) - 6;
			/* now correct the following 10 bits. At most two bytes
			   can be modified since pos is even */
			index = (pos >> 3) ^ 1;
			bitpos = pos & 7;
			if ((index >= 0 && index < SECTOR_SIZE) ||
			    index == (SECTOR_SIZE + 1)) {
				val = error_val[i] >> (2 + bitpos);
				parity ^= val;
				if (index < SECTOR_SIZE)
					sector[index] ^= val;
			}
			index = ((pos >> 3) + 1) ^ 1;
			bitpos = (bitpos + 10) & 7;
			if (bitpos == 0)
				bitpos = 8;
			if ((index >= 0 && index < SECTOR_SIZE) ||
			    index == (SECTOR_SIZE + 1)) {
				val = error_val[i] << (8 - bitpos);
				parity ^= val;
				if (index < SECTOR_SIZE)
					sector[index] ^= val;
			}
		}
	}

	/* use parity to test extra errors */
	if ((parity & 0xff) != 0)
		nb_errors = -1;

 the_end:
	kfree(Alpha_to);
	kfree(Index_of);
	return nb_errors;
}
EXPORT_SYMBOL_GPL(doc_decode_ecc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
gpl-2.0
jld/b2g-hamachi-kernel
arch/m68k/platform/5272/gpio.c
7446
2243
/* * Coldfire generic GPIO support * * (C) Copyright 2009, Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfgpio.h> static struct mcf_gpio_chip mcf_gpio_chips[] = { { .gpio_chip = { .label = "PA", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .ngpio = 16, }, .pddr = (void __iomem *) MCFSIM_PADDR, .podr = (void __iomem *) MCFSIM_PADAT, .ppdr = (void __iomem *) MCFSIM_PADAT, }, { .gpio_chip = { .label = "PB", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .base = 16, .ngpio = 16, }, .pddr = (void __iomem *) MCFSIM_PBDDR, .podr = (void __iomem *) MCFSIM_PBDAT, .ppdr = (void __iomem *) MCFSIM_PBDAT, }, { .gpio_chip = { .label = "PC", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .base = 32, .ngpio = 16, }, .pddr = (void __iomem *) MCFSIM_PCDDR, .podr = (void __iomem *) MCFSIM_PCDAT, .ppdr = (void __iomem *) MCFSIM_PCDAT, }, }; static int __init mcf_gpio_init(void) { unsigned i = 0; while (i < ARRAY_SIZE(mcf_gpio_chips)) (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]); 
return 0; } core_initcall(mcf_gpio_init);
gpl-2.0
786228836/linux
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
7958
6568
/*
 * Linux LED driver for RTL8187
 *
 * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
 *
 * Based on the LED handling in the r8187 driver, which is:
 * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
 *
 * Thanks to Realtek for their support!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_RTL8187_LEDS

#include <net/mac80211.h>
#include <linux/usb.h>
#include <linux/eeprom_93cx6.h>

#include "rtl8187.h"
#include "leds.h"

/*
 * Turn the TX LED on.  As this routine does read/write operations on the
 * hardware, it must be run from a work queue.
 */
static void led_turn_on(struct work_struct *work)
{
	u8 reg;
	struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
				    led_on.work);
	struct rtl8187_led *led = &priv->led_tx;

	/* Don't change the LED, when the device is down. */
	if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
		return ;

	/* Skip if the LED is not registered. */
	if (!led->dev)
		return;

	mutex_lock(&priv->conf_mutex);
	switch (led->ledpin) {
	case LED_PIN_GPIO0:
		rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
		rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00);
		break;
	case LED_PIN_LED0:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_LED1:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_HW:
	default:
		/* Hardware-controlled LED: nothing to do */
		break;
	}
	mutex_unlock(&priv->conf_mutex);
}

/*
 * Turn the TX LED off.  As this routine does read/write operations on the
 * hardware, it must be run from a work queue.
 */
static void led_turn_off(struct work_struct *work)
{
	u8 reg;
	struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
				    led_off.work);
	struct rtl8187_led *led = &priv->led_tx;

	/* Don't change the LED, when the device is down. */
	if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
		return ;

	/* Skip if the LED is not registered. */
	if (!led->dev)
		return;

	mutex_lock(&priv->conf_mutex);
	switch (led->ledpin) {
	case LED_PIN_GPIO0:
		rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
		rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01);
		break;
	case LED_PIN_LED0:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_LED1:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_HW:
	default:
		/* Hardware-controlled LED: nothing to do */
		break;
	}
	mutex_unlock(&priv->conf_mutex);
}

/* Callback from the LED subsystem. */
static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
				       enum led_brightness brightness)
{
	struct rtl8187_led *led = container_of(led_dev, struct rtl8187_led,
					       led_dev);
	struct ieee80211_hw *hw = led->dev;
	struct rtl8187_priv *priv;
	/* NOTE(review): function-static, so this flag is shared across ALL
	 * rtl8187 devices — presumably acceptable for single-device use;
	 * confirm before supporting multiple adapters. */
	static bool radio_on;

	if (!hw)
		return;
	priv = hw->priv;
	if (led->is_radio) {
		if (brightness == LED_FULL) {
			ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
			radio_on = true;
		} else if (radio_on) {
			radio_on = false;
			cancel_delayed_work(&priv->led_on);
			ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
		}
	} else if (radio_on) {
		if (brightness == LED_OFF) {
			ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
			/* The LED is off for 1/20 sec - it just blinks. */
			ieee80211_queue_delayed_work(hw, &priv->led_on,
						     HZ / 20);
		} else
			ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
	}
}

/*
 * Register one LED with the LED class.  Returns 0 on success, -EEXIST if
 * the slot is already used, -EINVAL for a missing trigger, or the error
 * from led_classdev_register().
 */
static int rtl8187_register_led(struct ieee80211_hw *dev,
				struct rtl8187_led *led, const char *name,
				const char *default_trigger, u8 ledpin,
				bool is_radio)
{
	int err;
	struct rtl8187_priv *priv = dev->priv;

	if (led->dev)
		return -EEXIST;
	if (!default_trigger)
		return -EINVAL;
	led->dev = dev;
	led->ledpin = ledpin;
	led->is_radio = is_radio;
	/* BUGFIX: strncpy() does not NUL-terminate when the source fills
	 * the buffer; copy one byte less and terminate explicitly. */
	strncpy(led->name, name, sizeof(led->name) - 1);
	led->name[sizeof(led->name) - 1] = '\0';

	led->led_dev.name = led->name;
	led->led_dev.default_trigger = default_trigger;
	led->led_dev.brightness_set = rtl8187_led_brightness_set;

	err = led_classdev_register(&priv->udev->dev, &led->led_dev);
	if (err) {
		printk(KERN_INFO "LEDs: Failed to register %s\n", name);
		led->dev = NULL;
		return err;
	}
	return 0;
}

/* Unregister one LED and flush any pending turn-off work. */
static void rtl8187_unregister_led(struct rtl8187_led *led)
{
	struct ieee80211_hw *hw = led->dev;
	struct rtl8187_priv *priv = hw->priv;

	led_classdev_unregister(&led->led_dev);
	flush_delayed_work(&priv->led_off);
	led->dev = NULL;
}

/*
 * Create the radio, TX and RX LEDs for the device.  The LED pin used
 * depends on the customer ID found in the EEPROM.
 */
void rtl8187_leds_init(struct ieee80211_hw *dev, u16 custid)
{
	struct rtl8187_priv *priv = dev->priv;
	char name[RTL8187_LED_MAX_NAME_LEN + 1];
	u8 ledpin;
	int err;

	/* According to the vendor driver, the LED operation depends on the
	 * customer ID encoded in the EEPROM
	 */
	printk(KERN_INFO "rtl8187: Customer ID is 0x%02X\n", custid);
	switch (custid) {
	case EEPROM_CID_RSVD0:
	case EEPROM_CID_RSVD1:
	case EEPROM_CID_SERCOMM_PS:
	case EEPROM_CID_QMI:
	case EEPROM_CID_DELL:
	case EEPROM_CID_TOSHIBA:
		ledpin = LED_PIN_GPIO0;
		break;
	case EEPROM_CID_ALPHA0:
		ledpin = LED_PIN_LED0;
		break;
	case EEPROM_CID_HW:
		ledpin = LED_PIN_HW;
		break;
	default:
		ledpin = LED_PIN_GPIO0;
	}

	INIT_DELAYED_WORK(&priv->led_on, led_turn_on);
	INIT_DELAYED_WORK(&priv->led_off, led_turn_off);

	snprintf(name, sizeof(name),
		 "rtl8187-%s::radio", wiphy_name(dev->wiphy));
	err = rtl8187_register_led(dev, &priv->led_radio, name,
			 ieee80211_get_radio_led_name(dev), ledpin, true);
	if (err)
		return;

	snprintf(name, sizeof(name),
		 "rtl8187-%s::tx", wiphy_name(dev->wiphy));
	err = rtl8187_register_led(dev, &priv->led_tx, name,
			 ieee80211_get_tx_led_name(dev), ledpin, false);
	if (err)
		goto err_tx;

	snprintf(name, sizeof(name),
		 "rtl8187-%s::rx", wiphy_name(dev->wiphy));
	err = rtl8187_register_led(dev, &priv->led_rx, name,
			 ieee80211_get_rx_led_name(dev), ledpin, false);
	if (!err)
		return;

	/* registration of RX LED failed - unregister */
	rtl8187_unregister_led(&priv->led_tx);
err_tx:
	rtl8187_unregister_led(&priv->led_radio);
}

/* Tear down all LEDs and cancel outstanding LED work. */
void rtl8187_leds_exit(struct ieee80211_hw *dev)
{
	struct rtl8187_priv *priv = dev->priv;

	rtl8187_unregister_led(&priv->led_radio);
	rtl8187_unregister_led(&priv->led_rx);
	rtl8187_unregister_led(&priv->led_tx);
	cancel_delayed_work_sync(&priv->led_off);
	cancel_delayed_work_sync(&priv->led_on);
}
#endif /* def CONFIG_RTL8187_LEDS */
gpl-2.0
kbukin1/pnotify-linux-3.18.9
drivers/net/wireless/rtl818x/rtl8187/leds.c
7958
6568
/*
 * Linux LED driver for RTL8187
 *
 * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
 *
 * Based on the LED handling in the r8187 driver, which is:
 * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
 *
 * Thanks to Realtek for their support!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_RTL8187_LEDS

#include <net/mac80211.h>
#include <linux/usb.h>
#include <linux/eeprom_93cx6.h>

#include "rtl8187.h"
#include "leds.h"

/*
 * Turn the TX LED on.  As this routine does read/write operations on the
 * hardware, it must be run from a work queue.
 */
static void led_turn_on(struct work_struct *work)
{
	u8 reg;
	struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
				    led_on.work);
	struct rtl8187_led *led = &priv->led_tx;

	/* Don't change the LED, when the device is down. */
	if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
		return ;

	/* Skip if the LED is not registered. */
	if (!led->dev)
		return;

	mutex_lock(&priv->conf_mutex);
	switch (led->ledpin) {
	case LED_PIN_GPIO0:
		rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
		rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00);
		break;
	case LED_PIN_LED0:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_LED1:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_HW:
	default:
		/* Hardware-controlled LED: nothing to do */
		break;
	}
	mutex_unlock(&priv->conf_mutex);
}

/*
 * Turn the TX LED off.  As this routine does read/write operations on the
 * hardware, it must be run from a work queue.
 */
static void led_turn_off(struct work_struct *work)
{
	u8 reg;
	struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
				    led_off.work);
	struct rtl8187_led *led = &priv->led_tx;

	/* Don't change the LED, when the device is down. */
	if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
		return ;

	/* Skip if the LED is not registered. */
	if (!led->dev)
		return;

	mutex_lock(&priv->conf_mutex);
	switch (led->ledpin) {
	case LED_PIN_GPIO0:
		rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
		rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01);
		break;
	case LED_PIN_LED0:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_LED1:
		reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5);
		rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
		break;
	case LED_PIN_HW:
	default:
		/* Hardware-controlled LED: nothing to do */
		break;
	}
	mutex_unlock(&priv->conf_mutex);
}

/* Callback from the LED subsystem. */
static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
				       enum led_brightness brightness)
{
	struct rtl8187_led *led = container_of(led_dev, struct rtl8187_led,
					       led_dev);
	struct ieee80211_hw *hw = led->dev;
	struct rtl8187_priv *priv;
	/* NOTE(review): function-static, so this flag is shared across ALL
	 * rtl8187 devices — presumably acceptable for single-device use;
	 * confirm before supporting multiple adapters. */
	static bool radio_on;

	if (!hw)
		return;
	priv = hw->priv;
	if (led->is_radio) {
		if (brightness == LED_FULL) {
			ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
			radio_on = true;
		} else if (radio_on) {
			radio_on = false;
			cancel_delayed_work(&priv->led_on);
			ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
		}
	} else if (radio_on) {
		if (brightness == LED_OFF) {
			ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
			/* The LED is off for 1/20 sec - it just blinks. */
			ieee80211_queue_delayed_work(hw, &priv->led_on,
						     HZ / 20);
		} else
			ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
	}
}

/*
 * Register one LED with the LED class.  Returns 0 on success, -EEXIST if
 * the slot is already used, -EINVAL for a missing trigger, or the error
 * from led_classdev_register().
 */
static int rtl8187_register_led(struct ieee80211_hw *dev,
				struct rtl8187_led *led, const char *name,
				const char *default_trigger, u8 ledpin,
				bool is_radio)
{
	int err;
	struct rtl8187_priv *priv = dev->priv;

	if (led->dev)
		return -EEXIST;
	if (!default_trigger)
		return -EINVAL;
	led->dev = dev;
	led->ledpin = ledpin;
	led->is_radio = is_radio;
	/* BUGFIX: strncpy() does not NUL-terminate when the source fills
	 * the buffer; copy one byte less and terminate explicitly. */
	strncpy(led->name, name, sizeof(led->name) - 1);
	led->name[sizeof(led->name) - 1] = '\0';

	led->led_dev.name = led->name;
	led->led_dev.default_trigger = default_trigger;
	led->led_dev.brightness_set = rtl8187_led_brightness_set;

	err = led_classdev_register(&priv->udev->dev, &led->led_dev);
	if (err) {
		printk(KERN_INFO "LEDs: Failed to register %s\n", name);
		led->dev = NULL;
		return err;
	}
	return 0;
}

/* Unregister one LED and flush any pending turn-off work. */
static void rtl8187_unregister_led(struct rtl8187_led *led)
{
	struct ieee80211_hw *hw = led->dev;
	struct rtl8187_priv *priv = hw->priv;

	led_classdev_unregister(&led->led_dev);
	flush_delayed_work(&priv->led_off);
	led->dev = NULL;
}

/*
 * Create the radio, TX and RX LEDs for the device.  The LED pin used
 * depends on the customer ID found in the EEPROM.
 */
void rtl8187_leds_init(struct ieee80211_hw *dev, u16 custid)
{
	struct rtl8187_priv *priv = dev->priv;
	char name[RTL8187_LED_MAX_NAME_LEN + 1];
	u8 ledpin;
	int err;

	/* According to the vendor driver, the LED operation depends on the
	 * customer ID encoded in the EEPROM
	 */
	printk(KERN_INFO "rtl8187: Customer ID is 0x%02X\n", custid);
	switch (custid) {
	case EEPROM_CID_RSVD0:
	case EEPROM_CID_RSVD1:
	case EEPROM_CID_SERCOMM_PS:
	case EEPROM_CID_QMI:
	case EEPROM_CID_DELL:
	case EEPROM_CID_TOSHIBA:
		ledpin = LED_PIN_GPIO0;
		break;
	case EEPROM_CID_ALPHA0:
		ledpin = LED_PIN_LED0;
		break;
	case EEPROM_CID_HW:
		ledpin = LED_PIN_HW;
		break;
	default:
		ledpin = LED_PIN_GPIO0;
	}

	INIT_DELAYED_WORK(&priv->led_on, led_turn_on);
	INIT_DELAYED_WORK(&priv->led_off, led_turn_off);

	snprintf(name, sizeof(name),
		 "rtl8187-%s::radio", wiphy_name(dev->wiphy));
	err = rtl8187_register_led(dev, &priv->led_radio, name,
			 ieee80211_get_radio_led_name(dev), ledpin, true);
	if (err)
		return;

	snprintf(name, sizeof(name),
		 "rtl8187-%s::tx", wiphy_name(dev->wiphy));
	err = rtl8187_register_led(dev, &priv->led_tx, name,
			 ieee80211_get_tx_led_name(dev), ledpin, false);
	if (err)
		goto err_tx;

	snprintf(name, sizeof(name),
		 "rtl8187-%s::rx", wiphy_name(dev->wiphy));
	err = rtl8187_register_led(dev, &priv->led_rx, name,
			 ieee80211_get_rx_led_name(dev), ledpin, false);
	if (!err)
		return;

	/* registration of RX LED failed - unregister */
	rtl8187_unregister_led(&priv->led_tx);
err_tx:
	rtl8187_unregister_led(&priv->led_radio);
}

/* Tear down all LEDs and cancel outstanding LED work. */
void rtl8187_leds_exit(struct ieee80211_hw *dev)
{
	struct rtl8187_priv *priv = dev->priv;

	rtl8187_unregister_led(&priv->led_radio);
	rtl8187_unregister_led(&priv->led_rx);
	rtl8187_unregister_led(&priv->led_tx);
	cancel_delayed_work_sync(&priv->led_off);
	cancel_delayed_work_sync(&priv->led_on);
}
#endif /* def CONFIG_RTL8187_LEDS */
gpl-2.0
JPRasquin/Ubuntu12.04
arch/powerpc/boot/elf_util.c
12054
2339
/* * Copyright (C) Paul Mackerras 1997. * * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "elf.h" #include "page.h" #include "string.h" #include "stdio.h" int parse_elf64(void *hdr, struct elf_info *info) { Elf64_Ehdr *elf64 = hdr; Elf64_Phdr *elf64ph; unsigned int i; if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 && elf64->e_ident[EI_MAG1] == ELFMAG1 && elf64->e_ident[EI_MAG2] == ELFMAG2 && elf64->e_ident[EI_MAG3] == ELFMAG3 && elf64->e_ident[EI_CLASS] == ELFCLASS64 && elf64->e_ident[EI_DATA] == ELFDATA2MSB && (elf64->e_type == ET_EXEC || elf64->e_type == ET_DYN) && elf64->e_machine == EM_PPC64)) return 0; elf64ph = (Elf64_Phdr *)((unsigned long)elf64 + (unsigned long)elf64->e_phoff); for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++) if (elf64ph->p_type == PT_LOAD) break; if (i >= (unsigned int)elf64->e_phnum) return 0; info->loadsize = (unsigned long)elf64ph->p_filesz; info->memsize = (unsigned long)elf64ph->p_memsz; info->elfoffset = (unsigned long)elf64ph->p_offset; return 1; } int parse_elf32(void *hdr, struct elf_info *info) { Elf32_Ehdr *elf32 = hdr; Elf32_Phdr *elf32ph; unsigned int i; if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 && elf32->e_ident[EI_MAG1] == ELFMAG1 && elf32->e_ident[EI_MAG2] == ELFMAG2 && elf32->e_ident[EI_MAG3] == ELFMAG3 && elf32->e_ident[EI_CLASS] == ELFCLASS32 && elf32->e_ident[EI_DATA] == ELFDATA2MSB && (elf32->e_type == ET_EXEC || elf32->e_type == ET_DYN) && elf32->e_machine == EM_PPC)) return 0; elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff); for (i = 0; i < elf32->e_phnum; i++, elf32ph++) if (elf32ph->p_type == PT_LOAD) break; if (i >= elf32->e_phnum) return 0; info->loadsize = 
elf32ph->p_filesz; info->memsize = elf32ph->p_memsz; info->elfoffset = elf32ph->p_offset; return 1; }
gpl-2.0
pranav01/linux-3.10.y
fs/nls/nls_cp866.c
12566
12666
/*
 * linux/fs/nls/nls_cp866.c
 *
 * Charset cp866 translation tables.
 * Generated automatically from the Unicode and charset
 * tables from the Unicode Organization (www.unicode.org).
 * The Unicode to charset table has only exact mappings.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>

/*
 * Forward table: cp866 byte value -> Unicode code point.
 * Entry 0x0000 (other than a genuine NUL at index 0) means "no mapping";
 * char2uni() below rejects any byte whose entry is 0x0000.
 */
static const wchar_t charset2uni[256] = {
	/* 0x00*/
	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10*/
	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20*/
	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30*/
	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40*/
	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50*/
	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60*/
	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70*/
	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80*/
	0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
	0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f,
	/* 0x90*/
	0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
	0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f,
	/* 0xa0*/
	0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
	0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f,
	/* 0xb0*/
	0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
	0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510,
	/* 0xc0*/
	0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f,
	0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567,
	/* 0xd0*/
	0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b,
	0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580,
	/* 0xe0*/
	0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
	0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f,
	/* 0xf0*/
	0x0401, 0x0451, 0x0404, 0x0454, 0x0407, 0x0457, 0x040e, 0x045e,
	0x00b0, 0x2219, 0x00b7, 0x221a, 0x2116, 0x00a4, 0x25a0, 0x00a0,
};

/*
 * Reverse (Unicode -> cp866) tables, one page per high byte of the
 * code point.  A 0x00 entry means "no mapping"; uni2char() below
 * rejects such entries.  Pages are only initialized up to the last
 * non-zero mapping; the remainder is implicitly zero.
 */

/* Unicode page 0x00 (Basic Latin / Latin-1 Supplement). */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xff, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, /* 0xb0-0xb7 */
};

/* Unicode page 0x04 (Cyrillic). */
static const unsigned char page04[256] = {
	0x00, 0xf0, 0x00, 0x00, 0xf2, 0x00, 0x00, 0xf4, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, /* 0x08-0x0f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x30-0x37 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x38-0x3f */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
	0x00, 0xf1, 0x00, 0x00, 0xf3, 0x00, 0x00, 0xf5, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x00, /* 0x58-0x5f */
};

/* Unicode page 0x21 (Letterlike Symbols / arrows). */
static const unsigned char page21[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, /* 0x10-0x17 */
};

/* Unicode page 0x22 (Mathematical Operators). */
static const unsigned char page22[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
};

/* Unicode page 0x25 (Box Drawing / Block Elements). */
static const unsigned char page25[256] = {
	0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
	0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
	0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
	0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
};

/*
 * Page dispatch table, indexed by the high byte of the Unicode code
 * point.  NULL means no code point in that page maps into cp866.
 * Only the first 0x28 slots are initialized; the rest are NULL.
 */
static const unsigned char *const page_uni2charset[256] = {
	page00, NULL,   NULL,   NULL,   page04, NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   page21, page22, NULL,   NULL,   page25, NULL,   NULL,
};

/* Per-byte lowercase map for cp866 (identity where no case pair exists). */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x80-0x87 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x88-0x8f */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x90-0x97 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};

/* Per-byte uppercase map for cp866 (identity where no case pair exists). */
static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xa0-0xa7 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xe8-0xef */
	0xf0, 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};

/*
 * Convert one Unicode character to its cp866 byte.
 * Writes a single byte into out[0] and returns 1 on success;
 * returns -ENAMETOOLONG if boundlen <= 0, or -EINVAL if the
 * code point has no cp866 mapping.
 */
static int uni2char(wchar_t uni,
		    unsigned char *out, int boundlen)
{
	const unsigned char *uni2charset;
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	uni2charset = page_uni2charset[ch];
	if (uni2charset && uni2charset[cl])
		out[0] = uni2charset[cl];
	else
		return -EINVAL;
	return 1;
}

/*
 * Convert one cp866 byte to Unicode.
 * Stores the code point in *uni and returns 1 (one byte consumed);
 * returns -EINVAL if the byte maps to 0x0000 (including byte 0x00,
 * whose table entry is 0x0000).
 */
static int char2uni(const unsigned char *rawstring, int boundlen,
		    wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (*uni == 0x0000)
		return -EINVAL;
	return 1;
}

/* NLS operations table registered under the name "cp866". */
static struct nls_table table = {
	.charset	= "cp866",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
	.owner		= THIS_MODULE,
};

static int __init init_nls_cp866(void)
{
	return register_nls(&table);
}

static void __exit exit_nls_cp866(void)
{
	unregister_nls(&table);
}

module_init(init_nls_cp866)
module_exit(exit_nls_cp866)

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
arkas/Callisto_kernel_2.6.35
fs/nls/nls_cp865.c
12566
17508
/* * linux/fs/nls/nls_cp865.c * * Charset cp865 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5, /* 0x90*/ 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9, 0x00ff, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8, 0x20a7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00a4, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0x00, 0x9c, 0xaf, 0x00, 0x00, 0x00, /* 
0xa0-0xa7 */ 0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0xa7, 0x00, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */ 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */ 0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */ 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */ 0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x9b, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const 
unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 
0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const 
unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 
0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 
0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp865", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp865(void) { return register_nls(&table); } static void __exit exit_nls_cp865(void) { unregister_nls(&table); } module_init(init_nls_cp865) module_exit(exit_nls_cp865) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
civato/KK_RUNNER-Note8.0
fs/nls/nls_cp865.c
12566
17508
/* * linux/fs/nls/nls_cp865.c * * Charset cp865 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5, /* 0x90*/ 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9, 0x00ff, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8, 0x20a7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00a4, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0x00, 0x9c, 0xaf, 0x00, 0x00, 0x00, /* 
0xa0-0xa7 */ 0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0xa7, 0x00, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */ 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */ 0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */ 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */ 0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x9b, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const 
unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 
0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const 
unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 
0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 
0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp865", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp865(void) { return register_nls(&table); } static void __exit exit_nls_cp865(void) { unregister_nls(&table); } module_init(init_nls_cp865) module_exit(exit_nls_cp865) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
carlocaione/geniatech-kernel
fs/nls/nls_cp862.c
12566
19506
/* * linux/fs/nls/nls_cp862.c * * Charset cp862 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x05d0, 0x05d1, 0x05d2, 0x05d3, 0x05d4, 0x05d5, 0x05d6, 0x05d7, 0x05d8, 0x05d9, 0x05da, 0x05db, 0x05dc, 0x05dd, 0x05de, 0x05df, /* 0x90*/ 0x05e0, 0x05e1, 0x05e2, 0x05e3, 0x05e4, 0x05e5, 0x05e6, 0x05e7, 0x05e8, 0x05e9, 0x05ea, 0x00a2, 0x00a3, 0x00a5, 0x20a7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0x9b, 0x9c, 0x00, 0x9d, 0x00, 0x00, /* 
0xa0-0xa7 */ 0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0xa4, 0x00, 0xa2, 0x00, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */ 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const 
unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page05[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xd0-0xd7 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xd8-0xdf */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */ 0x98, 0x99, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, page05, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 
0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 
0x00, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};

/*
 * uni2char - convert one Unicode code point to its CP862 byte.
 *
 * @uni:      Unicode code point to convert.
 * @out:      output buffer for the single CP862 byte.
 * @boundlen: remaining space in @out, in bytes.
 *
 * Only exact mappings are supported: the high byte of @uni selects a
 * 256-entry sub-table from page_uni2charset[], and the low byte indexes
 * into it.  A zero table pointer or a zero table entry means "no CP862
 * equivalent" (0x00 doubles as the "unmapped" sentinel throughout the
 * NLS tables, so U+0000 itself is also rejected here).
 *
 * Returns 1 (the number of bytes written) on success, -EINVAL if the
 * code point has no CP862 mapping, or -ENAMETOOLONG if @boundlen is
 * exhausted.
 */
static int uni2char(wchar_t uni,
		    unsigned char *out, int boundlen)
{
	const unsigned char *uni2charset;
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	uni2charset = page_uni2charset[ch];
	if (uni2charset && uni2charset[cl])
		out[0] = uni2charset[cl];
	else
		return -EINVAL;
	return 1;
}

/*
 * char2uni - convert one CP862 byte to its Unicode code point.
 *
 * @rawstring: input byte sequence (only the first byte is consumed,
 *             since CP862 is a single-byte charset).
 * @boundlen:  length of @rawstring; unused here because exactly one
 *             byte is always read.
 * @uni:       output Unicode code point.
 *
 * Returns 1 (the number of bytes consumed) on success, or -EINVAL if
 * the byte maps to 0x0000 in charset2uni[] (the "unmapped" sentinel;
 * note this also rejects a literal NUL input byte).
 */
static int char2uni(const unsigned char *rawstring, int boundlen,
		    wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (*uni == 0x0000)
		return -EINVAL;
	return 1;
}

/* NLS operations table registered with the kernel for charset "cp862". */
static struct nls_table table = {
	.charset	= "cp862",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
	.owner		= THIS_MODULE,
};

/* Module entry point: make the cp862 tables available to filesystems. */
static int __init init_nls_cp862(void)
{
	return register_nls(&table);
}

/* Module exit point: withdraw the cp862 tables. */
static void __exit exit_nls_cp862(void)
{
	unregister_nls(&table);
}

module_init(init_nls_cp862)
module_exit(exit_nls_cp862)

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
ninjablocks/kernel-VAR-SOM-AMxx
drivers/infiniband/hw/amso1100/c2_vq.c
12566
7714
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "c2_vq.h"
#include "c2_provider.h"

/*
 * Verbs Request Objects:
 *
 * VQ Request Objects are allocated by the kernel verbs handlers.
 * They contain a wait object, a refcnt, an atomic bool indicating that
 * the adapter has replied, and a copy of the verb reply work request.
 * A pointer to the VQ Request Object is passed down in the context
 * field of the work request message, and reflected back by the adapter
 * in the verbs reply message.  The function handle_vq() in the
 * interrupt path will use this pointer to:
 *	1) append a copy of the verbs reply message
 *	2) mark that the reply is ready
 *	3) wake up the kernel verbs handler blocked awaiting the reply.
 *
 * The kernel verbs handlers do a "get" to put a 2nd reference on the
 * VQ Request object.  If the kernel verbs handler exits before the
 * adapter can respond, this extra reference will keep the VQ Request
 * object around until the adapter's reply can be processed.  The reason
 * we need this is because a pointer to this object is stuffed into the
 * context field of the verbs work request message, and reflected back
 * in the reply message.  It is used in the interrupt handler
 * (handle_vq()) to wake up the appropriate kernel verb handler that is
 * blocked awaiting the verb reply.  So handle_vq() will do a "put" on
 * the object when it's done accessing it.
 * NOTE: If we guarantee that the kernel verb handler will never bail
 * before getting the reply, then we don't need these refcnts.
 *
 * VQ Request objects are freed by the kernel verbs handlers only
 * after the verb has been processed, or when the adapter fails and
 * does not reply.
 *
 * Verbs Reply Buffers:
 *
 * VQ Reply bufs are local host memory copies of a outstanding Verb
 * Request reply message.  They are always allocated by the kernel verbs
 * handlers, and _may_ be freed by either the kernel verbs handler -or-
 * the interrupt handler.  The kernel verbs handler _must_ free the
 * repbuf, then free the vq request object, in that order.
 */

/*
 * vq_init - create the per-device slab cache used for VQ reply buffers.
 *
 * Each reply buffer is exactly rep_vq.msg_size bytes.  The cache name
 * encodes the device number as a single character.
 * NOTE(review): '0' + devnum only yields a sensible name for
 * devnum 0..9 — presumably the adapter count is bounded elsewhere;
 * confirm against the driver's probe path.
 *
 * Returns 0 on success, -ENOMEM if the cache cannot be created.
 */
int vq_init(struct c2_dev *c2dev)
{
	sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
		(char) ('0' + c2dev->devnum));
	c2dev->host_msg_cache =
	    kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
			      SLAB_HWCACHE_ALIGN, NULL);
	if (c2dev->host_msg_cache == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/*
 * vq_term - destroy the reply-buffer cache created by vq_init().
 * All reply buffers must have been freed back to the cache by now.
 */
void vq_term(struct c2_dev *c2dev)
{
	kmem_cache_destroy(c2dev->host_msg_cache);
}

/*
 * vq_req_alloc - allocate a VQ Request Object and initialize it.
 * The refcnt is set to 1 (the caller owns the initial reference).
 *
 * Returns the new object, or NULL on allocation failure.
 */
struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
{
	struct c2_vq_req *r;

	r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
	if (r) {
		init_waitqueue_head(&r->wait_object);
		r->reply_msg = 0;
		r->event = 0;
		r->cm_id = NULL;
		r->qp = NULL;
		atomic_set(&r->refcnt, 1);
		atomic_set(&r->reply_ready, 0);
	}
	return r;
}

/*
 * vq_req_free - drop the caller's reference on a VQ Request Object,
 * freeing it when the last reference goes away.  It is assumed the
 * verbs handler has already freed the VQ Reply Buffer if it existed
 * (reply_msg is cleared here so a racing put cannot free it again).
 */
void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
{
	r->reply_msg = 0;
	if (atomic_dec_and_test(&r->refcnt)) {
		kfree(r);
	}
}

/*
 * vq_req_get - take an additional reference on a VQ Request Object.
 * Done only in the kernel verbs handlers, before the object's address
 * is handed to the adapter (see the file-header comment).
 */
void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
{
	atomic_inc(&r->refcnt);
}

/*
 * vq_req_put - drop a reference and potentially free a VQ Request
 * Object.
 *
 * This is only called by handle_vq() on the interrupt path when it is
 * done processing a verb reply message.  If the associated kernel verbs
 * handler has already bailed, then this put will actually free the VQ
 * Request object _and_ the VQ Reply Buffer if it exists (reply_msg
 * holds the buffer's address as an integer; nonzero means "attached").
 */
void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
{
	if (atomic_dec_and_test(&r->refcnt)) {
		if (r->reply_msg != 0)
			vq_repbuf_free(c2dev,
				       (void *) (unsigned long) r->reply_msg);
		kfree(r);
	}
}

/*
 * vq_repbuf_alloc - allocate a VQ Reply Buffer from the per-device
 * cache.  GFP_ATOMIC because this is also called from interrupt
 * context.  Returns NULL on failure.
 */
void *vq_repbuf_alloc(struct c2_dev *c2dev)
{
	return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
}

/*
 * vq_send_wr - post a verbs request message to the Verbs Request Queue.
 *
 * If a message slot is not available in the MQ, then block until one is
 * available.
 * NOTE: handle_mq() on the interrupt context will wake up threads
 * blocked here.  When the adapter drains the Verbs Request Queue, it
 * inserts MQ index 0 into the adapter->host activity fifo and
 * interrupts the host.
 *
 * Returns 0 on success, -EINTR if a signal arrives while waiting for a
 * free slot.
 */
int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
{
	void *msg;
	wait_queue_t __wait;

	/*
	 * grab adapter vq lock
	 */
	spin_lock(&c2dev->vqlock);

	/*
	 * allocate msg
	 */
	msg = c2_mq_alloc(&c2dev->req_vq);

	/*
	 * If we cannot get a msg, then we'll wait.
	 * When messages are available, the int handler will wake_up()
	 * any waiters.  Note the open-coded wait loop: the vq lock must
	 * be dropped before sleeping and retaken before retrying the
	 * allocation, so wait_event() cannot be used directly.
	 */
	while (msg == NULL) {
		pr_debug("%s:%d no available msg in VQ, waiting...\n",
			 __func__, __LINE__);
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&c2dev->req_vq_wo, &__wait);
		spin_unlock(&c2dev->vqlock);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!c2_mq_full(&c2dev->req_vq)) {
				break;
			}
			if (!signal_pending(current)) {
				schedule_timeout(1 * HZ);	/* 1 second... */
				continue;
			}
			/* interrupted by a signal: unwind and bail */
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&c2dev->req_vq_wo, &__wait);
			return -EINTR;
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&c2dev->req_vq_wo, &__wait);
		spin_lock(&c2dev->vqlock);
		msg = c2_mq_alloc(&c2dev->req_vq);
	}

	/*
	 * copy wr into adapter msg
	 */
	memcpy(msg, wr, c2dev->req_vq.msg_size);

	/*
	 * post msg
	 */
	c2_mq_produce(&c2dev->req_vq);

	/*
	 * release adapter vq lock
	 */
	spin_unlock(&c2dev->vqlock);
	return 0;
}

/*
 * vq_wait_for_reply - block until the adapter posts a Verb Reply
 * Message (handle_vq() sets reply_ready and wakes wait_object).
 *
 * Returns 0 when the reply arrived, -ETIMEDOUT after 60 seconds.
 */
int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
{
	if (!wait_event_timeout(req->wait_object,
				atomic_read(&req->reply_ready),
				60*HZ))
		return -ETIMEDOUT;

	return 0;
}

/*
 * vq_repbuf_free - free a Verbs Reply Buffer back to the per-device
 * cache.
 */
void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
{
	kmem_cache_free(c2dev->host_msg_cache, reply);
}
gpl-2.0
mike-dunn/linux-treo680
fs/nls/nls_iso8859-13.c
12566
11786
/* * linux/fs/nls/nls_iso8859-13.c * * Charset iso8859-13 translation tables. * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x201d, 0x00a2, 0x00a3, 0x00a4, 0x201e, 0x00a6, 0x00a7, 0x00d8, 0x00a9, 0x0156, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00c6, /* 0xb0*/ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x201c, 0x00b5, 0x00b6, 0x00b7, 0x00f8, 0x00b9, 0x0157, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00e6, /* 0xc0*/ 0x0104, 0x012e, 0x0100, 
0x0106, 0x00c4, 0x00c5, 0x0118, 0x0112, 0x010c, 0x00c9, 0x0179, 0x0116, 0x0122, 0x0136, 0x012a, 0x013b, /* 0xd0*/ 0x0160, 0x0143, 0x0145, 0x00d3, 0x014c, 0x00d5, 0x00d6, 0x00d7, 0x0172, 0x0141, 0x015a, 0x016a, 0x00dc, 0x017b, 0x017d, 0x00df, /* 0xe0*/ 0x0105, 0x012f, 0x0101, 0x0107, 0x00e4, 0x00e5, 0x0119, 0x0113, 0x010d, 0x00e9, 0x017a, 0x0117, 0x0123, 0x0137, 0x012b, 0x013c, /* 0xf0*/ 0x0161, 0x0144, 0x0146, 0x00f3, 0x014d, 0x00f5, 0x00f6, 0x00f7, 0x0173, 0x0142, 0x015b, 0x016b, 0x00fc, 0x017c, 0x017e, 0x2019, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0xa2, 0xa3, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 
0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0x00, 0xb9, 0x00, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0xc4, 0xc5, 0xaf, 0x00, /* 0xc0-0xc7 */ 0x00, 0xc9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xd3, 0x00, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xa8, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0xe4, 0xe5, 0xbf, 0x00, /* 0xe0-0xe7 */ 0x00, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xf3, 0x00, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xb8, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0xc2, 0xe2, 0x00, 0x00, 0xc0, 0xe0, 0xc3, 0xe3, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0xc7, 0xe7, 0x00, 0x00, 0xcb, 0xeb, /* 0x10-0x17 */ 0xc6, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0xce, 0xee, 0x00, 0x00, 0xc1, 0xe1, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xed, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0xcf, 0xef, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0xd4, 0xf4, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xba, /* 0x50-0x57 */ 0x00, 0x00, 0xda, 0xfa, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0xd8, 0xf8, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe, 0x00, /* 0x78-0x7f */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xff, 0x00, 0x00, 0xb4, 0xa1, 0xa5, 0x00, /* 0x18-0x1f */ }; static 
const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbf, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 
/* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xbd, /* 0xb8-0xbf */ 0xc0, 
0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-13", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_iso8859_13(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_13(void) { unregister_nls(&table); } module_init(init_nls_iso8859_13) module_exit(exit_nls_iso8859_13) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
tamirda/N7100_PhoeniX_Kernel
fs/nls/nls_cp852.c
12566
14830
/* * linux/fs/nls/nls_cp852.c * * Charset cp852 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x016f, 0x0107, 0x00e7, 0x0142, 0x00eb, 0x0150, 0x0151, 0x00ee, 0x0179, 0x00c4, 0x0106, /* 0x90*/ 0x00c9, 0x0139, 0x013a, 0x00f4, 0x00f6, 0x013d, 0x013e, 0x015a, 0x015b, 0x00d6, 0x00dc, 0x0164, 0x0165, 0x0141, 0x00d7, 0x010d, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x0104, 0x0105, 0x017d, 0x017e, 0x0118, 0x0119, 0x00ac, 0x017a, 0x010c, 0x015f, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00c1, 0x00c2, 0x011a, 
0x015e, 0x2563, 0x2551, 0x2557, 0x255d, 0x017b, 0x017c, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x0102, 0x0103, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x00a4, /* 0xd0*/ 0x0111, 0x0110, 0x010e, 0x00cb, 0x010f, 0x0147, 0x00cd, 0x00ce, 0x011b, 0x2518, 0x250c, 0x2588, 0x2584, 0x0162, 0x016e, 0x2580, /* 0xe0*/ 0x00d3, 0x00df, 0x00d4, 0x0143, 0x0144, 0x0148, 0x0160, 0x0161, 0x0154, 0x00da, 0x0155, 0x0170, 0x00fd, 0x00dd, 0x0163, 0x00b4, /* 0xf0*/ 0x00ad, 0x02dd, 0x02db, 0x02c7, 0x02d8, 0x00a7, 0x00f7, 0x00b8, 0x00b0, 0x00a8, 0x02d9, 0x0171, 0x0158, 0x0159, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0xf5, /* 
0xa0-0xa7 */ 0xf9, 0x00, 0x00, 0xae, 0xaa, 0xf0, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0x00, 0x00, 0x00, 0xef, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0xf7, 0x00, 0x00, 0xaf, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xb5, 0xb6, 0x00, 0x8e, 0x00, 0x00, 0x80, /* 0xc0-0xc7 */ 0x00, 0x90, 0x00, 0xd3, 0x00, 0xd6, 0xd7, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xe0, 0xe2, 0x00, 0x99, 0x9e, /* 0xd0-0xd7 */ 0x00, 0x00, 0xe9, 0x00, 0x9a, 0xed, 0x00, 0xe1, /* 0xd8-0xdf */ 0x00, 0xa0, 0x83, 0x00, 0x84, 0x00, 0x00, 0x87, /* 0xe0-0xe7 */ 0x00, 0x82, 0x00, 0x89, 0x00, 0xa1, 0x8c, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x00, 0x00, 0xa3, 0x00, 0x81, 0xec, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0xc6, 0xc7, 0xa4, 0xa5, 0x8f, 0x86, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xac, 0x9f, 0xd2, 0xd4, /* 0x08-0x0f */ 0xd1, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xa8, 0xa9, 0xb7, 0xd8, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x91, 0x92, 0x00, 0x00, 0x95, 0x96, 0x00, /* 0x38-0x3f */ 0x00, 0x9d, 0x88, 0xe3, 0xe4, 0x00, 0x00, 0xd5, /* 0x40-0x47 */ 0xe5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x8a, 0x8b, 0x00, 0x00, 0xe8, 0xea, 0x00, 0x00, /* 0x50-0x57 */ 0xfc, 0xfd, 0x97, 0x98, 0x00, 0x00, 0xb8, 0xad, /* 0x58-0x5f */ 0xe6, 0xe7, 0xdd, 0xee, 0x9b, 0x9c, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x85, /* 0x68-0x6f */ 0xeb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x8d, 0xab, 0xbd, 0xbe, 0xa6, 0xa7, 0x00, /* 0x78-0x7f */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0xf4, 0xfa, 0x00, 0xf2, 0x00, 0xf1, 0x00, 0x00, /* 0xd8-0xdf */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 
0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */ 0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */ 0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */ 0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 
0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8b, 0x8b, 0x8c, 0xab, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x92, 0x92, 0x93, 0x94, 0x96, 0x96, 0x98, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9c, 0x9c, 0x88, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa5, 0xa5, 0xa7, 0xa7, /* 0xa0-0xa7 */ 0xa9, 0xa9, 0xaa, 0xab, 0x9f, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xa0, 0x83, 0xd8, /* 0xb0-0xb7 */ 0xad, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd0, 0xd4, 0x89, 0xd4, 0xe5, 0xa1, 0x8c, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xee, 0x85, 0xdf, /* 0xd8-0xdf */ 0xa2, 0xe1, 0x93, 0xe4, 0xe4, 0xe5, 0xe7, 0xe7, /* 0xe0-0xe7 */ 0xea, 0xa3, 0xea, 0xfb, 0xec, 0xec, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfd, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0xb6, 0x8e, 0xde, 0x8f, 0x80, /* 0x80-0x87 */ 0x9d, 0xd3, 0x8a, 0x8a, 0xd7, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x91, 0xe2, 0x99, 0x95, 0x95, 0x97, /* 0x90-0x97 */ 0x97, 0x99, 0x9a, 0x9b, 0x9b, 0x9d, 0x9e, 0xac, /* 0x98-0x9f */ 0xb5, 0xd6, 0xe0, 0xe9, 0xa4, 0xa4, 0xa6, 0xa6, /* 0xa0-0xa7 */ 0xa8, 0xa8, 0xaa, 0x8d, 0xac, 0xb8, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd1, 0xd1, 0xd2, 0xd3, 0xd2, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xb7, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe3, 0xd5, 0xe6, 0xe6, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xe8, 0xeb, 0xed, 0xed, 0xdd, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xeb, 0xfc, 0xfc, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = 
charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp852", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp852(void) { return register_nls(&table); } static void __exit exit_nls_cp852(void) { unregister_nls(&table); } module_init(init_nls_cp852) module_exit(exit_nls_cp852) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
zaphodatreides/P8000-Kernel
fs/nls/nls_iso8859-5.c
12566
10909
/* * linux/fs/nls/nls_iso8859-5.c * * Charset iso8859-5 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x00ad, 0x040e, 0x040f, /* 0xb0*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 
0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0xc0*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xd0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xe0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, /* 0xf0*/ 0x2116, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045a, 0x045b, 0x045c, 0x00a7, 0x045e, 0x045f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 
/* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ }; static const unsigned char page04[256] = { 0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x00, 0xae, 0xaf, /* 0x08-0x0f */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0xfe, 0xff, /* 0x58-0x5f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x00, /* 0x10-0x17 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 
0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xa0-0xa7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xad, 0xfe, 0xff, /* 0xa8-0xaf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xb0-0xb7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 
0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xd0-0xd7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xf0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xf0-0xf7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xfd, 0xae, 0xaf, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 
1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-5", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_iso8859_5(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_5(void) { unregister_nls(&table); } module_init(init_nls_iso8859_5) module_exit(exit_nls_iso8859_5) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
KuronekoDungeon/android_kernel_sony_msm
sound/aoa/core/gpio-pmf.c
13078
6267
/* * Apple Onboard Audio pmf GPIOs * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #include <linux/slab.h> #include <asm/pmac_feature.h> #include <asm/pmac_pfunc.h> #include "../aoa.h" #define PMF_GPIO(name, bit) \ static void pmf_gpio_set_##name(struct gpio_runtime *rt, int on)\ { \ struct pmf_args args = { .count = 1, .u[0].v = !on }; \ int rc; \ \ if (unlikely(!rt)) return; \ rc = pmf_call_function(rt->node, #name "-mute", &args); \ if (rc && rc != -ENODEV) \ printk(KERN_WARNING "pmf_gpio_set_" #name \ " failed, rc: %d\n", rc); \ rt->implementation_private &= ~(1<<bit); \ rt->implementation_private |= (!!on << bit); \ } \ static int pmf_gpio_get_##name(struct gpio_runtime *rt) \ { \ if (unlikely(!rt)) return 0; \ return (rt->implementation_private>>bit)&1; \ } PMF_GPIO(headphone, 0); PMF_GPIO(amp, 1); PMF_GPIO(lineout, 2); static void pmf_gpio_set_hw_reset(struct gpio_runtime *rt, int on) { struct pmf_args args = { .count = 1, .u[0].v = !!on }; int rc; if (unlikely(!rt)) return; rc = pmf_call_function(rt->node, "hw-reset", &args); if (rc) printk(KERN_WARNING "pmf_gpio_set_hw_reset" " failed, rc: %d\n", rc); } static void pmf_gpio_all_amps_off(struct gpio_runtime *rt) { int saved; if (unlikely(!rt)) return; saved = rt->implementation_private; pmf_gpio_set_headphone(rt, 0); pmf_gpio_set_amp(rt, 0); pmf_gpio_set_lineout(rt, 0); rt->implementation_private = saved; } static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt) { int s; if (unlikely(!rt)) return; s = rt->implementation_private; pmf_gpio_set_headphone(rt, (s>>0)&1); pmf_gpio_set_amp(rt, (s>>1)&1); pmf_gpio_set_lineout(rt, (s>>2)&1); } static void pmf_handle_notify(struct work_struct *work) { struct gpio_notification *notif = container_of(work, struct gpio_notification, work.work); mutex_lock(&notif->mutex); if (notif->notify) notif->notify(notif->data); mutex_unlock(&notif->mutex); } static void pmf_gpio_init(struct gpio_runtime *rt) { 
pmf_gpio_all_amps_off(rt); rt->implementation_private = 0; INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); mutex_init(&rt->headphone_notify.mutex); mutex_init(&rt->line_in_notify.mutex); mutex_init(&rt->line_out_notify.mutex); } static void pmf_gpio_exit(struct gpio_runtime *rt) { pmf_gpio_all_amps_off(rt); rt->implementation_private = 0; if (rt->headphone_notify.gpio_private) pmf_unregister_irq_client(rt->headphone_notify.gpio_private); if (rt->line_in_notify.gpio_private) pmf_unregister_irq_client(rt->line_in_notify.gpio_private); if (rt->line_out_notify.gpio_private) pmf_unregister_irq_client(rt->line_out_notify.gpio_private); /* make sure no work is pending before freeing * all things */ cancel_delayed_work_sync(&rt->headphone_notify.work); cancel_delayed_work_sync(&rt->line_in_notify.work); cancel_delayed_work_sync(&rt->line_out_notify.work); mutex_destroy(&rt->headphone_notify.mutex); mutex_destroy(&rt->line_in_notify.mutex); mutex_destroy(&rt->line_out_notify.mutex); kfree(rt->headphone_notify.gpio_private); kfree(rt->line_in_notify.gpio_private); kfree(rt->line_out_notify.gpio_private); } static void pmf_handle_notify_irq(void *data) { struct gpio_notification *notif = data; schedule_delayed_work(&notif->work, 0); } static int pmf_set_notify(struct gpio_runtime *rt, enum notify_type type, notify_func_t notify, void *data) { struct gpio_notification *notif; notify_func_t old; struct pmf_irq_client *irq_client; char *name; int err = -EBUSY; switch (type) { case AOA_NOTIFY_HEADPHONE: notif = &rt->headphone_notify; name = "headphone-detect"; break; case AOA_NOTIFY_LINE_IN: notif = &rt->line_in_notify; name = "linein-detect"; break; case AOA_NOTIFY_LINE_OUT: notif = &rt->line_out_notify; name = "lineout-detect"; break; default: return -EINVAL; } mutex_lock(&notif->mutex); old = notif->notify; if (!old && !notify) { 
err = 0; goto out_unlock; } if (old && notify) { if (old == notify && notif->data == data) err = 0; goto out_unlock; } if (old && !notify) { irq_client = notif->gpio_private; pmf_unregister_irq_client(irq_client); kfree(irq_client); notif->gpio_private = NULL; } if (!old && notify) { irq_client = kzalloc(sizeof(struct pmf_irq_client), GFP_KERNEL); if (!irq_client) { err = -ENOMEM; goto out_unlock; } irq_client->data = notif; irq_client->handler = pmf_handle_notify_irq; irq_client->owner = THIS_MODULE; err = pmf_register_irq_client(rt->node, name, irq_client); if (err) { printk(KERN_ERR "snd-aoa: gpio layer failed to" " register %s irq (%d)\n", name, err); kfree(irq_client); goto out_unlock; } notif->gpio_private = irq_client; } notif->notify = notify; notif->data = data; err = 0; out_unlock: mutex_unlock(&notif->mutex); return err; } static int pmf_get_detect(struct gpio_runtime *rt, enum notify_type type) { char *name; int err = -EBUSY, ret; struct pmf_args args = { .count = 1, .u[0].p = &ret }; switch (type) { case AOA_NOTIFY_HEADPHONE: name = "headphone-detect"; break; case AOA_NOTIFY_LINE_IN: name = "linein-detect"; break; case AOA_NOTIFY_LINE_OUT: name = "lineout-detect"; break; default: return -EINVAL; } err = pmf_call_function(rt->node, name, &args); if (err) return err; return ret; } static struct gpio_methods methods = { .init = pmf_gpio_init, .exit = pmf_gpio_exit, .all_amps_off = pmf_gpio_all_amps_off, .all_amps_restore = pmf_gpio_all_amps_restore, .set_headphone = pmf_gpio_set_headphone, .set_speakers = pmf_gpio_set_amp, .set_lineout = pmf_gpio_set_lineout, .set_hw_reset = pmf_gpio_set_hw_reset, .get_headphone = pmf_gpio_get_headphone, .get_speakers = pmf_gpio_get_amp, .get_lineout = pmf_gpio_get_lineout, .set_notify = pmf_set_notify, .get_detect = pmf_get_detect, }; struct gpio_methods *pmf_gpio_methods = &methods; EXPORT_SYMBOL_GPL(pmf_gpio_methods);
gpl-2.0
mgbotoe/GT-I8552-kernel-source
drivers/hid/hid-pl.c
23
6009
/* * Force feedback support for PantherLord/GreenAsia based devices * * The devices are distributed under various names and the same USB device ID * can be used in both adapters and actual game controllers. * * 0810:0001 "Twin USB Joystick" * - tested with PantherLord USB/PS2 2in1 Adapter * - contains two reports, one for each port (HID_QUIRK_MULTI_INPUT) * * 0e8f:0003 "GreenAsia Inc. USB Joystick " * - tested with König Gaming gamepad * * 0e8f:0003 "GASIA USB Gamepad" * - another version of the König gamepad * * Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #define debug(format, arg...) 
pr_debug("hid-plff: " format "\n" , ## arg) #include <linux/input.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #ifdef CONFIG_PANTHERLORD_FF #include "usbhid/usbhid.h" struct plff_device { struct hid_report *report; s32 *strong; s32 *weak; }; static int hid_plff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct plff_device *plff = data; int left, right; left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; debug("called with 0x%04x 0x%04x", left, right); left = left * 0x7f / 0xffff; right = right * 0x7f / 0xffff; *plff->strong = left; *plff->weak = right; debug("running with 0x%02x 0x%02x", left, right); usbhid_submit_report(hid, plff->report, USB_DIR_OUT); return 0; } static int plff_init(struct hid_device *hid) { struct plff_device *plff; struct hid_report *report; struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr = report_list; struct input_dev *dev; int error; s32 *strong; s32 *weak; /* The device contains one output report per physical device, all containing 1 field, which contains 4 ff00.0002 usages and 4 16bit absolute values. The input reports also contain a field which contains 8 ff00.0001 usages and 8 boolean values. Their meaning is currently unknown. A version of the 0e8f:0003 exists that has all the values in separate fields and misses the extra input field, thus resembling Zeroplus (hid-zpff) devices. 
*/ if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; } list_for_each_entry(hidinput, &hid->inputs, list) { report_ptr = report_ptr->next; if (report_ptr == report_list) { hid_err(hid, "required output report is missing\n"); return -ENODEV; } report = list_entry(report_ptr, struct hid_report, list); if (report->maxfield < 1) { hid_err(hid, "no fields in the report\n"); return -ENODEV; } if (report->field[0]->report_count >= 4) { report->field[0]->value[0] = 0x00; report->field[0]->value[1] = 0x00; strong = &report->field[0]->value[2]; weak = &report->field[0]->value[3]; debug("detected single-field device"); } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 && report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) { report->field[0]->value[0] = 0x00; report->field[1]->value[0] = 0x00; strong = &report->field[2]->value[0]; weak = &report->field[3]->value[0]; debug("detected 4-field device"); } else { hid_err(hid, "not enough fields or values\n"); return -ENODEV; } plff = kzalloc(sizeof(struct plff_device), GFP_KERNEL); if (!plff) return -ENOMEM; dev = hidinput->input; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, plff, hid_plff_play); if (error) { kfree(plff); return error; } plff->report = report; plff->strong = strong; plff->weak = weak; *strong = 0x00; *weak = 0x00; usbhid_submit_report(hid, plff->report, USB_DIR_OUT); } hid_info(hid, "Force feedback for PantherLord/GreenAsia devices by Anssi Hannula <anssi.hannula@gmail.com>\n"); return 0; } #else static inline int plff_init(struct hid_device *hid) { return 0; } #endif static int pl_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; if (id->driver_data) hdev->quirks |= HID_QUIRK_MULTI_INPUT; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } plff_init(hdev); 
return 0; err: return ret; } static const struct hid_device_id pl_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR), .driver_data = 1 }, /* Twin USB Joystick */ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR), .driver_data = 1 }, /* Twin USB Joystick */ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), }, { } }; MODULE_DEVICE_TABLE(hid, pl_devices); static struct hid_driver pl_driver = { .name = "pantherlord", .id_table = pl_devices, .probe = pl_probe, }; static int __init pl_init(void) { return hid_register_driver(&pl_driver); } static void __exit pl_exit(void) { hid_unregister_driver(&pl_driver); } module_init(pl_init); module_exit(pl_exit); MODULE_LICENSE("GPL");
gpl-2.0
jkent/mini210s-barebox
drivers/mfd/mc9sdz60.c
23
3092
/* * Copyright (C) 2007 Sascha Hauer, Pengutronix * 2009 Marc Kleine-Budde <mkl@pengutronix.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include <common.h> #include <init.h> #include <driver.h> #include <xfuncs.h> #include <errno.h> #include <i2c/i2c.h> #include <mfd/mc9sdz60.h> #define DRIVERNAME "mc9sdz60" #define to_mc9sdz60(a) container_of(a, struct mc9sdz60, cdev) static struct mc9sdz60 *mc_dev; struct mc9sdz60 *mc9sdz60_get(void) { if (!mc_dev) return NULL; return mc_dev; } EXPORT_SYMBOL(mc9sdz60_get); int mc9sdz60_reg_read(struct mc9sdz60 *mc9sdz60, enum mc9sdz60_reg reg, u8 *val) { int ret; ret = i2c_read_reg(mc9sdz60->client, reg, val, 1); return ret == 1 ? 0 : ret; } EXPORT_SYMBOL(mc9sdz60_reg_read); int mc9sdz60_reg_write(struct mc9sdz60 *mc9sdz60, enum mc9sdz60_reg reg, u8 val) { int ret; ret = i2c_write_reg(mc9sdz60->client, reg, &val, 1); return ret == 1 ? 
0 : ret; } EXPORT_SYMBOL(mc9sdz60_reg_write); int mc9sdz60_set_bits(struct mc9sdz60 *mc9sdz60, enum mc9sdz60_reg reg, u8 mask, u8 val) { u8 tmp; int err; err = mc9sdz60_reg_read(mc9sdz60, reg, &tmp); tmp = (tmp & ~mask) | val; if (!err) err = mc9sdz60_reg_write(mc9sdz60, reg, tmp); return err; } EXPORT_SYMBOL(mc9sdz60_set_bits); static ssize_t mc_read(struct cdev *cdev, void *_buf, size_t count, loff_t offset, ulong flags) { struct mc9sdz60 *mc9sdz60 = to_mc9sdz60(cdev); u8 *buf = _buf; size_t i = count; int err; while (i) { err = mc9sdz60_reg_read(mc9sdz60, offset, buf); if (err) return (ssize_t)err; buf++; i--; offset++; } return count; } static ssize_t mc_write(struct cdev *cdev, const void *_buf, size_t count, loff_t offset, ulong flags) { struct mc9sdz60 *mc9sdz60 = to_mc9sdz60(cdev); const u8 *buf = _buf; size_t i = count; int err; while (i) { err = mc9sdz60_reg_write(mc9sdz60, offset, *buf); if (err) return (ssize_t)err; buf++; i--; offset++; } return count; } static struct file_operations mc_fops = { .lseek = dev_lseek_default, .read = mc_read, .write = mc_write, }; static int mc_probe(struct device_d *dev) { if (mc_dev) return -EBUSY; mc_dev = xzalloc(sizeof(struct mc9sdz60)); mc_dev->cdev.name = DRIVERNAME; mc_dev->client = to_i2c_client(dev); mc_dev->cdev.size = 64; /* 35 known registers */ mc_dev->cdev.dev = dev; mc_dev->cdev.ops = &mc_fops; devfs_create(&mc_dev->cdev); return 0; } static struct driver_d mc_driver = { .name = DRIVERNAME, .probe = mc_probe, }; static int mc_init(void) { i2c_driver_register(&mc_driver); return 0; } device_initcall(mc_init);
gpl-2.0