repo_name
string
path
string
copies
string
size
string
content
string
license
string
DirtyUnicorns/android_kernel_htc_m4
fs/ocfs2/move_extents.c
4881
27875
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * move_extents.c * * Copyright (C) 2011 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/mount.h> #include <linux/swap.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "ocfs2_ioctl.h" #include "alloc.h" #include "aops.h" #include "dlmglue.h" #include "extent_map.h" #include "inode.h" #include "journal.h" #include "suballoc.h" #include "uptodate.h" #include "super.h" #include "dir.h" #include "buffer_head_io.h" #include "sysfile.h" #include "refcounttree.h" #include "move_extents.h" struct ocfs2_move_extents_context { struct inode *inode; struct file *file; int auto_defrag; int partial; int credits; u32 new_phys_cpos; u32 clusters_moved; u64 refcount_loc; struct ocfs2_move_extents *range; struct ocfs2_extent_tree et; struct ocfs2_alloc_context *meta_ac; struct ocfs2_alloc_context *data_ac; struct ocfs2_cached_dealloc_ctxt dealloc; }; static int __ocfs2_move_extent(handle_t *handle, struct ocfs2_move_extents_context *context, u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos, int ext_flags) { int ret = 0, index; struct inode *inode = context->inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_rec *rec, replace_rec; struct ocfs2_path *path = NULL; struct ocfs2_extent_list *el; u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, p_cpos, new_p_cpos, len); if (ret) 
{ mlog_errno(ret); goto out; } memset(&replace_rec, 0, sizeof(replace_rec)); replace_rec.e_cpos = cpu_to_le32(cpos); replace_rec.e_leaf_clusters = cpu_to_le16(len); replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb, new_p_cpos)); path = ocfs2_new_path_from_et(&context->et); if (!path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos); if (ret) { mlog_errno(ret); goto out; } el = path_leaf_el(path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { ocfs2_error(inode->i_sb, "Inode %llu has an extent at cpos %u which can no " "longer be found.\n", (unsigned long long)ino, cpos); ret = -EROFS; goto out; } rec = &el->l_recs[index]; BUG_ON(ext_flags != rec->e_flags); /* * after moving/defraging to new location, the extent is not going * to be refcounted anymore. */ replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), context->et.et_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_split_extent(handle, &context->et, path, index, &replace_rec, context->meta_ac, &context->dealloc); if (ret) { mlog_errno(ret); goto out; } ocfs2_journal_dirty(handle, context->et.et_root_bh); context->new_phys_cpos = new_p_cpos; /* * need I to append truncate log for old clusters? */ if (old_blkno) { if (ext_flags & OCFS2_EXT_REFCOUNTED) ret = ocfs2_decrease_refcount(inode, handle, ocfs2_blocks_to_clusters(osb->sb, old_blkno), len, context->meta_ac, &context->dealloc, 1); else ret = ocfs2_truncate_log_append(osb, handle, old_blkno, len); } out: return ret; } /* * lock allocators, and reserving appropriate number of bits for * meta blocks and data clusters. * * in some cases, we don't need to reserve clusters, just let data_ac * be NULL. 
*/ static int ocfs2_lock_allocators_move_extents(struct inode *inode, struct ocfs2_extent_tree *et, u32 clusters_to_move, u32 extents_to_split, struct ocfs2_alloc_context **meta_ac, struct ocfs2_alloc_context **data_ac, int extra_blocks, int *credits) { int ret, num_free_extents; unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); num_free_extents = ocfs2_num_free_extents(osb, et); if (num_free_extents < 0) { ret = num_free_extents; mlog_errno(ret); goto out; } if (!num_free_extents || (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) extra_blocks += ocfs2_extend_meta_needed(et->et_root_el); ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac); if (ret) { mlog_errno(ret); goto out; } if (data_ac) { ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac); if (ret) { mlog_errno(ret); goto out; } } *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el, clusters_to_move + 2); mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n", extra_blocks, clusters_to_move, *credits); out: if (ret) { if (*meta_ac) { ocfs2_free_alloc_context(*meta_ac); *meta_ac = NULL; } } return ret; } /* * Using one journal handle to guarantee the data consistency in case * crash happens anywhere. * * XXX: defrag can end up with finishing partial extent as requested, * due to not enough contiguous clusters can be found in allocator. 
 */
/*
 * Defragment one extent: claim up to *len contiguous clusters from the
 * allocator and move the extent there inside a single transaction.
 *
 * If fewer clusters than requested are claimed and context->partial is set,
 * only the claimed prefix is moved and *len is shrunk to the moved length;
 * otherwise -ENOSPC is returned and the range is flagged incomplete.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 new_phys_cpos, new_len;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	/* A refcounted extent needs the refcount tree locked and prepared
	 * for the eventual release of the old clusters. */
	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {

		BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
			 OCFS2_HAS_REFCOUNT_FL));

		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							*len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
						 &context->meta_ac,
						 &context->data_ac,
						 extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * should be using allocation reservation strategy there?
	 *
	 * if (context->data_ac)
	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
	 */

	mutex_lock(&tl_inode->i_mutex);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock_mutex;
		}
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_mutex;
	}

	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
				     &new_phys_cpos, &new_len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * allowing partial extent moving is kind of 'pros and cons', it makes
	 * whole defragmentation less likely to fail, on the contrary, the bad
	 * thing is it may make the fs even more fragmented after moving, let
	 * userspace make a good decision here.
	 */
	if (new_len != *len) {
		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
		if (!partial) {
			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
			ret = -ENOSPC;
			goto out_commit;
		}
	}

	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
	     phys_cpos, new_phys_cpos);

	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
				  new_phys_cpos, ext_flags);
	if (ret)
		mlog_errno(ret);

	if (partial && (new_len != *len))
		*len = new_len;

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	/* NOTE(review): this overwrites any error from __ocfs2_move_extent()
	 * above with the writeback result — confirm that is intended. */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
	mutex_unlock(&tl_inode->i_mutex);

	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}

	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * find the victim alloc group, where #blkno fits.
*/ static int ocfs2_find_victim_alloc_group(struct inode *inode, u64 vict_blkno, int type, int slot, int *vict_bit, struct buffer_head **ret_bh) { int ret, i, bits_per_unit = 0; u64 blkno; char namebuf[40]; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *ac_bh = NULL, *gd_bh = NULL; struct ocfs2_chain_list *cl; struct ocfs2_chain_rec *rec; struct ocfs2_dinode *ac_dinode; struct ocfs2_group_desc *bg; ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot); ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf, strlen(namebuf), &blkno); if (ret) { ret = -ENOENT; goto out; } ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh); if (ret) { mlog_errno(ret); goto out; } ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data; cl = &(ac_dinode->id2.i_chain); rec = &(cl->cl_recs[0]); if (type == GLOBAL_BITMAP_SYSTEM_INODE) bits_per_unit = osb->s_clustersize_bits - inode->i_sb->s_blocksize_bits; /* * 'vict_blkno' was out of the valid range. */ if ((vict_blkno < le64_to_cpu(rec->c_blkno)) || (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) << bits_per_unit))) { ret = -EINVAL; goto out; } for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) { rec = &(cl->cl_recs[i]); if (!rec) continue; bg = NULL; do { if (!bg) blkno = le64_to_cpu(rec->c_blkno); else blkno = le64_to_cpu(bg->bg_next_group); if (gd_bh) { brelse(gd_bh); gd_bh = NULL; } ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh); if (ret) { mlog_errno(ret); goto out; } bg = (struct ocfs2_group_desc *)gd_bh->b_data; if (vict_blkno < (le64_to_cpu(bg->bg_blkno) + le16_to_cpu(bg->bg_bits))) { *ret_bh = gd_bh; *vict_bit = (vict_blkno - blkno) >> bits_per_unit; mlog(0, "find the victim group: #%llu, " "total_bits: %u, vict_bit: %u\n", blkno, le16_to_cpu(bg->bg_bits), *vict_bit); goto out; } } while (le64_to_cpu(bg->bg_next_group)); } ret = -EINVAL; out: brelse(ac_bh); /* * caller has to release the gd_bh properly. 
*/ return ret; } /* * XXX: helper to validate and adjust moving goal. */ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode, struct ocfs2_move_extents *range) { int ret, goal_bit = 0; struct buffer_head *gd_bh = NULL; struct ocfs2_group_desc *bg = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int c_to_b = 1 << (osb->s_clustersize_bits - inode->i_sb->s_blocksize_bits); /* * make goal become cluster aligned. */ range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb, range->me_goal); /* * moving goal is not allowd to start with a group desc blok(#0 blk) * let's compromise to the latter cluster. */ if (range->me_goal == le64_to_cpu(bg->bg_blkno)) range->me_goal += c_to_b; /* * validate goal sits within global_bitmap, and return the victim * group desc */ ret = ocfs2_find_victim_alloc_group(inode, range->me_goal, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT, &goal_bit, &gd_bh); if (ret) goto out; bg = (struct ocfs2_group_desc *)gd_bh->b_data; /* * movement is not gonna cross two groups. */ if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize < range->me_len) { ret = -EINVAL; goto out; } /* * more exact validations/adjustments will be performed later during * moving operation for each extent range. */ mlog(0, "extents get ready to be moved to #%llu block\n", range->me_goal); out: brelse(gd_bh); return ret; } static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh, int *goal_bit, u32 move_len, u32 max_hop, u32 *phys_cpos) { int i, used, last_free_bits = 0, base_bit = *goal_bit; struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb, le64_to_cpu(gd->bg_blkno)); for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) { used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap); if (used) { /* * we even tried searching the free chunk by jumping * a 'max_hop' distance, but still failed. 
*/ if ((i - base_bit) > max_hop) { *phys_cpos = 0; break; } if (last_free_bits) last_free_bits = 0; continue; } else last_free_bits++; if (last_free_bits == move_len) { *goal_bit = i; *phys_cpos = base_cpos + i; break; } } mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos); } static int ocfs2_alloc_dinode_update_counts(struct inode *inode, handle_t *handle, struct buffer_head *di_bh, u32 num_bits, u16 chain) { int ret; u32 tmp_used; struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; } tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); ocfs2_journal_dirty(handle, di_bh); out: return ret; } static inline int ocfs2_block_group_set_bits(handle_t *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, struct buffer_head *group_bh, unsigned int bit_off, unsigned int num_bits) { int status; void *bitmap = bg->bg_bitmap; int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; /* All callers get the descriptor via * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, num_bits); if (ocfs2_is_cluster_bitmap(alloc_inode)) journal_type = OCFS2_JOURNAL_ACCESS_UNDO; status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), group_bh, journal_type); if (status < 0) { mlog_errno(status); goto bail; } le16_add_cpu(&bg->bg_free_bits_count, -num_bits); if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) { ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit" " count %u but claims %u are freed. 
num_bits %d", (unsigned long long)le64_to_cpu(bg->bg_blkno), le16_to_cpu(bg->bg_bits), le16_to_cpu(bg->bg_free_bits_count), num_bits); return -EROFS; } while (num_bits--) ocfs2_set_bit(bit_off++, bitmap); ocfs2_journal_dirty(handle, group_bh); bail: return status; } static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, u32 cpos, u32 phys_cpos, u32 *new_phys_cpos, u32 len, int ext_flags) { int ret, credits = 0, extra_blocks = 0, goal_bit = 0; handle_t *handle; struct inode *inode = context->inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct inode *gb_inode = NULL; struct buffer_head *gb_bh = NULL; struct buffer_head *gd_bh = NULL; struct ocfs2_group_desc *gd; struct ocfs2_refcount_tree *ref_tree = NULL; u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb, context->range->me_threshold); u64 phys_blkno, new_phys_blkno; phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) { BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); BUG_ON(!context->refcount_loc); ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, &ref_tree, NULL); if (ret) { mlog_errno(ret); return ret; } ret = ocfs2_prepare_refcount_change_for_del(inode, context->refcount_loc, phys_blkno, len, &credits, &extra_blocks); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, &context->meta_ac, NULL, extra_blocks, &credits); if (ret) { mlog_errno(ret); goto out; } /* * need to count 2 extra credits for global_bitmap inode and * group descriptor. */ credits += OCFS2_INODE_UPDATE_CREDITS + 1; /* * ocfs2_move_extent() didn't reserve any clusters in lock_allocators() * logic, while we still need to lock the global_bitmap. 
*/ gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!gb_inode) { mlog(ML_ERROR, "unable to get global_bitmap inode\n"); ret = -EIO; goto out; } mutex_lock(&gb_inode->i_mutex); ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1); if (ret) { mlog_errno(ret); goto out_unlock_gb_mutex; } mutex_lock(&tl_inode->i_mutex); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock_tl_inode; } new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos); ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT, &goal_bit, &gd_bh); if (ret) { mlog_errno(ret); goto out_commit; } /* * probe the victim cluster group to find a proper * region to fit wanted movement, it even will perfrom * a best-effort attempt by compromising to a threshold * around the goal. */ ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, new_phys_cpos); if (!*new_phys_cpos) { ret = -ENOSPC; goto out_commit; } ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos, *new_phys_cpos, ext_flags); if (ret) { mlog_errno(ret); goto out_commit; } gd = (struct ocfs2_group_desc *)gd_bh->b_data; ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len, le16_to_cpu(gd->bg_chain)); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh, goal_bit, len); if (ret) mlog_errno(ret); /* * Here we should write the new page out first if we are * in write-back mode. 
*/ ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len); if (ret) mlog_errno(ret); out_commit: ocfs2_commit_trans(osb, handle); brelse(gd_bh); out_unlock_tl_inode: mutex_unlock(&tl_inode->i_mutex); ocfs2_inode_unlock(gb_inode, 1); out_unlock_gb_mutex: mutex_unlock(&gb_inode->i_mutex); brelse(gb_bh); iput(gb_inode); out: if (context->meta_ac) { ocfs2_free_alloc_context(context->meta_ac); context->meta_ac = NULL; } if (ref_tree) ocfs2_unlock_refcount_tree(osb, ref_tree, 1); return ret; } /* * Helper to calculate the defraging length in one run according to threshold. */ static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged, u32 threshold, int *skip) { if ((*alloc_size + *len_defraged) < threshold) { /* * proceed defragmentation until we meet the thresh */ *len_defraged += *alloc_size; } else if (*len_defraged == 0) { /* * XXX: skip a large extent. */ *skip = 1; } else { /* * split this extent to coalesce with former pieces as * to reach the threshold. * * we're done here with one cycle of defragmentation * in a size of 'thresh', resetting 'len_defraged' * forces a new defragmentation. 
*/ *alloc_size = threshold - *len_defraged; *len_defraged = 0; } } static int __ocfs2_move_extents_range(struct buffer_head *di_bh, struct ocfs2_move_extents_context *context) { int ret = 0, flags, do_defrag, skip = 0; u32 cpos, phys_cpos, move_start, len_to_move, alloc_size; u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0; struct inode *inode = context->inode; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_move_extents *range = context->range; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if ((inode->i_size == 0) || (range->me_len == 0)) return 0; if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; context->refcount_loc = le64_to_cpu(di->i_refcount_loc); ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh); ocfs2_init_dealloc_ctxt(&context->dealloc); /* * TO-DO XXX: * * - xattr extents. */ do_defrag = context->auto_defrag; /* * extents moving happens in unit of clusters, for the sake * of simplicity, we may ignore two clusters where 'byte_start' * and 'byte_start + len' were within. 
*/ move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start); len_to_move = (range->me_start + range->me_len) >> osb->s_clustersize_bits; if (len_to_move >= move_start) len_to_move -= move_start; else len_to_move = 0; if (do_defrag) { defrag_thresh = range->me_threshold >> osb->s_clustersize_bits; if (defrag_thresh <= 1) goto done; } else new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, range->me_goal); mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, " "thresh: %u\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)range->me_start, (unsigned long long)range->me_len, move_start, len_to_move, defrag_thresh); cpos = move_start; while (len_to_move) { ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size, &flags); if (ret) { mlog_errno(ret); goto out; } if (alloc_size > len_to_move) alloc_size = len_to_move; /* * XXX: how to deal with a hole: * * - skip the hole of course * - force a new defragmentation */ if (!phys_cpos) { if (do_defrag) len_defraged = 0; goto next; } if (do_defrag) { ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged, defrag_thresh, &skip); /* * skip large extents */ if (skip) { skip = 0; goto next; } mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, " "alloc_size: %u, len_defraged: %u\n", cpos, phys_cpos, alloc_size, len_defraged); ret = ocfs2_defrag_extent(context, cpos, phys_cpos, &alloc_size, flags); } else { ret = ocfs2_move_extent(context, cpos, phys_cpos, &new_phys_cpos, alloc_size, flags); new_phys_cpos += alloc_size; } if (ret < 0) { mlog_errno(ret); goto out; } context->clusters_moved += alloc_size; next: cpos += alloc_size; len_to_move -= alloc_size; } done: range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE; out: range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb, context->clusters_moved); range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb, context->new_phys_cpos); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &context->dealloc); return ret; } static int 
ocfs2_move_extents(struct ocfs2_move_extents_context *context) { int status; handle_t *handle; struct inode *inode = context->inode; struct ocfs2_dinode *di; struct buffer_head *di_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (!inode) return -ENOENT; if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS; mutex_lock(&inode->i_mutex); /* * This prevents concurrent writes from other nodes */ status = ocfs2_rw_lock(inode, 1); if (status) { mlog_errno(status); goto out; } status = ocfs2_inode_lock(inode, &di_bh, 1); if (status) { mlog_errno(status); goto out_rw_unlock; } /* * rememer ip_xattr_sem also needs to be held if necessary */ down_write(&OCFS2_I(inode)->ip_alloc_sem); status = __ocfs2_move_extents_range(di_bh, context); up_write(&OCFS2_I(inode)->ip_alloc_sem); if (status) { mlog_errno(status); goto out_inode_unlock; } /* * We update ctime for these changes */ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto out_inode_unlock; } status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status) { mlog_errno(status); goto out_commit; } di = (struct ocfs2_dinode *)di_bh->b_data; inode->i_ctime = CURRENT_TIME; di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); ocfs2_journal_dirty(handle, di_bh); out_commit: ocfs2_commit_trans(osb, handle); out_inode_unlock: brelse(di_bh); ocfs2_inode_unlock(inode, 1); out_rw_unlock: ocfs2_rw_unlock(inode, 1); out: mutex_unlock(&inode->i_mutex); return status; } int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) { int status; struct inode *inode = filp->f_path.dentry->d_inode; struct ocfs2_move_extents range; struct ocfs2_move_extents_context *context = NULL; status = mnt_want_write_file(filp); if (status) return status; if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) goto out; if 
(inode->i_flags & (S_IMMUTABLE|S_APPEND)) { status = -EPERM; goto out; } context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS); if (!context) { status = -ENOMEM; mlog_errno(status); goto out; } context->inode = inode; context->file = filp; if (argp) { if (copy_from_user(&range, (struct ocfs2_move_extents *)argp, sizeof(range))) { status = -EFAULT; goto out; } } else { status = -EINVAL; goto out; } if (range.me_start > i_size_read(inode)) goto out; if (range.me_start + range.me_len > i_size_read(inode)) range.me_len = i_size_read(inode) - range.me_start; context->range = &range; if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) { context->auto_defrag = 1; /* * ok, the default theshold for the defragmentation * is 1M, since our maximum clustersize was 1M also. * any thought? */ if (!range.me_threshold) range.me_threshold = 1024 * 1024; if (range.me_threshold > i_size_read(inode)) range.me_threshold = i_size_read(inode); if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG) context->partial = 1; } else { /* * first best-effort attempt to validate and adjust the goal * (physical address in block), while it can't guarantee later * operation can succeed all the time since global_bitmap may * change a bit over time. */ status = ocfs2_validate_and_adjust_move_goal(inode, &range); if (status) goto out; } status = ocfs2_move_extents(context); if (status) mlog_errno(status); out: /* * movement/defragmentation may end up being partially completed, * that's the reason why we need to return userspace the finished * length and new_offset even if failure happens somewhere. */ if (argp) { if (copy_to_user((struct ocfs2_move_extents *)argp, &range, sizeof(range))) status = -EFAULT; } kfree(context); mnt_drop_write_file(filp); return status; }
gpl-2.0
emceethemouth/kernel_flo
drivers/gpu/drm/drm_lock.c
4881
10696
/** * \file drm_lock.c * IOCTLs for locking * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/export.h> #include "drmP.h" static int drm_notifier(void *priv); static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); /** * Lock ioctl. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Add the current task to the lock wait queue, and attempt to take to lock. 
*/ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) { DECLARE_WAITQUEUE(entry, current); struct drm_lock *lock = data; struct drm_master *master = file_priv->master; int ret = 0; ++file_priv->lock_count; if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", task_pid_nr(current), lock->context); return -EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", lock->context, task_pid_nr(current), master->lock.hw_lock->lock, lock->flags); if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) if (lock->context < 0) return -EINVAL; add_wait_queue(&master->lock.lock_queue, &entry); spin_lock_bh(&master->lock.spinlock); master->lock.user_waiters++; spin_unlock_bh(&master->lock.spinlock); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (!master->lock.hw_lock) { /* Device has been unregistered */ send_sig(SIGTERM, current, 0); ret = -EINTR; break; } if (drm_lock_take(&master->lock, lock->context)) { master->lock.file_priv = file_priv; master->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ } /* Contention */ mutex_unlock(&drm_global_mutex); schedule(); mutex_lock(&drm_global_mutex); if (signal_pending(current)) { ret = -EINTR; break; } } spin_lock_bh(&master->lock.spinlock); master->lock.user_waiters--; spin_unlock_bh(&master->lock.spinlock); __set_current_state(TASK_RUNNING); remove_wait_queue(&master->lock.lock_queue, &entry); DRM_DEBUG("%d %s\n", lock->context, ret ? 
"interrupted" : "has lock"); if (ret) return ret; /* don't set the block all signals on the master process for now * really probably not the correct answer but lets us debug xkb * xserver for now */ if (!file_priv->is_master) { sigemptyset(&dev->sigmask); sigaddset(&dev->sigmask, SIGSTOP); sigaddset(&dev->sigmask, SIGTSTP); sigaddset(&dev->sigmask, SIGTTIN); sigaddset(&dev->sigmask, SIGTTOU); dev->sigdata.context = lock->context; dev->sigdata.lock = master->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); } if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) { if (dev->driver->dma_quiescent(dev)) { DRM_DEBUG("%d waiting for DMA quiescent\n", lock->context); return -EBUSY; } } return 0; } /** * Unlock ioctl. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Transfer and free the lock. */ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_lock *lock = data; struct drm_master *master = file_priv->master; if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", task_pid_nr(current), lock->context); return -EINVAL; } atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); if (drm_lock_free(&master->lock, lock->context)) { /* FIXME: Should really bail out here. */ } unblock_all_signals(); return 0; } /** * Take the heavyweight lock. * * \param lock lock pointer. * \param context locking context. * \return one if the lock is held, or zero otherwise. * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. 
*/ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); do { old = *lock; if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT; else { new = context | _DRM_LOCK_HELD | ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? _DRM_LOCK_CONT : 0); } prev = cmpxchg(lock, old, new); } while (prev != old); spin_unlock_bh(&lock_data->spinlock); if (_DRM_LOCKING_CONTEXT(old) == context) { if (old & _DRM_LOCK_HELD) { if (context != DRM_KERNEL_CONTEXT) { DRM_ERROR("%d holds heavyweight lock\n", context); } return 0; } } if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { /* Have lock */ return 1; } return 0; } /** * This takes a lock forcibly and hands it to context. Should ONLY be used * inside *_unlock to give lock to kernel before calling *_dma_schedule. * * \param dev DRM device. * \param lock lock pointer. * \param context locking context. * \return always one. * * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; prev = cmpxchg(lock, old, new); } while (prev != old); return 1; } /** * Free lock. * * \param dev DRM device. * \param lock lock. * \param context context. * * Resets the lock file pointer. * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. 
 */
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	if (lock_data->kernel_waiters != 0) {
		/*
		 * The kernel (idlelock) is waiting: hand the lock straight
		 * over instead of freeing it, and remember that the idle
		 * path now owns it.
		 */
		drm_lock_transfer(lock_data, 0);
		lock_data->idle_has_lock = 1;
		spin_unlock_bh(&lock_data->spinlock);
		return 1;
	}
	spin_unlock_bh(&lock_data->spinlock);

	/* Clear the HELD/CONT bits, keeping only the context number. */
	do {
		old = *lock;
		new = _DRM_LOCKING_CONTEXT(old);
		prev = cmpxchg(lock, old, new);
	} while (prev != old);

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		/* Freed a lock we did not hold: report it, but it is gone. */
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context, _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	wake_up_interruptible(&lock_data->lock_queue);
	return 0;
}

/**
 * If we get here, it means that the process has called DRM_IOCTL_LOCK
 * without calling DRM_IOCTL_UNLOCK.
 *
 * If the lock is not held, then let the signal proceed as usual.  If the lock
 * is held, then set the contended flag and keep the signal blocked.
 *
 * \param priv pointer to a drm_sigdata structure.
 * \return one if the signal should be delivered normally, or zero if the
 * signal should be blocked.
 */
static int drm_notifier(void *priv)
{
	struct drm_sigdata *s = (struct drm_sigdata *) priv;
	unsigned int old, new, prev;

	/* Allow signal delivery if lock isn't held */
	if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
		return 1;

	/* Otherwise, set flag to force call to drmUnlock */
	do {
		old = s->lock->lock;
		new = old | _DRM_LOCK_CONT;
		prev = cmpxchg(&s->lock->lock, old, new);
	} while (prev != old);
	return 0;
}

/**
 * This function returns immediately and takes the hw lock
 * with the kernel context if it is free, otherwise it gets the highest priority when and if
 * it is eventually released.
 *
 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
 * by a blocked process.
 * (In the latter case an explicit wait for the hardware lock would cause
 * a deadlock, which is why the "idlelock" was invented).
 *
 * This should be sufficient to wait for GPU idle without
 * having to worry about starvation.
 */
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
	int ret = 0;

	spin_lock_bh(&lock_data->spinlock);
	lock_data->kernel_waiters++;
	if (!lock_data->idle_has_lock) {
		/*
		 * drm_lock_take() retakes the spinlock itself, so drop it
		 * around the call to avoid deadlocking on re-entry.
		 */
		spin_unlock_bh(&lock_data->spinlock);
		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
		spin_lock_bh(&lock_data->spinlock);

		if (ret == 1)
			lock_data->idle_has_lock = 1;
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);

void drm_idlelock_release(struct drm_lock_data *lock_data)
{
	unsigned int old, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	/* Only the last kernel waiter actually releases the hardware lock. */
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			do {
				old = *lock;
				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
			} while (prev != old);
			wake_up_interruptible(&lock_data->lock_queue);
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);

/*
 * Returns non-zero iff this file currently holds the heavyweight lock:
 * it has taken the lock at least once, the lock exists and is marked held,
 * and the holder recorded on the master is this very file.
 */
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_master *master = file_priv->master;
	return (file_priv->lock_count && master->lock.hw_lock &&
		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
		master->lock.file_priv == file_priv);
}
gpl-2.0
varchild/SebastianFM-kernel
drivers/usb/host/uhci-grlib.c
5649
5551
/*
 * UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC
 *
 * Copyright (c) 2011 Jan Andersson <jan@gaisler.com>
 *
 * This file is based on UHCI PCI HCD:
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

/* One-time controller setup called through hc_driver->reset. */
static int uhci_grlib_init(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/*
	 * Probe to determine the endianness of the controller.
	 * We know that bit 7 of the PORTSC1 register is always set
	 * and bit 15 is always clear.  If uhci_readw() yields a value
	 * with bit 7 (0x80) turned on then the current little-endian
	 * setting is correct.  Otherwise we assume the value was
	 * byte-swapped; hence the register interface and presumably
	 * also the descriptors are big-endian.
	 */
	if (!(uhci_readw(uhci, USBPORTSC1) & 0x80)) {
		uhci->big_endian_mmio = 1;
		uhci->big_endian_desc = 1;
	}

	uhci->rh_numports = uhci_count_ports(hcd);

	/* Set up pointers to generic functions */
	uhci->reset_hc = uhci_generic_reset_hc;
	uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
	/* No special actions need to be taken for the functions below */
	uhci->configure_hc = NULL;
	uhci->resume_detect_interrupts_are_broken = NULL;
	uhci->global_suspend_mode_is_broken = NULL;

	/* Reset if the controller isn't already safely quiescent. */
	check_and_reset_hc(uhci);
	return 0;
}

static const struct hc_driver uhci_grlib_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"GRLIB GRUSBHC UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_MEMORY | HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_grlib_init,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.pci_suspend =		NULL,
	.pci_resume =		NULL,
	.bus_suspend =		uhci_rh_suspend,
	.bus_resume =		uhci_rh_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};

/*
 * Bind to a GRUSBHC node from the devicetree: map registers, grab the IRQ
 * and register the HCD.  Resources acquired in order are released in reverse
 * order via the goto-cleanup chain at the bottom.
 */
static int __devinit uhci_hcd_grlib_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct usb_hcd *hcd;
	struct uhci_hcd	*uhci = NULL;
	struct resource res;
	int irq;
	int rv;

	if (usb_disabled())
		return -ENODEV;

	dev_dbg(&op->dev, "initializing GRUSBHC UHCI USB Controller\n");

	rv = of_address_to_resource(dn, 0, &res);
	if (rv)
		return rv;

	/* usb_create_hcd requires dma_mask != NULL */
	op->dev.dma_mask = &op->dev.coherent_dma_mask;

	hcd = usb_create_hcd(&uhci_grlib_hc_driver, &op->dev,
			"GRUSBHC UHCI USB");
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res.start;
	hcd->rsrc_len = resource_size(&res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
		rv = -EBUSY;
		goto err_rmr;
	}

	irq = irq_of_parse_and_map(dn, 0);
	if (irq == NO_IRQ) {
		printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
		rv = -EBUSY;
		goto err_irq;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
		rv = -ENOMEM;
		goto err_ioremap;
	}

	uhci = hcd_to_uhci(hcd);

	uhci->regs = hcd->regs;

	rv = usb_add_hcd(hcd, irq, 0);
	if (rv)
		goto err_uhci;

	return 0;

err_uhci:
	iounmap(hcd->regs);
err_ioremap:
	irq_dispose_mapping(irq);
err_irq:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_rmr:
	usb_put_hcd(hcd);

	return rv;
}

/* Undo everything probe did, in reverse order. */
static int uhci_hcd_grlib_remove(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	dev_set_drvdata(&op->dev, NULL);

	dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n");

	usb_remove_hcd(hcd);

	iounmap(hcd->regs);
	irq_dispose_mapping(hcd->irq);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

	usb_put_hcd(hcd);

	return 0;
}

/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_hcd_grlib_shutdown(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	uhci_hc_died(hcd_to_uhci(hcd));
}

static const struct of_device_id uhci_hcd_grlib_of_match[] = {
	{ .name = "GAISLER_UHCI", },
	{ .name = "01_027", },
	{},
};
MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);

static struct platform_driver uhci_grlib_driver = {
	.probe = uhci_hcd_grlib_probe,
	.remove = uhci_hcd_grlib_remove,
	.shutdown = uhci_hcd_grlib_shutdown,
	.driver = {
		.name = "grlib-uhci",
		.owner = THIS_MODULE,
		.of_match_table = uhci_hcd_grlib_of_match,
	},
};
gpl-2.0
jonsmirl/mpc5200
scripts/dtc/libfdt/fdt_wip.c
7953
4074
/* * libfdt - Flat Device Tree manipulation * Copyright (C) 2006 David Gibson, IBM Corporation. * * libfdt is dual licensed: you can use it either under the terms of * the GPL, or the BSD license, at your option. * * a) This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, * MA 02110-1301 USA * * Alternatively, * * b) Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
 *     IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "libfdt_env.h"

#include <fdt.h>
#include <libfdt.h>

#include "libfdt_internal.h"

/*
 * Overwrite the value of an existing property in place.  The new value
 * must be exactly the same length as the old one, since the blob is not
 * resized; returns -FDT_ERR_NOSPACE otherwise, or the lookup error if
 * the property does not exist.
 */
int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
			const void *val, int len)
{
	void *propval;
	int proplen;

	propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen);
	if (! propval)
		/* proplen holds the (negative) libfdt error code here. */
		return proplen;

	if (proplen != len)
		return -FDT_ERR_NOSPACE;

	memcpy(propval, val, len);
	return 0;
}

/*
 * Fill a region of the structure block with FDT_NOP tags so parsers
 * skip it.  len is assumed to be a multiple of sizeof(uint32_t), which
 * holds for whole properties and nodes.
 */
static void _fdt_nop_region(void *start, int len)
{
	uint32_t *p;

	for (p = start; (char *)p < ((char *)start + len); p++)
		*p = cpu_to_fdt32(FDT_NOP);
}

/* Remove a property by NOPing out its tag, header and value in place. */
int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
{
	struct fdt_property *prop;
	int len;

	prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
	if (! prop)
		/* len holds the (negative) libfdt error code here. */
		return len;

	_fdt_nop_region(prop, len + sizeof(*prop));

	return 0;
}

/*
 * Walk the structure block from nodeoffset, tracking nesting depth, and
 * return the offset just past the node's matching FDT_END_NODE tag.
 */
int _fdt_node_end_offset(void *fdt, int nodeoffset)
{
	int level = 0;
	uint32_t tag;
	int offset, nextoffset;

	tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
	if (tag != FDT_BEGIN_NODE)
		return -FDT_ERR_BADOFFSET;
	do {
		offset = nextoffset;
		tag = fdt_next_tag(fdt, offset, &nextoffset);

		switch (tag) {
		case FDT_END:
			/* Hit the end of the blob before the node closed. */
			return offset;

		case FDT_BEGIN_NODE:
			level++;
			break;

		case FDT_END_NODE:
			level--;
			break;

		case FDT_PROP:
		case FDT_NOP:
			break;

		default:
			return -FDT_ERR_BADSTRUCTURE;
		}
	} while (level >= 0);

	return nextoffset;
}

/* Remove an entire node (and its subtree) by NOPing it out in place. */
int fdt_nop_node(void *fdt, int nodeoffset)
{
	int endoffset;

	endoffset = _fdt_node_end_offset(fdt, nodeoffset);
	if (endoffset < 0)
		return endoffset;

	_fdt_nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0),
			endoffset - nodeoffset);
	return 0;
}
gpl-2.0
piasek1906/Piasek-G2
drivers/lguest/interrupts_and_traps.c
7953
20663
/*P:800 * Interrupts (traps) are complicated enough to earn their own file. * There are three classes of interrupts: * * 1) Real hardware interrupts which occur while we're running the Guest, * 2) Interrupts for virtual devices attached to the Guest, and * 3) Traps and faults from the Guest. * * Real hardware interrupts must be delivered to the Host, not the Guest. * Virtual interrupts must be delivered to the Guest, but we make them look * just like real hardware would deliver them. Traps from the Guest can be set * up to go directly back into the Guest, but sometimes the Host wants to see * them first, so we also have a way of "reflecting" them into the Guest as if * they had been delivered to it directly. :*/ #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/sched.h> #include "lg.h" /* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */ static unsigned int syscall_vector = SYSCALL_VECTOR; module_param(syscall_vector, uint, 0444); /* The address of the interrupt handler is split into two bits: */ static unsigned long idt_address(u32 lo, u32 hi) { return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); } /* * The "type" of the interrupt handler is a 4 bit field: we only support a * couple of types. */ static int idt_type(u32 lo, u32 hi) { return (hi >> 8) & 0xF; } /* An IDT entry can't be used unless the "present" bit is set. */ static bool idt_present(u32 lo, u32 hi) { return (hi & 0x8000); } /* * We need a helper to "push" a value onto the Guest's stack, since that's a * big part of what delivering an interrupt does. */ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) { /* Stack grows upwards: move stack then write value. */ *gstack -= 4; lgwrite(cpu, *gstack, u32, val); } /*H:210 * The set_guest_interrupt() routine actually delivers the interrupt or * trap. 
The mechanics of delivering traps and interrupts to the Guest are the * same, except some traps have an "error code" which gets pushed onto the * stack as well: the caller tells us if this is one. * * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this * interrupt or trap. It's split into two parts for traditional reasons: gcc * on i386 used to be frightened by 64 bit numbers. * * We set up the stack just like the CPU does for a real interrupt, so it's * identical for the Guest (and the standard "iret" instruction will undo * it). */ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, bool has_err) { unsigned long gstack, origstack; u32 eflags, ss, irq_enable; unsigned long virtstack; /* * There are two cases for interrupts: one where the Guest is already * in the kernel, and a more complex one where the Guest is in * userspace. We check the privilege level to find out. */ if ((cpu->regs->ss&0x3) != GUEST_PL) { /* * The Guest told us their kernel stack with the SET_STACK * hypercall: both the virtual address and the segment. */ virtstack = cpu->esp1; ss = cpu->ss1; origstack = gstack = guest_pa(cpu, virtstack); /* * We push the old stack segment and pointer onto the new * stack: when the Guest does an "iret" back from the interrupt * handler the CPU will notice they're dropping privilege * levels and expect these here. */ push_guest_stack(cpu, &gstack, cpu->regs->ss); push_guest_stack(cpu, &gstack, cpu->regs->esp); } else { /* We're staying on the same Guest (kernel) stack. */ virtstack = cpu->regs->esp; ss = cpu->regs->ss; origstack = gstack = guest_pa(cpu, virtstack); } /* * Remember that we never let the Guest actually disable interrupts, so * the "Interrupt Flag" bit is always set. We copy that bit from the * Guest's "irq_enabled" field into the eflags word: we saw the Guest * copy it back in "lguest_iret". 
*/ eflags = cpu->regs->eflags; if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 && !(irq_enable & X86_EFLAGS_IF)) eflags &= ~X86_EFLAGS_IF; /* * An interrupt is expected to push three things on the stack: the old * "eflags" word, the old code segment, and the old instruction * pointer. */ push_guest_stack(cpu, &gstack, eflags); push_guest_stack(cpu, &gstack, cpu->regs->cs); push_guest_stack(cpu, &gstack, cpu->regs->eip); /* For the six traps which supply an error code, we push that, too. */ if (has_err) push_guest_stack(cpu, &gstack, cpu->regs->errcode); /* * Now we've pushed all the old state, we change the stack, the code * segment and the address to execute. */ cpu->regs->ss = ss; cpu->regs->esp = virtstack + (gstack - origstack); cpu->regs->cs = (__KERNEL_CS|GUEST_PL); cpu->regs->eip = idt_address(lo, hi); /* * There are two kinds of interrupt handlers: 0xE is an "interrupt * gate" which expects interrupts to be disabled on entry. */ if (idt_type(lo, hi) == 0xE) if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) kill_guest(cpu, "Disabling interrupts"); } /*H:205 * Virtual Interrupts. * * interrupt_pending() returns the first pending interrupt which isn't blocked * by the Guest. It is called before every entry to the Guest, and just before * we go to sleep when the Guest has halted itself. */ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) { unsigned int irq; DECLARE_BITMAP(blk, LGUEST_IRQS); /* If the Guest hasn't even initialized yet, we can do nothing. */ if (!cpu->lg->lguest_data) return LGUEST_IRQS; /* * Take our "irqs_pending" array and remove any interrupts the Guest * wants blocked: the result ends up in "blk". */ if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, sizeof(blk))) return LGUEST_IRQS; bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); /* Find the first interrupt. 
*/ irq = find_first_bit(blk, LGUEST_IRQS); *more = find_next_bit(blk, LGUEST_IRQS, irq+1); return irq; } /* * This actually diverts the Guest to running an interrupt handler, once an * interrupt has been identified by interrupt_pending(). */ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) { struct desc_struct *idt; BUG_ON(irq >= LGUEST_IRQS); /* * They may be in the middle of an iret, where they asked us never to * deliver interrupts. */ if (cpu->regs->eip >= cpu->lg->noirq_start && (cpu->regs->eip < cpu->lg->noirq_end)) return; /* If they're halted, interrupts restart them. */ if (cpu->halted) { /* Re-enable interrupts. */ if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled)) kill_guest(cpu, "Re-enabling interrupts"); cpu->halted = 0; } else { /* Otherwise we check if they have interrupts disabled. */ u32 irq_enabled; if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled)) irq_enabled = 0; if (!irq_enabled) { /* Make sure they know an IRQ is pending. */ put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_pending); return; } } /* * Look at the IDT entry the Guest gave us for this interrupt. The * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip * over them. */ idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; /* If they don't have a handler (yet?), we just ignore it */ if (idt_present(idt->a, idt->b)) { /* OK, mark it no longer pending and deliver it. */ clear_bit(irq, cpu->irqs_pending); /* * set_guest_interrupt() takes the interrupt descriptor and a * flag to say whether this interrupt pushes an error code onto * the stack as well: virtual interrupts never do. */ set_guest_interrupt(cpu, idt->a, idt->b, false); } /* * Every time we deliver an interrupt, we update the timestamp in the * Guest's lguest_data struct. It would be better for the Guest if we * did this more often, but it can actually be quite slow: doing it * here is a compromise which means at least it gets updated every * timer interrupt. 
 */
	write_timestamp(cpu);

	/*
	 * If there are no other interrupts we want to deliver, clear
	 * the pending flag.
	 */
	if (!more)
		put_user(0, &cpu->lg->lguest_data->irq_pending);
}

/* And this is the routine when we want to set an interrupt for the Guest. */
void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
{
	/*
	 * Next time the Guest runs, the core code will see if it can deliver
	 * this interrupt.
	 */
	set_bit(irq, cpu->irqs_pending);

	/*
	 * Make sure it sees it; it might be asleep (eg. halted), or running
	 * the Guest right now, in which case kick_process() will knock it out.
	 */
	if (!wake_up_process(cpu->tsk))
		kick_process(cpu->tsk);
}
/*:*/

/*
 * Linux uses trap 128 for system calls.  Plan9 uses 64, and Ron Minnich sent
 * me a patch, so we support that too.  It'd be a big step for lguest if half
 * the Plan 9 user base were to start using it.
 *
 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
 * userbase.  Oh well.
 */
static bool could_be_syscall(unsigned int num)
{
	/* Normal Linux SYSCALL_VECTOR or reserved vector? */
	return num == SYSCALL_VECTOR || num == syscall_vector;
}

/* The syscall vector it wants must be unused by Host. */
bool check_syscall_vector(struct lguest *lg)
{
	u32 vector;

	if (get_user(vector, &lg->lguest_data->syscall_vec))
		return false;

	return could_be_syscall(vector);
}

/* Reserve the module-parameter syscall vector on the Host, if non-default. */
int init_interrupts(void)
{
	/* If they want some strange system call vector, reserve it now */
	if (syscall_vector != SYSCALL_VECTOR) {
		if (test_bit(syscall_vector, used_vectors) ||
		    vector_used_by_percpu_irq(syscall_vector)) {
			printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
				 syscall_vector);
			return -EBUSY;
		}
		set_bit(syscall_vector, used_vectors);
	}

	return 0;
}

/* Release the vector reserved by init_interrupts(), if any. */
void free_interrupts(void)
{
	if (syscall_vector != SYSCALL_VECTOR)
		clear_bit(syscall_vector, used_vectors);
}

/*H:220
 * Now we've got the routines to deliver interrupts, delivering traps like
 * page fault is easy.
The only trick is that Intel decided that some traps * should have error codes: */ static bool has_err(unsigned int trap) { return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); } /* deliver_trap() returns true if it could deliver the trap. */ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) { /* * Trap numbers are always 8 bit, but we set an impossible trap number * for traps inside the Switcher, so check that here. */ if (num >= ARRAY_SIZE(cpu->arch.idt)) return false; /* * Early on the Guest hasn't set the IDT entries (or maybe it put a * bogus one in): if we fail here, the Guest will be killed. */ if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) return false; set_guest_interrupt(cpu, cpu->arch.idt[num].a, cpu->arch.idt[num].b, has_err(num)); return true; } /*H:250 * Here's the hard part: returning to the Host every time a trap happens * and then calling deliver_trap() and re-entering the Guest is slow. * Particularly because Guest userspace system calls are traps (usually trap * 128). * * So we'd like to set up the IDT to tell the CPU to deliver traps directly * into the Guest. This is possible, but the complexities cause the size of * this file to double! However, 150 lines of code is worth writing for taking * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all * the other hypervisors would beat it up at lunchtime. * * This routine indicates if a particular trap number could be delivered * directly. */ static bool direct_trap(unsigned int num) { /* * Hardware interrupts don't go to the Guest at all (except system * call). */ if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) return false; /* * The Host needs to see page faults (for shadow paging and to save the * fault address), general protection faults (in/out emulation) and * device not available (TS handling) and of course, the hypercall trap. 
*/ return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY; } /*:*/ /*M:005 * The Guest has the ability to turn its interrupt gates into trap gates, * if it is careful. The Host will let trap gates can go directly to the * Guest, but the Guest needs the interrupts atomically disabled for an * interrupt gate. It can do this by pointing the trap gate at instructions * within noirq_start and noirq_end, where it can safely disable interrupts. */ /*M:006 * The Guests do not use the sysenter (fast system call) instruction, * because it's hardcoded to enter privilege level 0 and so can't go direct. * It's about twice as fast as the older "int 0x80" system call, so it might * still be worthwhile to handle it in the Switcher and lcall down to the * Guest. The sysenter semantics are hairy tho: search for that keyword in * entry.S :*/ /*H:260 * When we make traps go directly into the Guest, we need to make sure * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the * CPU trying to deliver the trap will fault while trying to push the interrupt * words on the stack: this is called a double fault, and it forces us to kill * the Guest. * * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */ void pin_stack_pages(struct lg_cpu *cpu) { unsigned int i; /* * Depending on the CONFIG_4KSTACKS option, the Guest can have one or * two pages of stack space. */ for (i = 0; i < cpu->lg->stack_pages; i++) /* * The stack grows *upwards*, so the address we're given is the * start of the page after the kernel stack. Subtract one to * get back onto the first stack page, and keep subtracting to * get to the rest of the stack pages. */ pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); } /* * Direct traps also mean that we need to know whenever the Guest wants to use * a different kernel stack, so we can change the guest TSS to use that * stack. 
The TSS entries expect a virtual address, so unlike most addresses * the Guest gives us, the "esp" (stack pointer) value here is virtual, not * physical. * * In Linux each process has its own kernel stack, so this happens a lot: we * change stacks on each context switch. */ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) { /* * You're not allowed a stack segment with privilege level 0: bad Guest! */ if ((seg & 0x3) != GUEST_PL) kill_guest(cpu, "bad stack segment %i", seg); /* We only expect one or two stack pages. */ if (pages > 2) kill_guest(cpu, "bad stack pages %u", pages); /* Save where the stack is, and how many pages */ cpu->ss1 = seg; cpu->esp1 = esp; cpu->lg->stack_pages = pages; /* Make sure the new stack pages are mapped */ pin_stack_pages(cpu); } /* * All this reference to mapping stacks leads us neatly into the other complex * part of the Host: page table handling. */ /*H:235 * This is the routine which actually checks the Guest's IDT entry and * transfers it into the entry in "struct lguest": */ static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, unsigned int num, u32 lo, u32 hi) { u8 type = idt_type(lo, hi); /* We zero-out a not-present entry */ if (!idt_present(lo, hi)) { trap->a = trap->b = 0; return; } /* We only support interrupt and trap gates. */ if (type != 0xE && type != 0xF) kill_guest(cpu, "bad IDT type %i", type); /* * We only copy the handler address, present bit, privilege level and * type. The privilege level controls where the trap can be triggered * manually with an "int" instruction. This is usually GUEST_PL, * except for system calls which userspace can use. */ trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); trap->b = (hi&0xFFFFEF00); } /*H:230 * While we're here, dealing with delivering traps and interrupts to the * Guest, we might as well complete the picture: how the Guest tells us where * it wants them to go. 
This would be simple, except making traps fast * requires some tricks. * * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) { /* * Guest never handles: NMI, doublefault, spurious interrupt or * hypercall. We ignore when it tries to set them. */ if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) return; /* * Mark the IDT as changed: next time the Guest runs we'll know we have * to copy this again. */ cpu->changed |= CHANGED_IDT; /* Check that the Guest doesn't try to step outside the bounds. */ if (num >= ARRAY_SIZE(cpu->arch.idt)) kill_guest(cpu, "Setting idt entry %u", num); else set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); } /* * The default entry for each interrupt points into the Switcher routines which * simply return to the Host. The run_guest() loop will then call * deliver_trap() to bounce it back into the Guest. */ static void default_idt_entry(struct desc_struct *idt, int trap, const unsigned long handler, const struct desc_struct *base) { /* A present interrupt gate. */ u32 flags = 0x8e00; /* * Set the privilege level on the entry for the hypercall: this allows * the Guest to use the "int" instruction to trigger it. */ if (trap == LGUEST_TRAP_ENTRY) flags |= (GUEST_PL << 13); else if (base) /* * Copy privilege level from what Guest asked for. This allows * debug (int 3) traps from Guest userspace, for example. */ flags |= (base->b & 0x6000); /* Now pack it into the IDT entry in its weird format. */ idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF); idt->b = (handler&0xFFFF0000) | flags; } /* When the Guest first starts, we put default entries into the IDT. 
*/ void setup_default_idt_entries(struct lguest_ro_state *state, const unsigned long *def) { unsigned int i; for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++) default_idt_entry(&state->guest_idt[i], i, def[i], NULL); } /*H:240 * We don't use the IDT entries in the "struct lguest" directly, instead * we copy them into the IDT which we've set up for Guests on this CPU, just * before we run the Guest. This routine does that copy. */ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, const unsigned long *def) { unsigned int i; /* * We can simply copy the direct traps, otherwise we use the default * ones in the Switcher: they will return to the Host. */ for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { const struct desc_struct *gidt = &cpu->arch.idt[i]; /* If no Guest can ever override this trap, leave it alone. */ if (!direct_trap(i)) continue; /* * Only trap gates (type 15) can go direct to the Guest. * Interrupt gates (type 14) disable interrupts as they are * entered, which we never let the Guest do. Not present * entries (type 0x0) also can't go direct, of course. * * If it can't go direct, we still need to copy the priv. level: * they might want to give userspace access to a software * interrupt. */ if (idt_type(gidt->a, gidt->b) == 0xF) idt[i] = *gidt; else default_idt_entry(&idt[i], i, def[i], gidt); } } /*H:200 * The Guest Clock. * * There are two sources of virtual interrupts. We saw one in lguest_user.c: * the Launcher sending interrupts for virtual devices. The other is the Guest * timer interrupt. * * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to * the next timer interrupt (in nanoseconds). We use the high-resolution timer * infrastructure to set a callback at that time. * * 0 means "turn off the clock". */ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) { ktime_t expires; if (unlikely(delta == 0)) { /* Clock event device is shutting down. 
*/ hrtimer_cancel(&cpu->hrt); return; } /* * We use wallclock time here, so the Guest might not be running for * all the time between now and the timer interrupt it asked for. This * is almost always the right thing to do. */ expires = ktime_add_ns(ktime_get_real(), delta); hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); } /* This is the function called when the Guest's timer expires. */ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) { struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt); /* Remember the first interrupt is the timer interrupt. */ set_interrupt(cpu, 0); return HRTIMER_NORESTART; } /* This sets up the timer for this Guest. */ void init_clockdev(struct lg_cpu *cpu) { hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); cpu->hrt.function = clockdev_fn; }
gpl-2.0
izzaeroth/kernel_vzw_m8
drivers/media/video/pvrusb2/pvrusb2-dvb.c
13841
11041
/* * pvrusb2-dvb.c - linux-dvb api interface to the pvrusb2 driver. * * Copyright (C) 2007, 2008 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/mm.h> #include "dvbdev.h" #include "pvrusb2-debug.h" #include "pvrusb2-hdw-internal.h" #include "pvrusb2-hdw.h" #include "pvrusb2-io.h" #include "pvrusb2-dvb.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap) { int ret; unsigned int count; struct pvr2_buffer *bp; struct pvr2_stream *stream; pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread started"); set_freezable(); stream = adap->channel.stream->stream; for (;;) { if (kthread_should_stop()) break; /* Not sure about this... */ try_to_freeze(); bp = pvr2_stream_get_ready_buffer(stream); if (bp != NULL) { count = pvr2_buffer_get_count(bp); if (count) { dvb_dmx_swfilter( &adap->demux, adap->buffer_storage[ pvr2_buffer_get_id(bp)], count); } else { ret = pvr2_buffer_get_status(bp); if (ret < 0) break; } ret = pvr2_buffer_queue(bp); if (ret < 0) break; /* Since we know we did something to a buffer, just go back and try again. No point in blocking unless we really ran out of buffers to process. */ continue; } /* Wait until more buffers become available or we're told not to wait any longer. 
*/ ret = wait_event_interruptible( adap->buffer_wait_data, (pvr2_stream_get_ready_count(stream) > 0) || kthread_should_stop()); if (ret < 0) break; } /* If we get here and ret is < 0, then an error has occurred. Probably would be a good idea to communicate that to DVB core... */ pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread stopped"); return 0; } static int pvr2_dvb_feed_thread(void *data) { int stat = pvr2_dvb_feed_func(data); /* from videobuf-dvb.c: */ while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); schedule(); } return stat; } static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap) { wake_up(&adap->buffer_wait_data); } static void pvr2_dvb_stream_end(struct pvr2_dvb_adapter *adap) { unsigned int idx; struct pvr2_stream *stream; if (adap->thread) { kthread_stop(adap->thread); adap->thread = NULL; } if (adap->channel.stream) { stream = adap->channel.stream->stream; } else { stream = NULL; } if (stream) { pvr2_hdw_set_streaming(adap->channel.hdw, 0); pvr2_stream_set_callback(stream, NULL, NULL); pvr2_stream_kill(stream); pvr2_stream_set_buffer_count(stream, 0); pvr2_channel_claim_stream(&adap->channel, NULL); } if (adap->stream_run) { for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) { if (!(adap->buffer_storage[idx])) continue; kfree(adap->buffer_storage[idx]); adap->buffer_storage[idx] = NULL; } adap->stream_run = 0; } } static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap) { struct pvr2_context *pvr = adap->channel.mc_head; unsigned int idx; int ret; struct pvr2_buffer *bp; struct pvr2_stream *stream = NULL; if (adap->stream_run) return -EIO; ret = pvr2_channel_claim_stream(&adap->channel, &pvr->video_stream); /* somebody else already has the stream */ if (ret < 0) return ret; stream = adap->channel.stream->stream; for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) { adap->buffer_storage[idx] = kmalloc(PVR2_DVB_BUFFER_SIZE, GFP_KERNEL); if (!(adap->buffer_storage[idx])) return -ENOMEM; } 
pvr2_stream_set_callback(pvr->video_stream.stream, (pvr2_stream_callback) pvr2_dvb_notify, adap); ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT); if (ret < 0) return ret; for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) { bp = pvr2_stream_get_buffer(stream, idx); pvr2_buffer_set_buffer(bp, adap->buffer_storage[idx], PVR2_DVB_BUFFER_SIZE); } ret = pvr2_hdw_set_streaming(adap->channel.hdw, 1); if (ret < 0) return ret; while ((bp = pvr2_stream_get_idle_buffer(stream)) != NULL) { ret = pvr2_buffer_queue(bp); if (ret < 0) return ret; } adap->thread = kthread_run(pvr2_dvb_feed_thread, adap, "pvrusb2-dvb"); if (IS_ERR(adap->thread)) { ret = PTR_ERR(adap->thread); adap->thread = NULL; return ret; } adap->stream_run = !0; return 0; } static int pvr2_dvb_stream_start(struct pvr2_dvb_adapter *adap) { int ret = pvr2_dvb_stream_do_start(adap); if (ret < 0) pvr2_dvb_stream_end(adap); return ret; } static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff) { struct pvr2_dvb_adapter *adap = dvbdmxfeed->demux->priv; int ret = 0; if (adap == NULL) return -ENODEV; mutex_lock(&adap->lock); do { if (onoff) { if (!adap->feedcount) { pvr2_trace(PVR2_TRACE_DVB_FEED, "start feeding demux"); ret = pvr2_dvb_stream_start(adap); if (ret < 0) break; } (adap->feedcount)++; } else if (adap->feedcount > 0) { (adap->feedcount)--; if (!adap->feedcount) { pvr2_trace(PVR2_TRACE_DVB_FEED, "stop feeding demux"); pvr2_dvb_stream_end(adap); } } } while (0); mutex_unlock(&adap->lock); return ret; } static int pvr2_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed) { pvr2_trace(PVR2_TRACE_DVB_FEED, "start pid: 0x%04x", dvbdmxfeed->pid); return pvr2_dvb_ctrl_feed(dvbdmxfeed, 1); } static int pvr2_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed) { pvr2_trace(PVR2_TRACE_DVB_FEED, "stop pid: 0x%04x", dvbdmxfeed->pid); return pvr2_dvb_ctrl_feed(dvbdmxfeed, 0); } static int pvr2_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire) { struct pvr2_dvb_adapter *adap = fe->dvb->priv; 
return pvr2_channel_limit_inputs( &adap->channel, (acquire ? (1 << PVR2_CVAL_INPUT_DTV) : 0)); } static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap) { int ret; ret = dvb_register_adapter(&adap->dvb_adap, "pvrusb2-dvb", THIS_MODULE/*&hdw->usb_dev->owner*/, &adap->channel.hdw->usb_dev->dev, adapter_nr); if (ret < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "dvb_register_adapter failed: error %d", ret); goto err; } adap->dvb_adap.priv = adap; adap->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; adap->demux.priv = adap; adap->demux.filternum = 256; adap->demux.feednum = 256; adap->demux.start_feed = pvr2_dvb_start_feed; adap->demux.stop_feed = pvr2_dvb_stop_feed; adap->demux.write_to_decoder = NULL; ret = dvb_dmx_init(&adap->demux); if (ret < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "dvb_dmx_init failed: error %d", ret); goto err_dmx; } adap->dmxdev.filternum = adap->demux.filternum; adap->dmxdev.demux = &adap->demux.dmx; adap->dmxdev.capabilities = 0; ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap); if (ret < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "dvb_dmxdev_init failed: error %d", ret); goto err_dmx_dev; } dvb_net_init(&adap->dvb_adap, &adap->dvb_net, &adap->demux.dmx); return 0; err_dmx_dev: dvb_dmx_release(&adap->demux); err_dmx: dvb_unregister_adapter(&adap->dvb_adap); err: return ret; } static int pvr2_dvb_adapter_exit(struct pvr2_dvb_adapter *adap) { pvr2_trace(PVR2_TRACE_INFO, "unregistering DVB devices"); dvb_net_release(&adap->dvb_net); adap->demux.dmx.close(&adap->demux.dmx); dvb_dmxdev_release(&adap->dmxdev); dvb_dmx_release(&adap->demux); dvb_unregister_adapter(&adap->dvb_adap); return 0; } static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap) { struct pvr2_hdw *hdw = adap->channel.hdw; const struct pvr2_dvb_props *dvb_props = hdw->hdw_desc->dvb_props; int ret = 0; if (dvb_props == NULL) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "fe_props not defined!"); return -EINVAL; } ret = 
pvr2_channel_limit_inputs( &adap->channel, (1 << PVR2_CVAL_INPUT_DTV)); if (ret) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "failed to grab control of dtv input (code=%d)", ret); return ret; } if (dvb_props->frontend_attach == NULL) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "frontend_attach not defined!"); ret = -EINVAL; goto done; } if ((dvb_props->frontend_attach(adap) == 0) && (adap->fe)) { if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "frontend registration failed!"); dvb_frontend_detach(adap->fe); adap->fe = NULL; ret = -ENODEV; goto done; } if (dvb_props->tuner_attach) dvb_props->tuner_attach(adap); if (adap->fe->ops.analog_ops.standby) adap->fe->ops.analog_ops.standby(adap->fe); /* Ensure all frontends negotiate bus access */ adap->fe->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl; } else { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "no frontend was attached!"); ret = -ENODEV; return ret; } done: pvr2_channel_limit_inputs(&adap->channel, 0); return ret; } static int pvr2_dvb_frontend_exit(struct pvr2_dvb_adapter *adap) { if (adap->fe != NULL) { dvb_unregister_frontend(adap->fe); dvb_frontend_detach(adap->fe); } return 0; } static void pvr2_dvb_destroy(struct pvr2_dvb_adapter *adap) { pvr2_dvb_stream_end(adap); pvr2_dvb_frontend_exit(adap); pvr2_dvb_adapter_exit(adap); pvr2_channel_done(&adap->channel); kfree(adap); } static void pvr2_dvb_internal_check(struct pvr2_channel *chp) { struct pvr2_dvb_adapter *adap; adap = container_of(chp, struct pvr2_dvb_adapter, channel); if (!adap->channel.mc_head->disconnect_flag) return; pvr2_dvb_destroy(adap); } struct pvr2_dvb_adapter *pvr2_dvb_create(struct pvr2_context *pvr) { int ret = 0; struct pvr2_dvb_adapter *adap; if (!pvr->hdw->hdw_desc->dvb_props) { /* Device lacks a digital interface so don't set up the DVB side of the driver either. For now. 
*/ return NULL; } adap = kzalloc(sizeof(*adap), GFP_KERNEL); if (!adap) return adap; pvr2_channel_init(&adap->channel, pvr); adap->channel.check_func = pvr2_dvb_internal_check; init_waitqueue_head(&adap->buffer_wait_data); mutex_init(&adap->lock); ret = pvr2_dvb_adapter_init(adap); if (ret < 0) goto fail1; ret = pvr2_dvb_frontend_init(adap); if (ret < 0) goto fail2; return adap; fail2: pvr2_dvb_adapter_exit(adap); fail1: pvr2_channel_done(&adap->channel); return NULL; }
gpl-2.0
raj-bhatia/grooveip-ios-public
submodules/externals/libvpx/vpx_dsp/x86/highbd_quantize_intrin_sse2.c
18
6717
/* * Copyright (c) 2015 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include <emmintrin.h> #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_mem/vpx_mem.h" #include "vpx_ports/mem.h" #if CONFIG_VP9_HIGHBITDEPTH void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan) { int i, j, non_zero_regs = (int)count / 4, eob_i = -1; __m128i zbins[2]; __m128i nzbins[2]; zbins[0] = _mm_set_epi32((int)zbin_ptr[1], (int)zbin_ptr[1], (int)zbin_ptr[1], (int)zbin_ptr[0]); zbins[1] = _mm_set1_epi32((int)zbin_ptr[1]); nzbins[0] = _mm_setzero_si128(); nzbins[1] = _mm_setzero_si128(); nzbins[0] = _mm_sub_epi32(nzbins[0], zbins[0]); nzbins[1] = _mm_sub_epi32(nzbins[1], zbins[1]); (void)scan; memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr)); memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr)); if (!skip_block) { // Pre-scan pass for (i = ((int)count / 4) - 1; i >= 0; i--) { __m128i coeffs, cmp1, cmp2; int test; coeffs = _mm_load_si128((const __m128i *)(coeff_ptr + i * 4)); cmp1 = _mm_cmplt_epi32(coeffs, zbins[i != 0]); cmp2 = _mm_cmpgt_epi32(coeffs, nzbins[i != 0]); cmp1 = _mm_and_si128(cmp1, cmp2); test = _mm_movemask_epi8(cmp1); if (test == 0xffff) non_zero_regs--; else break; } // Quantization pass: for (i = 0; i < non_zero_regs; i++) { __m128i coeffs, coeffs_sign, tmp1, tmp2; int test; int abs_coeff[4]; int coeff_sign[4]; coeffs = _mm_load_si128((const __m128i *)(coeff_ptr + i * 
4)); coeffs_sign = _mm_srai_epi32(coeffs, 31); coeffs = _mm_sub_epi32( _mm_xor_si128(coeffs, coeffs_sign), coeffs_sign); tmp1 = _mm_cmpgt_epi32(coeffs, zbins[i != 0]); tmp2 = _mm_cmpeq_epi32(coeffs, zbins[i != 0]); tmp1 = _mm_or_si128(tmp1, tmp2); test = _mm_movemask_epi8(tmp1); _mm_storeu_si128((__m128i*)abs_coeff, coeffs); _mm_storeu_si128((__m128i*)coeff_sign, coeffs_sign); for (j = 0; j < 4; j++) { if (test & (1 << (4 * j))) { int k = 4 * i + j; const int64_t tmp1 = abs_coeff[j] + round_ptr[k != 0]; const int64_t tmp2 = ((tmp1 * quant_ptr[k != 0]) >> 16) + tmp1; const uint32_t abs_qcoeff = (uint32_t)((tmp2 * quant_shift_ptr[k != 0]) >> 16); qcoeff_ptr[k] = (int)(abs_qcoeff ^ coeff_sign[j]) - coeff_sign[j]; dqcoeff_ptr[k] = qcoeff_ptr[k] * dequant_ptr[k != 0]; if (abs_qcoeff) eob_i = iscan[k] > eob_i ? iscan[k] : eob_i; } } } } *eob_ptr = eob_i + 1; } void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan) { __m128i zbins[2]; __m128i nzbins[2]; int idx = 0; int idx_arr[1024]; int i, eob = -1; const int zbin0_tmp = ROUND_POWER_OF_TWO(zbin_ptr[0], 1); const int zbin1_tmp = ROUND_POWER_OF_TWO(zbin_ptr[1], 1); (void)scan; zbins[0] = _mm_set_epi32(zbin1_tmp, zbin1_tmp, zbin1_tmp, zbin0_tmp); zbins[1] = _mm_set1_epi32(zbin1_tmp); nzbins[0] = _mm_setzero_si128(); nzbins[1] = _mm_setzero_si128(); nzbins[0] = _mm_sub_epi32(nzbins[0], zbins[0]); nzbins[1] = _mm_sub_epi32(nzbins[1], zbins[1]); memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr)); if (!skip_block) { // Pre-scan pass for (i = 0; i < n_coeffs / 4; i++) { __m128i coeffs, cmp1, cmp2; int test; coeffs = _mm_load_si128((const __m128i *)(coeff_ptr + i * 4)); cmp1 = 
_mm_cmplt_epi32(coeffs, zbins[i != 0]); cmp2 = _mm_cmpgt_epi32(coeffs, nzbins[i != 0]); cmp1 = _mm_and_si128(cmp1, cmp2); test = _mm_movemask_epi8(cmp1); if (!(test & 0xf)) idx_arr[idx++] = i * 4; if (!(test & 0xf0)) idx_arr[idx++] = i * 4 + 1; if (!(test & 0xf00)) idx_arr[idx++] = i * 4 + 2; if (!(test & 0xf000)) idx_arr[idx++] = i * 4 + 3; } // Quantization pass: only process the coefficients selected in // pre-scan pass. Note: idx can be zero. for (i = 0; i < idx; i++) { const int rc = idx_arr[i]; const int coeff = coeff_ptr[rc]; const int coeff_sign = (coeff >> 31); const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; const int64_t tmp1 = abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1; const uint32_t abs_qcoeff = (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 15); qcoeff_ptr[rc] = (int)(abs_qcoeff ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; if (abs_qcoeff) eob = iscan[idx_arr[i]] > eob ? iscan[idx_arr[i]] : eob; } } *eob_ptr = eob + 1; } #endif
gpl-2.0
smarkwell/asuswrt-merlin
release/src/router/samba-3.0.25b/source/printing/printing.c
18
77388
/* Unix SMB/Netbios implementation. Version 3.0 printing backend routines Copyright (C) Andrew Tridgell 1992-2000 Copyright (C) Jeremy Allison 2002 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "includes.h" #include "printing.h" extern SIG_ATOMIC_T got_sig_term; extern SIG_ATOMIC_T reload_after_sighup; extern struct current_user current_user; extern userdom_struct current_user_info; /* Current printer interface */ static BOOL remove_from_jobs_changed(const char* sharename, uint32 jobid); /* the printing backend revolves around a tdb database that stores the SMB view of the print queue The key for this database is a jobid - a internally generated number that uniquely identifies a print job reading the print queue involves two steps: - possibly running lpq and updating the internal database from that - reading entries from the database jobids are assigned when a job starts spooling. */ static TDB_CONTEXT *rap_tdb; static uint16 next_rap_jobid; struct rap_jobid_key { fstring sharename; uint32 jobid; }; /*************************************************************************** Nightmare. LANMAN jobid's are 16 bit numbers..... We must map them to 32 bit RPC jobids.... JRA. 
***************************************************************************/ uint16 pjobid_to_rap(const char* sharename, uint32 jobid) { uint16 rap_jobid; TDB_DATA data, key; struct rap_jobid_key jinfo; uint8 buf[2]; DEBUG(10,("pjobid_to_rap: called.\n")); if (!rap_tdb) { /* Create the in-memory tdb. */ rap_tdb = tdb_open_log(NULL, 0, TDB_INTERNAL, (O_RDWR|O_CREAT), 0644); if (!rap_tdb) return 0; } ZERO_STRUCT( jinfo ); fstrcpy( jinfo.sharename, sharename ); jinfo.jobid = jobid; key.dptr = (char*)&jinfo; key.dsize = sizeof(jinfo); data = tdb_fetch(rap_tdb, key); if (data.dptr && data.dsize == sizeof(uint16)) { rap_jobid = SVAL(data.dptr, 0); SAFE_FREE(data.dptr); DEBUG(10,("pjobid_to_rap: jobid %u maps to RAP jobid %u\n", (unsigned int)jobid, (unsigned int)rap_jobid)); return rap_jobid; } SAFE_FREE(data.dptr); /* Not found - create and store mapping. */ rap_jobid = ++next_rap_jobid; if (rap_jobid == 0) rap_jobid = ++next_rap_jobid; SSVAL(buf,0,rap_jobid); data.dptr = (char*)buf; data.dsize = sizeof(rap_jobid); tdb_store(rap_tdb, key, data, TDB_REPLACE); tdb_store(rap_tdb, data, key, TDB_REPLACE); DEBUG(10,("pjobid_to_rap: created jobid %u maps to RAP jobid %u\n", (unsigned int)jobid, (unsigned int)rap_jobid)); return rap_jobid; } BOOL rap_to_pjobid(uint16 rap_jobid, fstring sharename, uint32 *pjobid) { TDB_DATA data, key; uint8 buf[2]; DEBUG(10,("rap_to_pjobid called.\n")); if (!rap_tdb) return False; SSVAL(buf,0,rap_jobid); key.dptr = (char*)buf; key.dsize = sizeof(rap_jobid); data = tdb_fetch(rap_tdb, key); if ( data.dptr && data.dsize == sizeof(struct rap_jobid_key) ) { struct rap_jobid_key *jinfo = (struct rap_jobid_key*)data.dptr; fstrcpy( sharename, jinfo->sharename ); *pjobid = jinfo->jobid; DEBUG(10,("rap_to_pjobid: jobid %u maps to RAP jobid %u\n", (unsigned int)*pjobid, (unsigned int)rap_jobid)); SAFE_FREE(data.dptr); return True; } DEBUG(10,("rap_to_pjobid: Failed to lookup RAP jobid %u\n", (unsigned int)rap_jobid)); SAFE_FREE(data.dptr); return False; } 
static void rap_jobid_delete(const char* sharename, uint32 jobid) { TDB_DATA key, data; uint16 rap_jobid; struct rap_jobid_key jinfo; uint8 buf[2]; DEBUG(10,("rap_jobid_delete: called.\n")); if (!rap_tdb) return; ZERO_STRUCT( jinfo ); fstrcpy( jinfo.sharename, sharename ); jinfo.jobid = jobid; key.dptr = (char*)&jinfo; key.dsize = sizeof(jinfo); data = tdb_fetch(rap_tdb, key); if (!data.dptr || (data.dsize != sizeof(uint16))) { DEBUG(10,("rap_jobid_delete: cannot find jobid %u\n", (unsigned int)jobid )); SAFE_FREE(data.dptr); return; } DEBUG(10,("rap_jobid_delete: deleting jobid %u\n", (unsigned int)jobid )); rap_jobid = SVAL(data.dptr, 0); SAFE_FREE(data.dptr); SSVAL(buf,0,rap_jobid); data.dptr = (char*)buf; data.dsize = sizeof(rap_jobid); tdb_delete(rap_tdb, key); tdb_delete(rap_tdb, data); } static int get_queue_status(const char* sharename, print_status_struct *); /**************************************************************************** Initialise the printing backend. Called once at startup before the fork(). 
****************************************************************************/ BOOL print_backend_init(void) { const char *sversion = "INFO/version"; pstring printing_path; int services = lp_numservices(); int snum; unlink(lock_path("printing.tdb")); pstrcpy(printing_path,lock_path("printing")); mkdir(printing_path,0755); /* handle a Samba upgrade */ for (snum = 0; snum < services; snum++) { struct tdb_print_db *pdb; if (!lp_print_ok(snum)) continue; pdb = get_print_db_byname(lp_const_servicename(snum)); if (!pdb) continue; if (tdb_lock_bystring(pdb->tdb, sversion) == -1) { DEBUG(0,("print_backend_init: Failed to open printer %s database\n", lp_const_servicename(snum) )); release_print_db(pdb); return False; } if (tdb_fetch_int32(pdb->tdb, sversion) != PRINT_DATABASE_VERSION) { tdb_traverse(pdb->tdb, tdb_traverse_delete_fn, NULL); tdb_store_int32(pdb->tdb, sversion, PRINT_DATABASE_VERSION); } tdb_unlock_bystring(pdb->tdb, sversion); release_print_db(pdb); } close_all_print_db(); /* Don't leave any open. */ /* do NT print initialization... */ return nt_printing_init(); } /**************************************************************************** Shut down printing backend. Called once at shutdown to close the tdb. ****************************************************************************/ void printing_end(void) { close_all_print_db(); /* Don't leave any open. */ } /**************************************************************************** Retrieve the set of printing functions for a given service. This allows us to set the printer function table based on the value of the 'printing' service parameter. 
Use the generic interface as the default and only use cups interface only when asked for (and only when supported) ****************************************************************************/ static struct printif *get_printer_fns_from_type( enum printing_types type ) { struct printif *printer_fns = &generic_printif; #ifdef HAVE_CUPS if ( type == PRINT_CUPS ) { printer_fns = &cups_printif; } #endif /* HAVE_CUPS */ #ifdef HAVE_IPRINT if ( type == PRINT_IPRINT ) { printer_fns = &iprint_printif; } #endif /* HAVE_IPRINT */ printer_fns->type = type; return printer_fns; } static struct printif *get_printer_fns( int snum ) { return get_printer_fns_from_type( (enum printing_types)lp_printing(snum) ); } /**************************************************************************** Useful function to generate a tdb key. ****************************************************************************/ static TDB_DATA print_key(uint32 jobid) { static uint32 j; TDB_DATA ret; SIVAL(&j, 0, jobid); ret.dptr = (char *)&j; ret.dsize = sizeof(j); return ret; } /*********************************************************************** unpack a pjob from a tdb buffer ***********************************************************************/ int unpack_pjob( char* buf, int buflen, struct printjob *pjob ) { int len = 0; int used; uint32 pjpid, pjsysjob, pjfd, pjstarttime, pjstatus; uint32 pjsize, pjpage_count, pjspooled, pjsmbjob; if ( !buf || !pjob ) return -1; len += tdb_unpack(buf+len, buflen-len, "dddddddddffff", &pjpid, &pjsysjob, &pjfd, &pjstarttime, &pjstatus, &pjsize, &pjpage_count, &pjspooled, &pjsmbjob, pjob->filename, pjob->jobname, pjob->user, pjob->queuename); if ( len == -1 ) return -1; if ( (used = unpack_devicemode(&pjob->nt_devmode, buf+len, buflen-len)) == -1 ) return -1; len += used; pjob->pid = pjpid; pjob->sysjob = pjsysjob; pjob->fd = pjfd; pjob->starttime = pjstarttime; pjob->status = pjstatus; pjob->size = pjsize; pjob->page_count = pjpage_count; pjob->spooled = 
pjspooled; pjob->smbjob = pjsmbjob; return len; } /**************************************************************************** Useful function to find a print job in the database. ****************************************************************************/ static struct printjob *print_job_find(const char *sharename, uint32 jobid) { static struct printjob pjob; TDB_DATA ret; struct tdb_print_db *pdb = get_print_db_byname(sharename); DEBUG(10,("print_job_find: looking up job %u for share %s\n", (unsigned int)jobid, sharename )); if (!pdb) { return NULL; } ret = tdb_fetch(pdb->tdb, print_key(jobid)); release_print_db(pdb); if (!ret.dptr) { DEBUG(10,("print_job_find: failed to find jobid %u.\n", (unsigned int)jobid )); return NULL; } if ( pjob.nt_devmode ) { free_nt_devicemode( &pjob.nt_devmode ); } ZERO_STRUCT( pjob ); if ( unpack_pjob( ret.dptr, ret.dsize, &pjob ) == -1 ) { DEBUG(10,("print_job_find: failed to unpack jobid %u.\n", (unsigned int)jobid )); SAFE_FREE(ret.dptr); return NULL; } SAFE_FREE(ret.dptr); DEBUG(10,("print_job_find: returning system job %d for jobid %u.\n", (int)pjob.sysjob, (unsigned int)jobid )); return &pjob; } /* Convert a unix jobid to a smb jobid */ static uint32 sysjob_to_jobid_value; static int unixjob_traverse_fn(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA data, void *state) { struct printjob *pjob; int *sysjob = (int *)state; if (!data.dptr || data.dsize == 0) return 0; pjob = (struct printjob *)data.dptr; if (key.dsize != sizeof(uint32)) return 0; if (*sysjob == pjob->sysjob) { uint32 jobid = IVAL(key.dptr,0); sysjob_to_jobid_value = jobid; return 1; } return 0; } /**************************************************************************** This is a *horribly expensive call as we have to iterate through all the current printer tdb's. Don't do this often ! JRA. 
****************************************************************************/ uint32 sysjob_to_jobid(int unix_jobid) { int services = lp_numservices(); int snum; sysjob_to_jobid_value = (uint32)-1; for (snum = 0; snum < services; snum++) { struct tdb_print_db *pdb; if (!lp_print_ok(snum)) continue; pdb = get_print_db_byname(lp_const_servicename(snum)); if (!pdb) { continue; } tdb_traverse(pdb->tdb, unixjob_traverse_fn, &unix_jobid); release_print_db(pdb); if (sysjob_to_jobid_value != (uint32)-1) return sysjob_to_jobid_value; } return (uint32)-1; } /**************************************************************************** Send notifications based on what has changed after a pjob_store. ****************************************************************************/ static struct { uint32 lpq_status; uint32 spoolss_status; } lpq_to_spoolss_status_map[] = { { LPQ_QUEUED, JOB_STATUS_QUEUED }, { LPQ_PAUSED, JOB_STATUS_PAUSED }, { LPQ_SPOOLING, JOB_STATUS_SPOOLING }, { LPQ_PRINTING, JOB_STATUS_PRINTING }, { LPQ_DELETING, JOB_STATUS_DELETING }, { LPQ_OFFLINE, JOB_STATUS_OFFLINE }, { LPQ_PAPEROUT, JOB_STATUS_PAPEROUT }, { LPQ_PRINTED, JOB_STATUS_PRINTED }, { LPQ_DELETED, JOB_STATUS_DELETED }, { LPQ_BLOCKED, JOB_STATUS_BLOCKED }, { LPQ_USER_INTERVENTION, JOB_STATUS_USER_INTERVENTION }, { -1, 0 } }; /* Convert a lpq status value stored in printing.tdb into the appropriate win32 API constant. */ static uint32 map_to_spoolss_status(uint32 lpq_status) { int i = 0; while (lpq_to_spoolss_status_map[i].lpq_status != -1) { if (lpq_to_spoolss_status_map[i].lpq_status == lpq_status) return lpq_to_spoolss_status_map[i].spoolss_status; i++; } return 0; } static void pjob_store_notify(const char* sharename, uint32 jobid, struct printjob *old_data, struct printjob *new_data) { BOOL new_job = False; if (!old_data) new_job = True; /* Job attributes that can't be changed. We only send notification for these on a new job. */ /* ACHTUNG! 
Due to a bug in Samba's spoolss parsing of the NOTIFY_INFO_DATA buffer, we *have* to send the job submission time first or else we'll end up with potential alignment errors. I don't think the systemtime should be spooled as a string, but this gets us around that error. --jerry (i'll feel dirty for this) */ if (new_job) { notify_job_submitted(sharename, jobid, new_data->starttime); notify_job_username(sharename, jobid, new_data->user); } if (new_job || !strequal(old_data->jobname, new_data->jobname)) notify_job_name(sharename, jobid, new_data->jobname); /* Job attributes of a new job or attributes that can be modified. */ if (new_job || !strequal(old_data->jobname, new_data->jobname)) notify_job_name(sharename, jobid, new_data->jobname); if (new_job || old_data->status != new_data->status) notify_job_status(sharename, jobid, map_to_spoolss_status(new_data->status)); if (new_job || old_data->size != new_data->size) notify_job_total_bytes(sharename, jobid, new_data->size); if (new_job || old_data->page_count != new_data->page_count) notify_job_total_pages(sharename, jobid, new_data->page_count); } /**************************************************************************** Store a job structure back to the database. ****************************************************************************/ static BOOL pjob_store(const char* sharename, uint32 jobid, struct printjob *pjob) { TDB_DATA old_data, new_data; BOOL ret = False; struct tdb_print_db *pdb = get_print_db_byname(sharename); char *buf = NULL; int len, newlen, buflen; if (!pdb) return False; /* Get old data */ old_data = tdb_fetch(pdb->tdb, print_key(jobid)); /* Doh! 
Now we have to pack/unpack data since the NT_DEVICEMODE was added */

	/* Pack the record in up to two passes: the first pass with a
	   zero-length buffer only measures the required size, the realloc
	   grows the buffer, and the next pass packs for real.  The loop
	   exits once the packed length equals the buffer length. */
	newlen = 0;

	do {
		len = 0;
		buflen = newlen;
		len += tdb_pack(buf+len, buflen-len, "dddddddddffff",
				(uint32)pjob->pid,
				(uint32)pjob->sysjob,
				(uint32)pjob->fd,
				(uint32)pjob->starttime,
				(uint32)pjob->status,
				(uint32)pjob->size,
				(uint32)pjob->page_count,
				(uint32)pjob->spooled,
				(uint32)pjob->smbjob,
				pjob->filename,
				pjob->jobname,
				pjob->user,
				pjob->queuename);

		len += pack_devicemode(pjob->nt_devmode, buf+len, buflen-len);

		if (buflen != len) {
			/* NOTE(review): assumes SMB_REALLOC frees the old
			   buffer on failure (Samba wrapper convention) --
			   otherwise this path would leak; confirm. */
			buf = (char *)SMB_REALLOC(buf, len);
			if (!buf) {
				DEBUG(0,("pjob_store: failed to enlarge buffer!\n"));
				goto done;
			}
			newlen = len;
		}
	} while ( buflen != len );

	/* Store new data */

	new_data.dptr = buf;
	new_data.dsize = len;
	ret = (tdb_store(pdb->tdb, print_key(jobid), new_data,
			 TDB_REPLACE) == 0);

	release_print_db(pdb);

	/* Send notify updates for what has changed */

	if ( ret ) {
		struct printjob old_pjob;

		if ( old_data.dsize )
		{
			/* Existing record: diff old vs. new and notify. */
			if ( unpack_pjob( old_data.dptr, old_data.dsize, &old_pjob ) != -1 )
			{
				pjob_store_notify( sharename, jobid, &old_pjob , pjob );
				free_nt_devicemode( &old_pjob.nt_devmode );
			}
		}
		else {
			/* new job */
			pjob_store_notify( sharename, jobid, NULL, pjob );
		}
	}

done:
	SAFE_FREE( old_data.dptr );
	SAFE_FREE( buf );

	return ret;
}

/****************************************************************************
 Remove a job structure from the database.
 Sends a deletion status notification, removes the job record from
 printing.tdb and from the 'jobs changed' list, and drops the rap mapping.
****************************************************************************/

void pjob_delete(const char* sharename, uint32 jobid)
{
	struct printjob *pjob;
	uint32 job_status = 0;
	struct tdb_print_db *pdb;

	pdb = get_print_db_byname( sharename );

	if (!pdb)
		return;

	pjob = print_job_find( sharename, jobid );

	if (!pjob) {
		DEBUG(5, ("pjob_delete: we were asked to delete nonexistent job %u\n",
					(unsigned int)jobid));
		release_print_db(pdb);
		return;
	}

	/* We must cycle through JOB_STATUS_DELETING and
	   JOB_STATUS_DELETED for the port monitor to delete the job
	   properly. */

	job_status = JOB_STATUS_DELETING|JOB_STATUS_DELETED;
	notify_job_status(sharename, jobid, job_status);

	/* Remove from printing.tdb */

	tdb_delete(pdb->tdb, print_key(jobid));
	remove_from_jobs_changed(sharename, jobid);
	release_print_db( pdb );
	rap_jobid_delete(sharename, jobid);
}

/****************************************************************************
 Parse a file name from the system spooler to generate a jobid.
 Returns (uint32)-1 when the name is not one of our spool files or the
 embedded id is not a positive integer.
****************************************************************************/

static uint32 print_parse_jobid(char *fname)
{
	int jobid;

	/* Our spool files are named PRINT_SPOOL_PREFIX<jobid>. */
	if (strncmp(fname,PRINT_SPOOL_PREFIX,strlen(PRINT_SPOOL_PREFIX)) != 0)
		return (uint32)-1;
	fname += strlen(PRINT_SPOOL_PREFIX);

	jobid = atoi(fname);
	if (jobid <= 0)
		return (uint32)-1;

	return (uint32)jobid;
}

/****************************************************************************
 List a unix job in the print database.
 A jobid of (uint32)-1 means "not one of ours"; synthesize an id above
 UNIX_JOB_START from the system job number.
****************************************************************************/

static void print_unix_job(const char *sharename, print_queue_struct *q,
			   uint32 jobid)
{
	struct printjob pj, *old_pj;

	if (jobid == (uint32)-1)
		jobid = q->job + UNIX_JOB_START;

	/* Preserve the timestamp on an existing unix print job */

	old_pj = print_job_find(sharename, jobid);

	ZERO_STRUCT(pj);

	pj.pid = (pid_t)-1;
	pj.sysjob = q->job;
	pj.fd = -1;
	pj.starttime = old_pj ? old_pj->starttime : q->time;
	pj.status = q->status;
	pj.size = q->size;
	pj.spooled = True;
	fstrcpy(pj.filename, old_pj ? old_pj->filename : "");
	if (jobid < UNIX_JOB_START) {
		/* Below UNIX_JOB_START this was originally an SMB job. */
		pj.smbjob = True;
		fstrcpy(pj.jobname, old_pj ? old_pj->jobname : "Remote Downlevel Document");
	} else {
		pj.smbjob = False;
		fstrcpy(pj.jobname, old_pj ? old_pj->jobname : q->fs_file);
	}
	fstrcpy(pj.user, old_pj ? old_pj->user : q->fs_user);
	fstrcpy(pj.queuename, old_pj ? old_pj->queuename : sharename );

	pjob_store(sharename, jobid, &pj);
}


/* State carried through the tdb traversal in traverse_fn_delete(). */
struct traverse_struct {
	print_queue_struct *queue;
	int qcount, snum, maxcount, total_jobs;
	const char *sharename;
	time_t lpq_time;
	const char *lprm_command;
	struct printif *print_if;
};

/****************************************************************************
 Utility fn to delete any jobs that are no longer active.
 Called via tdb_traverse(); returns 0 to keep traversing.
****************************************************************************/

static int traverse_fn_delete(TDB_CONTEXT *t, TDB_DATA key, TDB_DATA data, void *state)
{
	struct traverse_struct *ts = (struct traverse_struct *)state;
	struct printjob pjob;
	uint32 jobid;
	int i = 0;

	/* Skip records whose key is not a jobid (e.g. INFO/* keys). */
	if ( key.dsize != sizeof(jobid) )
		return 0;

	jobid = IVAL(key.dptr, 0);
	if ( unpack_pjob( data.dptr, data.dsize, &pjob ) == -1 )
		return 0;
	free_nt_devicemode( &pjob.nt_devmode );


	if (!pjob.smbjob) {
		/* remove a unix job if it isn't in the system queue any more */

		for (i=0;i<ts->qcount;i++) {
			uint32 u_jobid = (ts->queue[i].job + UNIX_JOB_START);
			if (jobid == u_jobid)
				break;
		}
		if (i == ts->qcount) {
			DEBUG(10,("traverse_fn_delete: pjob %u deleted due to !smbjob\n",
						(unsigned int)jobid ));
			pjob_delete(ts->sharename, jobid);
			return 0;
		}

		/* need to continue to the bottom of the function to save
		   the correct attributes */
	}

	/* maybe it hasn't been spooled yet */
	if (!pjob.spooled) {
		/* if a job is not spooled and the process doesn't
		   exist then kill it. This cleans up after smbd
		   deaths */
		if (!process_exists_by_pid(pjob.pid)) {
			DEBUG(10,("traverse_fn_delete: pjob %u deleted due to !process_exists (%u)\n",
						(unsigned int)jobid, (unsigned int)pjob.pid ));
			pjob_delete(ts->sharename, jobid);
		} else
			ts->total_jobs++;
		return 0;
	}

	/* this check only makes sense for jobs submitted from Windows clients */

	if ( pjob.smbjob ) {
		for (i=0;i<ts->qcount;i++) {
			uint32 curr_jobid;

			if ( pjob.status == LPQ_DELETED )
				continue;

			curr_jobid = print_parse_jobid(ts->queue[i].fs_file);

			if (jobid == curr_jobid) {

				/* try to clean up any jobs that need to be deleted */

				if ( pjob.status == LPQ_DELETING ) {
					int result;

					result = (*(ts->print_if->job_delete))(
						ts->sharename, ts->lprm_command, &pjob );

					if ( result != 0 ) {
						/* if we can't delete, then reset the job status */
						pjob.status = LPQ_QUEUED;
						pjob_store(ts->sharename, jobid, &pjob);
					}
					else {
						/* if we deleted the job, then remove the tdb record */
						pjob_delete(ts->sharename, jobid);
						pjob.status = LPQ_DELETED;
					}

				}

				break;
			}
		}
	}

	/* The job isn't in the system queue - we have to assume it has
	   completed, so delete the database entry. */

	if (i == ts->qcount) {

		/* A race can occur between the time a job is spooled and
		   when it appears in the lpq output.  This happens when
		   the job is added to printing.tdb when another smbd
		   running print_queue_update() has completed a lpq and
		   is currently traversing the printing tdb and deleting jobs.
		   Don't delete the job if it was submitted after the lpq_time. */

		if (pjob.starttime < ts->lpq_time) {
			DEBUG(10,("traverse_fn_delete: pjob %u deleted due to pjob.starttime (%u) < ts->lpq_time (%u)\n",
						(unsigned int)jobid,
						(unsigned int)pjob.starttime,
						(unsigned int)ts->lpq_time ));
			pjob_delete(ts->sharename, jobid);
		} else
			ts->total_jobs++;
		return 0;
	}

	/* Save the pjob attributes we will store.
	   FIXME!!! This is the only place where queue->job
	   represents the SMB jobid      --jerry */

	ts->queue[i].job = jobid;
	ts->queue[i].size = pjob.size;
	ts->queue[i].page_count = pjob.page_count;
	ts->queue[i].status = pjob.status;
	ts->queue[i].priority = 1;
	ts->queue[i].time = pjob.starttime;
	fstrcpy(ts->queue[i].fs_user, pjob.user);
	fstrcpy(ts->queue[i].fs_file, pjob.jobname);

	ts->total_jobs++;

	return 0;
}

/****************************************************************************
 Invalidate (flush) the print queue cache for a share by storing a stale
 timestamp; print_cache_expired() treats -1 as "cache invalid".
****************************************************************************/

static void print_cache_flush(const char *sharename)
{
	fstring key;
	struct tdb_print_db *pdb = get_print_db_byname(sharename);

	if (!pdb)
		return;
	slprintf(key, sizeof(key)-1, "CACHE/%s", sharename);
	tdb_store_int32(pdb->tdb, key, -1);
	release_print_db(pdb);
}

/****************************************************************************
 Check if someone already thinks they are doing the update.
 Returns the updating pid if it exists, otherwise (pid_t)-1.
****************************************************************************/

static pid_t get_updating_pid(const char *sharename)
{
	fstring keystr;
	TDB_DATA data, key;
	pid_t updating_pid;
	struct tdb_print_db *pdb = get_print_db_byname(sharename);

	if (!pdb)
		return (pid_t)-1;
	slprintf(keystr, sizeof(keystr)-1, "UPDATING/%s", sharename);
	key.dptr = keystr;
	key.dsize = strlen(keystr);

	data = tdb_fetch(pdb->tdb, key);
	release_print_db(pdb);
	if (!data.dptr || data.dsize != sizeof(pid_t)) {
		SAFE_FREE(data.dptr);
		return (pid_t)-1;
	}

	/* NOTE(review): IVAL reads 4 bytes but the size check above is
	   against sizeof(pid_t); set_updating_pid() always writes 4 bytes,
	   so this would reject records on platforms with 64-bit pid_t --
	   confirm pid_t is 4 bytes everywhere this runs. */
	updating_pid = IVAL(data.dptr, 0);
	SAFE_FREE(data.dptr);

	if (process_exists_by_pid(updating_pid))
		return updating_pid;

	return (pid_t)-1;
}

/****************************************************************************
 Set the fact that we're doing the update, or have finished doing the update
 in the tdb.
****************************************************************************/

static void set_updating_pid(const fstring sharename, BOOL updating)
{
	fstring keystr;
	TDB_DATA key;
	TDB_DATA data;
	pid_t updating_pid = sys_getpid();
	uint8 buffer[4];

	struct tdb_print_db *pdb = get_print_db_byname(sharename);

	if (!pdb)
		return;

	slprintf(keystr, sizeof(keystr)-1, "UPDATING/%s", sharename);
	key.dptr = keystr;
	key.dsize = strlen(keystr);

	DEBUG(5, ("set_updating_pid: %s updating lpq cache for print share %s\n",
		updating ? "" : "not ",
		sharename ));

	if ( !updating ) {
		/* Finished updating: remove the marker record entirely. */
		tdb_delete(pdb->tdb, key);
		release_print_db(pdb);
		return;
	}

	SIVAL( buffer, 0, updating_pid);
	data.dptr = (char *)buffer;
	data.dsize = 4;		/* we always assume this is a 4 byte value */

	tdb_store(pdb->tdb, key, data, TDB_REPLACE);
	release_print_db(pdb);
}

/****************************************************************************
 Sort print jobs by submittal time.  qsort()-style comparator; NULL entries
 sort before non-NULL ones.
****************************************************************************/

static int printjob_comp(print_queue_struct *j1, print_queue_struct *j2)
{
	/* Silly cases */

	if (!j1 && !j2)
		return 0;
	if (!j1)
		return -1;
	if (!j2)
		return 1;

	/* Sort on job start time */

	if (j1->time == j2->time)
		return 0;
	return (j1->time > j2->time) ? 1 : -1;
}

/****************************************************************************
 Store the sorted queue representation for later portmon retrieval.
 Skip deleted jobs.  Uses the usual two-pass tdb_pack scheme: first pass
 with a NULL buffer computes the size, second pass packs for real.
****************************************************************************/

static void store_queue_struct(struct tdb_print_db *pdb, struct traverse_struct *pts)
{
	TDB_DATA data;
	int max_reported_jobs = lp_max_reported_jobs(pts->snum);
	print_queue_struct *queue = pts->queue;
	size_t len;
	size_t i;
	uint qcount;

	/* Honour the "max reported jobs" smb.conf limit. */
	if (max_reported_jobs && (max_reported_jobs < pts->qcount))
		pts->qcount = max_reported_jobs;
	qcount = 0;

	/* Work out the size. */
	data.dsize = 0;
	data.dsize += tdb_pack(NULL, 0, "d", qcount);

	for (i = 0; i < pts->qcount; i++) {
		if ( queue[i].status == LPQ_DELETED )
			continue;

		qcount++;
		data.dsize += tdb_pack(NULL, 0, "ddddddff",
				(uint32)queue[i].job,
				(uint32)queue[i].size,
				(uint32)queue[i].page_count,
				(uint32)queue[i].status,
				(uint32)queue[i].priority,
				(uint32)queue[i].time,
				queue[i].fs_user,
				queue[i].fs_file);
	}

	if ((data.dptr = (char *)SMB_MALLOC(data.dsize)) == NULL)
		return;

	len = 0;
	len += tdb_pack(data.dptr + len, data.dsize - len, "d", qcount);
	for (i = 0; i < pts->qcount; i++) {
		if ( queue[i].status == LPQ_DELETED )
			continue;

		len += tdb_pack(data.dptr + len, data.dsize - len, "ddddddff",
				(uint32)queue[i].job,
				(uint32)queue[i].size,
				(uint32)queue[i].page_count,
				(uint32)queue[i].status,
				(uint32)queue[i].priority,
				(uint32)queue[i].time,
				queue[i].fs_user,
				queue[i].fs_file);
	}

	tdb_store(pdb->tdb, string_tdb_data("INFO/linear_queue_array"), data,
		  TDB_REPLACE);
	SAFE_FREE(data.dptr);
	return;
}

/* Fetch the INFO/jobs_changed list (array of 4-byte jobids).  Returns a
   zeroed TDB_DATA on any malformed or missing record; caller frees dptr. */
static TDB_DATA get_jobs_changed_data(struct tdb_print_db *pdb)
{
	TDB_DATA data;

	ZERO_STRUCT(data);

	data = tdb_fetch(pdb->tdb, string_tdb_data("INFO/jobs_changed"));
	if (data.dptr == NULL || data.dsize == 0 || (data.dsize % 4 != 0)) {
		SAFE_FREE(data.dptr);
		ZERO_STRUCT(data);
	}

	return data;
}

/* If jobid appears in the fetched jobs_changed list, drop it from the
   on-disk list now that it has been reconciled with the system queue. */
static void check_job_changed(const char *sharename, TDB_DATA data, uint32 jobid)
{
	unsigned int i;
	unsigned int job_count = data.dsize / 4;

	for (i = 0; i < job_count; i++) {
		uint32 ch_jobid;

		ch_jobid = IVAL(data.dptr, i*4);
		if (ch_jobid == jobid)
			remove_from_jobs_changed(sharename, jobid);
	}
}

/****************************************************************************
 Check if the print queue has been updated recently enough.
****************************************************************************/

static BOOL print_cache_expired(const char *sharename, BOOL check_pending)
{
	fstring key;
	time_t last_qscan_time, time_now = time(NULL);
	struct tdb_print_db *pdb = get_print_db_byname(sharename);
	BOOL result = False;

	if (!pdb)
		return False;

	snprintf(key, sizeof(key), "CACHE/%s", sharename);
	last_qscan_time = (time_t)tdb_fetch_int32(pdb->tdb, key);

	/*
	 * Invalidate the queue for 3 reasons.
	 * (1). last queue scan time == -1.
	 * (2). Current time - last queue scan time > allowed cache time.
	 * (3). last queue scan time > current time + MAX_CACHE_VALID_TIME (1 hour by default).
	 * This last test picks up machines for which the clock has been moved
	 * forward, an lpq scan done and then the clock moved back. Otherwise
	 * that last lpq scan would stay around for a loooong loooong time... :-). JRA.
	 */

	if (last_qscan_time == ((time_t)-1)
		|| (time_now - last_qscan_time) >= lp_lpqcachetime()
		|| last_qscan_time > (time_now + MAX_CACHE_VALID_TIME))
	{
		uint32 u;
		time_t msg_pending_time;

		DEBUG(4, ("print_cache_expired: cache expired for queue %s "
			"(last_qscan_time = %d, time now = %d, qcachetime = %d)\n",
			sharename, (int)last_qscan_time, (int)time_now,
			(int)lp_lpqcachetime() ));

		/* check if another smbd has already sent a message to update the
		   queue.  Give the pending message one minute to clear and
		   then send another message anyways.  Make sure to check for
		   clocks that have been run forward and then back again. */

		snprintf(key, sizeof(key), "MSG_PENDING/%s", sharename);

		/* Note: the assignment (msg_pending_time=u) inside the
		   condition is deliberate. */
		if ( check_pending
			&& tdb_fetch_uint32( pdb->tdb, key, &u )
			&& (msg_pending_time=u) > 0
			&& msg_pending_time <= time_now
			&& (time_now - msg_pending_time) < 60 )
		{
			DEBUG(4,("print_cache_expired: message already pending for %s. Accepting cache\n",
				sharename));
			goto done;
		}

		result = True;
	}

done:
	release_print_db(pdb);
	return result;
}

/****************************************************************************
 main work for updating the lpq cache for a printer queue
****************************************************************************/

static void print_queue_update_internal( const char *sharename,
					 struct printif *current_printif,
					 char *lpq_command, char *lprm_command )
{
	int i, qcount;
	print_queue_struct *queue = NULL;
	print_status_struct status;
	print_status_struct old_status;
	struct printjob *pjob;
	struct traverse_struct tstruct;
	TDB_DATA data, key;
	TDB_DATA jcdata;
	fstring keystr, cachestr;
	struct tdb_print_db *pdb = get_print_db_byname(sharename);

	if (!pdb) {
		return;
	}

	DEBUG(5,("print_queue_update_internal: printer = %s, type = %d, lpq command = [%s]\n",
		sharename, current_printif->type, lpq_command));

	/*
	 * Update the cache time FIRST ! Stops others even
	 * attempting to get the lock and doing this
	 * if the lpq takes a long time.
	 */

	slprintf(cachestr, sizeof(cachestr)-1, "CACHE/%s", sharename);
	tdb_store_int32(pdb->tdb, cachestr, (int)time(NULL));

	/* get the current queue using the appropriate interface */
	ZERO_STRUCT(status);

	qcount = (*(current_printif->queue_get))(sharename,
		current_printif->type,
		lpq_command, &queue, &status);

	DEBUG(3, ("print_queue_update_internal: %d job%s in queue for %s\n",
		qcount, (qcount != 1) ? "s" : "", sharename));

	/* Sort the queue by submission time otherwise they are displayed
	   in hash order. */

	qsort(queue, qcount, sizeof(print_queue_struct),
	      QSORT_CAST(printjob_comp));

	/*
	  any job in the internal database that is marked as spooled
	  and doesn't exist in the system queue is considered finished
	  and removed from the database

	  any job in the system database but not in the internal database
	  is added as a unix job

	  fill in any system job numbers as we go
	*/

	jcdata = get_jobs_changed_data(pdb);

	for (i=0; i<qcount; i++) {
		uint32 jobid = print_parse_jobid(queue[i].fs_file);

		if (jobid == (uint32)-1) {
			/* assume its a unix print job */
			print_unix_job(sharename, &queue[i], jobid);
			continue;
		}

		/* we have an active SMB print job - update its status */
		pjob = print_job_find(sharename, jobid);
		if (!pjob) {
			/* err, somethings wrong. Probably smbd was restarted
			   with jobs in the queue. All we can do is treat them
			   like unix jobs. Pity. */
			print_unix_job(sharename, &queue[i], jobid);
			continue;
		}

		pjob->sysjob = queue[i].job;

		/* don't reset the status on jobs to be deleted */

		if ( pjob->status != LPQ_DELETING )
			pjob->status = queue[i].status;

		pjob_store(sharename, jobid, pjob);

		check_job_changed(sharename, jcdata, jobid);
	}

	SAFE_FREE(jcdata.dptr);

	/* now delete any queued entries that don't appear in the
	   system queue */
	tstruct.queue = queue;
	tstruct.qcount = qcount;
	tstruct.snum = -1;
	tstruct.total_jobs = 0;
	tstruct.lpq_time = time(NULL);
	tstruct.sharename = sharename;
	tstruct.lprm_command = lprm_command;
	tstruct.print_if = current_printif;

	tdb_traverse(pdb->tdb, traverse_fn_delete, (void *)&tstruct);

	/* Store the linearised queue, max jobs only. */
	store_queue_struct(pdb, &tstruct);

	SAFE_FREE(tstruct.queue);

	DEBUG(10,("print_queue_update_internal: printer %s INFO/total_jobs = %d\n",
				sharename, tstruct.total_jobs ));

	tdb_store_int32(pdb->tdb, "INFO/total_jobs", tstruct.total_jobs);

	get_queue_status(sharename, &old_status);
	if (old_status.qcount != qcount)
		DEBUG(10,("print_queue_update_internal: queue status change %d jobs -> %d jobs for printer %s\n",
					old_status.qcount, qcount, sharename));

	/* store the new queue status structure */
	slprintf(keystr, sizeof(keystr)-1, "STATUS/%s", sharename);
	key.dptr = keystr;
	key.dsize = strlen(keystr);

	status.qcount = qcount;
	data.dptr = (char *)&status;
	data.dsize = sizeof(status);
	tdb_store(pdb->tdb, key, data, TDB_REPLACE);

	/*
	 * Update the cache time again. We want to do this call
	 * as little as possible...
	 */

	slprintf(keystr, sizeof(keystr)-1, "CACHE/%s", sharename);
	tdb_store_int32(pdb->tdb, keystr, (int32)time(NULL));

	/* clear the msg pending record for this queue */

	snprintf(keystr, sizeof(keystr), "MSG_PENDING/%s", sharename);

	if ( !tdb_store_uint32( pdb->tdb, keystr, 0 ) ) {
		/* log a message but continue on */

		DEBUG(0,("print_queue_update: failed to store MSG_PENDING flag for [%s]!\n",
			sharename));
	}

	release_print_db( pdb );

	return;
}

/****************************************************************************
 Update the internal database from the system print queue for a queue.
 Obtain a lock on the print queue before proceeding (needed when multiple
 smbd processes may try to update the lpq cache concurrently).
****************************************************************************/

static void print_queue_update_with_lock( const char *sharename,
					  struct printif *current_printif,
					  char *lpq_command, char *lprm_command )
{
	fstring keystr;
	struct tdb_print_db *pdb;

	DEBUG(5,("print_queue_update_with_lock: printer share = %s\n", sharename));
	pdb = get_print_db_byname(sharename);
	if (!pdb)
		return;

	if ( !print_cache_expired(sharename, False) ) {
		DEBUG(5,("print_queue_update_with_lock: print cache for %s is still ok\n", sharename));
		release_print_db(pdb);
		return;
	}

	/*
	 * Check to see if someone else is doing this update.
	 * This is essentially a mutex on the update.
	 */

	if (get_updating_pid(sharename) != -1) {
		release_print_db(pdb);
		return;
	}

	/* Lock the queue for the database update */

	slprintf(keystr, sizeof(keystr) - 1, "LOCK/%s", sharename);
	/* Only wait 10 seconds for this. */
	if (tdb_lock_bystring_with_timeout(pdb->tdb, keystr, 10) == -1) {
		DEBUG(0,("print_queue_update_with_lock: Failed to lock printer %s database\n", sharename));
		release_print_db(pdb);
		return;
	}

	/*
	 * Ensure that no one else got in here.
	 * If the updating pid is still -1 then we are
	 * the winner.
	 */

	if (get_updating_pid(sharename) != -1) {
		/*
		 * Someone else is doing the update, exit.
		 */
		tdb_unlock_bystring(pdb->tdb, keystr);
		release_print_db(pdb);
		return;
	}

	/*
	 * We're going to do the update ourselves.
	 */

	/* Tell others we're doing the update. */
	set_updating_pid(sharename, True);

	/*
	 * Allow others to enter and notice we're doing
	 * the update.
	 */

	tdb_unlock_bystring(pdb->tdb, keystr);

	/* do the main work now */

	print_queue_update_internal( sharename, current_printif,
		lpq_command, lprm_command );

	/* Delete our pid from the db. */
	set_updating_pid(sharename, False);
	release_print_db(pdb);
}

/****************************************************************************
 this is the receive function of the background lpq updater
 Unpacks a queue-update request (share, printing type, lpq/lprm commands)
 and performs the update under the queue lock.
****************************************************************************/

static void print_queue_receive(int msg_type, struct process_id src,
				void *buf, size_t msglen,
				void *private_data)
{
	fstring sharename;
	pstring lpqcommand, lprmcommand;
	int printing_type;
	size_t len;

	len = tdb_unpack( (char *)buf, msglen, "fdPP",
		sharename,
		&printing_type,
		lpqcommand,
		lprmcommand );

	/* NOTE(review): len is size_t, so this relies on -1 converting to
	   SIZE_MAX for the comparison; it works, but an explicit
	   (size_t)-1 would be clearer. */
	if ( len == -1 ) {
		DEBUG(0,("print_queue_receive: Got invalid print queue update message\n"));
		return;
	}

	print_queue_update_with_lock(sharename,
		get_printer_fns_from_type((enum printing_types)printing_type),
		lpqcommand, lprmcommand );

	return;
}

static pid_t background_lpq_updater_pid = -1;

/****************************************************************************
 main thread of the background lpq updater
 Forks a child that loops forever dispatching MSG_PRINTER_UPDATE messages;
 the parent records the child's pid in background_lpq_updater_pid.
****************************************************************************/
void start_background_queue(void)
{
	DEBUG(3,("start_background_queue: Starting background LPQ thread\n"));
	background_lpq_updater_pid = sys_fork();

	if (background_lpq_updater_pid == -1) {
		DEBUG(5,("start_background_queue: background LPQ thread failed to start. %s\n", strerror(errno) ));
		exit(1);
	}

	if(background_lpq_updater_pid == 0) {
		/* Child. */
		DEBUG(5,("start_background_queue: background LPQ thread started\n"));

		claim_connection( NULL, "smbd lpq backend", 0, False,
			FLAG_MSG_GENERAL|FLAG_MSG_SMBD|FLAG_MSG_PRINT_GENERAL);

		if (!locking_init(0)) {
			exit(1);
		}

		message_register(MSG_PRINTER_UPDATE, print_queue_receive, NULL);

		DEBUG(5,("start_background_queue: background LPQ thread waiting for messages\n"));
		while (1) {
			/* Sleep until any signal (e.g. message notification)
			   arrives. */
			pause();

			/* check for some essential signals first */

			if (got_sig_term) {
				exit_server_cleanly(NULL);
			}

			if (reload_after_sighup) {
				change_to_root_user();
				DEBUG(1,("Reloading services after SIGHUP\n"));
				reload_services(False);
				reload_after_sighup = 0;
			}

			/* now check for messages */

			DEBUG(10,("start_background_queue: background LPQ thread got a message\n"));
			message_dispatch();

			/* process any pending print change notify messages */

			print_notify_send_messages(0);
		}
	}
}

/****************************************************************************
 update the internal database from the system print queue for a queue
 Either performs the update directly (force, or no background updater) or
 packs a request and sends it to the background updater process.
****************************************************************************/

static void print_queue_update(int snum, BOOL force)
{
	fstring key;
	fstring sharename;
	pstring lpqcommand, lprmcommand;
	char *buffer = NULL;
	size_t len = 0;
	size_t newlen;
	struct tdb_print_db *pdb;
	int type;
	struct printif *current_printif;

	fstrcpy( sharename, lp_const_servicename(snum));

	/* don't strip out characters like '$' from the printername */

	pstrcpy( lpqcommand, lp_lpqcommand(snum));
	string_sub2( lpqcommand, "%p", PRINTERNAME(snum), sizeof(lpqcommand),
		     False, False, False );
	standard_sub_advanced(lp_servicename(snum),
			      current_user_info.unix_name, "",
			      current_user.ut.gid,
			      get_current_username(),
			      current_user_info.domain,
			      lpqcommand, sizeof(lpqcommand) );

	pstrcpy( lprmcommand, lp_lprmcommand(snum));
	string_sub2( lprmcommand, "%p", PRINTERNAME(snum), sizeof(lprmcommand),
		     False, False, False );
	standard_sub_advanced(lp_servicename(snum),
			      current_user_info.unix_name, "",
			      current_user.ut.gid,
			      get_current_username(),
			      current_user_info.domain,
			      lprmcommand, sizeof(lprmcommand) );

	/*
	 * Make sure that the background queue process exists.
	 * Otherwise just do the update ourselves
	 */

	if ( force || background_lpq_updater_pid == -1 ) {
		DEBUG(4,("print_queue_update: updating queue [%s] myself\n", sharename));
		current_printif = get_printer_fns( snum );
		print_queue_update_with_lock( sharename, current_printif, lpqcommand, lprmcommand );

		return;
	}

	type = lp_printing(snum);

	/* get the length */

	len = tdb_pack( NULL, 0, "fdPP",
		sharename,
		type,
		lpqcommand,
		lprmcommand );

	buffer = SMB_XMALLOC_ARRAY( char, len );

	/* now pack the buffer */
	newlen = tdb_pack( buffer, len, "fdPP",
		sharename,
		type,
		lpqcommand,
		lprmcommand );

	SMB_ASSERT( newlen == len );

	DEBUG(10,("print_queue_update: Sending message -> printer = %s, "
		"type = %d, lpq command = [%s] lprm command = [%s]\n",
		sharename, type, lpqcommand, lprmcommand ));

	/* here we set a msg pending record for other smbd processes
	   to throttle the number of duplicate print_queue_update msgs
	   sent.  */

	pdb = get_print_db_byname(sharename);
	if (!pdb) {
		SAFE_FREE(buffer);
		return;
	}

	snprintf(key, sizeof(key), "MSG_PENDING/%s", sharename);

	if ( !tdb_store_uint32( pdb->tdb, key, time(NULL) ) ) {
		/* log a message but continue on */

		DEBUG(0,("print_queue_update: failed to store MSG_PENDING flag for [%s]!\n",
			sharename));
	}

	release_print_db( pdb );

	/* finally send the message */

	message_send_pid(pid_to_procid(background_lpq_updater_pid),
		 MSG_PRINTER_UPDATE, buffer, len, False);

	SAFE_FREE( buffer );
	return;
}

/****************************************************************************
 Create/Update an entry in the print tdb that will allow us to send notify
 updates only to interested smbd's.
****************************************************************************/ BOOL print_notify_register_pid(int snum) { TDB_DATA data; struct tdb_print_db *pdb = NULL; TDB_CONTEXT *tdb = NULL; const char *printername; uint32 mypid = (uint32)sys_getpid(); BOOL ret = False; size_t i; /* if (snum == -1), then the change notify request was on a print server handle and we need to register on all print queus */ if (snum == -1) { int num_services = lp_numservices(); int idx; for ( idx=0; idx<num_services; idx++ ) { if (lp_snum_ok(idx) && lp_print_ok(idx) ) print_notify_register_pid(idx); } return True; } else /* register for a specific printer */ { printername = lp_const_servicename(snum); pdb = get_print_db_byname(printername); if (!pdb) return False; tdb = pdb->tdb; } if (tdb_lock_bystring_with_timeout(tdb, NOTIFY_PID_LIST_KEY, 10) == -1) { DEBUG(0,("print_notify_register_pid: Failed to lock printer %s\n", printername)); if (pdb) release_print_db(pdb); return False; } data = get_printer_notify_pid_list( tdb, printername, True ); /* Add ourselves and increase the refcount. */ for (i = 0; i < data.dsize; i += 8) { if (IVAL(data.dptr,i) == mypid) { uint32 new_refcount = IVAL(data.dptr, i+4) + 1; SIVAL(data.dptr, i+4, new_refcount); break; } } if (i == data.dsize) { /* We weren't in the list. Realloc. */ data.dptr = (char *)SMB_REALLOC(data.dptr, data.dsize + 8); if (!data.dptr) { DEBUG(0,("print_notify_register_pid: Relloc fail for printer %s\n", printername)); goto done; } data.dsize += 8; SIVAL(data.dptr,data.dsize - 8,mypid); SIVAL(data.dptr,data.dsize - 4,1); /* Refcount. */ } /* Store back the record. 
*/ if (tdb_store_bystring(tdb, NOTIFY_PID_LIST_KEY, data, TDB_REPLACE) == -1) { DEBUG(0,("print_notify_register_pid: Failed to update pid \ list for printer %s\n", printername)); goto done; } ret = True; done: tdb_unlock_bystring(tdb, NOTIFY_PID_LIST_KEY); if (pdb) release_print_db(pdb); SAFE_FREE(data.dptr); return ret; } /**************************************************************************** Update an entry in the print tdb that will allow us to send notify updates only to interested smbd's. ****************************************************************************/ BOOL print_notify_deregister_pid(int snum) { TDB_DATA data; struct tdb_print_db *pdb = NULL; TDB_CONTEXT *tdb = NULL; const char *printername; uint32 mypid = (uint32)sys_getpid(); size_t i; BOOL ret = False; /* if ( snum == -1 ), we are deregister a print server handle which means to deregister on all print queues */ if (snum == -1) { int num_services = lp_numservices(); int idx; for ( idx=0; idx<num_services; idx++ ) { if ( lp_snum_ok(idx) && lp_print_ok(idx) ) print_notify_deregister_pid(idx); } return True; } else /* deregister a specific printer */ { printername = lp_const_servicename(snum); pdb = get_print_db_byname(printername); if (!pdb) return False; tdb = pdb->tdb; } if (tdb_lock_bystring_with_timeout(tdb, NOTIFY_PID_LIST_KEY, 10) == -1) { DEBUG(0,("print_notify_register_pid: Failed to lock \ printer %s database\n", printername)); if (pdb) release_print_db(pdb); return False; } data = get_printer_notify_pid_list( tdb, printername, True ); /* Reduce refcount. Remove ourselves if zero. */ for (i = 0; i < data.dsize; ) { if (IVAL(data.dptr,i) == mypid) { uint32 refcount = IVAL(data.dptr, i+4); refcount--; if (refcount == 0) { if (data.dsize - i > 8) memmove( &data.dptr[i], &data.dptr[i+8], data.dsize - i - 8); data.dsize -= 8; continue; } SIVAL(data.dptr, i+4, refcount); } i += 8; } if (data.dsize == 0) SAFE_FREE(data.dptr); /* Store back the record. 
*/ if (tdb_store_bystring(tdb, NOTIFY_PID_LIST_KEY, data, TDB_REPLACE) == -1) { DEBUG(0,("print_notify_register_pid: Failed to update pid \ list for printer %s\n", printername)); goto done; } ret = True; done: tdb_unlock_bystring(tdb, NOTIFY_PID_LIST_KEY); if (pdb) release_print_db(pdb); SAFE_FREE(data.dptr); return ret; } /**************************************************************************** Check if a jobid is valid. It is valid if it exists in the database. ****************************************************************************/ BOOL print_job_exists(const char* sharename, uint32 jobid) { struct tdb_print_db *pdb = get_print_db_byname(sharename); BOOL ret; if (!pdb) return False; ret = tdb_exists(pdb->tdb, print_key(jobid)); release_print_db(pdb); return ret; } /**************************************************************************** Give the fd used for a jobid. ****************************************************************************/ int print_job_fd(const char* sharename, uint32 jobid) { struct printjob *pjob = print_job_find(sharename, jobid); if (!pjob) return -1; /* don't allow another process to get this info - it is meaningless */ if (pjob->pid != sys_getpid()) return -1; return pjob->fd; } /**************************************************************************** Give the filename used for a jobid. Only valid for the process doing the spooling and when the job has not been spooled. ****************************************************************************/ char *print_job_fname(const char* sharename, uint32 jobid) { struct printjob *pjob = print_job_find(sharename, jobid); if (!pjob || pjob->spooled || pjob->pid != sys_getpid()) return NULL; return pjob->filename; } /**************************************************************************** Give the filename used for a jobid. Only valid for the process doing the spooling and when the job has not been spooled. 
****************************************************************************/ NT_DEVICEMODE *print_job_devmode(const char* sharename, uint32 jobid) { struct printjob *pjob = print_job_find(sharename, jobid); if ( !pjob ) return NULL; return pjob->nt_devmode; } /**************************************************************************** Set the place in the queue for a job. ****************************************************************************/ BOOL print_job_set_place(const char *sharename, uint32 jobid, int place) { DEBUG(2,("print_job_set_place not implemented yet\n")); return False; } /**************************************************************************** Set the name of a job. Only possible for owner. ****************************************************************************/ BOOL print_job_set_name(const char *sharename, uint32 jobid, char *name) { struct printjob *pjob; pjob = print_job_find(sharename, jobid); if (!pjob || pjob->pid != sys_getpid()) return False; fstrcpy(pjob->jobname, name); return pjob_store(sharename, jobid, pjob); } /*************************************************************************** Remove a jobid from the 'jobs changed' list. 
***************************************************************************/

static BOOL remove_from_jobs_changed(const char* sharename, uint32 jobid)
{
	struct tdb_print_db *pdb = get_print_db_byname(sharename);
	TDB_DATA data, key;
	size_t job_count, i;
	BOOL ret = False;
	BOOL gotlock = False;

	if (!pdb) {
		return False;
	}

	ZERO_STRUCT(data);

	key = string_tdb_data("INFO/jobs_changed");

	/* Serialise against other writers of the jobs_changed list. */
	if (tdb_chainlock_with_timeout(pdb->tdb, key, 5) == -1)
		goto out;

	gotlock = True;

	data = tdb_fetch(pdb->tdb, key);

	if (data.dptr == NULL || data.dsize == 0 || (data.dsize % 4 != 0))
		goto out;

	job_count = data.dsize / 4;
	for (i = 0; i < job_count; i++) {
		uint32 ch_jobid;

		ch_jobid = IVAL(data.dptr, i*4);
		if (ch_jobid == jobid) {
			/* Splice the 4-byte entry out of the array and
			   store the shortened record back. */
			if (i < job_count -1 )
				memmove(data.dptr + (i*4),
					data.dptr + (i*4) + 4,
					(job_count - i - 1)*4 );
			data.dsize -= 4;
			if (tdb_store(pdb->tdb, key, data, TDB_REPLACE) == -1)
				goto out;
			break;
		}
	}

	ret = True;
  out:

	if (gotlock)
		tdb_chainunlock(pdb->tdb, key);
	SAFE_FREE(data.dptr);
	release_print_db(pdb);
	if (ret)
		DEBUG(10,("remove_from_jobs_changed: removed jobid %u\n", (unsigned int)jobid ));
	else
		DEBUG(10,("remove_from_jobs_changed: Failed to remove jobid %u\n", (unsigned int)jobid ));
	return ret;
}

/****************************************************************************
 Delete a print job - don't update queue.
****************************************************************************/

static BOOL print_job_delete1(int snum, uint32 jobid)
{
	const char* sharename = lp_const_servicename(snum);
	struct printjob *pjob = print_job_find(sharename, jobid);
	int result = 0;
	struct printif *current_printif = get_printer_fns( snum );

	if (!pjob)
		return False;

	/*
	 * If already deleting just return.
	 */

	if (pjob->status == LPQ_DELETING)
		return True;

	/* Hrm - we need to be able to cope with deleting a job before it
	   has reached the spooler.  Just mark it as LPQ_DELETING and
	   let the print_queue_update() code remove the record */

	if (pjob->sysjob == -1) {
		DEBUG(5, ("attempt to delete job %u not seen by lpr\n", (unsigned int)jobid));
	}

	/* Set the tdb entry to be deleting. */

	pjob->status = LPQ_DELETING;
	pjob_store(sharename, jobid, pjob);

	if (pjob->spooled && pjob->sysjob != -1) {
		result = (*(current_printif->job_delete))(
			PRINTERNAME(snum),
			lp_lprmcommand(snum),
			pjob);

		/* Delete the tdb entry if the delete succeeded or the job hasn't
		   been spooled. */

		if (result == 0) {
			struct tdb_print_db *pdb = get_print_db_byname(sharename);
			int njobs = 1;

			if (!pdb)
				return False;
			pjob_delete(sharename, jobid);
			/* Ensure we keep a rough count of the number of total jobs... */
			tdb_change_int32_atomic(pdb->tdb, "INFO/total_jobs", &njobs, -1);
			release_print_db(pdb);
		}
	}

	remove_from_jobs_changed( sharename, jobid );

	return (result == 0);
}

/****************************************************************************
 Return true if the current user owns the print job.
****************************************************************************/

static BOOL is_owner(struct current_user *user, const char *servicename,
		     uint32 jobid)
{
	struct printjob *pjob = print_job_find(servicename, jobid);
	user_struct *vuser;

	if (!pjob || !user)
		return False;

	/* Prefer the SMB session name; fall back to the unix uid name. */
	if ((vuser = get_valid_user_struct(user->vuid)) != NULL) {
		return strequal(pjob->user, vuser->user.smb_name);
	} else {
		return strequal(pjob->user, uidtoname(user->ut.uid));
	}
}

/****************************************************************************
 Delete a print job.
****************************************************************************/ BOOL print_job_delete(struct current_user *user, int snum, uint32 jobid, WERROR *errcode) { const char* sharename = lp_const_servicename( snum ); struct printjob *pjob; BOOL owner; char *fname; *errcode = WERR_OK; owner = is_owner(user, lp_const_servicename(snum), jobid); /* Check access against security descriptor or whether the user owns their job. */ if (!owner && !print_access_check(user, snum, JOB_ACCESS_ADMINISTER)) { DEBUG(3, ("delete denied by security descriptor\n")); *errcode = WERR_ACCESS_DENIED; /* BEGIN_ADMIN_LOG */ sys_adminlog( LOG_ERR, "Permission denied-- user not allowed to delete, \ pause, or resume print job. User name: %s. Printer name: %s.", uidtoname(user->ut.uid), PRINTERNAME(snum) ); /* END_ADMIN_LOG */ return False; } /* * get the spooled filename of the print job * if this works, then the file has not been spooled * to the underlying print system. Just delete the * spool file & return. */ if ( (fname = print_job_fname( sharename, jobid )) != NULL ) { /* remove the spool file */ DEBUG(10,("print_job_delete: Removing spool file [%s]\n", fname )); if ( unlink( fname ) == -1 ) { *errcode = map_werror_from_unix(errno); return False; } } if (!print_job_delete1(snum, jobid)) { *errcode = WERR_ACCESS_DENIED; return False; } /* force update the database and say the delete failed if the job still exists */ print_queue_update(snum, True); pjob = print_job_find(sharename, jobid); if ( pjob && (pjob->status != LPQ_DELETING) ) *errcode = WERR_ACCESS_DENIED; return (pjob == NULL ); } /**************************************************************************** Pause a job. 
****************************************************************************/ BOOL print_job_pause(struct current_user *user, int snum, uint32 jobid, WERROR *errcode) { const char* sharename = lp_const_servicename(snum); struct printjob *pjob; int ret = -1; struct printif *current_printif = get_printer_fns( snum ); pjob = print_job_find(sharename, jobid); if (!pjob || !user) { DEBUG(10, ("print_job_pause: no pjob or user for jobid %u\n", (unsigned int)jobid )); return False; } if (!pjob->spooled || pjob->sysjob == -1) { DEBUG(10, ("print_job_pause: not spooled or bad sysjob = %d for jobid %u\n", (int)pjob->sysjob, (unsigned int)jobid )); return False; } if (!is_owner(user, lp_const_servicename(snum), jobid) && !print_access_check(user, snum, JOB_ACCESS_ADMINISTER)) { DEBUG(3, ("pause denied by security descriptor\n")); /* BEGIN_ADMIN_LOG */ sys_adminlog( LOG_ERR, "Permission denied-- user not allowed to delete, \ pause, or resume print job. User name: %s. Printer name: %s.", uidtoname(user->ut.uid), PRINTERNAME(snum) ); /* END_ADMIN_LOG */ *errcode = WERR_ACCESS_DENIED; return False; } /* need to pause the spooled entry */ ret = (*(current_printif->job_pause))(snum, pjob); if (ret != 0) { *errcode = WERR_INVALID_PARAM; return False; } /* force update the database */ print_cache_flush(lp_const_servicename(snum)); /* Send a printer notify message */ notify_job_status(sharename, jobid, JOB_STATUS_PAUSED); /* how do we tell if this succeeded? */ return True; } /**************************************************************************** Resume a job. 
****************************************************************************/ BOOL print_job_resume(struct current_user *user, int snum, uint32 jobid, WERROR *errcode) { const char *sharename = lp_const_servicename(snum); struct printjob *pjob; int ret; struct printif *current_printif = get_printer_fns( snum ); pjob = print_job_find(sharename, jobid); if (!pjob || !user) { DEBUG(10, ("print_job_resume: no pjob or user for jobid %u\n", (unsigned int)jobid )); return False; } if (!pjob->spooled || pjob->sysjob == -1) { DEBUG(10, ("print_job_resume: not spooled or bad sysjob = %d for jobid %u\n", (int)pjob->sysjob, (unsigned int)jobid )); return False; } if (!is_owner(user, lp_const_servicename(snum), jobid) && !print_access_check(user, snum, JOB_ACCESS_ADMINISTER)) { DEBUG(3, ("resume denied by security descriptor\n")); *errcode = WERR_ACCESS_DENIED; /* BEGIN_ADMIN_LOG */ sys_adminlog( LOG_ERR, "Permission denied-- user not allowed to delete, \ pause, or resume print job. User name: %s. Printer name: %s.", uidtoname(user->ut.uid), PRINTERNAME(snum) ); /* END_ADMIN_LOG */ return False; } ret = (*(current_printif->job_resume))(snum, pjob); if (ret != 0) { *errcode = WERR_INVALID_PARAM; return False; } /* force update the database */ print_cache_flush(lp_const_servicename(snum)); /* Send a printer notify message */ notify_job_status(sharename, jobid, JOB_STATUS_QUEUED); return True; } /**************************************************************************** Write to a print file. 
****************************************************************************/ ssize_t print_job_write(int snum, uint32 jobid, const char *buf, SMB_OFF_T pos, size_t size) { const char* sharename = lp_const_servicename(snum); int return_code; struct printjob *pjob; pjob = print_job_find(sharename, jobid); if (!pjob) return -1; /* don't allow another process to get this info - it is meaningless */ if (pjob->pid != sys_getpid()) return -1; return_code = write_data_at_offset(pjob->fd, buf, size, pos); if (return_code>0) { pjob->size += size; pjob_store(sharename, jobid, pjob); } return return_code; } /**************************************************************************** Get the queue status - do not update if db is out of date. ****************************************************************************/ static int get_queue_status(const char* sharename, print_status_struct *status) { fstring keystr; TDB_DATA data; struct tdb_print_db *pdb = get_print_db_byname(sharename); int len; if (status) { ZERO_STRUCTP(status); } if (!pdb) return 0; if (status) { fstr_sprintf(keystr, "STATUS/%s", sharename); data = tdb_fetch(pdb->tdb, string_tdb_data(keystr)); if (data.dptr) { if (data.dsize == sizeof(print_status_struct)) /* this memcpy is ok since the status struct was not packed before storing it in the tdb */ memcpy(status, data.dptr, sizeof(print_status_struct)); SAFE_FREE(data.dptr); } } len = tdb_fetch_int32(pdb->tdb, "INFO/total_jobs"); release_print_db(pdb); return (len == -1 ? 0 : len); } /**************************************************************************** Determine the number of jobs in a queue. 
****************************************************************************/ int print_queue_length(int snum, print_status_struct *pstatus) { const char* sharename = lp_const_servicename( snum ); print_status_struct status; int len; ZERO_STRUCT( status ); /* make sure the database is up to date */ if (print_cache_expired(lp_const_servicename(snum), True)) print_queue_update(snum, False); /* also fetch the queue status */ memset(&status, 0, sizeof(status)); len = get_queue_status(sharename, &status); if (pstatus) *pstatus = status; return len; } /*************************************************************************** Allocate a jobid. Hold the lock for as short a time as possible. ***************************************************************************/ static BOOL allocate_print_jobid(struct tdb_print_db *pdb, int snum, const char *sharename, uint32 *pjobid) { int i; uint32 jobid; *pjobid = (uint32)-1; for (i = 0; i < 3; i++) { /* Lock the database - only wait 20 seconds. */ if (tdb_lock_bystring_with_timeout(pdb->tdb, "INFO/nextjob", 20) == -1) { DEBUG(0,("allocate_print_jobid: failed to lock printing database %s\n", sharename)); return False; } if (!tdb_fetch_uint32(pdb->tdb, "INFO/nextjob", &jobid)) { if (tdb_error(pdb->tdb) != TDB_ERR_NOEXIST) { DEBUG(0, ("allocate_print_jobid: failed to fetch INFO/nextjob for print queue %s\n", sharename)); return False; } jobid = 0; } jobid = NEXT_JOBID(jobid); if (tdb_store_int32(pdb->tdb, "INFO/nextjob", jobid)==-1) { DEBUG(3, ("allocate_print_jobid: failed to store INFO/nextjob.\n")); tdb_unlock_bystring(pdb->tdb, "INFO/nextjob"); return False; } /* We've finished with the INFO/nextjob lock. */ tdb_unlock_bystring(pdb->tdb, "INFO/nextjob"); if (!print_job_exists(sharename, jobid)) break; } if (i > 2) { DEBUG(0, ("allocate_print_jobid: failed to allocate a print job for queue %s\n", sharename)); /* Probably full... */ errno = ENOSPC; return False; } /* Store a dummy placeholder. 
*/ { TDB_DATA dum; dum.dptr = NULL; dum.dsize = 0; if (tdb_store(pdb->tdb, print_key(jobid), dum, TDB_INSERT) == -1) { DEBUG(3, ("allocate_print_jobid: jobid (%d) failed to store placeholder.\n", jobid )); return False; } } *pjobid = jobid; return True; } /*************************************************************************** Append a jobid to the 'jobs changed' list. ***************************************************************************/ static BOOL add_to_jobs_changed(struct tdb_print_db *pdb, uint32 jobid) { TDB_DATA data; uint32 store_jobid; SIVAL(&store_jobid, 0, jobid); data.dptr = (char *)&store_jobid; data.dsize = 4; DEBUG(10,("add_to_jobs_changed: Added jobid %u\n", (unsigned int)jobid )); return (tdb_append(pdb->tdb, string_tdb_data("INFO/jobs_changed"), data) == 0); } /*************************************************************************** Start spooling a job - return the jobid. ***************************************************************************/ uint32 print_job_start(struct current_user *user, int snum, char *jobname, NT_DEVICEMODE *nt_devmode ) { uint32 jobid; char *path; struct printjob pjob; user_struct *vuser; const char *sharename = lp_const_servicename(snum); struct tdb_print_db *pdb = get_print_db_byname(sharename); int njobs; errno = 0; if (!pdb) return (uint32)-1; if (!print_access_check(user, snum, PRINTER_ACCESS_USE)) { DEBUG(3, ("print_job_start: job start denied by security descriptor\n")); release_print_db(pdb); return (uint32)-1; } if (!print_time_access_check(lp_servicename(snum))) { DEBUG(3, ("print_job_start: job start denied by time check\n")); release_print_db(pdb); return (uint32)-1; } path = lp_pathname(snum); /* see if we have sufficient disk space */ if (lp_minprintspace(snum)) { SMB_BIG_UINT dspace, dsize; if (sys_fsusage(path, &dspace, &dsize) == 0 && dspace < 2*(SMB_BIG_UINT)lp_minprintspace(snum)) { DEBUG(3, ("print_job_start: disk space check failed.\n")); release_print_db(pdb); errno = ENOSPC; return 
(uint32)-1; } } /* for autoloaded printers, check that the printcap entry still exists */ if (lp_autoloaded(snum) && !pcap_printername_ok(lp_const_servicename(snum))) { DEBUG(3, ("print_job_start: printer name %s check failed.\n", lp_const_servicename(snum) )); release_print_db(pdb); errno = ENOENT; return (uint32)-1; } /* Insure the maximum queue size is not violated */ if ((njobs = print_queue_length(snum,NULL)) > lp_maxprintjobs(snum)) { DEBUG(3, ("print_job_start: Queue %s number of jobs (%d) larger than max printjobs per queue (%d).\n", sharename, njobs, lp_maxprintjobs(snum) )); release_print_db(pdb); errno = ENOSPC; return (uint32)-1; } DEBUG(10,("print_job_start: Queue %s number of jobs (%d), max printjobs = %d\n", sharename, njobs, lp_maxprintjobs(snum) )); if (!allocate_print_jobid(pdb, snum, sharename, &jobid)) goto fail; /* create the database entry */ ZERO_STRUCT(pjob); pjob.pid = sys_getpid(); pjob.sysjob = -1; pjob.fd = -1; pjob.starttime = time(NULL); pjob.status = LPQ_SPOOLING; pjob.size = 0; pjob.spooled = False; pjob.smbjob = True; pjob.nt_devmode = nt_devmode; fstrcpy(pjob.jobname, jobname); if ((vuser = get_valid_user_struct(user->vuid)) != NULL) { fstrcpy(pjob.user, lp_printjob_username(snum)); standard_sub_basic(vuser->user.smb_name, vuser->user.domain, pjob.user, sizeof(pjob.user)-1); /* ensure NULL termination */ pjob.user[sizeof(pjob.user)-1] = '\0'; } else { fstrcpy(pjob.user, uidtoname(user->ut.uid)); } fstrcpy(pjob.queuename, lp_const_servicename(snum)); /* we have a job entry - now create the spool file */ slprintf(pjob.filename, sizeof(pjob.filename)-1, "%s/%s%.8u.XXXXXX", path, PRINT_SPOOL_PREFIX, (unsigned int)jobid); pjob.fd = smb_mkstemp(pjob.filename); if (pjob.fd == -1) { if (errno == EACCES) { /* Common setup error, force a report. */ DEBUG(0, ("print_job_start: insufficient permissions \ to open spool file %s.\n", pjob.filename)); } else { /* Normal case, report at level 3 and above. 
*/ DEBUG(3, ("print_job_start: can't open spool file %s,\n", pjob.filename)); DEBUGADD(3, ("errno = %d (%s).\n", errno, strerror(errno))); } goto fail; } pjob_store(sharename, jobid, &pjob); /* Update the 'jobs changed' entry used by print_queue_status. */ add_to_jobs_changed(pdb, jobid); /* Ensure we keep a rough count of the number of total jobs... */ tdb_change_int32_atomic(pdb->tdb, "INFO/total_jobs", &njobs, 1); release_print_db(pdb); return jobid; fail: if (jobid != -1) pjob_delete(sharename, jobid); release_print_db(pdb); DEBUG(3, ("print_job_start: returning fail. Error = %s\n", strerror(errno) )); return (uint32)-1; } /**************************************************************************** Update the number of pages spooled to jobid ****************************************************************************/ void print_job_endpage(int snum, uint32 jobid) { const char* sharename = lp_const_servicename(snum); struct printjob *pjob; pjob = print_job_find(sharename, jobid); if (!pjob) return; /* don't allow another process to get this info - it is meaningless */ if (pjob->pid != sys_getpid()) return; pjob->page_count++; pjob_store(sharename, jobid, pjob); } /**************************************************************************** Print a file - called on closing the file. This spools the job. If normal close is false then we're tearing down the jobs - treat as an error. 
****************************************************************************/ BOOL print_job_end(int snum, uint32 jobid, enum file_close_type close_type) { const char* sharename = lp_const_servicename(snum); struct printjob *pjob; int ret; SMB_STRUCT_STAT sbuf; struct printif *current_printif = get_printer_fns( snum ); pjob = print_job_find(sharename, jobid); if (!pjob) return False; if (pjob->spooled || pjob->pid != sys_getpid()) return False; if ((close_type == NORMAL_CLOSE || close_type == SHUTDOWN_CLOSE) && (sys_fstat(pjob->fd, &sbuf) == 0)) { pjob->size = sbuf.st_size; close(pjob->fd); pjob->fd = -1; } else { /* * Not a normal close or we couldn't stat the job file, * so something has gone wrong. Cleanup. */ close(pjob->fd); pjob->fd = -1; DEBUG(3,("print_job_end: failed to stat file for jobid %d\n", jobid )); goto fail; } /* Technically, this is not quite right. If the printer has a separator * page turned on, the NT spooler prints the separator page even if the * print job is 0 bytes. 010215 JRR */ if (pjob->size == 0 || pjob->status == LPQ_DELETING) { /* don't bother spooling empty files or something being deleted. */ DEBUG(5,("print_job_end: canceling spool of %s (%s)\n", pjob->filename, pjob->size ? "deleted" : "zero length" )); unlink(pjob->filename); pjob_delete(sharename, jobid); return True; } pjob->smbjob = jobid; ret = (*(current_printif->job_submit))(snum, pjob); if (ret) goto fail; /* The print job has been sucessfully handed over to the back-end */ pjob->spooled = True; pjob->status = LPQ_QUEUED; pjob_store(sharename, jobid, pjob); /* make sure the database is up to date */ if (print_cache_expired(lp_const_servicename(snum), True)) print_queue_update(snum, False); return True; fail: /* The print job was not succesfully started. Cleanup */ /* Still need to add proper error return propagation! 
010122:JRR */ unlink(pjob->filename); pjob_delete(sharename, jobid); return False; } /**************************************************************************** Get a snapshot of jobs in the system without traversing. ****************************************************************************/ static BOOL get_stored_queue_info(struct tdb_print_db *pdb, int snum, int *pcount, print_queue_struct **ppqueue) { TDB_DATA data, cgdata; print_queue_struct *queue = NULL; uint32 qcount = 0; uint32 extra_count = 0; int total_count = 0; size_t len = 0; uint32 i; int max_reported_jobs = lp_max_reported_jobs(snum); BOOL ret = False; const char* sharename = lp_servicename(snum); /* make sure the database is up to date */ if (print_cache_expired(lp_const_servicename(snum), True)) print_queue_update(snum, False); *pcount = 0; *ppqueue = NULL; ZERO_STRUCT(data); ZERO_STRUCT(cgdata); /* Get the stored queue data. */ data = tdb_fetch(pdb->tdb, string_tdb_data("INFO/linear_queue_array")); if (data.dptr && data.dsize >= sizeof(qcount)) len += tdb_unpack(data.dptr + len, data.dsize - len, "d", &qcount); /* Get the changed jobs list. */ cgdata = tdb_fetch(pdb->tdb, string_tdb_data("INFO/jobs_changed")); if (cgdata.dptr != NULL && (cgdata.dsize % 4 == 0)) extra_count = cgdata.dsize/4; DEBUG(5,("get_stored_queue_info: qcount = %u, extra_count = %u\n", (unsigned int)qcount, (unsigned int)extra_count)); /* Allocate the queue size. */ if (qcount == 0 && extra_count == 0) goto out; if ((queue = SMB_MALLOC_ARRAY(print_queue_struct, qcount + extra_count)) == NULL) goto out; /* Retrieve the linearised queue data. 
*/ for( i = 0; i < qcount; i++) { uint32 qjob, qsize, qpage_count, qstatus, qpriority, qtime; len += tdb_unpack(data.dptr + len, data.dsize - len, "ddddddff", &qjob, &qsize, &qpage_count, &qstatus, &qpriority, &qtime, queue[i].fs_user, queue[i].fs_file); queue[i].job = qjob; queue[i].size = qsize; queue[i].page_count = qpage_count; queue[i].status = qstatus; queue[i].priority = qpriority; queue[i].time = qtime; } total_count = qcount; /* Add in the changed jobids. */ for( i = 0; i < extra_count; i++) { uint32 jobid; struct printjob *pjob; jobid = IVAL(cgdata.dptr, i*4); DEBUG(5,("get_stored_queue_info: changed job = %u\n", (unsigned int)jobid)); pjob = print_job_find(lp_const_servicename(snum), jobid); if (!pjob) { DEBUG(5,("get_stored_queue_info: failed to find changed job = %u\n", (unsigned int)jobid)); remove_from_jobs_changed(sharename, jobid); continue; } queue[total_count].job = jobid; queue[total_count].size = pjob->size; queue[total_count].page_count = pjob->page_count; queue[total_count].status = pjob->status; queue[total_count].priority = 1; queue[total_count].time = pjob->starttime; fstrcpy(queue[total_count].fs_user, pjob->user); fstrcpy(queue[total_count].fs_file, pjob->jobname); total_count++; } /* Sort the queue by submission time otherwise they are displayed in hash order. */ qsort(queue, total_count, sizeof(print_queue_struct), QSORT_CAST(printjob_comp)); DEBUG(5,("get_stored_queue_info: total_count = %u\n", (unsigned int)total_count)); if (max_reported_jobs && total_count > max_reported_jobs) total_count = max_reported_jobs; *ppqueue = queue; *pcount = total_count; ret = True; out: SAFE_FREE(data.dptr); SAFE_FREE(cgdata.dptr); return ret; } /**************************************************************************** Get a printer queue listing. 
set queue = NULL and status = NULL if you just want to update the cache ****************************************************************************/ int print_queue_status(int snum, print_queue_struct **ppqueue, print_status_struct *status) { fstring keystr; TDB_DATA data, key; const char *sharename; struct tdb_print_db *pdb; int count = 0; /* make sure the database is up to date */ if (print_cache_expired(lp_const_servicename(snum), True)) print_queue_update(snum, False); /* return if we are done */ if ( !ppqueue || !status ) return 0; *ppqueue = NULL; sharename = lp_const_servicename(snum); pdb = get_print_db_byname(sharename); if (!pdb) return 0; /* * Fetch the queue status. We must do this first, as there may * be no jobs in the queue. */ ZERO_STRUCTP(status); slprintf(keystr, sizeof(keystr)-1, "STATUS/%s", sharename); key.dptr = keystr; key.dsize = strlen(keystr); data = tdb_fetch(pdb->tdb, key); if (data.dptr) { if (data.dsize == sizeof(*status)) { /* this memcpy is ok since the status struct was not packed before storing it in the tdb */ memcpy(status, data.dptr, sizeof(*status)); } SAFE_FREE(data.dptr); } /* * Now, fetch the print queue information. We first count the number * of entries, and then only retrieve the queue if necessary. */ if (!get_stored_queue_info(pdb, snum, &count, ppqueue)) { release_print_db(pdb); return 0; } release_print_db(pdb); return count; } /**************************************************************************** Pause a queue. 
****************************************************************************/ BOOL print_queue_pause(struct current_user *user, int snum, WERROR *errcode) { int ret; struct printif *current_printif = get_printer_fns( snum ); if (!print_access_check(user, snum, PRINTER_ACCESS_ADMINISTER)) { *errcode = WERR_ACCESS_DENIED; return False; } become_root(); ret = (*(current_printif->queue_pause))(snum); unbecome_root(); if (ret != 0) { *errcode = WERR_INVALID_PARAM; return False; } /* force update the database */ print_cache_flush(lp_const_servicename(snum)); /* Send a printer notify message */ notify_printer_status(snum, PRINTER_STATUS_PAUSED); return True; } /**************************************************************************** Resume a queue. ****************************************************************************/ BOOL print_queue_resume(struct current_user *user, int snum, WERROR *errcode) { int ret; struct printif *current_printif = get_printer_fns( snum ); if (!print_access_check(user, snum, PRINTER_ACCESS_ADMINISTER)) { *errcode = WERR_ACCESS_DENIED; return False; } become_root(); ret = (*(current_printif->queue_resume))(snum); unbecome_root(); if (ret != 0) { *errcode = WERR_INVALID_PARAM; return False; } /* make sure the database is up to date */ if (print_cache_expired(lp_const_servicename(snum), True)) print_queue_update(snum, True); /* Send a printer notify message */ notify_printer_status(snum, PRINTER_STATUS_OK); return True; } /**************************************************************************** Purge a queue - implemented by deleting all jobs that we can delete. ****************************************************************************/ BOOL print_queue_purge(struct current_user *user, int snum, WERROR *errcode) { print_queue_struct *queue; print_status_struct status; int njobs, i; BOOL can_job_admin; /* Force and update so the count is accurate (i.e. 
not a cached count) */ print_queue_update(snum, True); can_job_admin = print_access_check(user, snum, JOB_ACCESS_ADMINISTER); njobs = print_queue_status(snum, &queue, &status); if ( can_job_admin ) become_root(); for (i=0;i<njobs;i++) { BOOL owner = is_owner(user, lp_const_servicename(snum), queue[i].job); if (owner || can_job_admin) { print_job_delete1(snum, queue[i].job); } } if ( can_job_admin ) unbecome_root(); /* update the cache */ print_queue_update( snum, True ); SAFE_FREE(queue); return True; }
gpl-2.0
goldelico/letux-400
arch/blackfin/mach-common/ints-priority-sc.c
18
22746
/* * File: arch/blackfin/mach-common/ints-priority-sc.c * Based on: * Author: * * Created: ? * Description: Set up the interrupt priorities * * Modified: * 1996 Roman Zippel * 1999 D. Jeff Dionne <jeff@uclinux.org> * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca> * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca> * 2003 Metrowerks/Motorola * 2003 Bas Vermeulen <bas@buyways.nl> * Copyright 2004-2007 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/kernel_stat.h> #include <linux/seq_file.h> #include <linux/irq.h> #ifdef CONFIG_KGDB #include <linux/kgdb.h> #endif #include <asm/traps.h> #include <asm/blackfin.h> #include <asm/gpio.h> #include <asm/irq_handler.h> #ifdef BF537_FAMILY # define BF537_GENERIC_ERROR_INT_DEMUX #else # undef BF537_GENERIC_ERROR_INT_DEMUX #endif /* * NOTES: * - we have separated the physical Hardware interrupt from the * levels that the LINUX kernel sees (see the description in irq.h) * - */ /* Initialize this to an actual value to force it into the .data * section so that we know it is properly initialized at entry into * the kernel but before bss is initialized to zero (which is where * it would live otherwise). 
The 0x1f magic represents the IRQs we * cannot actually mask out in hardware. */ unsigned long irq_flags = 0x1f; /* The number of spurious interrupts */ atomic_t num_spurious; struct ivgx { /* irq number for request_irq, available in mach-bf533/irq.h */ unsigned int irqno; /* corresponding bit in the SIC_ISR register */ unsigned int isrflag; } ivg_table[NR_PERI_INTS]; struct ivg_slice { /* position of first irq in ivg_table for given ivg */ struct ivgx *ifirst; struct ivgx *istop; } ivg7_13[IVG13 - IVG7 + 1]; static void search_IAR(void); /* * Search SIC_IAR and fill tables with the irqvalues * and their positions in the SIC_ISR register. */ static void __init search_IAR(void) { unsigned ivg, irq_pos = 0; for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) { int irqn; ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos]; for (irqn = 0; irqn < NR_PERI_INTS; irqn++) { int iar_shift = (irqn & 7) * 4; if (ivg == (0xf & #ifndef CONFIG_BF52x bfin_read32((unsigned long *)SIC_IAR0 + (irqn >> 3)) >> iar_shift)) { #else bfin_read32((unsigned long *)SIC_IAR0 + ((irqn%32) >> 3) + ((irqn / 32) * 16)) >> iar_shift)) { #endif ivg_table[irq_pos].irqno = IVG7 + irqn; ivg_table[irq_pos].isrflag = 1 << (irqn % 32); ivg7_13[ivg].istop++; irq_pos++; } } } } /* * This is for BF533 internal IRQs */ static void ack_noop(unsigned int irq) { /* Dummy function. */ } static void bfin_core_mask_irq(unsigned int irq) { irq_flags &= ~(1 << irq); if (!irqs_disabled()) local_irq_enable(); } static void bfin_core_unmask_irq(unsigned int irq) { irq_flags |= 1 << irq; /* * If interrupts are enabled, IMASK must contain the same value * as irq_flags. Make sure that invariant holds. If interrupts * are currently disabled we need not do anything; one of the * callers will take care of setting IMASK to the proper value * when reenabling interrupts. * local_irq_enable just does "STI irq_flags", so it's exactly * what we need. 
*/ if (!irqs_disabled()) local_irq_enable(); return; } static void bfin_internal_mask_irq(unsigned int irq) { #ifdef CONFIG_BF53x bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & ~(1 << (irq - (IRQ_CORETMR + 1)))); #else unsigned mask_bank, mask_bit; mask_bank = (irq - (IRQ_CORETMR + 1)) / 32; mask_bit = (irq - (IRQ_CORETMR + 1)) % 32; bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & ~(1 << mask_bit)); #endif SSYNC(); } static void bfin_internal_unmask_irq(unsigned int irq) { #ifdef CONFIG_BF53x bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | (1 << (irq - (IRQ_CORETMR + 1)))); #else unsigned mask_bank, mask_bit; mask_bank = (irq - (IRQ_CORETMR + 1)) / 32; mask_bit = (irq - (IRQ_CORETMR + 1)) % 32; bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | (1 << mask_bit)); #endif SSYNC(); } static struct irq_chip bfin_core_irqchip = { .ack = ack_noop, .mask = bfin_core_mask_irq, .unmask = bfin_core_unmask_irq, }; static struct irq_chip bfin_internal_irqchip = { .ack = ack_noop, .mask = bfin_internal_mask_irq, .unmask = bfin_internal_unmask_irq, }; #ifdef BF537_GENERIC_ERROR_INT_DEMUX static int error_int_mask; static void bfin_generic_error_ack_irq(unsigned int irq) { } static void bfin_generic_error_mask_irq(unsigned int irq) { error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR)); if (!error_int_mask) { local_irq_disable(); bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & ~(1 << (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1)))); SSYNC(); local_irq_enable(); } } static void bfin_generic_error_unmask_irq(unsigned int irq) { local_irq_disable(); bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 1 << (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1))); SSYNC(); local_irq_enable(); error_int_mask |= 1L << (irq - IRQ_PPI_ERROR); } static struct irq_chip bfin_generic_error_irqchip = { .ack = bfin_generic_error_ack_irq, .mask = bfin_generic_error_mask_irq, .unmask = bfin_generic_error_unmask_irq, }; static void bfin_demux_error_irq(unsigned int int_err_irq, struct irq_desc *intb_desc) { 
int irq = 0; SSYNC(); #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK) irq = IRQ_MAC_ERROR; else #endif if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT0_ERROR; else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT1_ERROR; else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK) irq = IRQ_PPI_ERROR; else if (bfin_read_CAN_GIF() & CAN_ERR_MASK) irq = IRQ_CAN_ERROR; else if (bfin_read_SPI_STAT() & SPI_ERR_MASK) irq = IRQ_SPI_ERROR; else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) && (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0)) irq = IRQ_UART0_ERROR; else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) && (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0)) irq = IRQ_UART1_ERROR; if (irq) { if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) { struct irq_desc *desc = irq_desc + irq; desc->handle_irq(irq, desc); } else { switch (irq) { case IRQ_PPI_ERROR: bfin_write_PPI_STATUS(PPI_ERR_MASK); break; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) case IRQ_MAC_ERROR: bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK); break; #endif case IRQ_SPORT0_ERROR: bfin_write_SPORT0_STAT(SPORT_ERR_MASK); break; case IRQ_SPORT1_ERROR: bfin_write_SPORT1_STAT(SPORT_ERR_MASK); break; case IRQ_CAN_ERROR: bfin_write_CAN_GIS(CAN_ERR_MASK); break; case IRQ_SPI_ERROR: bfin_write_SPI_STAT(SPI_ERR_MASK); break; default: break; } pr_debug("IRQ %d:" " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n", irq); } } else printk(KERN_ERR "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR" " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n", __FUNCTION__, __FILE__, __LINE__); } #endif /* BF537_GENERIC_ERROR_INT_DEMUX */ #if !defined(CONFIG_BF54x) static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)]; static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)]; static void bfin_gpio_ack_irq(unsigned int irq) { u16 gpionr = irq - IRQ_PF0; if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { set_gpio_data(gpionr, 0); 
SSYNC(); } } static void bfin_gpio_mask_ack_irq(unsigned int irq) { u16 gpionr = irq - IRQ_PF0; if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { set_gpio_data(gpionr, 0); SSYNC(); } set_gpio_maska(gpionr, 0); SSYNC(); } static void bfin_gpio_mask_irq(unsigned int irq) { set_gpio_maska(irq - IRQ_PF0, 0); SSYNC(); } static void bfin_gpio_unmask_irq(unsigned int irq) { set_gpio_maska(irq - IRQ_PF0, 1); SSYNC(); } static unsigned int bfin_gpio_irq_startup(unsigned int irq) { unsigned int ret; u16 gpionr = irq - IRQ_PF0; if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { ret = gpio_request(gpionr, "IRQ"); if (ret) return ret; } gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); bfin_gpio_unmask_irq(irq); return ret; } static void bfin_gpio_irq_shutdown(unsigned int irq) { bfin_gpio_mask_irq(irq); gpio_free(irq - IRQ_PF0); gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0); } static int bfin_gpio_irq_type(unsigned int irq, unsigned int type) { unsigned int ret; u16 gpionr = irq - IRQ_PF0; if (type == IRQ_TYPE_PROBE) { /* only probe unenabled GPIO interrupt lines */ if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)) return 0; type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; } if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { ret = gpio_request(gpionr, "IRQ"); if (ret) return ret; } gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); } else { gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); return 0; } set_gpio_dir(gpionr, 0); set_gpio_inen(gpionr, 1); if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr); set_gpio_edge(gpionr, 1); } else { set_gpio_edge(gpionr, 0); gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); } if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 
set_gpio_both(gpionr, 1); else set_gpio_both(gpionr, 0); if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))) set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */ else set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */ SSYNC(); if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) set_irq_handler(irq, handle_edge_irq); else set_irq_handler(irq, handle_level_irq); return 0; } static struct irq_chip bfin_gpio_irqchip = { .ack = bfin_gpio_ack_irq, .mask = bfin_gpio_mask_irq, .mask_ack = bfin_gpio_mask_ack_irq, .unmask = bfin_gpio_unmask_irq, .set_type = bfin_gpio_irq_type, .startup = bfin_gpio_irq_startup, .shutdown = bfin_gpio_irq_shutdown }; static void bfin_demux_gpio_irq(unsigned int intb_irq, struct irq_desc *intb_desc) { u16 i; struct irq_desc *desc; for (i = 0; i < MAX_BLACKFIN_GPIOS; i += 16) { int irq = IRQ_PF0 + i; int flag_d = get_gpiop_data(i); int mask = flag_d & (gpio_enabled[gpio_bank(i)] & get_gpiop_maska(i)); while (mask) { if (mask & 1) { desc = irq_desc + irq; desc->handle_irq(irq, desc); } irq++; mask >>= 1; } } } #else /* CONFIG_BF54x */ #define NR_PINT_SYS_IRQS 4 #define NR_PINT_BITS 32 #define NR_PINTS 160 #define IRQ_NOT_AVAIL 0xFF #define PINT_2_BANK(x) ((x) >> 5) #define PINT_2_BIT(x) ((x) & 0x1F) #define PINT_BIT(x) (1 << (PINT_2_BIT(x))) static unsigned char irq2pint_lut[NR_PINTS]; static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS]; struct pin_int_t { unsigned int mask_set; unsigned int mask_clear; unsigned int request; unsigned int assign; unsigned int edge_set; unsigned int edge_clear; unsigned int invert_set; unsigned int invert_clear; unsigned int pinstate; unsigned int latch; }; static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = { (struct pin_int_t *)PINT0_MASK_SET, (struct pin_int_t *)PINT1_MASK_SET, (struct pin_int_t *)PINT2_MASK_SET, (struct pin_int_t *)PINT3_MASK_SET, }; unsigned short get_irq_base(u8 bank, u8 bmap) { u16 irq_base; if (bank < 2) { /*PA-PB */ irq_base = 
IRQ_PA0 + bmap * 16; } else { /*PC-PJ */ irq_base = IRQ_PC0 + bmap * 16; } return irq_base; } /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ void init_pint_lut(void) { u16 bank, bit, irq_base, bit_pos; u32 pint_assign; u8 bmap; memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut)); for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) { pint_assign = pint[bank]->assign; for (bit = 0; bit < NR_PINT_BITS; bit++) { bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF; irq_base = get_irq_base(bank, bmap); irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0); bit_pos = bit + bank * NR_PINT_BITS; pint2irq_lut[bit_pos] = irq_base - SYS_IRQS; irq2pint_lut[irq_base - SYS_IRQS] = bit_pos; } } } static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)]; static void bfin_gpio_ack_irq(unsigned int irq) { u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; pint[PINT_2_BANK(pint_val)]->request = PINT_BIT(pint_val); SSYNC(); } static void bfin_gpio_mask_ack_irq(unsigned int irq) { u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; u32 pintbit = PINT_BIT(pint_val); u8 bank = PINT_2_BANK(pint_val); pint[bank]->request = pintbit; pint[bank]->mask_clear = pintbit; SSYNC(); } static void bfin_gpio_mask_irq(unsigned int irq) { u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val); SSYNC(); } static void bfin_gpio_unmask_irq(unsigned int irq) { u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; u32 pintbit = PINT_BIT(pint_val); u8 bank = PINT_2_BANK(pint_val); pint[bank]->request = pintbit; pint[bank]->mask_set = pintbit; SSYNC(); } static unsigned int bfin_gpio_irq_startup(unsigned int irq) { unsigned int ret; u16 gpionr = irq - IRQ_PA0; u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; if (pint_val == IRQ_NOT_AVAIL) { printk(KERN_ERR "GPIO IRQ %d :Not in PINT Assign table " "Reconfigure Interrupt to Port Assignemt\n", irq); return -ENODEV; } if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { ret = gpio_request(gpionr, "IRQ"); if 
(ret) return ret; } gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); bfin_gpio_unmask_irq(irq); return ret; } static void bfin_gpio_irq_shutdown(unsigned int irq) { bfin_gpio_mask_irq(irq); gpio_free(irq - IRQ_PA0); gpio_enabled[gpio_bank(irq - IRQ_PA0)] &= ~gpio_bit(irq - IRQ_PA0); } static int bfin_gpio_irq_type(unsigned int irq, unsigned int type) { unsigned int ret; u16 gpionr = irq - IRQ_PA0; u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; u32 pintbit = PINT_BIT(pint_val); u8 bank = PINT_2_BANK(pint_val); if (pint_val == IRQ_NOT_AVAIL) return -ENODEV; if (type == IRQ_TYPE_PROBE) { /* only probe unenabled GPIO interrupt lines */ if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)) return 0; type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; } if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) { ret = gpio_request(gpionr, "IRQ"); if (ret) return ret; } gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr); } else { gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); return 0; } gpio_direction_input(gpionr); if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { pint[bank]->edge_set = pintbit; } else { pint[bank]->edge_clear = pintbit; } if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))) pint[bank]->invert_set = pintbit; /* low or falling edge denoted by one */ else pint[bank]->invert_set = pintbit; /* high or rising edge denoted by zero */ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) pint[bank]->invert_set = pintbit; else pint[bank]->invert_set = pintbit; SSYNC(); if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) set_irq_handler(irq, handle_edge_irq); else set_irq_handler(irq, handle_level_irq); return 0; } static struct irq_chip bfin_gpio_irqchip = { .ack = bfin_gpio_ack_irq, .mask = bfin_gpio_mask_irq, .mask_ack = bfin_gpio_mask_ack_irq, .unmask = bfin_gpio_unmask_irq, .set_type = bfin_gpio_irq_type, .startup 
= bfin_gpio_irq_startup, .shutdown = bfin_gpio_irq_shutdown }; static void bfin_demux_gpio_irq(unsigned int intb_irq, struct irq_desc *intb_desc) { u8 bank, pint_val; u32 request, irq; struct irq_desc *desc; switch (intb_irq) { case IRQ_PINT0: bank = 0; break; case IRQ_PINT2: bank = 2; break; case IRQ_PINT3: bank = 3; break; case IRQ_PINT1: bank = 1; break; default: return; } pint_val = bank * NR_PINT_BITS; request = pint[bank]->request; while (request) { if (request & 1) { irq = pint2irq_lut[pint_val] + SYS_IRQS; desc = irq_desc + irq; desc->handle_irq(irq, desc); } pint_val++; request >>= 1; } } #endif void __init init_exception_vectors(void) { SSYNC(); /* cannot program in software: * evt0 - emulation (jtag) * evt1 - reset */ bfin_write_EVT2(evt_nmi); bfin_write_EVT3(trap); bfin_write_EVT5(evt_ivhw); bfin_write_EVT6(evt_timer); bfin_write_EVT7(evt_evt7); bfin_write_EVT8(evt_evt8); bfin_write_EVT9(evt_evt9); bfin_write_EVT10(evt_evt10); bfin_write_EVT11(evt_evt11); bfin_write_EVT12(evt_evt12); bfin_write_EVT13(evt_evt13); bfin_write_EVT14(evt14_softirq); bfin_write_EVT15(evt_system_call); CSYNC(); } /* * This function should be called during kernel startup to initialize * the BFin IRQ handling routines. 
*/ int __init init_arch_irq(void) { int irq; unsigned long ilat = 0; /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); bfin_write_SIC_IWR0(IWR_ENABLE_ALL); bfin_write_SIC_IWR1(IWR_ENABLE_ALL); # ifdef CONFIG_BF54x bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); bfin_write_SIC_IWR2(IWR_ENABLE_ALL); # endif #else bfin_write_SIC_IMASK(SIC_UNMASK_ALL); bfin_write_SIC_IWR(IWR_ENABLE_ALL); #endif SSYNC(); local_irq_disable(); #ifdef CONFIG_BF54x # ifdef CONFIG_PINTx_REASSIGN pint[0]->assign = CONFIG_PINT0_ASSIGN; pint[1]->assign = CONFIG_PINT1_ASSIGN; pint[2]->assign = CONFIG_PINT2_ASSIGN; pint[3]->assign = CONFIG_PINT3_ASSIGN; # endif /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ init_pint_lut(); #endif for (irq = 0; irq <= SYS_IRQS; irq++) { if (irq <= IRQ_CORETMR) set_irq_chip(irq, &bfin_core_irqchip); else set_irq_chip(irq, &bfin_internal_irqchip); #ifdef BF537_GENERIC_ERROR_INT_DEMUX if (irq != IRQ_GENERIC_ERROR) { #endif switch (irq) { #if defined(CONFIG_BF53x) case IRQ_PROG_INTA: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) case IRQ_MAC_RX: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; # endif #elif defined(CONFIG_BF54x) case IRQ_PINT0: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; case IRQ_PINT1: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; case IRQ_PINT2: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; case IRQ_PINT3: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; #elif defined(CONFIG_BF52x) case IRQ_PORTF_INTA: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; case IRQ_PORTG_INTA: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; case IRQ_PORTH_INTA: set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; #endif default: 
set_irq_handler(irq, handle_simple_irq); break; } #ifdef BF537_GENERIC_ERROR_INT_DEMUX } else { set_irq_handler(irq, bfin_demux_error_irq); } #endif } #ifdef BF537_GENERIC_ERROR_INT_DEMUX for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) { set_irq_chip(irq, &bfin_generic_error_irqchip); set_irq_handler(irq, handle_level_irq); } #endif #ifndef CONFIG_BF54x for (irq = IRQ_PF0; irq < NR_IRQS; irq++) { #else for (irq = IRQ_PA0; irq < NR_IRQS; irq++) { #endif set_irq_chip(irq, &bfin_gpio_irqchip); /* if configured as edge, then will be changed to do_edge_IRQ */ set_irq_handler(irq, handle_level_irq); } bfin_write_IMASK(0); CSYNC(); ilat = bfin_read_ILAT(); CSYNC(); bfin_write_ILAT(ilat); CSYNC(); printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); /* IMASK=xxx is equivalent to STI xx or irq_flags=xx, * local_irq_enable() */ program_IAR(); /* Therefore it's better to setup IARs before interrupts enabled */ search_IAR(); /* Enable interrupts IVG7-15 */ irq_flags = irq_flags | IMASK_IVG15 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; return 0; } #ifdef CONFIG_DO_IRQ_L1 __attribute__((l1_text)) #endif void do_irq(int vec, struct pt_regs *fp) { if (vec == EVT_IVTMR_P) { vec = IRQ_CORETMR; } else { struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) unsigned long sic_status[3]; SSYNC(); sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); #ifdef CONFIG_BF54x sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); #endif for (;; ivg++) { if (ivg >= ivg_stop) { atomic_inc(&num_spurious); return; } if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) break; } #else unsigned long sic_status; SSYNC(); sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); for (;; ivg++) { if (ivg >= ivg_stop) { 
atomic_inc(&num_spurious); return; } else if (sic_status & ivg->isrflag) break; } #endif vec = ivg->irqno; } asm_do_IRQ(vec, fp); #ifdef CONFIG_KGDB kgdb_process_breakpoint(); #endif }
gpl-2.0
friedrich420/HTC-ONE-M7-AEL-Kernel-5.0.2
drivers/misc/mpu3050/slaveirq.c
18
38856
/* $License: Copyright (C) 2010 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/irq.h> #include <linux/signal.h> #include <linux/miscdevice.h> #include <linux/i2c.h> #include <linux/i2c-dev.h> #include <linux/poll.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/wakelock.h> #include "mpu.h" #include "slaveirq.h" #include "mldl_cfg.h" #include "mpu-i2c.h" #define SENSOR_NAME "bma250" #define ABSMIN -512 #define ABSMAX 512 #define SLOPE_THRESHOLD_VALUE 32 #define SLOPE_DURATION_VALUE 1 #define INTERRUPT_LATCH_MODE 13 #define INTERRUPT_ENABLE 1 #define INTERRUPT_DISABLE 0 #define MAP_SLOPE_INTERRUPT 2 #define SLOPE_X_INDEX 5 #define SLOPE_Y_INDEX 6 #define SLOPE_Z_INDEX 7 #define BMA250_MAX_DELAY 200 #define BMA250_CHIP_ID 3 #define BMA250_RANGE_SET 0 #define BMA250_BW_SET 4 #define LOW_G_INTERRUPT REL_Z #define HIGH_G_INTERRUPT REL_HWHEEL #define SLOP_INTERRUPT REL_DIAL #define DOUBLE_TAP_INTERRUPT REL_WHEEL #define SINGLE_TAP_INTERRUPT REL_MISC #define ORIENT_INTERRUPT ABS_PRESSURE #define 
FLAT_INTERRUPT REL_MISC #define HIGH_G_INTERRUPT_X_HAPPENED 1 #define HIGH_G_INTERRUPT_Y_HAPPENED 2 #define HIGH_G_INTERRUPT_Z_HAPPENED 3 #define HIGH_G_INTERRUPT_X_NEGATIVE_HAPPENED 4 #define HIGH_G_INTERRUPT_Y_NEGATIVE_HAPPENED 5 #define HIGH_G_INTERRUPT_Z_NEGATIVE_HAPPENED 6 #define SLOPE_INTERRUPT_X_HAPPENED 7 #define SLOPE_INTERRUPT_Y_HAPPENED 8 #define SLOPE_INTERRUPT_Z_HAPPENED 9 #define SLOPE_INTERRUPT_X_NEGATIVE_HAPPENED 10 #define SLOPE_INTERRUPT_Y_NEGATIVE_HAPPENED 11 #define SLOPE_INTERRUPT_Z_NEGATIVE_HAPPENED 12 #define DOUBLE_TAP_INTERRUPT_HAPPENED 13 #define SINGLE_TAP_INTERRUPT_HAPPENED 14 #define UPWARD_PORTRAIT_UP_INTERRUPT_HAPPENED 15 #define UPWARD_PORTRAIT_DOWN_INTERRUPT_HAPPENED 16 #define UPWARD_LANDSCAPE_LEFT_INTERRUPT_HAPPENED 17 #define UPWARD_LANDSCAPE_RIGHT_INTERRUPT_HAPPENED 18 #define DOWNWARD_PORTRAIT_UP_INTERRUPT_HAPPENED 19 #define DOWNWARD_PORTRAIT_DOWN_INTERRUPT_HAPPENED 20 #define DOWNWARD_LANDSCAPE_LEFT_INTERRUPT_HAPPENED 21 #define DOWNWARD_LANDSCAPE_RIGHT_INTERRUPT_HAPPENED 22 #define FLAT_INTERRUPT_TURE_HAPPENED 23 #define FLAT_INTERRUPT_FALSE_HAPPENED 24 #define LOW_G_INTERRUPT_HAPPENED 25 #define PAD_LOWG 0 #define PAD_HIGHG 1 #define PAD_SLOP 2 #define PAD_DOUBLE_TAP 3 #define PAD_SINGLE_TAP 4 #define PAD_ORIENT 5 #define PAD_FLAT 6 #define BMA250_CHIP_ID_REG 0x00 #define BMA250_VERSION_REG 0x01 #define BMA250_X_AXIS_LSB_REG 0x02 #define BMA250_X_AXIS_MSB_REG 0x03 #define BMA250_Y_AXIS_LSB_REG 0x04 #define BMA250_Y_AXIS_MSB_REG 0x05 #define BMA250_Z_AXIS_LSB_REG 0x06 #define BMA250_Z_AXIS_MSB_REG 0x07 #define BMA250_TEMP_RD_REG 0x08 #define BMA250_STATUS1_REG 0x09 #define BMA250_STATUS2_REG 0x0A #define BMA250_STATUS_TAP_SLOPE_REG 0x0B #define BMA250_STATUS_ORIENT_HIGH_REG 0x0C #define BMA250_RANGE_SEL_REG 0x0F #define BMA250_BW_SEL_REG 0x10 #define BMA250_MODE_CTRL_REG 0x11 #define BMA250_LOW_NOISE_CTRL_REG 0x12 #define BMA250_DATA_CTRL_REG 0x13 #define BMA250_RESET_REG 0x14 #define BMA250_INT_ENABLE1_REG 0x16 #define 
BMA250_INT_ENABLE2_REG 0x17 #define BMA250_INT1_PAD_SEL_REG 0x19 #define BMA250_INT_DATA_SEL_REG 0x1A #define BMA250_INT2_PAD_SEL_REG 0x1B #define BMA250_INT_SRC_REG 0x1E #define BMA250_INT_SET_REG 0x20 #define BMA250_INT_CTRL_REG 0x21 #define BMA250_LOW_DURN_REG 0x22 #define BMA250_LOW_THRES_REG 0x23 #define BMA250_LOW_HIGH_HYST_REG 0x24 #define BMA250_HIGH_DURN_REG 0x25 #define BMA250_HIGH_THRES_REG 0x26 #define BMA250_SLOPE_DURN_REG 0x27 #define BMA250_SLOPE_THRES_REG 0x28 #define BMA250_TAP_PARAM_REG 0x2A #define BMA250_TAP_THRES_REG 0x2B #define BMA250_ORIENT_PARAM_REG 0x2C #define BMA250_THETA_BLOCK_REG 0x2D #define BMA250_THETA_FLAT_REG 0x2E #define BMA250_FLAT_HOLD_TIME_REG 0x2F #define BMA250_STATUS_LOW_POWER_REG 0x31 #define BMA250_SELF_TEST_REG 0x32 #define BMA250_EEPROM_CTRL_REG 0x33 #define BMA250_SERIAL_CTRL_REG 0x34 #define BMA250_CTRL_UNLOCK_REG 0x35 #define BMA250_OFFSET_CTRL_REG 0x36 #define BMA250_OFFSET_PARAMS_REG 0x37 #define BMA250_OFFSET_FILT_X_REG 0x38 #define BMA250_OFFSET_FILT_Y_REG 0x39 #define BMA250_OFFSET_FILT_Z_REG 0x3A #define BMA250_OFFSET_UNFILT_X_REG 0x3B #define BMA250_OFFSET_UNFILT_Y_REG 0x3C #define BMA250_OFFSET_UNFILT_Z_REG 0x3D #define BMA250_SPARE_0_REG 0x3E #define BMA250_SPARE_1_REG 0x3F #define BMA250_ACC_X_LSB__POS 6 #define BMA250_ACC_X_LSB__LEN 2 #define BMA250_ACC_X_LSB__MSK 0xC0 #define BMA250_ACC_X_LSB__REG BMA250_X_AXIS_LSB_REG #define BMA250_ACC_X_MSB__POS 0 #define BMA250_ACC_X_MSB__LEN 8 #define BMA250_ACC_X_MSB__MSK 0xFF #define BMA250_ACC_X_MSB__REG BMA250_X_AXIS_MSB_REG #define BMA250_ACC_Y_LSB__POS 6 #define BMA250_ACC_Y_LSB__LEN 2 #define BMA250_ACC_Y_LSB__MSK 0xC0 #define BMA250_ACC_Y_LSB__REG BMA250_Y_AXIS_LSB_REG #define BMA250_ACC_Y_MSB__POS 0 #define BMA250_ACC_Y_MSB__LEN 8 #define BMA250_ACC_Y_MSB__MSK 0xFF #define BMA250_ACC_Y_MSB__REG BMA250_Y_AXIS_MSB_REG #define BMA250_ACC_Z_LSB__POS 6 #define BMA250_ACC_Z_LSB__LEN 2 #define BMA250_ACC_Z_LSB__MSK 0xC0 #define BMA250_ACC_Z_LSB__REG 
BMA250_Z_AXIS_LSB_REG #define BMA250_ACC_Z_MSB__POS 0 #define BMA250_ACC_Z_MSB__LEN 8 #define BMA250_ACC_Z_MSB__MSK 0xFF #define BMA250_ACC_Z_MSB__REG BMA250_Z_AXIS_MSB_REG #define BMA250_RANGE_SEL__POS 0 #define BMA250_RANGE_SEL__LEN 4 #define BMA250_RANGE_SEL__MSK 0x0F #define BMA250_RANGE_SEL__REG BMA250_RANGE_SEL_REG #define BMA250_BANDWIDTH__POS 0 #define BMA250_BANDWIDTH__LEN 5 #define BMA250_BANDWIDTH__MSK 0x1F #define BMA250_BANDWIDTH__REG BMA250_BW_SEL_REG #define BMA250_EN_LOW_POWER__POS 6 #define BMA250_EN_LOW_POWER__LEN 1 #define BMA250_EN_LOW_POWER__MSK 0x40 #define BMA250_EN_LOW_POWER__REG BMA250_MODE_CTRL_REG #define BMA250_EN_SUSPEND__POS 7 #define BMA250_EN_SUSPEND__LEN 1 #define BMA250_EN_SUSPEND__MSK 0x80 #define BMA250_EN_SUSPEND__REG BMA250_MODE_CTRL_REG #define BMA250_INT_MODE_SEL__POS 0 #define BMA250_INT_MODE_SEL__LEN 4 #define BMA250_INT_MODE_SEL__MSK 0x0F #define BMA250_INT_MODE_SEL__REG BMA250_INT_CTRL_REG #define BMA250_LOWG_INT_S__POS 0 #define BMA250_LOWG_INT_S__LEN 1 #define BMA250_LOWG_INT_S__MSK 0x01 #define BMA250_LOWG_INT_S__REG BMA250_STATUS1_REG #define BMA250_HIGHG_INT_S__POS 1 #define BMA250_HIGHG_INT_S__LEN 1 #define BMA250_HIGHG_INT_S__MSK 0x02 #define BMA250_HIGHG_INT_S__REG BMA250_STATUS1_REG #define BMA250_SLOPE_INT_S__POS 2 #define BMA250_SLOPE_INT_S__LEN 1 #define BMA250_SLOPE_INT_S__MSK 0x04 #define BMA250_SLOPE_INT_S__REG BMA250_STATUS1_REG #define BMA250_DOUBLE_TAP_INT_S__POS 4 #define BMA250_DOUBLE_TAP_INT_S__LEN 1 #define BMA250_DOUBLE_TAP_INT_S__MSK 0x10 #define BMA250_DOUBLE_TAP_INT_S__REG BMA250_STATUS1_REG #define BMA250_SINGLE_TAP_INT_S__POS 5 #define BMA250_SINGLE_TAP_INT_S__LEN 1 #define BMA250_SINGLE_TAP_INT_S__MSK 0x20 #define BMA250_SINGLE_TAP_INT_S__REG BMA250_STATUS1_REG #define BMA250_ORIENT_INT_S__POS 6 #define BMA250_ORIENT_INT_S__LEN 1 #define BMA250_ORIENT_INT_S__MSK 0x40 #define BMA250_ORIENT_INT_S__REG BMA250_STATUS1_REG #define BMA250_FLAT_INT_S__POS 7 #define BMA250_FLAT_INT_S__LEN 1 #define 
BMA250_FLAT_INT_S__MSK 0x80 #define BMA250_FLAT_INT_S__REG BMA250_STATUS1_REG #define BMA250_DATA_INT_S__POS 7 #define BMA250_DATA_INT_S__LEN 1 #define BMA250_DATA_INT_S__MSK 0x80 #define BMA250_DATA_INT_S__REG BMA250_STATUS2_REG #define BMA250_SLOPE_FIRST_X__POS 0 #define BMA250_SLOPE_FIRST_X__LEN 1 #define BMA250_SLOPE_FIRST_X__MSK 0x01 #define BMA250_SLOPE_FIRST_X__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_SLOPE_FIRST_Y__POS 1 #define BMA250_SLOPE_FIRST_Y__LEN 1 #define BMA250_SLOPE_FIRST_Y__MSK 0x02 #define BMA250_SLOPE_FIRST_Y__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_SLOPE_FIRST_Z__POS 2 #define BMA250_SLOPE_FIRST_Z__LEN 1 #define BMA250_SLOPE_FIRST_Z__MSK 0x04 #define BMA250_SLOPE_FIRST_Z__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_SLOPE_SIGN_S__POS 3 #define BMA250_SLOPE_SIGN_S__LEN 1 #define BMA250_SLOPE_SIGN_S__MSK 0x08 #define BMA250_SLOPE_SIGN_S__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_TAP_FIRST_X__POS 4 #define BMA250_TAP_FIRST_X__LEN 1 #define BMA250_TAP_FIRST_X__MSK 0x10 #define BMA250_TAP_FIRST_X__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_TAP_FIRST_Y__POS 5 #define BMA250_TAP_FIRST_Y__LEN 1 #define BMA250_TAP_FIRST_Y__MSK 0x20 #define BMA250_TAP_FIRST_Y__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_TAP_FIRST_Z__POS 6 #define BMA250_TAP_FIRST_Z__LEN 1 #define BMA250_TAP_FIRST_Z__MSK 0x40 #define BMA250_TAP_FIRST_Z__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_TAP_FIRST_XYZ__POS 4 #define BMA250_TAP_FIRST_XYZ__LEN 3 #define BMA250_TAP_FIRST_XYZ__MSK 0x70 #define BMA250_TAP_FIRST_XYZ__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_TAP_SIGN_S__POS 7 #define BMA250_TAP_SIGN_S__LEN 1 #define BMA250_TAP_SIGN_S__MSK 0x80 #define BMA250_TAP_SIGN_S__REG BMA250_STATUS_TAP_SLOPE_REG #define BMA250_HIGHG_FIRST_X__POS 0 #define BMA250_HIGHG_FIRST_X__LEN 1 #define BMA250_HIGHG_FIRST_X__MSK 0x01 #define BMA250_HIGHG_FIRST_X__REG BMA250_STATUS_ORIENT_HIGH_REG #define BMA250_HIGHG_FIRST_Y__POS 1 #define BMA250_HIGHG_FIRST_Y__LEN 1 
#define BMA250_HIGHG_FIRST_Y__MSK 0x02 #define BMA250_HIGHG_FIRST_Y__REG BMA250_STATUS_ORIENT_HIGH_REG #define BMA250_HIGHG_FIRST_Z__POS 2 #define BMA250_HIGHG_FIRST_Z__LEN 1 #define BMA250_HIGHG_FIRST_Z__MSK 0x04 #define BMA250_HIGHG_FIRST_Z__REG BMA250_STATUS_ORIENT_HIGH_REG #define BMA250_HIGHG_SIGN_S__POS 3 #define BMA250_HIGHG_SIGN_S__LEN 1 #define BMA250_HIGHG_SIGN_S__MSK 0x08 #define BMA250_HIGHG_SIGN_S__REG BMA250_STATUS_ORIENT_HIGH_REG #define BMA250_ORIENT_S__POS 4 #define BMA250_ORIENT_S__LEN 3 #define BMA250_ORIENT_S__MSK 0x70 #define BMA250_ORIENT_S__REG BMA250_STATUS_ORIENT_HIGH_REG #define BMA250_FLAT_S__POS 7 #define BMA250_FLAT_S__LEN 1 #define BMA250_FLAT_S__MSK 0x80 #define BMA250_FLAT_S__REG BMA250_STATUS_ORIENT_HIGH_REG #define BMA250_EN_SLOPE_X_INT__POS 0 #define BMA250_EN_SLOPE_X_INT__LEN 1 #define BMA250_EN_SLOPE_X_INT__MSK 0x01 #define BMA250_EN_SLOPE_X_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_SLOPE_Y_INT__POS 1 #define BMA250_EN_SLOPE_Y_INT__LEN 1 #define BMA250_EN_SLOPE_Y_INT__MSK 0x02 #define BMA250_EN_SLOPE_Y_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_SLOPE_Z_INT__POS 2 #define BMA250_EN_SLOPE_Z_INT__LEN 1 #define BMA250_EN_SLOPE_Z_INT__MSK 0x04 #define BMA250_EN_SLOPE_Z_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_SLOPE_XYZ_INT__POS 0 #define BMA250_EN_SLOPE_XYZ_INT__LEN 3 #define BMA250_EN_SLOPE_XYZ_INT__MSK 0x07 #define BMA250_EN_SLOPE_XYZ_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_DOUBLE_TAP_INT__POS 4 #define BMA250_EN_DOUBLE_TAP_INT__LEN 1 #define BMA250_EN_DOUBLE_TAP_INT__MSK 0x10 #define BMA250_EN_DOUBLE_TAP_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_SINGLE_TAP_INT__POS 5 #define BMA250_EN_SINGLE_TAP_INT__LEN 1 #define BMA250_EN_SINGLE_TAP_INT__MSK 0x20 #define BMA250_EN_SINGLE_TAP_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_ORIENT_INT__POS 6 #define BMA250_EN_ORIENT_INT__LEN 1 #define BMA250_EN_ORIENT_INT__MSK 0x40 #define BMA250_EN_ORIENT_INT__REG BMA250_INT_ENABLE1_REG #define 
BMA250_EN_FLAT_INT__POS 7 #define BMA250_EN_FLAT_INT__LEN 1 #define BMA250_EN_FLAT_INT__MSK 0x80 #define BMA250_EN_FLAT_INT__REG BMA250_INT_ENABLE1_REG #define BMA250_EN_HIGHG_X_INT__POS 0 #define BMA250_EN_HIGHG_X_INT__LEN 1 #define BMA250_EN_HIGHG_X_INT__MSK 0x01 #define BMA250_EN_HIGHG_X_INT__REG BMA250_INT_ENABLE2_REG #define BMA250_EN_HIGHG_Y_INT__POS 1 #define BMA250_EN_HIGHG_Y_INT__LEN 1 #define BMA250_EN_HIGHG_Y_INT__MSK 0x02 #define BMA250_EN_HIGHG_Y_INT__REG BMA250_INT_ENABLE2_REG #define BMA250_EN_HIGHG_Z_INT__POS 2 #define BMA250_EN_HIGHG_Z_INT__LEN 1 #define BMA250_EN_HIGHG_Z_INT__MSK 0x04 #define BMA250_EN_HIGHG_Z_INT__REG BMA250_INT_ENABLE2_REG #define BMA250_EN_HIGHG_XYZ_INT__POS 2 #define BMA250_EN_HIGHG_XYZ_INT__LEN 1 #define BMA250_EN_HIGHG_XYZ_INT__MSK 0x04 #define BMA250_EN_HIGHG_XYZ_INT__REG BMA250_INT_ENABLE2_REG #define BMA250_EN_LOWG_INT__POS 3 #define BMA250_EN_LOWG_INT__LEN 1 #define BMA250_EN_LOWG_INT__MSK 0x08 #define BMA250_EN_LOWG_INT__REG BMA250_INT_ENABLE2_REG #define BMA250_EN_NEW_DATA_INT__POS 4 #define BMA250_EN_NEW_DATA_INT__LEN 1 #define BMA250_EN_NEW_DATA_INT__MSK 0x10 #define BMA250_EN_NEW_DATA_INT__REG BMA250_INT_ENABLE2_REG #define BMA250_EN_INT1_PAD_LOWG__POS 0 #define BMA250_EN_INT1_PAD_LOWG__LEN 1 #define BMA250_EN_INT1_PAD_LOWG__MSK 0x01 #define BMA250_EN_INT1_PAD_LOWG__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT1_PAD_HIGHG__POS 1 #define BMA250_EN_INT1_PAD_HIGHG__LEN 1 #define BMA250_EN_INT1_PAD_HIGHG__MSK 0x02 #define BMA250_EN_INT1_PAD_HIGHG__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT1_PAD_SLOPE__POS 2 #define BMA250_EN_INT1_PAD_SLOPE__LEN 1 #define BMA250_EN_INT1_PAD_SLOPE__MSK 0x04 #define BMA250_EN_INT1_PAD_SLOPE__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT1_PAD_DB_TAP__POS 4 #define BMA250_EN_INT1_PAD_DB_TAP__LEN 1 #define BMA250_EN_INT1_PAD_DB_TAP__MSK 0x10 #define BMA250_EN_INT1_PAD_DB_TAP__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT1_PAD_SNG_TAP__POS 5 #define 
BMA250_EN_INT1_PAD_SNG_TAP__LEN 1 #define BMA250_EN_INT1_PAD_SNG_TAP__MSK 0x20 #define BMA250_EN_INT1_PAD_SNG_TAP__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT1_PAD_ORIENT__POS 6 #define BMA250_EN_INT1_PAD_ORIENT__LEN 1 #define BMA250_EN_INT1_PAD_ORIENT__MSK 0x40 #define BMA250_EN_INT1_PAD_ORIENT__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT1_PAD_FLAT__POS 7 #define BMA250_EN_INT1_PAD_FLAT__LEN 1 #define BMA250_EN_INT1_PAD_FLAT__MSK 0x80 #define BMA250_EN_INT1_PAD_FLAT__REG BMA250_INT1_PAD_SEL_REG #define BMA250_EN_INT2_PAD_LOWG__POS 0 #define BMA250_EN_INT2_PAD_LOWG__LEN 1 #define BMA250_EN_INT2_PAD_LOWG__MSK 0x01 #define BMA250_EN_INT2_PAD_LOWG__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT2_PAD_HIGHG__POS 1 #define BMA250_EN_INT2_PAD_HIGHG__LEN 1 #define BMA250_EN_INT2_PAD_HIGHG__MSK 0x02 #define BMA250_EN_INT2_PAD_HIGHG__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT2_PAD_SLOPE__POS 2 #define BMA250_EN_INT2_PAD_SLOPE__LEN 1 #define BMA250_EN_INT2_PAD_SLOPE__MSK 0x04 #define BMA250_EN_INT2_PAD_SLOPE__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT2_PAD_DB_TAP__POS 4 #define BMA250_EN_INT2_PAD_DB_TAP__LEN 1 #define BMA250_EN_INT2_PAD_DB_TAP__MSK 0x10 #define BMA250_EN_INT2_PAD_DB_TAP__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT2_PAD_SNG_TAP__POS 5 #define BMA250_EN_INT2_PAD_SNG_TAP__LEN 1 #define BMA250_EN_INT2_PAD_SNG_TAP__MSK 0x20 #define BMA250_EN_INT2_PAD_SNG_TAP__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT2_PAD_ORIENT__POS 6 #define BMA250_EN_INT2_PAD_ORIENT__LEN 1 #define BMA250_EN_INT2_PAD_ORIENT__MSK 0x40 #define BMA250_EN_INT2_PAD_ORIENT__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT2_PAD_FLAT__POS 7 #define BMA250_EN_INT2_PAD_FLAT__LEN 1 #define BMA250_EN_INT2_PAD_FLAT__MSK 0x80 #define BMA250_EN_INT2_PAD_FLAT__REG BMA250_INT2_PAD_SEL_REG #define BMA250_EN_INT1_PAD_NEWDATA__POS 0 #define BMA250_EN_INT1_PAD_NEWDATA__LEN 1 #define BMA250_EN_INT1_PAD_NEWDATA__MSK 0x01 #define BMA250_EN_INT1_PAD_NEWDATA__REG 
BMA250_INT_DATA_SEL_REG #define BMA250_EN_INT2_PAD_NEWDATA__POS 7 #define BMA250_EN_INT2_PAD_NEWDATA__LEN 1 #define BMA250_EN_INT2_PAD_NEWDATA__MSK 0x80 #define BMA250_EN_INT2_PAD_NEWDATA__REG BMA250_INT_DATA_SEL_REG #define BMA250_UNFILT_INT_SRC_LOWG__POS 0 #define BMA250_UNFILT_INT_SRC_LOWG__LEN 1 #define BMA250_UNFILT_INT_SRC_LOWG__MSK 0x01 #define BMA250_UNFILT_INT_SRC_LOWG__REG BMA250_INT_SRC_REG #define BMA250_UNFILT_INT_SRC_HIGHG__POS 1 #define BMA250_UNFILT_INT_SRC_HIGHG__LEN 1 #define BMA250_UNFILT_INT_SRC_HIGHG__MSK 0x02 #define BMA250_UNFILT_INT_SRC_HIGHG__REG BMA250_INT_SRC_REG #define BMA250_UNFILT_INT_SRC_SLOPE__POS 2 #define BMA250_UNFILT_INT_SRC_SLOPE__LEN 1 #define BMA250_UNFILT_INT_SRC_SLOPE__MSK 0x04 #define BMA250_UNFILT_INT_SRC_SLOPE__REG BMA250_INT_SRC_REG #define BMA250_UNFILT_INT_SRC_TAP__POS 4 #define BMA250_UNFILT_INT_SRC_TAP__LEN 1 #define BMA250_UNFILT_INT_SRC_TAP__MSK 0x10 #define BMA250_UNFILT_INT_SRC_TAP__REG BMA250_INT_SRC_REG #define BMA250_UNFILT_INT_SRC_DATA__POS 5 #define BMA250_UNFILT_INT_SRC_DATA__LEN 1 #define BMA250_UNFILT_INT_SRC_DATA__MSK 0x20 #define BMA250_UNFILT_INT_SRC_DATA__REG BMA250_INT_SRC_REG #define BMA250_INT1_PAD_ACTIVE_LEVEL__POS 0 #define BMA250_INT1_PAD_ACTIVE_LEVEL__LEN 1 #define BMA250_INT1_PAD_ACTIVE_LEVEL__MSK 0x01 #define BMA250_INT1_PAD_ACTIVE_LEVEL__REG BMA250_INT_SET_REG #define BMA250_INT2_PAD_ACTIVE_LEVEL__POS 2 #define BMA250_INT2_PAD_ACTIVE_LEVEL__LEN 1 #define BMA250_INT2_PAD_ACTIVE_LEVEL__MSK 0x04 #define BMA250_INT2_PAD_ACTIVE_LEVEL__REG BMA250_INT_SET_REG #define BMA250_INT1_PAD_OUTPUT_TYPE__POS 1 #define BMA250_INT1_PAD_OUTPUT_TYPE__LEN 1 #define BMA250_INT1_PAD_OUTPUT_TYPE__MSK 0x02 #define BMA250_INT1_PAD_OUTPUT_TYPE__REG BMA250_INT_SET_REG #define BMA250_INT2_PAD_OUTPUT_TYPE__POS 3 #define BMA250_INT2_PAD_OUTPUT_TYPE__LEN 1 #define BMA250_INT2_PAD_OUTPUT_TYPE__MSK 0x08 #define BMA250_INT2_PAD_OUTPUT_TYPE__REG BMA250_INT_SET_REG #define BMA250_INT_MODE_SEL__POS 0 #define 
BMA250_INT_MODE_SEL__LEN 4 #define BMA250_INT_MODE_SEL__MSK 0x0F #define BMA250_INT_MODE_SEL__REG BMA250_INT_CTRL_REG #define BMA250_INT_RESET_LATCHED__POS 7 #define BMA250_INT_RESET_LATCHED__LEN 1 #define BMA250_INT_RESET_LATCHED__MSK 0x80 #define BMA250_INT_RESET_LATCHED__REG BMA250_INT_CTRL_REG #define BMA250_LOWG_DUR__POS 0 #define BMA250_LOWG_DUR__LEN 8 #define BMA250_LOWG_DUR__MSK 0xFF #define BMA250_LOWG_DUR__REG BMA250_LOW_DURN_REG #define BMA250_LOWG_THRES__POS 0 #define BMA250_LOWG_THRES__LEN 8 #define BMA250_LOWG_THRES__MSK 0xFF #define BMA250_LOWG_THRES__REG BMA250_LOW_THRES_REG #define BMA250_LOWG_HYST__POS 0 #define BMA250_LOWG_HYST__LEN 2 #define BMA250_LOWG_HYST__MSK 0x03 #define BMA250_LOWG_HYST__REG BMA250_LOW_HIGH_HYST_REG #define BMA250_LOWG_INT_MODE__POS 2 #define BMA250_LOWG_INT_MODE__LEN 1 #define BMA250_LOWG_INT_MODE__MSK 0x04 #define BMA250_LOWG_INT_MODE__REG BMA250_LOW_HIGH_HYST_REG #define BMA250_HIGHG_DUR__POS 0 #define BMA250_HIGHG_DUR__LEN 8 #define BMA250_HIGHG_DUR__MSK 0xFF #define BMA250_HIGHG_DUR__REG BMA250_HIGH_DURN_REG #define BMA250_HIGHG_THRES__POS 0 #define BMA250_HIGHG_THRES__LEN 8 #define BMA250_HIGHG_THRES__MSK 0xFF #define BMA250_HIGHG_THRES__REG BMA250_HIGH_THRES_REG #define BMA250_HIGHG_HYST__POS 6 #define BMA250_HIGHG_HYST__LEN 2 #define BMA250_HIGHG_HYST__MSK 0xC0 #define BMA250_HIGHG_HYST__REG BMA250_LOW_HIGH_HYST_REG #define BMA250_SLOPE_DUR__POS 0 #define BMA250_SLOPE_DUR__LEN 2 #define BMA250_SLOPE_DUR__MSK 0x03 #define BMA250_SLOPE_DUR__REG BMA250_SLOPE_DURN_REG #define BMA250_SLOPE_THRES__POS 0 #define BMA250_SLOPE_THRES__LEN 8 #define BMA250_SLOPE_THRES__MSK 0xFF #define BMA250_SLOPE_THRES__REG BMA250_SLOPE_THRES_REG #define BMA250_TAP_DUR__POS 0 #define BMA250_TAP_DUR__LEN 3 #define BMA250_TAP_DUR__MSK 0x07 #define BMA250_TAP_DUR__REG BMA250_TAP_PARAM_REG #define BMA250_TAP_SHOCK_DURN__POS 6 #define BMA250_TAP_SHOCK_DURN__LEN 1 #define BMA250_TAP_SHOCK_DURN__MSK 0x40 #define BMA250_TAP_SHOCK_DURN__REG 
BMA250_TAP_PARAM_REG #define BMA250_TAP_QUIET_DURN__POS 7 #define BMA250_TAP_QUIET_DURN__LEN 1 #define BMA250_TAP_QUIET_DURN__MSK 0x80 #define BMA250_TAP_QUIET_DURN__REG BMA250_TAP_PARAM_REG #define BMA250_TAP_THRES__POS 0 #define BMA250_TAP_THRES__LEN 5 #define BMA250_TAP_THRES__MSK 0x1F #define BMA250_TAP_THRES__REG BMA250_TAP_THRES_REG #define BMA250_TAP_SAMPLES__POS 6 #define BMA250_TAP_SAMPLES__LEN 2 #define BMA250_TAP_SAMPLES__MSK 0xC0 #define BMA250_TAP_SAMPLES__REG BMA250_TAP_THRES_REG #define BMA250_ORIENT_MODE__POS 0 #define BMA250_ORIENT_MODE__LEN 2 #define BMA250_ORIENT_MODE__MSK 0x03 #define BMA250_ORIENT_MODE__REG BMA250_ORIENT_PARAM_REG #define BMA250_ORIENT_BLOCK__POS 2 #define BMA250_ORIENT_BLOCK__LEN 2 #define BMA250_ORIENT_BLOCK__MSK 0x0C #define BMA250_ORIENT_BLOCK__REG BMA250_ORIENT_PARAM_REG #define BMA250_ORIENT_HYST__POS 4 #define BMA250_ORIENT_HYST__LEN 3 #define BMA250_ORIENT_HYST__MSK 0x70 #define BMA250_ORIENT_HYST__REG BMA250_ORIENT_PARAM_REG #define BMA250_ORIENT_AXIS__POS 7 #define BMA250_ORIENT_AXIS__LEN 1 #define BMA250_ORIENT_AXIS__MSK 0x80 #define BMA250_ORIENT_AXIS__REG BMA250_THETA_BLOCK_REG #define BMA250_THETA_BLOCK__POS 0 #define BMA250_THETA_BLOCK__LEN 6 #define BMA250_THETA_BLOCK__MSK 0x3F #define BMA250_THETA_BLOCK__REG BMA250_THETA_BLOCK_REG #define BMA250_THETA_FLAT__POS 0 #define BMA250_THETA_FLAT__LEN 6 #define BMA250_THETA_FLAT__MSK 0x3F #define BMA250_THETA_FLAT__REG BMA250_THETA_FLAT_REG #define BMA250_FLAT_HOLD_TIME__POS 4 #define BMA250_FLAT_HOLD_TIME__LEN 2 #define BMA250_FLAT_HOLD_TIME__MSK 0x30 #define BMA250_FLAT_HOLD_TIME__REG BMA250_FLAT_HOLD_TIME_REG #define BMA250_EN_SELF_TEST__POS 0 #define BMA250_EN_SELF_TEST__LEN 2 #define BMA250_EN_SELF_TEST__MSK 0x03 #define BMA250_EN_SELF_TEST__REG BMA250_SELF_TEST_REG #define BMA250_NEG_SELF_TEST__POS 2 #define BMA250_NEG_SELF_TEST__LEN 1 #define BMA250_NEG_SELF_TEST__MSK 0x04 #define BMA250_NEG_SELF_TEST__REG BMA250_SELF_TEST_REG #define 
BMA250_LOW_POWER_MODE_S__POS 0 #define BMA250_LOW_POWER_MODE_S__LEN 1 #define BMA250_LOW_POWER_MODE_S__MSK 0x01 #define BMA250_LOW_POWER_MODE_S__REG BMA250_STATUS_LOW_POWER_REG #define BMA250_EN_FAST_COMP__POS 5 #define BMA250_EN_FAST_COMP__LEN 2 #define BMA250_EN_FAST_COMP__MSK 0x60 #define BMA250_EN_FAST_COMP__REG BMA250_OFFSET_CTRL_REG #define BMA250_FAST_COMP_RDY_S__POS 4 #define BMA250_FAST_COMP_RDY_S__LEN 1 #define BMA250_FAST_COMP_RDY_S__MSK 0x10 #define BMA250_FAST_COMP_RDY_S__REG BMA250_OFFSET_CTRL_REG #define BMA250_COMP_TARGET_OFFSET_X__POS 1 #define BMA250_COMP_TARGET_OFFSET_X__LEN 2 #define BMA250_COMP_TARGET_OFFSET_X__MSK 0x06 #define BMA250_COMP_TARGET_OFFSET_X__REG BMA250_OFFSET_PARAMS_REG #define BMA250_COMP_TARGET_OFFSET_Y__POS 3 #define BMA250_COMP_TARGET_OFFSET_Y__LEN 2 #define BMA250_COMP_TARGET_OFFSET_Y__MSK 0x18 #define BMA250_COMP_TARGET_OFFSET_Y__REG BMA250_OFFSET_PARAMS_REG #define BMA250_COMP_TARGET_OFFSET_Z__POS 5 #define BMA250_COMP_TARGET_OFFSET_Z__LEN 2 #define BMA250_COMP_TARGET_OFFSET_Z__MSK 0x60 #define BMA250_COMP_TARGET_OFFSET_Z__REG BMA250_OFFSET_PARAMS_REG #define BMA250_UNLOCK_EE_WRITE_SETTING__POS 0 #define BMA250_UNLOCK_EE_WRITE_SETTING__LEN 1 #define BMA250_UNLOCK_EE_WRITE_SETTING__MSK 0x01 #define BMA250_UNLOCK_EE_WRITE_SETTING__REG BMA250_EEPROM_CTRL_REG #define BMA250_START_EE_WRITE_SETTING__POS 1 #define BMA250_START_EE_WRITE_SETTING__LEN 1 #define BMA250_START_EE_WRITE_SETTING__MSK 0x02 #define BMA250_START_EE_WRITE_SETTING__REG BMA250_EEPROM_CTRL_REG #define BMA250_EE_WRITE_SETTING_S__POS 2 #define BMA250_EE_WRITE_SETTING_S__LEN 1 #define BMA250_EE_WRITE_SETTING_S__MSK 0x04 #define BMA250_EE_WRITE_SETTING_S__REG BMA250_EEPROM_CTRL_REG #define BMA250_EN_SOFT_RESET__POS 0 #define BMA250_EN_SOFT_RESET__LEN 8 #define BMA250_EN_SOFT_RESET__MSK 0xFF #define BMA250_EN_SOFT_RESET__REG BMA250_RESET_REG #define BMA250_EN_SOFT_RESET_VALUE 0xB6 #define BMA250_RANGE_2G 0 #define BMA250_RANGE_4G 1 #define BMA250_RANGE_8G 2 #define 
BMA250_RANGE_16G 3 #define BMA250_BW_7_81HZ 0x08 #define BMA250_BW_15_63HZ 0x09 #define BMA250_BW_31_25HZ 0x0A #define BMA250_BW_62_50HZ 0x0B #define BMA250_BW_125HZ 0x0C #define BMA250_BW_250HZ 0x0D #define BMA250_BW_500HZ 0x0E #define BMA250_BW_1000HZ 0x0F #define BMA250_MODE_NORMAL 0 #define BMA250_MODE_LOWPOWER 1 #define BMA250_MODE_SUSPEND 2 #define BMA250_GET_BITSLICE(regvar, bitname)\ ((regvar & bitname##__MSK) >> bitname##__POS) #define BMA250_SET_BITSLICE(regvar, bitname, val)\ ((regvar & ~bitname##__MSK) | ((val<<bitname##__POS)&bitname##__MSK)) /* function which gets slave data and sends it to SLAVE */ struct slaveirq_dev_data { struct miscdevice dev; #ifdef CONFIG_CIR_ALWAYS_READY struct i2c_client *slave_client; #endif struct mpuirq_data data; wait_queue_head_t slaveirq_wait; int irq; int pid; int data_ready; int timeout; struct work_struct bma_irq_work; struct i2c_adapter * adapter; struct input_dev *input; #ifdef CONFIG_CIR_ALWAYS_READY struct input_dev *input_cir; struct wake_lock cir_always_ready_wake_lock; int wake_lock_inited; #endif }; /* The following depends on patch fa1f68db6ca7ebb6fc4487ac215bffba06c01c28 * drivers: misc: pass miscdevice pointer via file private data */ static int slaveirq_open(struct inode *inode, struct file *file) { /* Device node is availabe in the file->private_data, this is * exactly what we want so we leave it there */ struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); dev_dbg(data->dev.this_device, "%s current->pid %d\n", __func__, current->pid); data->pid = current->pid; return 0; } static int slaveirq_release(struct inode *inode, struct file *file) { struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); dev_dbg(data->dev.this_device, "slaveirq_release\n"); return 0; } /* read function called when from /dev/slaveirq is read */ static ssize_t slaveirq_read(struct file *file, char *buf, size_t count, loff_t *ppos) { int len, 
err; struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); if (!data->data_ready) { wait_event_interruptible_timeout(data->slaveirq_wait, data->data_ready, data->timeout); } if (data->data_ready && NULL != buf && count >= sizeof(data->data)) { err = copy_to_user(buf, &data->data, sizeof(data->data)); data->data.data_type = 0; } else { return 0; } if (err != 0) { dev_err(data->dev.this_device, "Copy to user returned %d\n", err); return -EFAULT; } data->data_ready = 0; len = sizeof(data->data); return len; } unsigned int slaveirq_poll(struct file *file, struct poll_table_struct *poll) { int mask = 0; struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); poll_wait(file, &data->slaveirq_wait, poll); if (data->data_ready) mask |= POLLIN | POLLRDNORM; return mask; } /* ioctl - I/O control */ static long slaveirq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int retval = 0; int tmp; struct slaveirq_dev_data *data = container_of(file->private_data, struct slaveirq_dev_data, dev); switch (cmd) { case SLAVEIRQ_SET_TIMEOUT: data->timeout = arg; break; case SLAVEIRQ_GET_INTERRUPT_CNT: tmp = data->data.interruptcount - 1; if (data->data.interruptcount > 1) data->data.interruptcount = 1; if (copy_to_user((int *) arg, &tmp, sizeof(int))) return -EFAULT; break; case SLAVEIRQ_GET_IRQ_TIME: if (copy_to_user((int *) arg, &data->data.irqtime, sizeof(data->data.irqtime))) return -EFAULT; data->data.irqtime = 0; break; default: retval = -EINVAL; } return retval; } static irqreturn_t slaveirq_handler(int irq, void *dev_id) { struct slaveirq_dev_data *data = (struct slaveirq_dev_data *)dev_id; static int mycount; struct timeval irqtime; mycount++; data->data.interruptcount++; /* wake up (unblock) for reading data from userspace */ /* and ignore first interrupt generated in module init */ data->data_ready = 1; do_gettimeofday(&irqtime); data->data.irqtime = (((long long) 
irqtime.tv_sec) << 32); data->data.irqtime += irqtime.tv_usec; data->data.data_type |= 1; wake_up_interruptible(&data->slaveirq_wait); return IRQ_HANDLED; } /* define which file operations are supported */ static const struct file_operations slaveirq_fops = { .owner = THIS_MODULE, .read = slaveirq_read, .poll = slaveirq_poll, #if HAVE_COMPAT_IOCTL .compat_ioctl = slaveirq_ioctl, #endif #if HAVE_UNLOCKED_IOCTL .unlocked_ioctl = slaveirq_ioctl, #endif .open = slaveirq_open, .release = slaveirq_release, }; #ifdef CONFIG_CIR_ALWAYS_READY static void bma250_irq_work_func(struct work_struct *work) { struct slaveirq_dev_data *bma250 = container_of((struct work_struct *)work, struct slaveirq_dev_data, bma_irq_work); wake_lock_timeout(&(bma250->cir_always_ready_wake_lock), 1*HZ); input_report_rel(bma250->input_cir, SLOP_INTERRUPT, SLOPE_INTERRUPT_X_NEGATIVE_HAPPENED); input_report_rel(bma250->input_cir, SLOP_INTERRUPT, SLOPE_INTERRUPT_Y_NEGATIVE_HAPPENED); input_report_rel(bma250->input_cir, SLOP_INTERRUPT, SLOPE_INTERRUPT_X_HAPPENED); input_report_rel(bma250->input_cir, SLOP_INTERRUPT, SLOPE_INTERRUPT_Y_HAPPENED); input_sync(bma250->input_cir); enable_irq(bma250->irq); } static irqreturn_t bma250_irq_handler(int irq, void *handle) { struct slaveirq_dev_data *data = handle; if (data) disable_irq_nosync(data->irq); if (data == NULL) return IRQ_HANDLED; if (data->slave_client == NULL) return IRQ_HANDLED; schedule_work(&data->bma_irq_work); return IRQ_HANDLED; } #endif int slaveirq_init(struct i2c_adapter *slave_adapter, #ifdef CONFIG_CIR_ALWAYS_READY struct i2c_client *client, #endif struct ext_slave_platform_data *pdata, char *name) { int res; struct slaveirq_dev_data *data; struct input_dev *dev = NULL; #ifdef CONFIG_CIR_ALWAYS_READY struct input_dev *dev_cir = NULL; #endif if (!pdata->irq) return -EINVAL; pdata->irq_data = kzalloc(sizeof(*data), GFP_KERNEL); data = (struct slaveirq_dev_data *) pdata->irq_data; if (!data) return -ENOMEM; data->dev.minor = 
MISC_DYNAMIC_MINOR; data->dev.name = name; data->dev.fops = &slaveirq_fops; data->irq = pdata->irq; data->pid = 0; data->data_ready = 0; data->timeout = 0; #ifdef CONFIG_CIR_ALWAYS_READY data->wake_lock_inited = 0; data->slave_client = client; #endif data->adapter = slave_adapter; init_waitqueue_head(&data->slaveirq_wait); if(strncmp(name,"accelirq",strlen("accelirq")) == 0){ dev = input_allocate_device(); if (!dev) return -ENOMEM; dev->name = SENSOR_NAME; dev->id.bustype = BUS_I2C; input_set_abs_params(dev, ABS_X, ABSMIN, ABSMAX, 0, 0); input_set_abs_params(dev, ABS_Y, ABSMIN, ABSMAX, 0, 0); input_set_abs_params(dev, ABS_Z, ABSMIN, ABSMAX, 0, 0); input_set_drvdata(dev, data); res = input_register_device(dev); if (res < 0) { goto err_register_input_device; } data->input = dev; #ifdef CONFIG_CIR_ALWAYS_READY dev_cir = input_allocate_device(); if (!dev_cir) { goto err_allocate_input_cir_devive; } dev_cir->name = "CIRSensor"; dev_cir->id.bustype = BUS_I2C; input_set_capability(dev_cir, EV_REL, SLOP_INTERRUPT); res = input_register_device(dev_cir); if (res < 0) { goto err_register_cir_input_device; } data->input_cir = dev_cir; INIT_WORK(&data->bma_irq_work, bma250_irq_work_func); res = request_irq(data->irq, bma250_irq_handler, IRQF_TRIGGER_RISING, "bma250", data); enable_irq_wake(data->irq); wake_lock_init(&(data->cir_always_ready_wake_lock), WAKE_LOCK_SUSPEND, "cir_always_ready"); data->wake_lock_inited = 1; #endif }else res = request_irq(data->irq, slaveirq_handler, IRQF_TRIGGER_RISING, data->dev.name, data); if (res) { dev_err(&slave_adapter->dev, "myirqtest: cannot register IRQ %d\n", data->irq); goto out_request_irq; } res = misc_register(&data->dev); if (res < 0) { dev_err(&slave_adapter->dev, "misc_register returned %d\n", res); goto out_misc_register; } return res; out_misc_register: free_irq(data->irq, data); out_request_irq: kfree(pdata->irq_data); pdata->irq_data = NULL; #ifdef CONFIG_CIR_ALWAYS_READY if(dev_cir != NULL) 
input_unregister_device(data->input_cir); err_register_cir_input_device: if(dev_cir != NULL) input_free_device(dev_cir); err_allocate_input_cir_devive: if(dev != NULL) input_unregister_device(data->input); #endif err_register_input_device: if(dev != NULL) input_free_device(dev); kfree(data); return res; } void slaveirq_exit(struct ext_slave_platform_data *pdata) { struct slaveirq_dev_data *data = pdata->irq_data; if (!pdata->irq_data || data->irq <= 0) return; dev_info(data->dev.this_device, "Unregistering %s\n", data->dev.name); #ifdef CONFIG_CIR_ALWAYS_READY if (data->wake_lock_inited == 1) { dev_info(data->dev.this_device, "Destroy always_ready_wake_lock\n"); wake_lock_destroy(&(data->cir_always_ready_wake_lock)); } #endif free_irq(data->irq, data); misc_deregister(&data->dev); kfree(pdata->irq_data); pdata->irq_data = NULL; }
gpl-2.0
joninvski/ts_7500_kernel
lib/kobject_uevent.c
18
7436
/* * kernel userspace event delivery * * Copyright (C) 2004 Red Hat, Inc. All rights reserved. * Copyright (C) 2004 Novell, Inc. All rights reserved. * Copyright (C) 2004 IBM, Inc. All rights reserved. * * Licensed under the GNU GPL v2. * * Authors: * Robert Love <rml@novell.com> * Kay Sievers <kay.sievers@vrfy.org> * Arjan van de Ven <arjanv@redhat.com> * Greg Kroah-Hartman <greg@kroah.com> */ #include <linux/spinlock.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/string.h> #include <linux/kobject.h> #include <net/sock.h> u64 uevent_seqnum; char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; static DEFINE_SPINLOCK(sequence_lock); #if defined(CONFIG_NET) static struct sock *uevent_sock; #endif /* the strings here must match the enum in include/linux/kobject.h */ static const char *kobject_actions[] = { [KOBJ_ADD] = "add", [KOBJ_REMOVE] = "remove", [KOBJ_CHANGE] = "change", [KOBJ_MOVE] = "move", [KOBJ_ONLINE] = "online", [KOBJ_OFFLINE] = "offline", }; /** * kobject_action_type - translate action string to numeric type * * @buf: buffer containing the action string, newline is ignored * @len: length of buffer * @type: pointer to the location to store the action type * * Returns 0 if the action string was recognized. 
*/ int kobject_action_type(const char *buf, size_t count, enum kobject_action *type) { enum kobject_action action; int ret = -EINVAL; if (count && buf[count-1] == '\n') count--; if (!count) goto out; for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) { if (strncmp(kobject_actions[action], buf, count) != 0) continue; if (kobject_actions[action][count] != '\0') continue; *type = action; ret = 0; break; } out: return ret; } /** * kobject_uevent_env - send an uevent with environmental data * * @action: action that is happening * @kobj: struct kobject that the action is happening to * @envp_ext: pointer to environmental data * * Returns 0 if kobject_uevent() is completed with success or the * corresponding error when it fails. */ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp_ext[]) { struct kobj_uevent_env *env; const char *action_string = kobject_actions[action]; const char *devpath = NULL; const char *subsystem; struct kobject *top_kobj; struct kset *kset; struct kset_uevent_ops *uevent_ops; u64 seq; int i = 0; int retval = 0; pr_debug("%s\n", __FUNCTION__); /* search the kset we belong to */ top_kobj = kobj; while (!top_kobj->kset && top_kobj->parent) top_kobj = top_kobj->parent; if (!top_kobj->kset) { pr_debug("kobject attempted to send uevent without kset!\n"); return -EINVAL; } kset = top_kobj->kset; uevent_ops = kset->uevent_ops; /* skip the event, if the filter returns zero. 
*/ if (uevent_ops && uevent_ops->filter) if (!uevent_ops->filter(kset, kobj)) { pr_debug("kobject filter function caused the event to drop!\n"); return 0; } /* originating subsystem */ if (uevent_ops && uevent_ops->name) subsystem = uevent_ops->name(kset, kobj); else subsystem = kobject_name(&kset->kobj); if (!subsystem) { pr_debug("unset subsystem caused the event to drop!\n"); return 0; } /* environment buffer */ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); if (!env) return -ENOMEM; /* complete object path */ devpath = kobject_get_path(kobj, GFP_KERNEL); if (!devpath) { retval = -ENOENT; goto exit; } /* default keys */ retval = add_uevent_var(env, "ACTION=%s", action_string); if (retval) goto exit; retval = add_uevent_var(env, "DEVPATH=%s", devpath); if (retval) goto exit; retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem); if (retval) goto exit; /* keys passed in from the caller */ if (envp_ext) { for (i = 0; envp_ext[i]; i++) { retval = add_uevent_var(env, envp_ext[i]); if (retval) goto exit; } } /* let the kset specific function add its stuff */ if (uevent_ops && uevent_ops->uevent) { retval = uevent_ops->uevent(kset, kobj, env); if (retval) { pr_debug ("%s - uevent() returned %d\n", __FUNCTION__, retval); goto exit; } } /* we will send an event, so request a new sequence number */ spin_lock(&sequence_lock); seq = ++uevent_seqnum; spin_unlock(&sequence_lock); retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq); if (retval) goto exit; #if defined(CONFIG_NET) /* send netlink message */ if (uevent_sock) { struct sk_buff *skb; size_t len; /* allocate message with the maximum possible size */ len = strlen(action_string) + strlen(devpath) + 2; skb = alloc_skb(len + env->buflen, GFP_KERNEL); if (skb) { char *scratch; /* add header */ scratch = skb_put(skb, len); sprintf(scratch, "%s@%s", action_string, devpath); /* copy keys to our continuous event payload buffer */ for (i = 0; i < env->envp_idx; i++) { len = strlen(env->envp[i]) + 
1; scratch = skb_put(skb, len); strcpy(scratch, env->envp[i]); } NETLINK_CB(skb).dst_group = 1; netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL); } } #endif /* call uevent_helper, usually only enabled during early boot */ if (uevent_helper[0]) { char *argv [3]; argv [0] = uevent_helper; argv [1] = (char *)subsystem; argv [2] = NULL; retval = add_uevent_var(env, "HOME=/"); if (retval) goto exit; retval = add_uevent_var(env, "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); if (retval) goto exit; call_usermodehelper (argv[0], argv, env->envp, UMH_WAIT_EXEC); } exit: kfree(devpath); kfree(env); return retval; } EXPORT_SYMBOL_GPL(kobject_uevent_env); /** * kobject_uevent - notify userspace by ending an uevent * * @action: action that is happening * @kobj: struct kobject that the action is happening to * * Returns 0 if kobject_uevent() is completed with success or the * corresponding error when it fails. */ int kobject_uevent(struct kobject *kobj, enum kobject_action action) { return kobject_uevent_env(kobj, action, NULL); } EXPORT_SYMBOL_GPL(kobject_uevent); /** * add_uevent_var - add key value string to the environment buffer * @env: environment buffer structure * @format: printf format for the key=value pair * * Returns 0 if environment variable was added successfully or -ENOMEM * if no space was available. */ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) 
{ va_list args; int len; if (env->envp_idx >= ARRAY_SIZE(env->envp)) { printk(KERN_ERR "add_uevent_var: too many keys\n"); WARN_ON(1); return -ENOMEM; } va_start(args, format); len = vsnprintf(&env->buf[env->buflen], sizeof(env->buf) - env->buflen, format, args); va_end(args); if (len >= (sizeof(env->buf) - env->buflen)) { printk(KERN_ERR "add_uevent_var: buffer size too small\n"); WARN_ON(1); return -ENOMEM; } env->envp[env->envp_idx++] = &env->buf[env->buflen]; env->buflen += len + 1; return 0; } EXPORT_SYMBOL_GPL(add_uevent_var); #if defined(CONFIG_NET) static int __init kobject_uevent_init(void) { uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT, 1, NULL, NULL, THIS_MODULE); if (!uevent_sock) { printk(KERN_ERR "kobject_uevent: unable to create netlink socket!\n"); return -ENODEV; } return 0; } postcore_initcall(kobject_uevent_init); #endif
gpl-2.0
mixianghang/mptcp
drivers/gpio/gpiolib.c
18
67257
#include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/device.h> #include <linux/err.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/idr.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/gpio/driver.h> #include "gpiolib.h" #define CREATE_TRACE_POINTS #include <trace/events/gpio.h> /* Implementation infrastructure for GPIO interfaces. * * The GPIO programming interface allows for inlining speed-critical * get/set operations for common cases, so that access to SOC-integrated * GPIOs can sometimes cost only an instruction or two per bit. */ /* When debugging, extend minimal trust to callers and platform code. * Also emit diagnostic messages that may help initial bringup, when * board setup or driver bugs are most common. * * Otherwise, minimize overhead in what may be bitbanging codepaths. */ #ifdef DEBUG #define extra_checks 1 #else #define extra_checks 0 #endif /* gpio_lock prevents conflicts during gpio_desc[] table updates. * While any GPIO is requested, its gpio_chip is not removable; * each GPIO's "requested" flag serves as a lock and refcount. 
*/ static DEFINE_SPINLOCK(gpio_lock); struct gpio_desc { struct gpio_chip *chip; unsigned long flags; /* flag symbols are bit numbers */ #define FLAG_REQUESTED 0 #define FLAG_IS_OUT 1 #define FLAG_EXPORT 2 /* protected by sysfs_lock */ #define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */ #define FLAG_TRIG_FALL 4 /* trigger on falling edge */ #define FLAG_TRIG_RISE 5 /* trigger on rising edge */ #define FLAG_ACTIVE_LOW 6 /* value has active low */ #define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ #define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ #define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ #define ID_SHIFT 16 /* add new flags before this one */ #define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1) #define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) #ifdef CONFIG_DEBUG_FS const char *label; #endif }; static struct gpio_desc gpio_desc[ARCH_NR_GPIOS]; #define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio) static DEFINE_MUTEX(gpio_lookup_lock); static LIST_HEAD(gpio_lookup_list); static LIST_HEAD(gpio_chips); #ifdef CONFIG_GPIO_SYSFS static DEFINE_IDR(dirent_idr); #endif static int gpiod_request(struct gpio_desc *desc, const char *label); static void gpiod_free(struct gpio_desc *desc); /* With descriptor prefix */ #ifdef CONFIG_DEBUG_FS #define gpiod_emerg(desc, fmt, ...) \ pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\ ##__VA_ARGS__) #define gpiod_crit(desc, fmt, ...) \ pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ ##__VA_ARGS__) #define gpiod_err(desc, fmt, ...) \ pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ ##__VA_ARGS__) #define gpiod_warn(desc, fmt, ...) \ pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ ##__VA_ARGS__) #define gpiod_info(desc, fmt, ...) \ pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ ##__VA_ARGS__) #define gpiod_dbg(desc, fmt, ...) 
\ pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\ ##__VA_ARGS__) #else #define gpiod_emerg(desc, fmt, ...) \ pr_emerg("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) #define gpiod_crit(desc, fmt, ...) \ pr_crit("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) #define gpiod_err(desc, fmt, ...) \ pr_err("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) #define gpiod_warn(desc, fmt, ...) \ pr_warn("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) #define gpiod_info(desc, fmt, ...) \ pr_info("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) #define gpiod_dbg(desc, fmt, ...) \ pr_debug("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) #endif /* With chip prefix */ #define chip_emerg(chip, fmt, ...) \ pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) #define chip_crit(chip, fmt, ...) \ pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) #define chip_err(chip, fmt, ...) \ pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) #define chip_warn(chip, fmt, ...) \ pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) #define chip_info(chip, fmt, ...) \ pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) #define chip_dbg(chip, fmt, ...) 
\ pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) static inline void desc_set_label(struct gpio_desc *d, const char *label) { #ifdef CONFIG_DEBUG_FS d->label = label; #endif } /* * Return the GPIO number of the passed descriptor relative to its chip */ static int gpio_chip_hwgpio(const struct gpio_desc *desc) { return desc - &desc->chip->desc[0]; } /** * Convert a GPIO number to its descriptor */ struct gpio_desc *gpio_to_desc(unsigned gpio) { if (WARN(!gpio_is_valid(gpio), "invalid GPIO %d\n", gpio)) return NULL; else return &gpio_desc[gpio]; } EXPORT_SYMBOL_GPL(gpio_to_desc); /** * Convert an offset on a certain chip to a corresponding descriptor */ static struct gpio_desc *gpiochip_offset_to_desc(struct gpio_chip *chip, unsigned int offset) { if (offset >= chip->ngpio) return ERR_PTR(-EINVAL); return &chip->desc[offset]; } /** * Convert a GPIO descriptor to the integer namespace. * This should disappear in the future but is needed since we still * use GPIO numbers for error messages and sysfs nodes */ int desc_to_gpio(const struct gpio_desc *desc) { return desc - &gpio_desc[0]; } EXPORT_SYMBOL_GPL(desc_to_gpio); /* Warn when drivers omit gpio_request() calls -- legal but ill-advised * when setting direction, and otherwise illegal. Until board setup code * and drivers use explicit requests everywhere (which won't happen when * those calls have no teeth) we can't avoid autorequesting. This nag * message should motivate switching to explicit requests... so should * the weaker cleanup after faults, compared to gpio_request(). * * NOTE: the autorequest mechanism is going away; at this point it's * only "legal" in the sense that (old) code using it won't break yet, * but instead only triggers a WARN() stack dump. 
*/ static int gpio_ensure_requested(struct gpio_desc *desc) { const struct gpio_chip *chip = desc->chip; const int gpio = desc_to_gpio(desc); if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0, "autorequest GPIO-%d\n", gpio)) { if (!try_module_get(chip->owner)) { gpiod_err(desc, "%s: module can't be gotten\n", __func__); clear_bit(FLAG_REQUESTED, &desc->flags); /* lose */ return -EIO; } desc_set_label(desc, "[auto]"); /* caller must chip->request() w/o spinlock */ if (chip->request) return 1; } return 0; } /** * gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs * @desc: descriptor to return the chip of */ struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) { return desc ? desc->chip : NULL; } EXPORT_SYMBOL_GPL(gpiod_to_chip); /* dynamic allocation of GPIOs, e.g. on a hotplugged device */ static int gpiochip_find_base(int ngpio) { struct gpio_chip *chip; int base = ARCH_NR_GPIOS - ngpio; list_for_each_entry_reverse(chip, &gpio_chips, list) { /* found a free space? */ if (chip->base + chip->ngpio <= base) break; else /* nope, check the space right before the chip */ base = chip->base - ngpio; } if (gpio_is_valid(base)) { pr_debug("%s: found new base at %d\n", __func__, base); return base; } else { pr_err("%s: cannot find free range\n", __func__); return -ENOSPC; } } /** * gpiod_get_direction - return the current direction of a GPIO * @desc: GPIO to get the direction of * * Return GPIOF_DIR_IN or GPIOF_DIR_OUT, or an error code in case of error. * * This function may sleep if gpiod_cansleep() is true. 
*/ int gpiod_get_direction(const struct gpio_desc *desc) { struct gpio_chip *chip; unsigned offset; int status = -EINVAL; chip = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); if (!chip->get_direction) return status; status = chip->get_direction(chip, offset); if (status > 0) { /* GPIOF_DIR_IN, or other positive */ status = 1; /* FLAG_IS_OUT is just a cache of the result of get_direction(), * so it does not affect constness per se */ clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags); } if (status == 0) { /* GPIOF_DIR_OUT */ set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags); } return status; } EXPORT_SYMBOL_GPL(gpiod_get_direction); #ifdef CONFIG_GPIO_SYSFS /* lock protects against unexport_gpio() being called while * sysfs files are active. */ static DEFINE_MUTEX(sysfs_lock); /* * /sys/class/gpio/gpioN... only for GPIOs that are exported * /direction * * MAY BE OMITTED if kernel won't allow direction changes * * is read/write as "in" or "out" * * may also be written as "high" or "low", initializing * output value as specified ("out" implies "low") * /value * * always readable, subject to hardware behavior * * may be writable, as zero/nonzero * /edge * * configures behavior of poll(2) on /value * * available only if pin can generate IRQs on input * * is read/write as "none", "falling", "rising", or "both" * /active_low * * configures polarity of /value * * is read/write as zero/nonzero * * also affects existing and subsequent "falling" and "rising" * /edge configuration */ static ssize_t gpio_direction_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) { status = -EIO; } else { gpiod_get_direction(desc); status = sprintf(buf, "%s\n", test_bit(FLAG_IS_OUT, &desc->flags) ? 
"out" : "in"); } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_direction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (sysfs_streq(buf, "high")) status = gpiod_direction_output(desc, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) status = gpiod_direction_output(desc, 0); else if (sysfs_streq(buf, "in")) status = gpiod_direction_input(desc); else status = -EINVAL; mutex_unlock(&sysfs_lock); return status ? : size; } static /* const */ DEVICE_ATTR(direction, 0644, gpio_direction_show, gpio_direction_store); static ssize_t gpio_value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (!test_bit(FLAG_IS_OUT, &desc->flags)) status = -EPERM; else { long value; status = kstrtol(buf, 0, &value); if (status == 0) { gpiod_set_value_cansleep(desc, value); status = size; } } mutex_unlock(&sysfs_lock); return status; } static DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { struct kernfs_node *value_sd = priv; sysfs_notify_dirent(value_sd); return IRQ_HANDLED; } static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, unsigned long gpio_flags) { struct kernfs_node *value_sd; unsigned long irq_flags; int ret, irq, id; if ((desc->flags & 
GPIO_TRIGGER_MASK) == gpio_flags) return 0; irq = gpiod_to_irq(desc); if (irq < 0) return -EIO; id = desc->flags >> ID_SHIFT; value_sd = idr_find(&dirent_idr, id); if (value_sd) free_irq(irq, value_sd); desc->flags &= ~GPIO_TRIGGER_MASK; if (!gpio_flags) { gpiod_unlock_as_irq(desc); ret = 0; goto free_id; } irq_flags = IRQF_SHARED; if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; if (!value_sd) { value_sd = sysfs_get_dirent(dev->kobj.sd, "value"); if (!value_sd) { ret = -ENODEV; goto err_out; } ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL); if (ret < 0) goto free_sd; id = ret; desc->flags &= GPIO_FLAGS_MASK; desc->flags |= (unsigned long)id << ID_SHIFT; if (desc->flags >> ID_SHIFT != id) { ret = -ERANGE; goto free_id; } } ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags, "gpiolib", value_sd); if (ret < 0) goto free_id; ret = gpiod_lock_as_irq(desc); if (ret < 0) { gpiod_warn(desc, "failed to flag the GPIO for IRQ\n"); goto free_id; } desc->flags |= gpio_flags; return 0; free_id: idr_remove(&dirent_idr, id); desc->flags &= GPIO_FLAGS_MASK; free_sd: if (value_sd) sysfs_put(value_sd); err_out: return ret; } static const struct { const char *name; unsigned long flags; } trigger_types[] = { { "none", 0 }, { "falling", BIT(FLAG_TRIG_FALL) }, { "rising", BIT(FLAG_TRIG_RISE) }, { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, }; static ssize_t gpio_edge_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { int i; status = 0; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if ((desc->flags & GPIO_TRIGGER_MASK) == trigger_types[i].flags) { status 
= sprintf(buf, "%s\n", trigger_types[i].name); break; } } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_edge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; int i; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if (sysfs_streq(trigger_types[i].name, buf)) goto found; return -EINVAL; found: mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { status = gpio_setup_irq(desc, dev, trigger_types[i].flags); if (!status) status = size; } mutex_unlock(&sysfs_lock); return status; } static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, int value) { int status = 0; if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) return 0; if (value) set_bit(FLAG_ACTIVE_LOW, &desc->flags); else clear_bit(FLAG_ACTIVE_LOW, &desc->flags); /* reconfigure poll(2) support if enabled on one edge only */ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; gpio_setup_irq(desc, dev, 0); status = gpio_setup_irq(desc, dev, trigger_flags); } return status; } static ssize_t gpio_active_low_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%d\n", !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_active_low_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) { status = -EIO; } else { long value; status = kstrtol(buf, 0, 
&value); if (status == 0) status = sysfs_set_active_low(desc, dev, value != 0); } mutex_unlock(&sysfs_lock); return status ? : size; } static DEVICE_ATTR(active_low, 0644, gpio_active_low_show, gpio_active_low_store); static struct attribute *gpio_attrs[] = { &dev_attr_value.attr, &dev_attr_active_low.attr, NULL, }; ATTRIBUTE_GROUPS(gpio); /* * /sys/class/gpio/gpiochipN/ * /base ... matching gpio_chip.base (N) * /label ... matching gpio_chip.label * /ngpio ... matching gpio_chip.ngpio */ static ssize_t chip_base_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%d\n", chip->base); } static DEVICE_ATTR(base, 0444, chip_base_show, NULL); static ssize_t chip_label_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%s\n", chip->label ? : ""); } static DEVICE_ATTR(label, 0444, chip_label_show, NULL); static ssize_t chip_ngpio_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%u\n", chip->ngpio); } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); static struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; ATTRIBUTE_GROUPS(gpiochip); /* * /sys/class/gpio/export ... write-only * integer N ... number of GPIO to export (full access) * /sys/class/gpio/unexport ... write-only * integer N ... 
number of GPIO to unexport */ static ssize_t export_store(struct class *class, struct class_attribute *attr, const char *buf, size_t len) { long gpio; struct gpio_desc *desc; int status; status = kstrtol(buf, 0, &gpio); if (status < 0) goto done; desc = gpio_to_desc(gpio); /* reject invalid GPIOs */ if (!desc) { pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); return -EINVAL; } /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ status = gpiod_request(desc, "sysfs"); if (status < 0) { if (status == -EPROBE_DEFER) status = -ENODEV; goto done; } status = gpiod_export(desc, true); if (status < 0) gpiod_free(desc); else set_bit(FLAG_SYSFS, &desc->flags); done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? : len; } static ssize_t unexport_store(struct class *class, struct class_attribute *attr, const char *buf, size_t len) { long gpio; struct gpio_desc *desc; int status; status = kstrtol(buf, 0, &gpio); if (status < 0) goto done; desc = gpio_to_desc(gpio); /* reject bogus commands (gpio_unexport ignores them) */ if (!desc) { pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); return -EINVAL; } status = -EINVAL; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { status = 0; gpiod_free(desc); } done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? 
: len; } static struct class_attribute gpio_class_attrs[] = { __ATTR(export, 0200, NULL, export_store), __ATTR(unexport, 0200, NULL, unexport_store), __ATTR_NULL, }; static struct class gpio_class = { .name = "gpio", .owner = THIS_MODULE, .class_attrs = gpio_class_attrs, }; /** * gpiod_export - export a GPIO through sysfs * @gpio: gpio to make available, already requested * @direction_may_change: true if userspace may change gpio direction * Context: arch_initcall or later * * When drivers want to make a GPIO accessible to userspace after they * have requested it -- perhaps while debugging, or as part of their * public interface -- they may use this routine. If the GPIO can * change direction (some can't) and the caller allows it, userspace * will see "direction" sysfs attribute which may be used to change * the gpio's direction. A "value" attribute will always be provided. * * Returns zero on success, else an error. */ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { unsigned long flags; int status; const char *ioname = NULL; struct device *dev; int offset; /* can't export until sysfs is available ... 
*/ if (!gpio_class.p) { pr_debug("%s: called too early!\n", __func__); return -ENOENT; } if (!desc) { pr_debug("%s: invalid gpio descriptor\n", __func__); return -EINVAL; } mutex_lock(&sysfs_lock); spin_lock_irqsave(&gpio_lock, flags); if (!test_bit(FLAG_REQUESTED, &desc->flags) || test_bit(FLAG_EXPORT, &desc->flags)) { spin_unlock_irqrestore(&gpio_lock, flags); gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n", __func__, test_bit(FLAG_REQUESTED, &desc->flags), test_bit(FLAG_EXPORT, &desc->flags)); status = -EPERM; goto fail_unlock; } if (!desc->chip->direction_input || !desc->chip->direction_output) direction_may_change = false; spin_unlock_irqrestore(&gpio_lock, flags); offset = gpio_chip_hwgpio(desc); if (desc->chip->names && desc->chip->names[offset]) ioname = desc->chip->names[offset]; dev = device_create_with_groups(&gpio_class, desc->chip->dev, MKDEV(0, 0), desc, gpio_groups, ioname ? ioname : "gpio%u", desc_to_gpio(desc)); if (IS_ERR(dev)) { status = PTR_ERR(dev); goto fail_unlock; } if (direction_may_change) { status = device_create_file(dev, &dev_attr_direction); if (status) goto fail_unregister_device; } if (gpiod_to_irq(desc) >= 0 && (direction_may_change || !test_bit(FLAG_IS_OUT, &desc->flags))) { status = device_create_file(dev, &dev_attr_edge); if (status) goto fail_remove_attr_direction; } set_bit(FLAG_EXPORT, &desc->flags); mutex_unlock(&sysfs_lock); return 0; fail_remove_attr_direction: device_remove_file(dev, &dev_attr_direction); fail_unregister_device: device_unregister(dev); fail_unlock: mutex_unlock(&sysfs_lock); gpiod_dbg(desc, "%s: status %d\n", __func__, status); return status; } EXPORT_SYMBOL_GPL(gpiod_export); static int match_export(struct device *dev, const void *data) { return dev_get_drvdata(dev) == data; } /** * gpiod_export_link - create a sysfs link to an exported GPIO node * @dev: device under which to create symlink * @name: name of the symlink * @gpio: gpio to create symlink to, already exported * * Set up a 
symlink from /sys/.../dev/name to /sys/class/gpio/gpioN * node. Caller is responsible for unlinking. * * Returns zero on success, else an error. */ int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) { int status = -EINVAL; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); return -EINVAL; } mutex_lock(&sysfs_lock); if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *tdev; tdev = class_find_device(&gpio_class, NULL, desc, match_export); if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); put_device(tdev); } else { status = -ENODEV; } } mutex_unlock(&sysfs_lock); if (status) gpiod_dbg(desc, "%s: status %d\n", __func__, status); return status; } EXPORT_SYMBOL_GPL(gpiod_export_link); /** * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value * @gpio: gpio to change * @value: non-zero to use active low, i.e. inverted values * * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute. * The GPIO does not have to be exported yet. If poll(2) support has * been enabled for either rising or falling edge, it will be * reconfigured to follow the new polarity. * * Returns zero on success, else an error. */ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) { struct device *dev = NULL; int status = -EINVAL; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); return -EINVAL; } mutex_lock(&sysfs_lock); if (test_bit(FLAG_EXPORT, &desc->flags)) { dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev == NULL) { status = -ENODEV; goto unlock; } } status = sysfs_set_active_low(desc, dev, value); put_device(dev); unlock: mutex_unlock(&sysfs_lock); if (status) gpiod_dbg(desc, "%s: status %d\n", __func__, status); return status; } EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low); /** * gpiod_unexport - reverse effect of gpio_export() * @gpio: gpio to make unavailable * * This is implicit on gpio_free(). 
*/ void gpiod_unexport(struct gpio_desc *desc) { int status = 0; struct device *dev = NULL; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); return; } mutex_lock(&sysfs_lock); if (test_bit(FLAG_EXPORT, &desc->flags)) { dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev) { gpio_setup_irq(desc, dev, 0); clear_bit(FLAG_EXPORT, &desc->flags); } else status = -ENODEV; } mutex_unlock(&sysfs_lock); if (dev) { device_remove_file(dev, &dev_attr_edge); device_remove_file(dev, &dev_attr_direction); device_unregister(dev); put_device(dev); } if (status) gpiod_dbg(desc, "%s: status %d\n", __func__, status); } EXPORT_SYMBOL_GPL(gpiod_unexport); static int gpiochip_export(struct gpio_chip *chip) { int status; struct device *dev; /* Many systems register gpio chips for SOC support very early, * before driver model support is available. In those cases we * export this later, in gpiolib_sysfs_init() ... here we just * verify that _some_ field of gpio_class got initialized. */ if (!gpio_class.p) return 0; /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), chip, gpiochip_groups, "gpiochip%d", chip->base); if (IS_ERR(dev)) status = PTR_ERR(dev); else status = 0; chip->exported = (status == 0); mutex_unlock(&sysfs_lock); if (status) { unsigned long flags; unsigned gpio; spin_lock_irqsave(&gpio_lock, flags); gpio = 0; while (gpio < chip->ngpio) chip->desc[gpio++].chip = NULL; spin_unlock_irqrestore(&gpio_lock, flags); chip_dbg(chip, "%s: status %d\n", __func__, status); } return status; } static void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); chip->exported = false; status = 0; } else status = -ENODEV; mutex_unlock(&sysfs_lock); if (status) chip_dbg(chip, "%s: status %d\n", __func__, 
status); } static int __init gpiolib_sysfs_init(void) { int status; unsigned long flags; struct gpio_chip *chip; status = class_register(&gpio_class); if (status < 0) return status; /* Scan and register the gpio_chips which registered very * early (e.g. before the class_register above was called). * * We run before arch_initcall() so chip->dev nodes can have * registered, and so arch_initcall() can always gpio_export(). */ spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(chip, &gpio_chips, list) { if (!chip || chip->exported) continue; spin_unlock_irqrestore(&gpio_lock, flags); status = gpiochip_export(chip); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); return status; } postcore_initcall(gpiolib_sysfs_init); #else static inline int gpiochip_export(struct gpio_chip *chip) { return 0; } static inline void gpiochip_unexport(struct gpio_chip *chip) { } #endif /* CONFIG_GPIO_SYSFS */ /* * Add a new chip to the global chips list, keeping the list of chips sorted * by base order. * * Return -EBUSY if the new chip overlaps with some other chip's integer * space. */ static int gpiochip_add_to_list(struct gpio_chip *chip) { struct list_head *pos = &gpio_chips; struct gpio_chip *_chip; int err = 0; /* find where to insert our chip */ list_for_each(pos, &gpio_chips) { _chip = list_entry(pos, struct gpio_chip, list); /* shall we insert before _chip? */ if (_chip->base >= chip->base + chip->ngpio) break; } /* are we stepping on the chip right before? 
*/ if (pos != &gpio_chips && pos->prev != &gpio_chips) { _chip = list_entry(pos->prev, struct gpio_chip, list); if (_chip->base + _chip->ngpio > chip->base) { dev_err(chip->dev, "GPIO integer space overlap, cannot add chip\n"); err = -EBUSY; } } if (!err) list_add_tail(&chip->list, pos); return err; } /** * gpiochip_add() - register a gpio_chip * @chip: the chip to register, with chip->base initialized * Context: potentially before irqs or kmalloc will work * * Returns a negative errno if the chip can't be registered, such as * because the chip->base is invalid or already associated with a * different chip. Otherwise it returns zero as a success code. * * When gpiochip_add() is called very early during boot, so that GPIOs * can be freely used, the chip->dev device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * If chip->base is negative, this requests dynamic assignment of * a range of valid GPIOs. */ int gpiochip_add(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; int base = chip->base; if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1)) && base >= 0) { status = -EINVAL; goto fail; } spin_lock_irqsave(&gpio_lock, flags); if (base < 0) { base = gpiochip_find_base(chip->ngpio); if (base < 0) { status = base; goto unlock; } chip->base = base; } status = gpiochip_add_to_list(chip); if (status == 0) { chip->desc = &gpio_desc[chip->base]; for (id = 0; id < chip->ngpio; id++) { struct gpio_desc *desc = &chip->desc[id]; desc->chip = chip; /* REVISIT: most hardware initializes GPIOs as * inputs (often with pullups enabled) so power * usage is minimized. Linux code should set the * gpio direction first thing; but until it does, * and in case chip->get_direction is not set, * we may expose the wrong direction in sysfs. */ desc->flags = !chip->direction_input ? 
(1 << FLAG_IS_OUT) : 0; } } spin_unlock_irqrestore(&gpio_lock, flags); if (status) goto fail; #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&chip->pin_ranges); #endif of_gpiochip_add(chip); acpi_gpiochip_add(chip); status = gpiochip_export(chip); if (status) { acpi_gpiochip_remove(chip); of_gpiochip_remove(chip); goto fail; } pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return 0; unlock: spin_unlock_irqrestore(&gpio_lock, flags); fail: /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return status; } EXPORT_SYMBOL_GPL(gpiochip_add); /** * gpiochip_remove() - unregister a gpio_chip * @chip: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. */ int gpiochip_remove(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; spin_lock_irqsave(&gpio_lock, flags); gpiochip_remove_pin_ranges(chip); of_gpiochip_remove(chip); acpi_gpiochip_remove(chip); for (id = 0; id < chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) { status = -EBUSY; break; } } if (status == 0) { for (id = 0; id < chip->ngpio; id++) chip->desc[id].chip = NULL; list_del(&chip->list); } spin_unlock_irqrestore(&gpio_lock, flags); if (status == 0) gpiochip_unexport(chip); return status; } EXPORT_SYMBOL_GPL(gpiochip_remove); /** * gpiochip_find() - iterator for locating a specific gpio_chip * @data: data to pass to match function * @callback: Callback function to check gpio_chip * * Similar to bus_find_device. It returns a reference to a gpio_chip as * determined by a user supplied @match callback. The callback should return * 0 if the device doesn't match and non-zero if it does. If the callback is * non-zero, this function will return to the caller and not iterate over any * more gpio_chips. 
 */
struct gpio_chip *gpiochip_find(void *data,
				int (*match)(struct gpio_chip *chip,
					     void *data))
{
	struct gpio_chip *chip;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);
	list_for_each_entry(chip, &gpio_chips, list)
		if (match(chip, data))
			break;

	/* No match? */
	if (&chip->list == &gpio_chips)
		chip = NULL;
	spin_unlock_irqrestore(&gpio_lock, flags);

	return chip;
}
EXPORT_SYMBOL_GPL(gpiochip_find);

/* match callback for gpiochip_find(): compares the chip label to @data */
static int gpiochip_match_name(struct gpio_chip *chip, void *data)
{
	const char *name = data;

	return !strcmp(chip->label, name);
}

/* look a registered gpio_chip up by its label */
static struct gpio_chip *find_chip_by_name(const char *name)
{
	return gpiochip_find((void *)name, gpiochip_match_name);
}

#ifdef CONFIG_PINCTRL

/**
 * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
 * @chip: the gpiochip to add the range for
 * @pctldev: the pin controller to map to
 * @gpio_offset: the start offset in the current gpio_chip number space
 * @pin_group: name of the pin group inside the pin controller
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * returned by pinctrl_get_group_pins().  The allocated range is linked
 * on @chip->pin_ranges and freed by gpiochip_remove_pin_ranges().
 */
int gpiochip_add_pingroup_range(struct gpio_chip *chip,
			struct pinctrl_dev *pctldev,
			unsigned int gpio_offset, const char *pin_group)
{
	struct gpio_pin_range *pin_range;
	int ret;

	pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
	if (!pin_range) {
		chip_err(chip, "failed to allocate pin ranges\n");
		return -ENOMEM;
	}

	/* Use local offset as range ID */
	pin_range->range.id = gpio_offset;
	pin_range->range.gc = chip;
	pin_range->range.name = chip->label;
	pin_range->range.base = chip->base + gpio_offset;
	pin_range->pctldev = pctldev;

	/* the pin list and count come from the named group */
	ret = pinctrl_get_group_pins(pctldev, pin_group,
					&pin_range->range.pins,
					&pin_range->range.npins);
	if (ret < 0) {
		kfree(pin_range);
		return ret;
	}

	pinctrl_add_gpio_range(pctldev, &pin_range->range);

	chip_dbg(chip, "created GPIO range %d->%d ==> %s PINGRP %s\n",
		 gpio_offset, gpio_offset + pin_range->range.npins - 1,
		 pinctrl_dev_get_devname(pctldev), pin_group);

	list_add_tail(&pin_range->node, &chip->pin_ranges);

	return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);

/**
 * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
 * @chip: the gpiochip to add the range for
 * @pinctl_name: the dev_name() of the pin controller to map to
 * @gpio_offset: the start offset in the current gpio_chip number space
 * @pin_offset: the start offset in the pin controller number space
 * @npins: the number of pins from the offset of each pin space (GPIO and
 *	pin controller) to accumulate in this range
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from pinctrl_find_and_add_gpio_range() when no matching pin
 * controller is registered.
 */
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
			   unsigned int gpio_offset, unsigned int pin_offset,
			   unsigned int npins)
{
	struct gpio_pin_range *pin_range;
	int ret;

	pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
	if (!pin_range) {
		chip_err(chip, "failed to allocate pin ranges\n");
		return -ENOMEM;
	}

	/* Use local offset as range ID */
	pin_range->range.id = gpio_offset;
	pin_range->range.gc = chip;
	pin_range->range.name = chip->label;
	pin_range->range.base = chip->base + gpio_offset;
	pin_range->range.pin_base = pin_offset;
	pin_range->range.npins = npins;
	pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
			&pin_range->range);
	if (IS_ERR(pin_range->pctldev)) {
		ret = PTR_ERR(pin_range->pctldev);
		chip_err(chip, "could not create pin range\n");
		kfree(pin_range);
		return ret;
	}
	chip_dbg(chip, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
		 gpio_offset, gpio_offset + npins - 1,
		 pinctl_name, pin_offset, pin_offset + npins - 1);

	list_add_tail(&pin_range->node, &chip->pin_ranges);

	return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);

/**
 * gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
 * @chip: the chip to remove all the mappings for
 */
void gpiochip_remove_pin_ranges(struct gpio_chip *chip)
{
	struct gpio_pin_range *pin_range, *tmp;

	list_for_each_entry_safe(pin_range, tmp, &chip->pin_ranges, node) {
		list_del(&pin_range->node);
		pinctrl_remove_gpio_range(pin_range->pctldev,
				&pin_range->range);
		kfree(pin_range);
	}
}
EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);

#endif /* CONFIG_PINCTRL */

/* These "optional" allocation calls help prevent drivers from stomping
 * on each other, and help provide better diagnostics in debugfs.
 * They're called even less than the "set direction" calls.
 */

/*
 * gpiod_request - mark a GPIO descriptor as requested and pin its chip
 * @desc: descriptor to request; NULL yields -EINVAL
 * @label: debug label recorded on the descriptor ("?" when NULL)
 *
 * Returns 0 on success, -EPROBE_DEFER when the descriptor has no chip
 * (or the chip module cannot be pinned), -EBUSY when already requested,
 * or the error from chip->request().  gpio_lock is dropped around the
 * chip->request()/chip->get_direction() hooks, which may sleep.
 */
static int gpiod_request(struct gpio_desc *desc, const char *label)
{
	struct gpio_chip	*chip;
	int			status = -EPROBE_DEFER;
	unsigned long		flags;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	chip = desc->chip;
	if (chip == NULL)
		goto done;

	/* keep the providing module loaded while the GPIO is in use */
	if (!try_module_get(chip->owner))
		goto done;

	/* NOTE:  gpio_request() can be called in early boot,
	 * before IRQs are enabled, for non-sleeping (SOC) GPIOs.
	 */

	if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
		desc_set_label(desc, label ? : "?");
		status = 0;
	} else {
		status = -EBUSY;
		module_put(chip->owner);
		goto done;
	}

	if (chip->request) {
		/* chip->request may sleep */
		spin_unlock_irqrestore(&gpio_lock, flags);
		status = chip->request(chip, gpio_chip_hwgpio(desc));
		spin_lock_irqsave(&gpio_lock, flags);

		if (status < 0) {
			/* undo the request bookkeeping on chip failure */
			desc_set_label(desc, NULL);
			module_put(chip->owner);
			clear_bit(FLAG_REQUESTED, &desc->flags);
			goto done;
		}
	}
	if (chip->get_direction) {
		/* chip->get_direction may sleep */
		spin_unlock_irqrestore(&gpio_lock, flags);
		gpiod_get_direction(desc);
		spin_lock_irqsave(&gpio_lock, flags);
	}
done:
	if (status)
		gpiod_dbg(desc, "%s: status %d\n", __func__, status);
	spin_unlock_irqrestore(&gpio_lock, flags);
	return status;
}

/* legacy numeric-GPIO wrapper around gpiod_request() */
int gpio_request(unsigned gpio, const char *label)
{
	return gpiod_request(gpio_to_desc(gpio), label);
}
EXPORT_SYMBOL_GPL(gpio_request);

/*
 * gpiod_free - release a requested GPIO descriptor
 * @desc: descriptor to release; NULL only triggers a WARN
 *
 * Unexports the GPIO, invokes the chip's free() hook with gpio_lock
 * dropped (it may sleep), clears the per-descriptor flags and drops
 * the module reference taken by gpiod_request().
 */
static void gpiod_free(struct gpio_desc *desc)
{
	unsigned long		flags;
	struct gpio_chip	*chip;

	might_sleep();

	if (!desc) {
		WARN_ON(extra_checks);
		return;
	}

	gpiod_unexport(desc);

	spin_lock_irqsave(&gpio_lock, flags);

	chip = desc->chip;
	if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
		if (chip->free) {
			/* chip->free may sleep */
			spin_unlock_irqrestore(&gpio_lock, flags);
			might_sleep_if(chip->can_sleep);
			chip->free(chip, gpio_chip_hwgpio(desc));
			spin_lock_irqsave(&gpio_lock, flags);
		}
		desc_set_label(desc, NULL);
		module_put(desc->chip->owner);
		clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
		clear_bit(FLAG_REQUESTED, &desc->flags);
		clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
		clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
	} else
		WARN_ON(extra_checks);

	spin_unlock_irqrestore(&gpio_lock, flags);
}

/* legacy numeric-GPIO wrapper around gpiod_free() */
void gpio_free(unsigned gpio)
{
	gpiod_free(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(gpio_free);

/**
 * gpio_request_one - request a single GPIO with initial configuration
 * @gpio:	the GPIO number
 * @flags:	GPIO configuration as specified by GPIOF_*
 * @label:	a literal description string of this GPIO
 *
 * Returns 0 on success; on any failure after the request itself
 * succeeded, the GPIO is freed again before the error is returned.
 */
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
	struct gpio_desc *desc;
	int err;

	desc = gpio_to_desc(gpio);

	err = gpiod_request(desc, label);
	if (err)
		return err;

	if (flags & GPIOF_OPEN_DRAIN)
		set_bit(FLAG_OPEN_DRAIN, &desc->flags);

	if (flags & GPIOF_OPEN_SOURCE)
		set_bit(FLAG_OPEN_SOURCE, &desc->flags);

	if (flags & GPIOF_DIR_IN)
		err = gpiod_direction_input(desc);
	else
		err = gpiod_direction_output(desc,
				(flags & GPIOF_INIT_HIGH) ? 1 : 0);

	if (err)
		goto free_gpio;

	if (flags & GPIOF_EXPORT) {
		err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE);
		if (err)
			goto free_gpio;
	}

	return 0;

 free_gpio:
	gpiod_free(desc);
	return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);

/**
 * gpio_request_array - request multiple GPIOs in a single call
 * @array:	array of the 'struct gpio'
 * @num:	how many GPIOs in the array
 *
 * On failure, every GPIO requested so far is released again.
 */
int gpio_request_array(const struct gpio *array, size_t num)
{
	int i, err;

	for (i = 0; i < num; i++, array++) {
		err = gpio_request_one(array->gpio, array->flags,
				       array->label);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	/* roll back the GPIOs already requested */
	while (i--)
		gpio_free((--array)->gpio);
	return err;
}
EXPORT_SYMBOL_GPL(gpio_request_array);

/**
 * gpio_free_array - release multiple GPIOs in a single call
 * @array:	array of the 'struct gpio'
 * @num:	how many GPIOs in the array
 */
void gpio_free_array(const struct gpio *array, size_t num)
{
	while (num--)
		gpio_free((array++)->gpio);
}
EXPORT_SYMBOL_GPL(gpio_free_array);

/**
 * gpiochip_is_requested - return string iff signal was requested
 * @chip: controller managing the signal
 * @offset: of signal within controller's 0..(ngpio - 1) range
 *
 * Returns NULL if the GPIO is not currently requested, else a string.
 * If debugfs support is enabled, the string returned is the label passed
 * to gpio_request(); otherwise it is a meaningless constant.
 *
 * This function is for use by GPIO controller drivers.  The label can
 * help with diagnostics, and knowing that the signal is used as a GPIO
 * can help avoid accidentally multiplexing it to another controller.
 */
const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_desc *desc;

	if (!GPIO_OFFSET_VALID(chip, offset))
		return NULL;
	desc = &chip->desc[offset];

	if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
		return NULL;
#ifdef CONFIG_DEBUG_FS
	return desc->label;
#else
	return "?";
#endif
}
EXPORT_SYMBOL_GPL(gpiochip_is_requested);

/* Drivers MUST set GPIO direction before making get/set calls.
   In
 * some cases this is done in early boot, before IRQs are enabled.
 *
 * As a rule these aren't called more than once (except for drivers
 * using the open-drain emulation idiom) so these are natural places
 * to accumulate extra debugging checks.  Note that we can't (yet)
 * rely on gpio_request() having been called beforehand.
 */

/**
 * gpiod_direction_input - set the GPIO direction to input
 * @desc:	GPIO to set to input
 *
 * Set the direction of the passed GPIO to input, such as gpiod_get_value() can
 * be called safely on it.
 *
 * Return 0 in case of success, else an error code.
 */
int gpiod_direction_input(struct gpio_desc *desc)
{
	unsigned long		flags;
	struct gpio_chip	*chip;
	int			status = -EINVAL;
	int			offset;

	if (!desc || !desc->chip) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	chip = desc->chip;
	if (!chip->get || !chip->direction_input) {
		gpiod_warn(desc,
			"%s: missing get() or direction_input() operations\n",
			__func__);
		return -EIO;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	/* status > 0 here means the GPIO was implicitly auto-requested */
	status = gpio_ensure_requested(desc);
	if (status < 0)
		goto fail;

	/* now we know the gpio is valid and chip won't vanish */

	spin_unlock_irqrestore(&gpio_lock, flags);

	might_sleep_if(chip->can_sleep);

	offset = gpio_chip_hwgpio(desc);
	if (status) {
		status = chip->request(chip, offset);
		if (status < 0) {
			gpiod_dbg(desc, "%s: chip request fail, %d\n",
					__func__, status);
			/* and it's not available to anyone else ...
			 * gpio_request() is the fully clean solution.
			 */
			goto lose;
		}
	}

	status = chip->direction_input(chip, offset);
	if (status == 0)
		clear_bit(FLAG_IS_OUT, &desc->flags);

	trace_gpio_direction(desc_to_gpio(desc), 1, status);
lose:
	return status;
fail:
	/* lock is still held only on this error path */
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status)
		gpiod_dbg(desc, "%s: status %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);

/**
 * gpiod_direction_output - set the GPIO direction to output
 * @desc:	GPIO to set to output
 * @value:	initial output value of the GPIO
 *
 * Set the direction of the passed GPIO to output, such as gpiod_set_value() can
 * be called safely on it.  The initial value of the output must be specified.
 *
 * Return 0 in case of success, else an error code.
 */
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
	unsigned long		flags;
	struct gpio_chip	*chip;
	int			status = -EINVAL;
	int			offset;

	if (!desc || !desc->chip) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	/* GPIOs used for IRQs shall not be set as output */
	if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
		gpiod_err(desc,
			  "%s: tried to set a GPIO tied to an IRQ as output\n",
			  __func__);
		return -EIO;
	}

	/* Open drain pin should not be driven to 1 */
	if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		return gpiod_direction_input(desc);

	/* Open source pin should not be driven to 0 */
	if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
		return gpiod_direction_input(desc);

	chip = desc->chip;
	if (!chip->set || !chip->direction_output) {
		gpiod_warn(desc,
		       "%s: missing set() or direction_output() operations\n",
		       __func__);
		return -EIO;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	/* status > 0 here means the GPIO was implicitly auto-requested */
	status = gpio_ensure_requested(desc);
	if (status < 0)
		goto fail;

	/* now we know the gpio is valid and chip won't vanish */

	spin_unlock_irqrestore(&gpio_lock, flags);

	might_sleep_if(chip->can_sleep);

	offset = gpio_chip_hwgpio(desc);
	if (status) {
		status = chip->request(chip, offset);
		if (status < 0) {
			gpiod_dbg(desc, "%s: chip request fail, %d\n",
					__func__, status);
			/* and it's not available to anyone else ...
			 * gpio_request() is the fully clean solution.
			 */
			goto lose;
		}
	}

	status = chip->direction_output(chip, offset, value);
	if (status == 0)
		set_bit(FLAG_IS_OUT, &desc->flags);
	trace_gpio_value(desc_to_gpio(desc), 0, value);
	trace_gpio_direction(desc_to_gpio(desc), 0, status);
lose:
	return status;
fail:
	/* lock is still held only on this error path */
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status)
		gpiod_dbg(desc, "%s: gpio status %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);

/**
 * gpiod_set_debounce - sets @debounce time for a @desc
 * @desc:	the gpio desc to set debounce time
 * @debounce:	debounce time in microseconds
 *
 * returns -ENOTSUPP if the controller does not support setting
 * debounce.
 */
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
	unsigned long		flags;
	struct gpio_chip	*chip;
	int			status = -EINVAL;
	int			offset;

	if (!desc || !desc->chip) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	chip = desc->chip;
	if (!chip->set || !chip->set_debounce) {
		gpiod_dbg(desc,
			  "%s: missing set() or set_debounce() operations\n",
			  __func__);
		return -ENOTSUPP;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	status = gpio_ensure_requested(desc);
	if (status < 0)
		goto fail;

	/* now we know the gpio is valid and chip won't vanish */

	spin_unlock_irqrestore(&gpio_lock, flags);

	might_sleep_if(chip->can_sleep);

	offset = gpio_chip_hwgpio(desc);
	return chip->set_debounce(chip, offset, debounce);

fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status)
		gpiod_dbg(desc, "%s: status %d\n", __func__, status);

	return status;
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);

/**
 * gpiod_is_active_low - test whether a GPIO is active-low or not
 * @desc: the gpio descriptor to test
 *
 * Returns 1 if the GPIO is active-low, 0 otherwise.
*/ int gpiod_is_active_low(const struct gpio_desc *desc) { return test_bit(FLAG_ACTIVE_LOW, &desc->flags); } EXPORT_SYMBOL_GPL(gpiod_is_active_low); /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * * "Get" operations are often inlinable as reading a pin value register, * and masking the relevant bit in that register. * * When "set" operations are inlinable, they involve writing that mask to * one register to set a low value, or a different register to set it high. * Otherwise locking is needed, so there may be little value to inlining. * *------------------------------------------------------------------------ * * IMPORTANT!!! The hot paths -- get/set value -- assume that callers * have requested the GPIO. That can include implicit requesting by * a direction setting call. Marking a gpio as requested locks its chip * in memory, guaranteeing that these table lookups need no more locking * and that gpiochip_remove() will fail. * * REVISIT when debugging, consider adding some instrumentation to ensure * that the GPIO was actually requested. */ static int _gpiod_get_raw_value(const struct gpio_desc *desc) { struct gpio_chip *chip; int value; int offset; chip = desc->chip; offset = gpio_chip_hwgpio(desc); value = chip->get ? chip->get(chip, offset) : 0; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } /** * gpiod_get_raw_value() - return a gpio's raw value * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding * its ACTIVE_LOW status. * * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. 
*/ int gpiod_get_raw_value(const struct gpio_desc *desc) { if (!desc) return 0; /* Should be using gpio_get_value_cansleep() */ WARN_ON(desc->chip->can_sleep); return _gpiod_get_raw_value(desc); } EXPORT_SYMBOL_GPL(gpiod_get_raw_value); /** * gpiod_get_value() - return a gpio's value * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into * account. * * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. */ int gpiod_get_value(const struct gpio_desc *desc) { int value; if (!desc) return 0; /* Should be using gpio_get_value_cansleep() */ WARN_ON(desc->chip->can_sleep); value = _gpiod_get_raw_value(desc); if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; return value; } EXPORT_SYMBOL_GPL(gpiod_get_value); /* * _gpio_set_open_drain_value() - Set the open drain gpio's value. * @desc: gpio descriptor whose state need to be set. * @value: Non-zero for setting it HIGH otherise it will set to LOW. */ static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value) { int err = 0; struct gpio_chip *chip = desc->chip; int offset = gpio_chip_hwgpio(desc); if (value) { err = chip->direction_input(chip, offset); if (!err) clear_bit(FLAG_IS_OUT, &desc->flags); } else { err = chip->direction_output(chip, offset, 0); if (!err) set_bit(FLAG_IS_OUT, &desc->flags); } trace_gpio_direction(desc_to_gpio(desc), value, err); if (err < 0) gpiod_err(desc, "%s: Error in set_value for open drain err %d\n", __func__, err); } /* * _gpio_set_open_source_value() - Set the open source gpio's value. * @desc: gpio descriptor whose state need to be set. * @value: Non-zero for setting it HIGH otherise it will set to LOW. 
 */
static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
{
	int err = 0;
	struct gpio_chip *chip = desc->chip;
	int offset = gpio_chip_hwgpio(desc);

	if (value) {
		/* high: actively drive the line to 1 */
		err = chip->direction_output(chip, offset, 1);
		if (!err)
			set_bit(FLAG_IS_OUT, &desc->flags);
	} else {
		/* low: release the line by switching it to input */
		err = chip->direction_input(chip, offset);
		if (!err)
			clear_bit(FLAG_IS_OUT, &desc->flags);
	}
	trace_gpio_direction(desc_to_gpio(desc), !value, err);
	if (err < 0)
		gpiod_err(desc, "%s: Error in set_value for open source err %d\n",
			  __func__, err);
}

/* Dispatch a raw set to the open-drain/open-source emulation or, in the
 * common case, straight to the chip's set() hook.
 */
static void _gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
	struct gpio_chip	*chip;

	chip = desc->chip;
	trace_gpio_value(desc_to_gpio(desc), 0, value);
	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		_gpio_set_open_drain_value(desc, value);
	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
		_gpio_set_open_source_value(desc, value);
	else
		chip->set(chip, gpio_chip_hwgpio(desc), value);
}

/**
 * gpiod_set_raw_value() - assign a gpio's raw value
 * @desc: gpio whose value will be assigned
 * @value: value to assign
 *
 * Set the raw value of the GPIO, i.e. the value of its physical line without
 * regard for its ACTIVE_LOW status.
 *
 * This function should be called from contexts where we cannot sleep, and will
 * complain if the GPIO chip functions potentially sleep.
 */
void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
	if (!desc)
		return;
	/* Should be using gpio_set_value_cansleep() */
	WARN_ON(desc->chip->can_sleep);
	_gpiod_set_raw_value(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);

/**
 * gpiod_set_value() - assign a gpio's value
 * @desc: gpio whose value will be assigned
 * @value: value to assign
 *
 * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
 * account
 *
 * This function should be called from contexts where we cannot sleep, and will
 * complain if the GPIO chip functions potentially sleep.
 */
int gpiod_lock_as_irq(struct gpio_desc *desc)
{
	if (!desc)
		return -EINVAL;

	/* an output line cannot simultaneously be an IRQ source */
	if (test_bit(FLAG_IS_OUT, &desc->flags)) {
		gpiod_err(desc,
			  "%s: tried to flag a GPIO set as output for IRQ\n",
			  __func__);
		return -EIO;
	}

	set_bit(FLAG_USED_AS_IRQ, &desc->flags);
	return 0;
}
EXPORT_SYMBOL_GPL(gpiod_lock_as_irq);

/* legacy chip+offset wrapper around gpiod_lock_as_irq() */
int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
	return gpiod_lock_as_irq(gpiochip_offset_to_desc(chip, offset));
}
EXPORT_SYMBOL_GPL(gpio_lock_as_irq);

/**
 * gpiod_unlock_as_irq() - unlock a GPIO used as IRQ
 * @desc: the GPIO line to unlock from IRQ usage
 *
 * This is used directly by GPIO drivers that want to indicate
 * that a certain GPIO is no longer used exclusively for IRQ.
 */
void gpiod_unlock_as_irq(struct gpio_desc *desc)
{
	if (!desc)
		return;

	clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_unlock_as_irq);

/* legacy chip+offset wrapper around gpiod_unlock_as_irq() */
void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
	return gpiod_unlock_as_irq(gpiochip_offset_to_desc(chip, offset));
}
EXPORT_SYMBOL_GPL(gpio_unlock_as_irq);

/**
 * gpiod_get_raw_value_cansleep() - return a gpio's raw value
 * @desc: gpio whose value will be returned
 *
 * Return the GPIO's raw value, i.e. the value of the physical line disregarding
 * its ACTIVE_LOW status.
 *
 * This function is to be called from contexts that can sleep.
 */
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
	might_sleep_if(extra_checks);
	if (!desc)
		return 0;
	return _gpiod_get_raw_value(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);

/**
 * gpiod_get_value_cansleep() - return a gpio's value
 * @desc: gpio whose value will be returned
 *
 * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
 * account.
 *
 * This function is to be called from contexts that can sleep.
 */
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
	might_sleep_if(extra_checks);
	if (!desc)
		return;

	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
		value = !value;
	_gpiod_set_raw_value(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);

/**
 * gpiod_add_lookup_table() - register GPIO device consumers
 * @table: table of consumers to register
 */
void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
{
	mutex_lock(&gpio_lookup_lock);

	list_add_tail(&table->list, &gpio_lookup_list);

	mutex_unlock(&gpio_lookup_lock);
}

#ifdef CONFIG_OF
/* Resolve a GPIO from the device-tree "<con_id>-gpios" (or "gpios")
 * property at index @idx; ORs GPIO_ACTIVE_LOW into *@flags when the DT
 * cell says so.  Returns an ERR_PTR on lookup failure.
 */
static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
				      unsigned int idx,
				      enum gpio_lookup_flags *flags)
{
	char prop_name[32]; /* 32 is max size of property name */
	enum of_gpio_flags of_flags;
	struct gpio_desc *desc;

	if (con_id)
		snprintf(prop_name, 32, "%s-gpios", con_id);
	else
		snprintf(prop_name, 32, "gpios");

	desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
					&of_flags);
	if (IS_ERR(desc))
		return desc;

	if (of_flags & OF_GPIO_ACTIVE_LOW)
		*flags |= GPIO_ACTIVE_LOW;

	return desc;
}
#else
/* stub when the kernel is built without device-tree support */
static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
				      unsigned int idx,
				      enum gpio_lookup_flags *flags)
{
	return ERR_PTR(-ENODEV);
}
#endif

/* Resolve a GPIO via ACPI at index @idx; note con_id is unused here since
 * ACPI GPIO resources carry no connection-id naming.
 */
static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
					unsigned int idx,
					enum gpio_lookup_flags *flags)
{
	struct acpi_gpio_info info;
	struct gpio_desc *desc;

	desc = acpi_get_gpiod_by_index(dev, idx, &info);
	if (IS_ERR(desc))
		return desc;

	if (info.gpioint && info.active_low)
		*flags |= GPIO_ACTIVE_LOW;

	return desc;
}

/* Find the platform lookup table registered for @dev (NULL dev matches a
 * NULL table->dev_id); returns NULL when no table matches.
 */
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct gpiod_lookup_table *table;

	mutex_lock(&gpio_lookup_lock);

	list_for_each_entry(table, &gpio_lookup_list, list) {
		if (table->dev_id && dev_id) {
			/*
			 * Valid strings on both ends, must be identical to have
			 * a match
			 */
			if (!strcmp(table->dev_id, dev_id))
				goto found;
		} else {
			/*
			 * One of the pointers is NULL, so both must be to have
			 * a match
			 */
			if (dev_id == table->dev_id)
				goto found;
		}
	}
	table = NULL;

found:
	mutex_unlock(&gpio_lookup_lock);
	return table;
}

/* Resolve a GPIO through the platform lookup tables.  Returns the matching
 * descriptor, ERR_PTR(-ENOENT) when no entry matches, or another ERR_PTR
 * when an entry matches but cannot be resolved.
 */
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
				    unsigned int idx,
				    enum gpio_lookup_flags *flags)
{
	struct gpio_desc *desc = ERR_PTR(-ENOENT);
	struct gpiod_lookup_table *table;
	struct gpiod_lookup *p;

	table = gpiod_find_lookup_table(dev);
	if (!table)
		return desc;

	for (p = &table->table[0]; p->chip_label; p++) {
		struct gpio_chip *chip;

		/* idx must always match exactly */
		if (p->idx != idx)
			continue;

		/* If the lookup entry has a con_id, require exact match */
		if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
			continue;

		chip = find_chip_by_name(p->chip_label);

		if (!chip) {
			dev_err(dev, "cannot find GPIO chip %s\n",
				p->chip_label);
			return ERR_PTR(-ENODEV);
		}

		if (chip->ngpio <= p->chip_hwnum) {
			dev_err(dev,
				"requested GPIO %d is out of range [0..%d] for chip %s\n",
				idx, chip->ngpio, chip->label);
			return ERR_PTR(-EINVAL);
		}

		desc = gpiochip_offset_to_desc(chip, p->chip_hwnum);
		*flags = p->flags;

		return desc;
	}

	return desc;
}

/**
 * gpiod_get - obtain a GPIO for a given GPIO function
 * @dev: GPIO consumer, can be NULL for system-global GPIOs
 * @con_id: function within the GPIO consumer
 *
 * Return the GPIO descriptor corresponding to the function con_id of device
 * dev, -ENOENT if no GPIO has been assigned to the requested function, or
 * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
 */
struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id)
{
	return gpiod_get_index(dev, con_id, 0);
}
EXPORT_SYMBOL_GPL(gpiod_get);

/**
 * gpiod_get_index - obtain a GPIO from a multi-index GPIO function
 * @dev:	GPIO consumer, can be NULL for system-global GPIOs
 * @con_id:	function within the GPIO consumer
 * @idx:	index of the GPIO to obtain in the consumer
 *
 * This variant of gpiod_get() allows to access GPIOs other than the first
 * defined one for functions that define several GPIOs.
 *
 * Return a valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the
 * requested function and/or index, or another IS_ERR() code if an error
 * occurred while trying to acquire the GPIO.
 */
struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
					       const char *con_id,
					       unsigned int idx)
{
	struct gpio_desc *desc = NULL;
	int status;
	enum gpio_lookup_flags flags = 0;

	dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);

	/* Using device tree? */
	if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) {
		dev_dbg(dev, "using device tree for GPIO lookup\n");
		desc = of_find_gpio(dev, con_id, idx, &flags);
	} else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
		dev_dbg(dev, "using ACPI for GPIO lookup\n");
		desc = acpi_find_gpio(dev, con_id, idx, &flags);
	}

	/*
	 * Either we are not using DT or ACPI, or their lookup did not return
	 * a result. In that case, use platform lookup as a fallback.
	 */
	if (!desc || desc == ERR_PTR(-ENOENT)) {
		dev_dbg(dev, "using lookup tables for GPIO lookup");
		desc = gpiod_find(dev, con_id, idx, &flags);
	}

	if (IS_ERR(desc)) {
		dev_dbg(dev, "lookup for GPIO %s failed\n", con_id);
		return desc;
	}

	status = gpiod_request(desc, con_id);

	if (status < 0)
		return ERR_PTR(status);

	/* apply the polarity / drive flags the lookup reported */
	if (flags & GPIO_ACTIVE_LOW)
		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
	if (flags & GPIO_OPEN_DRAIN)
		set_bit(FLAG_OPEN_DRAIN, &desc->flags);
	if (flags & GPIO_OPEN_SOURCE)
		set_bit(FLAG_OPEN_SOURCE, &desc->flags);

	return desc;
}
EXPORT_SYMBOL_GPL(gpiod_get_index);

/**
 * gpiod_put - dispose of a GPIO descriptor
 * @desc: GPIO descriptor to dispose of
 *
 * No descriptor can be used after gpiod_put() has been called on it.
 */
void gpiod_put(struct gpio_desc *desc)
{
	gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);

#ifdef CONFIG_DEBUG_FS

/* dump one line per requested GPIO of @chip into the seq_file */
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	unsigned		i;
	unsigned		gpio = chip->base;
	struct gpio_desc	*gdesc = &chip->desc[0];
	int			is_out;
	int			is_irq;

	for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
		if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
			continue;

		gpiod_get_direction(gdesc);
		is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
		is_irq = test_bit(FLAG_USED_AS_IRQ, &gdesc->flags);
		seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s",
			gpio, gdesc->label,
			is_out ? "out" : "in ",
			chip->get
				? (chip->get(chip, i) ? "hi" : "lo")
				: "? ",
			is_irq ? "IRQ" : " ");
		seq_printf(s, "\n");
	}
}

/* seq_file start: return the *pos'th registered chip, or NULL past the end */
static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
{
	unsigned long flags;
	struct gpio_chip *chip = NULL;
	loff_t index = *pos;

	s->private = "";

	spin_lock_irqsave(&gpio_lock, flags);
	list_for_each_entry(chip, &gpio_chips, list)
		if (index-- == 0) {
			spin_unlock_irqrestore(&gpio_lock, flags);
			return chip;
		}
	spin_unlock_irqrestore(&gpio_lock, flags);

	return NULL;
}

/* seq_file next: advance to the following chip, NULL at list end */
static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	unsigned long flags;
	struct gpio_chip *chip = v;
	void *ret = NULL;

	spin_lock_irqsave(&gpio_lock, flags);
	if (list_is_last(&chip->list, &gpio_chips))
		ret = NULL;
	else
		ret = list_entry(chip->list.next, struct gpio_chip, list);
	spin_unlock_irqrestore(&gpio_lock, flags);

	/* separate consecutive chips with a blank line */
	s->private = "\n";
	++*pos;

	return ret;
}

static void gpiolib_seq_stop(struct seq_file *s, void *v)
{
}

/* seq_file show: print one chip header, then its per-GPIO lines */
static int gpiolib_seq_show(struct seq_file *s, void *v)
{
	struct gpio_chip *chip = v;
	struct device *dev;

	seq_printf(s, "%sGPIOs %d-%d", (char *)s->private,
			chip->base, chip->base + chip->ngpio - 1);
	dev = chip->dev;
	if (dev)
		seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus",
			dev_name(dev));
	if (chip->label)
		seq_printf(s, ", %s", chip->label);
	if (chip->can_sleep)
		seq_printf(s, ", can sleep");
	seq_printf(s, ":\n");

	/* let the driver provide its own dump when it has one */
	if (chip->dbg_show)
		chip->dbg_show(s, chip);
	else
		gpiolib_dbg_show(s, chip);

	return 0;
}

static const struct seq_operations gpiolib_seq_ops = {
	.start = gpiolib_seq_start,
	.next = gpiolib_seq_next,
	.stop = gpiolib_seq_stop,
	.show = gpiolib_seq_show,
};

static int gpiolib_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &gpiolib_seq_ops);
}

static const struct file_operations gpiolib_operations = {
	.owner = THIS_MODULE,
	.open = gpiolib_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init gpiolib_debugfs_init(void)
{
	/* /sys/kernel/debug/gpio */
	(void) debugfs_create_file("gpio", S_IFREG | S_IRUGO,
				NULL, NULL, &gpiolib_operations);
	return 0;
}
subsys_initcall(gpiolib_debugfs_init);

#endif	/* DEBUG_FS */
gpl-2.0
pseudonymous-foss/clydefs
drivers/net/can/at91_can.c
274
35963
/* * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de> * * This software may be distributed under the terms of the GNU General * Public License ("GPL") version 2 as distributed in the 'COPYING' * file from the main directory of the linux kernel source. * * * Your platform definition file should specify something like: * * static struct at91_can_data ek_can_data = { * transceiver_switch = sam9263ek_transceiver_switch, * }; * * at91_add_device_can(&ek_can_data); * */ #include <linux/clk.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/platform_data/atmel.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> #define AT91_MB_MASK(i) ((1 << (i)) - 1) /* Common registers */ enum at91_reg { AT91_MR = 0x000, AT91_IER = 0x004, AT91_IDR = 0x008, AT91_IMR = 0x00C, AT91_SR = 0x010, AT91_BR = 0x014, AT91_TIM = 0x018, AT91_TIMESTP = 0x01C, AT91_ECR = 0x020, AT91_TCR = 0x024, AT91_ACR = 0x028, }; /* Mailbox registers (0 <= i <= 15) */ #define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20)) #define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20)) #define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20)) #define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20)) #define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20)) #define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20)) #define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20)) #define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20)) /* Register bits */ #define AT91_MR_CANEN BIT(0) #define AT91_MR_LPM BIT(1) #define 
AT91_MR_ABM BIT(2) #define AT91_MR_OVL BIT(3) #define AT91_MR_TEOF BIT(4) #define AT91_MR_TTM BIT(5) #define AT91_MR_TIMFRZ BIT(6) #define AT91_MR_DRPT BIT(7) #define AT91_SR_RBSY BIT(29) #define AT91_MMR_PRIO_SHIFT (16) #define AT91_MID_MIDE BIT(29) #define AT91_MSR_MRTR BIT(20) #define AT91_MSR_MABT BIT(22) #define AT91_MSR_MRDY BIT(23) #define AT91_MSR_MMI BIT(24) #define AT91_MCR_MRTR BIT(20) #define AT91_MCR_MTCR BIT(23) /* Mailbox Modes */ enum at91_mb_mode { AT91_MB_MODE_DISABLED = 0, AT91_MB_MODE_RX = 1, AT91_MB_MODE_RX_OVRWR = 2, AT91_MB_MODE_TX = 3, AT91_MB_MODE_CONSUMER = 4, AT91_MB_MODE_PRODUCER = 5, }; /* Interrupt mask bits */ #define AT91_IRQ_ERRA (1 << 16) #define AT91_IRQ_WARN (1 << 17) #define AT91_IRQ_ERRP (1 << 18) #define AT91_IRQ_BOFF (1 << 19) #define AT91_IRQ_SLEEP (1 << 20) #define AT91_IRQ_WAKEUP (1 << 21) #define AT91_IRQ_TOVF (1 << 22) #define AT91_IRQ_TSTP (1 << 23) #define AT91_IRQ_CERR (1 << 24) #define AT91_IRQ_SERR (1 << 25) #define AT91_IRQ_AERR (1 << 26) #define AT91_IRQ_FERR (1 << 27) #define AT91_IRQ_BERR (1 << 28) #define AT91_IRQ_ERR_ALL (0x1fff0000) #define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) #define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ AT91_IRQ_ERRP | AT91_IRQ_BOFF) #define AT91_IRQ_ALL (0x1fffffff) enum at91_devtype { AT91_DEVTYPE_SAM9263, AT91_DEVTYPE_SAM9X5, }; struct at91_devtype_data { unsigned int rx_first; unsigned int rx_split; unsigned int rx_last; unsigned int tx_shift; enum at91_devtype type; }; struct at91_priv { struct can_priv can; /* must be the first member! 
*/ struct net_device *dev; struct napi_struct napi; void __iomem *reg_base; u32 reg_sr; unsigned int tx_next; unsigned int tx_echo; unsigned int rx_next; struct at91_devtype_data devtype_data; struct clk *clk; struct at91_can_data *pdata; canid_t mb0_id; }; static const struct at91_devtype_data at91_at91sam9263_data = { .rx_first = 1, .rx_split = 8, .rx_last = 11, .tx_shift = 2, .type = AT91_DEVTYPE_SAM9263, }; static const struct at91_devtype_data at91_at91sam9x5_data = { .rx_first = 0, .rx_split = 4, .rx_last = 5, .tx_shift = 1, .type = AT91_DEVTYPE_SAM9X5, }; static const struct can_bittiming_const at91_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 2, .brp_max = 128, .brp_inc = 1, }; #define AT91_IS(_model) \ static inline int at91_is_sam##_model(const struct at91_priv *priv) \ { \ return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \ } AT91_IS(9263); AT91_IS(9X5); static inline unsigned int get_mb_rx_first(const struct at91_priv *priv) { return priv->devtype_data.rx_first; } static inline unsigned int get_mb_rx_last(const struct at91_priv *priv) { return priv->devtype_data.rx_last; } static inline unsigned int get_mb_rx_split(const struct at91_priv *priv) { return priv->devtype_data.rx_split; } static inline unsigned int get_mb_rx_num(const struct at91_priv *priv) { return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1; } static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv) { return get_mb_rx_split(priv) - 1; } static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_rx_split(priv)) & ~AT91_MB_MASK(get_mb_rx_first(priv)); } static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv) { return priv->devtype_data.tx_shift; } static inline unsigned int get_mb_tx_num(const struct at91_priv *priv) { return 1 << get_mb_tx_shift(priv); } static inline unsigned int get_mb_tx_first(const 
struct at91_priv *priv) { return get_mb_rx_last(priv) + 1; } static inline unsigned int get_mb_tx_last(const struct at91_priv *priv) { return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1; } static inline unsigned int get_next_prio_shift(const struct at91_priv *priv) { return get_mb_tx_shift(priv); } static inline unsigned int get_next_prio_mask(const struct at91_priv *priv) { return 0xf << get_mb_tx_shift(priv); } static inline unsigned int get_next_mb_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_shift(priv)); } static inline unsigned int get_next_mask(const struct at91_priv *priv) { return get_next_mb_mask(priv) | get_next_prio_mask(priv); } static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_rx_last(priv) + 1) & ~AT91_MB_MASK(get_mb_rx_first(priv)); } static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_last(priv) + 1) & ~AT91_MB_MASK(get_mb_tx_first(priv)); } static inline unsigned int get_tx_next_mb(const struct at91_priv *priv) { return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv); } static inline unsigned int get_tx_next_prio(const struct at91_priv *priv) { return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf; } static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv) { return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv); } static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) { return __raw_readl(priv->reg_base + reg); } static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, u32 value) { __raw_writel(value, priv->reg_base + reg); } static inline void set_mb_mode_prio(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode, int prio) { at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode) { 
set_mb_mode_prio(priv, mb, mode, 0); } static inline u32 at91_can_id_to_reg_mid(canid_t can_id) { u32 reg_mid; if (can_id & CAN_EFF_FLAG) reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; else reg_mid = (can_id & CAN_SFF_MASK) << 18; return reg_mid; } /* * Swtich transceiver on or off */ static void at91_transceiver_switch(const struct at91_priv *priv, int on) { if (priv->pdata && priv->pdata->transceiver_switch) priv->pdata->transceiver_switch(on); } static void at91_setup_mailboxes(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); unsigned int i; u32 reg_mid; /* * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first * mailbox is disabled. The next 11 mailboxes are used as a * reception FIFO. The last mailbox is configured with * overwrite option. The overwrite flag indicates a FIFO * overflow. */ reg_mid = at91_can_id_to_reg_mid(priv->mb0_id); for (i = 0; i < get_mb_rx_first(priv); i++) { set_mb_mode(priv, i, AT91_MB_MODE_DISABLED); at91_write(priv, AT91_MID(i), reg_mid); at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */ } for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++) set_mb_mode(priv, i, AT91_MB_MODE_RX); set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR); /* reset acceptance mask and id register */ for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) { at91_write(priv, AT91_MAM(i), 0x0); at91_write(priv, AT91_MID(i), AT91_MID_MIDE); } /* The last 4 mailboxes are used for transmitting. */ for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++) set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); /* Reset tx and rx helper pointers */ priv->tx_next = priv->tx_echo = 0; priv->rx_next = get_mb_rx_first(priv); } static int at91_set_bittiming(struct net_device *dev) { const struct at91_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; u32 reg_br; reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 << 24 : 0) | ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | ((bt->phase_seg2 - 1) << 0); netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br); at91_write(priv, AT91_BR, reg_br); return 0; } static int at91_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); bec->rxerr = reg_ecr & 0xff; bec->txerr = reg_ecr >> 16; return 0; } static void at91_chip_start(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); u32 reg_mr, reg_ier; /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); /* disable chip */ reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); at91_set_bittiming(dev); at91_setup_mailboxes(dev); at91_transceiver_switch(priv, 1); /* enable chip */ at91_write(priv, AT91_MR, AT91_MR_CANEN); priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Enable interrupts */ reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; at91_write(priv, AT91_IDR, AT91_IRQ_ALL); at91_write(priv, AT91_IER, reg_ier); } static void at91_chip_stop(struct net_device *dev, enum can_state state) { struct at91_priv *priv = netdev_priv(dev); u32 reg_mr; /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); at91_transceiver_switch(priv, 0); priv->can.state = state; } /* * theory of operation: * * According to the datasheet priority 0 is the highest priority, 15 * is the lowest. If two mailboxes have the same priority level the * message of the mailbox with the lowest number is sent first. * * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then * the next mailbox with prio 0, and so on, until all mailboxes are * used. Then we start from the beginning with mailbox * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1 * prio 1. 
When we reach the last mailbox with prio 15, we have to * stop sending, waiting for all messages to be delivered, then start * again with mailbox AT91_MB_TX_FIRST prio 0. * * We use the priv->tx_next as counter for the next transmission * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits * encode the mailbox number, the upper 4 bits the mailbox priority: * * priv->tx_next = (prio << get_next_prio_shift(priv)) | * (mb - get_mb_tx_first(priv)); * */ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf = (struct can_frame *)skb->data; unsigned int mb, prio; u32 reg_mid, reg_mcr; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; mb = get_tx_next_mb(priv); prio = get_tx_next_prio(priv); if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { netif_stop_queue(dev); netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); return NETDEV_TX_BUSY; } reg_mid = at91_can_id_to_reg_mid(cf->can_id); reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | (cf->can_dlc << 16) | AT91_MCR_MTCR; /* disable MB while writing ID (see datasheet) */ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); at91_write(priv, AT91_MID(mb), reg_mid); set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio); at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0)); at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4)); /* This triggers transmission */ at91_write(priv, AT91_MCR(mb), reg_mcr); stats->tx_bytes += cf->can_dlc; /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv)); /* * we have to stop the queue and deliver all messages in case * of a prio+mb counter wrap around. This is the case if * tx_next buffer prio and mailbox equals 0. 
* * also stop the queue if next buffer is still in use * (== not ready) */ priv->tx_next++; if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) & AT91_MSR_MRDY) || (priv->tx_next & get_next_mask(priv)) == 0) netif_stop_queue(dev); /* Enable interrupt for this mailbox */ at91_write(priv, AT91_IER, 1 << mb); return NETDEV_TX_OK; } /** * at91_activate_rx_low - activate lower rx mailboxes * @priv: a91 context * * Reenables the lower mailboxes for reception of new CAN messages */ static inline void at91_activate_rx_low(const struct at91_priv *priv) { u32 mask = get_mb_rx_low_mask(priv); at91_write(priv, AT91_TCR, mask); } /** * at91_activate_rx_mb - reactive single rx mailbox * @priv: a91 context * @mb: mailbox to reactivate * * Reenables given mailbox for reception of new CAN messages */ static inline void at91_activate_rx_mb(const struct at91_priv *priv, unsigned int mb) { u32 mask = 1 << mb; at91_write(priv, AT91_TCR, mask); } /** * at91_rx_overflow_err - send error frame due to rx overflow * @dev: net device */ static void at91_rx_overflow_err(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; struct can_frame *cf; netdev_dbg(dev, "RX buffer overflow\n"); stats->rx_over_errors++; stats->rx_errors++; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; } /** * at91_read_mb - read CAN msg from mailbox (lowlevel impl) * @dev: net device * @mb: mailbox number to read from * @cf: can frame where to store message * * Reads a CAN message from the given mailbox and stores data into * given can frame. "mb" and "cf" must be valid. 
*/ static void at91_read_mb(struct net_device *dev, unsigned int mb, struct can_frame *cf) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_msr, reg_mid; reg_mid = at91_read(priv, AT91_MID(mb)); if (reg_mid & AT91_MID_MIDE) cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK; reg_msr = at91_read(priv, AT91_MSR(mb)); cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf); if (reg_msr & AT91_MSR_MRTR) cf->can_id |= CAN_RTR_FLAG; else { *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); } /* allow RX of extended frames */ at91_write(priv, AT91_MID(mb), AT91_MID_MIDE); if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI)) at91_rx_overflow_err(dev); } /** * at91_read_msg - read CAN message from mailbox * @dev: net device * @mb: mail box to read from * * Reads a CAN message from given mailbox, and put into linux network * RX queue, does all housekeeping chores (stats, ...) */ static void at91_read_msg(struct net_device *dev, unsigned int mb) { struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; skb = alloc_can_skb(dev, &cf); if (unlikely(!skb)) { stats->rx_dropped++; return; } at91_read_mb(dev, mb, cf); netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; can_led_event(dev, CAN_LED_EVENT_RX); } /** * at91_poll_rx - read multiple CAN messages from mailboxes * @dev: net device * @quota: max number of pkgs we're allowed to receive * * Theory of Operation: * * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last()) * on the chip are reserved for RX. We split them into 2 groups. The * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last(). * * Like it or not, but the chip always saves a received CAN message * into the first free mailbox it finds (starting with the * lowest). This makes it very difficult to read the messages in the * right order from the chip. 
This is how we work around that problem: * * The first message goes into mb nr. 1 and issues an interrupt. All * rx ints are disabled in the interrupt handler and a napi poll is * scheduled. We read the mailbox, but do _not_ reenable the mb (to * receive another message). * * lower mbxs upper * ____^______ __^__ * / \ / \ * +-+-+-+-+-+-+-+-++-+-+-+-+ * | |x|x|x|x|x|x|x|| | | | | * +-+-+-+-+-+-+-+-++-+-+-+-+ * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail * 0 1 2 3 4 5 6 7 8 9 0 1 / box * ^ * | * \ * unused, due to chip bug * * The variable priv->rx_next points to the next mailbox to read a * message from. As long we're in the lower mailboxes we just read the * mailbox but not reenable it. * * With completion of the last of the lower mailboxes, we reenable the * whole first group, but continue to look for filled mailboxes in the * upper mailboxes. Imagine the second group like overflow mailboxes, * which takes CAN messages if the lower goup is full. While in the * upper group we reenable the mailbox right after reading it. Giving * the chip more room to store messages. * * After finishing we look again in the lower group if we've still * quota. 
* */ static int at91_poll_rx(struct net_device *dev, int quota) { struct at91_priv *priv = netdev_priv(dev); u32 reg_sr = at91_read(priv, AT91_SR); const unsigned long *addr = (unsigned long *)&reg_sr; unsigned int mb; int received = 0; if (priv->rx_next > get_mb_rx_low_last(priv) && reg_sr & get_mb_rx_low_mask(priv)) netdev_info(dev, "order of incoming frames cannot be guaranteed\n"); again: for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next); mb < get_mb_tx_first(priv) && quota > 0; reg_sr = at91_read(priv, AT91_SR), mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) { at91_read_msg(dev, mb); /* reactivate mailboxes */ if (mb == get_mb_rx_low_last(priv)) /* all lower mailboxed, if just finished it */ at91_activate_rx_low(priv); else if (mb > get_mb_rx_low_last(priv)) /* only the mailbox we read */ at91_activate_rx_mb(priv, mb); received++; quota--; } /* upper group completed, look again in lower */ if (priv->rx_next > get_mb_rx_low_last(priv) && quota > 0 && mb > get_mb_rx_last(priv)) { priv->rx_next = get_mb_rx_first(priv); goto again; } return received; } static void at91_poll_err_frame(struct net_device *dev, struct can_frame *cf, u32 reg_sr) { struct at91_priv *priv = netdev_priv(dev); /* CRC error */ if (reg_sr & AT91_IRQ_CERR) { netdev_dbg(dev, "CERR irq\n"); dev->stats.rx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; } /* Stuffing Error */ if (reg_sr & AT91_IRQ_SERR) { netdev_dbg(dev, "SERR irq\n"); dev->stats.rx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_STUFF; } /* Acknowledgement Error */ if (reg_sr & AT91_IRQ_AERR) { netdev_dbg(dev, "AERR irq\n"); dev->stats.tx_errors++; cf->can_id |= CAN_ERR_ACK; } /* Form error */ if (reg_sr & AT91_IRQ_FERR) { netdev_dbg(dev, "FERR irq\n"); dev->stats.rx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= 
CAN_ERR_PROT_FORM; } /* Bit Error */ if (reg_sr & AT91_IRQ_BERR) { netdev_dbg(dev, "BERR irq\n"); dev->stats.tx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_BIT; } } static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) { struct sk_buff *skb; struct can_frame *cf; if (quota == 0) return 0; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; at91_poll_err_frame(dev, cf, reg_sr); netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; return 1; } static int at91_poll(struct napi_struct *napi, int quota) { struct net_device *dev = napi->dev; const struct at91_priv *priv = netdev_priv(dev); u32 reg_sr = at91_read(priv, AT91_SR); int work_done = 0; if (reg_sr & get_irq_mb_rx(priv)) work_done += at91_poll_rx(dev, quota - work_done); /* * The error bits are clear on read, * so use saved value from irq handler. */ reg_sr |= priv->reg_sr; if (reg_sr & AT91_IRQ_ERR_FRAME) work_done += at91_poll_err(dev, quota - work_done, reg_sr); if (work_done < quota) { /* enable IRQs for frame errors and all mailboxes >= rx_next */ u32 reg_ier = AT91_IRQ_ERR_FRAME; reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); napi_complete(napi); at91_write(priv, AT91_IER, reg_ier); } return work_done; } /* * theory of operation: * * priv->tx_echo holds the number of the oldest can_frame put for * transmission into the hardware, but not yet ACKed by the CAN tx * complete IRQ. * * We iterate from priv->tx_echo to priv->tx_next and check if the * packet has been transmitted, echo it back to the CAN framework. If * we discover a not yet transmitted package, stop looking for more. 
* */ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) { struct at91_priv *priv = netdev_priv(dev); u32 reg_msr; unsigned int mb; /* masking of reg_sr not needed, already done by at91_irq */ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { mb = get_tx_echo_mb(priv); /* no event in mailbox? */ if (!(reg_sr & (1 << mb))) break; /* Disable irq for this TX mailbox */ at91_write(priv, AT91_IDR, 1 << mb); /* * only echo if mailbox signals us a transfer * complete (MSR_MRDY). Otherwise it's a tansfer * abort. "can_bus_off()" takes care about the skbs * parked in the echo queue. */ reg_msr = at91_read(priv, AT91_MSR(mb)); if (likely(reg_msr & AT91_MSR_MRDY && ~reg_msr & AT91_MSR_MABT)) { /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ can_get_echo_skb(dev, mb - get_mb_tx_first(priv)); dev->stats.tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); } } /* * restart queue if we don't have a wrap around but restart if * we get a TX int for the last can frame directly before a * wrap around. */ if ((priv->tx_next & get_next_mask(priv)) != 0 || (priv->tx_echo & get_next_mask(priv)) == 0) netif_wake_queue(dev); } static void at91_irq_err_state(struct net_device *dev, struct can_frame *cf, enum can_state new_state) { struct at91_priv *priv = netdev_priv(dev); u32 reg_idr = 0, reg_ier = 0; struct can_berr_counter bec; at91_get_berr_counter(dev, &bec); switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: /* * from: ERROR_ACTIVE * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF * => : there was a warning int */ if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) { netdev_dbg(dev, "Error Warning IRQ\n"); priv->can.can_stats.error_warning++; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } case CAN_STATE_ERROR_WARNING: /* fallthrough */ /* * from: ERROR_ACTIVE, ERROR_WARNING * to : ERROR_PASSIVE, BUS_OFF * => : error passive int */ if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) { netdev_dbg(dev, "Error Passive IRQ\n"); priv->can.can_stats.error_passive++; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } break; case CAN_STATE_BUS_OFF: /* * from: BUS_OFF * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE */ if (new_state <= CAN_STATE_ERROR_PASSIVE) { cf->can_id |= CAN_ERR_RESTARTED; netdev_dbg(dev, "restarted\n"); priv->can.can_stats.restarts++; netif_carrier_on(dev); netif_wake_queue(dev); } break; default: break; } /* process state changes depending on the new state */ switch (new_state) { case CAN_STATE_ERROR_ACTIVE: /* * actually we want to enable AT91_IRQ_WARN here, but * it screws up the system under certain * circumstances. 
so just enable AT91_IRQ_ERRP, thus * the "fallthrough" */ netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; case CAN_STATE_ERROR_WARNING: /* fallthrough */ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; break; case CAN_STATE_ERROR_PASSIVE: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; reg_ier = AT91_IRQ_BOFF; break; case CAN_STATE_BUS_OFF: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = 0; cf->can_id |= CAN_ERR_BUSOFF; netdev_dbg(dev, "bus-off\n"); netif_carrier_off(dev); priv->can.can_stats.bus_off++; /* turn off chip, if restart is disabled */ if (!priv->can.restart_ms) { at91_chip_stop(dev, CAN_STATE_BUS_OFF); return; } break; default: break; } at91_write(priv, AT91_IDR, reg_idr); at91_write(priv, AT91_IER, reg_ier); } static int at91_get_state_by_bec(const struct net_device *dev, enum can_state *state) { struct can_berr_counter bec; int err; err = at91_get_berr_counter(dev, &bec); if (err) return err; if (bec.txerr < 96 && bec.rxerr < 96) *state = CAN_STATE_ERROR_ACTIVE; else if (bec.txerr < 128 && bec.rxerr < 128) *state = CAN_STATE_ERROR_WARNING; else if (bec.txerr < 256 && bec.rxerr < 256) *state = CAN_STATE_ERROR_PASSIVE; else *state = CAN_STATE_BUS_OFF; return 0; } static void at91_irq_err(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; enum can_state new_state; u32 reg_sr; int err; if (at91_is_sam9263(priv)) { reg_sr = at91_read(priv, AT91_SR); /* we need to look at the unmasked reg_sr */ if (unlikely(reg_sr & AT91_IRQ_BOFF)) new_state = CAN_STATE_BUS_OFF; else if (unlikely(reg_sr & AT91_IRQ_ERRP)) new_state = CAN_STATE_ERROR_PASSIVE; else if (unlikely(reg_sr & AT91_IRQ_WARN)) new_state = CAN_STATE_ERROR_WARNING; else if (likely(reg_sr & AT91_IRQ_ERRA)) new_state = CAN_STATE_ERROR_ACTIVE; else { netdev_err(dev, "BUG! 
hardware in undefined state\n"); return; } } else { err = at91_get_state_by_bec(dev, &new_state); if (err) return; } /* state hasn't changed */ if (likely(new_state == priv->can.state)) return; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; at91_irq_err_state(dev, cf, new_state); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; priv->can.state = new_state; } /* * interrupt handler */ static irqreturn_t at91_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct at91_priv *priv = netdev_priv(dev); irqreturn_t handled = IRQ_NONE; u32 reg_sr, reg_imr; reg_sr = at91_read(priv, AT91_SR); reg_imr = at91_read(priv, AT91_IMR); /* Ignore masked interrupts */ reg_sr &= reg_imr; if (!reg_sr) goto exit; handled = IRQ_HANDLED; /* Receive or error interrupt? -> napi */ if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) { /* * The error bits are clear on read, * save for later use. */ priv->reg_sr = reg_sr; at91_write(priv, AT91_IDR, get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME); napi_schedule(&priv->napi); } /* Transmission complete interrupt */ if (reg_sr & get_irq_mb_tx(priv)) at91_irq_tx(dev, reg_sr); at91_irq_err(dev); exit: return handled; } static int at91_open(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); int err; clk_enable(priv->clk); /* check or determine and set bittime */ err = open_candev(dev); if (err) goto out; /* register interrupt handler */ if (request_irq(dev->irq, at91_irq, IRQF_SHARED, dev->name, dev)) { err = -EAGAIN; goto out_close; } can_led_event(dev, CAN_LED_EVENT_OPEN); /* start chip and queuing */ at91_chip_start(dev); napi_enable(&priv->napi); netif_start_queue(dev); return 0; out_close: close_candev(dev); out: clk_disable(priv->clk); return err; } /* * stop CAN bus activity */ static int at91_close(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&priv->napi); at91_chip_stop(dev, CAN_STATE_STOPPED); 
free_irq(dev->irq, dev); clk_disable(priv->clk); close_candev(dev); can_led_event(dev, CAN_LED_EVENT_STOP); return 0; } static int at91_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: at91_chip_start(dev); netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static const struct net_device_ops at91_netdev_ops = { .ndo_open = at91_open, .ndo_stop = at91_close, .ndo_start_xmit = at91_start_xmit, }; static ssize_t at91_sysfs_show_mb0_id(struct device *dev, struct device_attribute *attr, char *buf) { struct at91_priv *priv = netdev_priv(to_net_dev(dev)); if (priv->mb0_id & CAN_EFF_FLAG) return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); else return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); } static ssize_t at91_sysfs_set_mb0_id(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); struct at91_priv *priv = netdev_priv(ndev); unsigned long can_id; ssize_t ret; int err; rtnl_lock(); if (ndev->flags & IFF_UP) { ret = -EBUSY; goto out; } err = strict_strtoul(buf, 0, &can_id); if (err) { ret = err; goto out; } if (can_id & CAN_EFF_FLAG) can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; else can_id &= CAN_SFF_MASK; priv->mb0_id = can_id; ret = count; out: rtnl_unlock(); return ret; } static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); static struct attribute *at91_sysfs_attrs[] = { &dev_attr_mb0_id.attr, NULL, }; static struct attribute_group at91_sysfs_attr_group = { .attrs = at91_sysfs_attrs, }; #if defined(CONFIG_OF) static const struct of_device_id at91_can_dt_ids[] = { { .compatible = "atmel,at91sam9x5-can", .data = &at91_at91sam9x5_data, }, { .compatible = "atmel,at91sam9263-can", .data = &at91_at91sam9263_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91_can_dt_ids); #else #define at91_can_dt_ids NULL #endif static const struct at91_devtype_data *at91_can_get_driver_data(struct 
platform_device *pdev) { if (pdev->dev.of_node) { const struct of_device_id *match; match = of_match_node(at91_can_dt_ids, pdev->dev.of_node); if (!match) { dev_err(&pdev->dev, "no matching node found in dtb\n"); return NULL; } return (const struct at91_devtype_data *)match->data; } return (const struct at91_devtype_data *) platform_get_device_id(pdev)->driver_data; } static int at91_can_probe(struct platform_device *pdev) { const struct at91_devtype_data *devtype_data; struct net_device *dev; struct at91_priv *priv; struct resource *res; struct clk *clk; void __iomem *addr; int err, irq; devtype_data = at91_can_get_driver_data(pdev); if (!devtype_data) { dev_err(&pdev->dev, "no driver data\n"); err = -ENODEV; goto exit; } clk = clk_get(&pdev->dev, "can_clk"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "no clock defined\n"); err = -ENODEV; goto exit; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq <= 0) { err = -ENODEV; goto exit_put; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { err = -EBUSY; goto exit_put; } addr = ioremap_nocache(res->start, resource_size(res)); if (!addr) { err = -ENOMEM; goto exit_release; } dev = alloc_candev(sizeof(struct at91_priv), 1 << devtype_data->tx_shift); if (!dev) { err = -ENOMEM; goto exit_iounmap; } dev->netdev_ops = &at91_netdev_ops; dev->irq = irq; dev->flags |= IFF_ECHO; priv = netdev_priv(dev); priv->can.clock.freq = clk_get_rate(clk); priv->can.bittiming_const = &at91_bittiming_const; priv->can.do_set_mode = at91_set_mode; priv->can.do_get_berr_counter = at91_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; priv->dev = dev; priv->reg_base = addr; priv->devtype_data = *devtype_data; priv->clk = clk; priv->pdata = pdev->dev.platform_data; priv->mb0_id = 0x7ff; netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; dev_set_drvdata(&pdev->dev, 
dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_candev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); goto exit_free; } devm_can_led_init(dev); dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; exit_free: free_candev(dev); exit_iounmap: iounmap(addr); exit_release: release_mem_region(res->start, resource_size(res)); exit_put: clk_put(clk); exit: return err; } static int at91_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct at91_priv *priv = netdev_priv(dev); struct resource *res; unregister_netdev(dev); platform_set_drvdata(pdev, NULL); iounmap(priv->reg_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_put(priv->clk); free_candev(dev); return 0; } static const struct platform_device_id at91_can_id_table[] = { { .name = "at91_can", .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data, }, { .name = "at91sam9x5_can", .driver_data = (kernel_ulong_t)&at91_at91sam9263_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, at91_can_id_table); static struct platform_driver at91_can_driver = { .probe = at91_can_probe, .remove = at91_can_remove, .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, .of_match_table = at91_can_dt_ids, }, .id_table = at91_can_id_table, }; module_platform_driver(at91_can_driver); MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
gpl-2.0
theophile/amazon_fire_kffowi_bonus_modules
arch/x86/kvm/svm.c
786
113291
/* * Kernel-based Virtual Machine driver for Linux * * AMD SVM support * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/kernel.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/ftrace_event.h> #include <linux/slab.h> #include <asm/perf_event.h> #include <asm/tlbflush.h> #include <asm/desc.h> #include <asm/kvm_para.h> #include <asm/virtext.h> #include "trace.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id svm_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_SVM), {} }; MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); #define IOPM_ALLOC_ORDER 2 #define MSRPM_ALLOC_ORDER 1 #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 #define SVM_FEATURE_NPT (1 << 0) #define SVM_FEATURE_LBRV (1 << 1) #define SVM_FEATURE_SVML (1 << 2) #define SVM_FEATURE_NRIP (1 << 3) #define SVM_FEATURE_TSC_RATE (1 << 4) #define SVM_FEATURE_VMCB_CLEAN (1 << 5) #define SVM_FEATURE_FLUSH_ASID (1 << 6) #define SVM_FEATURE_DECODE_ASSIST (1 << 7) #define SVM_FEATURE_PAUSE_FILTER (1 << 10) #define NESTED_EXIT_HOST 0 /* Exit handled on host level */ #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */ #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */ #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) #define TSC_RATIO_RSVD 0xffffff0000000000ULL #define TSC_RATIO_MIN 0x0000000000000001ULL #define TSC_RATIO_MAX 0x000000ffffffffffULL static bool erratum_383_found __read_mostly; static const u32 host_save_user_msrs[] = { #ifdef 
CONFIG_X86_64 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, MSR_FS_BASE, #endif MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) struct kvm_vcpu; struct nested_state { struct vmcb *hsave; u64 hsave_msr; u64 vm_cr_msr; u64 vmcb; /* These are the merged vectors */ u32 *msrpm; /* gpa pointers to the real vectors */ u64 vmcb_msrpm; u64 vmcb_iopm; /* A VMEXIT is required but not yet emulated */ bool exit_required; /* cache for intercepts of the guest */ u32 intercept_cr; u32 intercept_dr; u32 intercept_exceptions; u64 intercept; /* Nested Paging related state */ u64 nested_cr3; }; #define MSRPM_OFFSETS 16 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; /* * Set osvw_len to higher value when updated Revision Guides * are published and we know what the new status bits are */ static uint64_t osvw_len = 4, osvw_status; struct vcpu_svm { struct kvm_vcpu vcpu; struct vmcb *vmcb; unsigned long vmcb_pa; struct svm_cpu_data *svm_data; uint64_t asid_generation; uint64_t sysenter_esp; uint64_t sysenter_eip; u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; struct { u16 fs; u16 gs; u16 ldt; u64 gs_base; } host; u32 *msrpm; ulong nmi_iret_rip; struct nested_state nested; bool nmi_singlestep; unsigned int3_injected; unsigned long int3_rip; u32 apf_reason; u64 tsc_ratio; }; static DEFINE_PER_CPU(u64, current_tsc_ratio); #define TSC_RATIO_DEFAULT 0x0100000000ULL #define MSR_INVALID 0xffffffffU static const struct svm_direct_access_msrs { u32 index; /* Index of the MSR */ bool always; /* True if intercept is always on */ } direct_access_msrs[] = { { .index = MSR_STAR, .always = true }, { .index = MSR_IA32_SYSENTER_CS, .always = true }, #ifdef CONFIG_X86_64 { .index = MSR_GS_BASE, .always = true }, { .index = MSR_FS_BASE, .always = true }, { .index = MSR_KERNEL_GS_BASE, .always = true }, { .index = MSR_LSTAR, .always = true }, { .index = MSR_CSTAR, .always = true }, 
{ .index = MSR_SYSCALL_MASK, .always = true }, #endif { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, { .index = MSR_IA32_LASTINTTOIP, .always = false }, { .index = MSR_INVALID, .always = false }, }; /* enable NPT for AMD64 and X86 with PAE */ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static bool npt_enabled = true; #else static bool npt_enabled; #endif /* allow nested paging (virtualized MMU) for all guests */ static int npt = true; module_param(npt, int, S_IRUGO); /* allow nested virtualization in KVM/SVM */ static int nested = true; module_param(nested, int, S_IRUGO); static void svm_flush_tlb(struct kvm_vcpu *vcpu); static void svm_complete_interrupts(struct vcpu_svm *svm); static int nested_svm_exit_handled(struct vcpu_svm *svm); static int nested_svm_intercept(struct vcpu_svm *svm); static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); static u64 __scale_tsc(u64 ratio, u64 tsc); enum { VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, pause filter count */ VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */ VMCB_ASID, /* ASID */ VMCB_INTR, /* int_ctl, int_vector */ VMCB_NPT, /* npt_en, nCR3, gPAT */ VMCB_CR, /* CR0, CR3, CR4, EFER */ VMCB_DR, /* DR6, DR7 */ VMCB_DT, /* GDT, IDT */ VMCB_SEG, /* CS, DS, SS, ES, CPL */ VMCB_CR2, /* CR2 only */ VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */ VMCB_DIRTY_MAX, }; /* TPR and CR2 are always written before VMRUN */ #define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2)) static inline void mark_all_dirty(struct vmcb *vmcb) { vmcb->control.clean = 0; } static inline void mark_all_clean(struct vmcb *vmcb) { vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1) & ~VMCB_ALWAYS_DIRTY_MASK; } static inline void mark_dirty(struct vmcb *vmcb, int bit) { 
	/* Clear one clean bit: hardware must reload that VMCB area on VMRUN. */
	vmcb->control.clean &= ~(1 << bit);
}

/* Convert the generic kvm_vcpu into its enclosing SVM-specific container. */
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Rebuild the active VMCB's intercept masks. In guest (nested) mode the
 * active masks are the OR of the host's masks (saved in hsave) and the
 * nested guest's masks; outside guest mode only the dirty bit is set,
 * since the active VMCB already holds the host masks.
 */
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;		/* active (merged) controls */
	h = &svm->nested.hsave->control;	/* host's saved controls */
	g = &svm->nested;			/* nested guest's intercepts */

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

/* VMCB whose intercept masks are owned by the host (L1) at this moment. */
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

/* Control-register intercept helpers; bit selects the CR access. */
static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

/* Debug-register intercept helpers, same pattern as the CR variants. */
static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);

	recalc_intercepts(svm);
}

/* Exception intercept helpers; bit is the exception vector number. */
static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

/* (continues on the next chunk line) */
static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);
	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

/* Instruction/event intercept helpers; bit is an INTERCEPT_* constant. */
static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

/* GIF (global interrupt flag) state, tracked in vcpu->arch.hflags. */
static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* Physical base of the shared I/O permission bitmap used by every VMCB. */
static unsigned long iopm_base;

/* Layout of an LDT/TSS descriptor as it appears in the host GDT. */
struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

/* Per-CPU SVM state: ASID allocation bookkeeping and the host save area. */
struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

/* Start MSR of each range covered by the MSR permission map. */
static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

/*
 * Map an MSR number to its u32-granular offset inside the MSR permission
 * map, or MSR_INVALID when the MSR falls outside every mapped range.
 */
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE); /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

/* Thin wrappers around the CLGI / STGI / INVLPGA instructions. */
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

/* (asm statement continues on the next chunk line) */
static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile
		(__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

/* Page-table depth used for nested paging on this build. */
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

/*
 * Propagate a new guest EFER into the VMCB. Without NPT, LME is stripped
 * until the guest is actually in long mode (LMA set); SVME is always
 * forced on because hardware requires it while running a guest.
 */
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

/* True iff the event-injection word describes an external interrupt. */
static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

/* Report STI/MOV-SS interrupt-shadow state, filtered by 'mask'. */
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

/* Set or clear the VMCB interrupt-shadow bit (any nonzero mask sets it). */
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

/*
 * Advance guest RIP past the instruction that just intercepted. Uses the
 * hardware-provided next_rip when available, otherwise falls back to the
 * instruction emulator in EMULTYPE_SKIP mode. Clears the interrupt shadow
 * afterwards.
 */
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) != EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	/* sanity check: the skip distance should never exceed one insn */
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n", __func__,
		       kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

/*
 * Queue an exception for injection on the next VMRUN; hands the exception
 * to a nested guest first when it has intercepted it. (Body continues on
 * the next chunk line.)
 */
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR &&
!static_cpu_has(X86_FEATURE_NRIPS)) { unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); /* * For guest debugging where we have to reinject #BP if some * INT3 is guest-owned: * Emulate nRIP by moving RIP forward. Will fail if injection * raises a fault that is not intercepted. Still better than * failing in all cases. */ skip_emulated_instruction(&svm->vcpu); rip = kvm_rip_read(&svm->vcpu); svm->int3_rip = rip + svm->vmcb->save.cs.base; svm->int3_injected = rip - old_rip; } svm->vmcb->control.event_inj = nr | SVM_EVTINJ_VALID | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) | SVM_EVTINJ_TYPE_EXEPT; svm->vmcb->control.event_inj_err = error_code; } static void svm_init_erratum_383(void) { u32 low, high; int err; u64 val; if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) return; /* Use _safe variants to not break nested virtualization */ val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err); if (err) return; val |= (1ULL << 47); low = lower_32_bits(val); high = upper_32_bits(val); native_write_msr_safe(MSR_AMD64_DC_CFG, low, high); erratum_383_found = true; } static void svm_init_osvw(struct kvm_vcpu *vcpu) { /* * Guests should see errata 400 and 415 as fixed (assuming that * HLT and IO instructions are intercepted). */ vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; vcpu->arch.osvw.status = osvw_status & ~(6ULL); /* * By increasing VCPU's osvw.length to 3 we are telling the guest that * all osvw.status bits inside that length, including bit 0 (which is * reserved for erratum 298), are valid. However, if host processor's * osvw_len is 0 then osvw_status[0] carries no information. We need to * be conservative here and therefore we tell the guest that erratum 298 * is present (because we really don't know). 
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

/* Return 1 if the CPU advertises SVM support, else log the reason and return 0. */
static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

/*
 * Per-CPU teardown: restore the default TSC ratio, leave SVM operation
 * and notify the AMD PMU code that virtualization is off on this CPU.
 */
static void svm_hardware_disable(void *garbage)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

/*
 * Per-CPU setup: verify SVM is available and not already enabled, seed
 * this CPU's ASID allocator, locate the TSS descriptor in the host GDT,
 * set EFER.SVME and point MSR_VM_HSAVE_PA at the per-CPU host save area.
 * Returns 0 on success or a negative errno.
 */
static int svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
*/ if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { uint64_t len, status = 0; int err; len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); if (!err) status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &err); if (err) osvw_status = osvw_len = 0; else { if (len < osvw_len) osvw_len = len; osvw_status |= status; osvw_status &= (1ULL << osvw_len) - 1; } } else osvw_status = osvw_len = 0; svm_init_erratum_383(); amd_pmu_enable_virt(); return 0; } static void svm_cpu_uninit(int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id()); if (!sd) return; per_cpu(svm_data, raw_smp_processor_id()) = NULL; __free_page(sd->save_area); kfree(sd); } static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd; int r; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) return -ENOMEM; sd->cpu = cpu; sd->save_area = alloc_page(GFP_KERNEL); r = -ENOMEM; if (!sd->save_area) goto err_1; per_cpu(svm_data, cpu) = sd; return 0; err_1: kfree(sd); return r; } static bool valid_msr_intercept(u32 index) { int i; for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) if (direct_access_msrs[i].index == index) return true; return false; } static void set_msr_interception(u32 *msrpm, unsigned msr, int read, int write) { u8 bit_read, bit_write; unsigned long tmp; u32 offset; /* * If this warning triggers extend the direct_access_msrs list at the * beginning of the file */ WARN_ON(!valid_msr_intercept(msr)); offset = svm_msrpm_offset(msr); bit_read = 2 * (msr & 0x0f); bit_write = 2 * (msr & 0x0f) + 1; tmp = msrpm[offset]; BUG_ON(offset == MSR_INVALID); read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp); write ? 
clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp); msrpm[offset] = tmp; } static void svm_vcpu_init_msrpm(u32 *msrpm) { int i; memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { if (!direct_access_msrs[i].always) continue; set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1); } } static void add_msr_offset(u32 offset) { int i; for (i = 0; i < MSRPM_OFFSETS; ++i) { /* Offset already in list? */ if (msrpm_offsets[i] == offset) return; /* Slot used by another offset? */ if (msrpm_offsets[i] != MSR_INVALID) continue; /* Add offset to list */ msrpm_offsets[i] = offset; return; } /* * If this BUG triggers the msrpm_offsets table has an overflow. Just * increase MSRPM_OFFSETS in this case. */ BUG(); } static void init_msrpm_offsets(void) { int i; memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { u32 offset; offset = svm_msrpm_offset(direct_access_msrs[i].index); BUG_ON(offset == MSR_INVALID); add_msr_offset(offset); } } static void svm_enable_lbrv(struct vcpu_svm *svm) { u32 *msrpm = svm->msrpm; svm->vmcb->control.lbr_ctl = 1; set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1); } static void svm_disable_lbrv(struct vcpu_svm *svm) { u32 *msrpm = svm->msrpm; svm->vmcb->control.lbr_ctl = 0; set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } static __init int svm_hardware_setup(void) { int cpu; struct page *iopm_pages; void *iopm_va; int r; iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); if (!iopm_pages) return -ENOMEM; iopm_va = 
page_address(iopm_pages); memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; init_msrpm_offsets(); if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) kvm_enable_efer_bits(EFER_FFXSR); if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { u64 max; kvm_has_tsc_control = true; /* * Make sure the user can only configure tsc_khz values that * fit into a signed integer. * A min value is not calculated needed because it will always * be 1 on all machines and a value of 0 is used to disable * tsc-scaling for the vcpu. */ max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX)); kvm_max_guest_tsc_khz = max; } if (nested) { printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); } for_each_possible_cpu(cpu) { r = svm_cpu_init(cpu); if (r) goto err; } if (!boot_cpu_has(X86_FEATURE_NPT)) npt_enabled = false; if (npt_enabled && !npt) { printk(KERN_INFO "kvm: Nested Paging disabled\n"); npt_enabled = false; } if (npt_enabled) { printk(KERN_INFO "kvm: Nested Paging enabled\n"); kvm_enable_tdp(); } else kvm_disable_tdp(); return 0; err: __free_pages(iopm_pages, IOPM_ALLOC_ORDER); iopm_base = 0; return r; } static __exit void svm_hardware_unsetup(void) { int cpu; for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); iopm_base = 0; } static void init_seg(struct vmcb_seg *seg) { seg->selector = 0; seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ seg->limit = 0xffff; seg->base = 0; } static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) { seg->selector = 0; seg->attrib = SVM_SELECTOR_P_MASK | type; seg->limit = 0xffff; seg->base = 0; } static u64 __scale_tsc(u64 ratio, u64 tsc) { u64 mult, frac, _tsc; mult = ratio >> 32; frac = ratio & ((1ULL << 32) - 1); _tsc = tsc; _tsc *= mult; _tsc += (tsc >> 
		32) * frac; /* high half of tsc scaled by fractional ratio part */
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

/*
 * Scale a TSC value by this vcpu's 32.32 fixed-point TSC ratio; identity
 * when the ratio is the default (guest runs at host frequency).
 */
static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

/*
 * Program the vcpu's virtual TSC frequency. Without the hardware
 * TSC-ratio MSR, fall back to catchup mode (or warn if the requested
 * rate is below the host's); otherwise compute and store the 32.32
 * fixed-point ratio, rejecting values that hit reserved bits.
 */
static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 ratio;
	u64 khz;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		svm->tsc_ratio = TSC_RATIO_DEFAULT;
		return;
	}

	/* TSC scaling supported? */
	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
		} else
			WARN(1, "user requested TSC rate below hardware speed\n");
		return;
	}

	khz = user_tsc_khz;

	/* TSC scaling required - calculate ratio */
	ratio = khz << 32;
	do_div(ratio, tsc_khz);

	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return;
	}
	svm->tsc_ratio = ratio;
}

/* Read the TSC offset currently programmed in the active VMCB. */
static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->vmcb->control.tsc_offset;
}

/*
 * Install a new guest TSC offset. In guest (nested) mode the host save
 * area receives the new base value while the active VMCB keeps the
 * existing L1->L2 delta on top of it.
 */
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

/*
 * Add an adjustment to the TSC offset; host-clock adjustments are first
 * converted into guest-TSC units via the TSC ratio. Applied to both the
 * active VMCB and, in guest mode, the host save area.
 */
static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(adjustment < 0);
	if (host)
		adjustment = svm_scale_tsc(vcpu, adjustment);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

/* Offset needed for the guest to observe target_tsc now.
 * (Body continues on the next chunk line.) */
static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc; tsc = svm_scale_tsc(vcpu, native_read_tsc()); return target_tsc - tsc; } static void init_vmcb(struct vcpu_svm *svm) { struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; svm->vcpu.fpu_active = 1; svm->vcpu.arch.hflags = 0; set_cr_intercept(svm, INTERCEPT_CR0_READ); set_cr_intercept(svm, INTERCEPT_CR3_READ); set_cr_intercept(svm, INTERCEPT_CR4_READ); set_cr_intercept(svm, INTERCEPT_CR0_WRITE); set_cr_intercept(svm, INTERCEPT_CR3_WRITE); set_cr_intercept(svm, INTERCEPT_CR4_WRITE); set_cr_intercept(svm, INTERCEPT_CR8_WRITE); set_dr_intercept(svm, INTERCEPT_DR0_READ); set_dr_intercept(svm, INTERCEPT_DR1_READ); set_dr_intercept(svm, INTERCEPT_DR2_READ); set_dr_intercept(svm, INTERCEPT_DR3_READ); set_dr_intercept(svm, INTERCEPT_DR4_READ); set_dr_intercept(svm, INTERCEPT_DR5_READ); set_dr_intercept(svm, INTERCEPT_DR6_READ); set_dr_intercept(svm, INTERCEPT_DR7_READ); set_dr_intercept(svm, INTERCEPT_DR0_WRITE); set_dr_intercept(svm, INTERCEPT_DR1_WRITE); set_dr_intercept(svm, INTERCEPT_DR2_WRITE); set_dr_intercept(svm, INTERCEPT_DR3_WRITE); set_dr_intercept(svm, INTERCEPT_DR4_WRITE); set_dr_intercept(svm, INTERCEPT_DR5_WRITE); set_dr_intercept(svm, INTERCEPT_DR6_WRITE); set_dr_intercept(svm, INTERCEPT_DR7_WRITE); set_exception_intercept(svm, PF_VECTOR); set_exception_intercept(svm, UD_VECTOR); set_exception_intercept(svm, MC_VECTOR); set_intercept(svm, INTERCEPT_INTR); set_intercept(svm, INTERCEPT_NMI); set_intercept(svm, INTERCEPT_SMI); set_intercept(svm, INTERCEPT_SELECTIVE_CR0); set_intercept(svm, INTERCEPT_RDPMC); set_intercept(svm, INTERCEPT_CPUID); set_intercept(svm, INTERCEPT_INVD); set_intercept(svm, INTERCEPT_HLT); set_intercept(svm, INTERCEPT_INVLPG); set_intercept(svm, INTERCEPT_INVLPGA); set_intercept(svm, INTERCEPT_IOIO_PROT); set_intercept(svm, INTERCEPT_MSR_PROT); set_intercept(svm, INTERCEPT_TASK_SWITCH); set_intercept(svm, INTERCEPT_SHUTDOWN); set_intercept(svm, INTERCEPT_VMRUN); 
set_intercept(svm, INTERCEPT_VMMCALL); set_intercept(svm, INTERCEPT_VMLOAD); set_intercept(svm, INTERCEPT_VMSAVE); set_intercept(svm, INTERCEPT_STGI); set_intercept(svm, INTERCEPT_CLGI); set_intercept(svm, INTERCEPT_SKINIT); set_intercept(svm, INTERCEPT_WBINVD); set_intercept(svm, INTERCEPT_MONITOR); set_intercept(svm, INTERCEPT_MWAIT); set_intercept(svm, INTERCEPT_XSETBV); control->iopm_base_pa = iopm_base; control->msrpm_base_pa = __pa(svm->msrpm); control->int_ctl = V_INTR_MASKING_MASK; init_seg(&save->es); init_seg(&save->ss); init_seg(&save->ds); init_seg(&save->fs); init_seg(&save->gs); save->cs.selector = 0xf000; save->cs.base = 0xffff0000; /* Executable/Readable Code Segment */ save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; save->cs.limit = 0xffff; save->gdtr.limit = 0xffff; save->idtr.limit = 0xffff; init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); svm_set_efer(&svm->vcpu, 0); save->dr6 = 0xffff0ff0; kvm_set_rflags(&svm->vcpu, 2); save->rip = 0x0000fff0; svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; /* * This is the guest-visible cr0 value. * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. */ svm->vcpu.arch.cr0 = 0; (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); save->cr4 = X86_CR4_PAE; /* rdx = ?? 
*/ if (npt_enabled) { /* Setup VMCB for Nested Paging */ control->nested_ctl = 1; clr_intercept(svm, INTERCEPT_INVLPG); clr_exception_intercept(svm, PF_VECTOR); clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = 0x0007040600070406ULL; save->cr3 = 0; save->cr4 = 0; } svm->asid_generation = 0; svm->nested.vmcb = 0; svm->vcpu.arch.hflags = 0; if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { control->pause_filter_count = 3000; set_intercept(svm, INTERCEPT_PAUSE); } mark_all_dirty(svm->vmcb); enable_gif(svm); } static void svm_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u32 dummy; u32 eax = 1; init_vmcb(svm); kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy); kvm_register_write(vcpu, VCPU_REGS_RDX, eax); } static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; struct page *hsave_page; struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!svm) { err = -ENOMEM; goto out; } svm->tsc_ratio = TSC_RATIO_DEFAULT; err = kvm_vcpu_init(&svm->vcpu, kvm, id); if (err) goto free_svm; err = -ENOMEM; page = alloc_page(GFP_KERNEL); if (!page) goto uninit; msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto free_page1; nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!nested_msrpm_pages) goto free_page2; hsave_page = alloc_page(GFP_KERNEL); if (!hsave_page) goto free_page3; svm->nested.hsave = page_address(hsave_page); svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); svm->nested.msrpm = page_address(nested_msrpm_pages); svm_vcpu_init_msrpm(svm->nested.msrpm); svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; init_vmcb(svm); svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&svm->vcpu)) 
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; svm_init_osvw(&svm->vcpu); return &svm->vcpu; free_page3: __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); free_page2: __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); free_page1: __free_page(page); uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: return ERR_PTR(err); } static void svm_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_svm *svm = to_svm(vcpu); int i; if (unlikely(cpu != vcpu->cpu)) { svm->asid_generation = 0; mark_all_dirty(svm->vmcb); } #ifdef CONFIG_X86_64 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); #endif savesegment(fs, svm->host.fs); savesegment(gs, svm->host.gs); svm->host.ldt = kvm_read_ldt(); for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) { __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio; wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); } } static void svm_vcpu_put(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); int i; ++vcpu->stat.host_state_reload; kvm_load_ldt(svm->host.ldt); #ifdef CONFIG_X86_64 loadsegment(fs, svm->host.fs); wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); load_gs_index(svm->host.gs); #else #ifdef CONFIG_X86_32_LAZY_GS loadsegment(gs, svm->host.gs); #endif #endif for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } static void svm_update_cpl(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); int cpl; if (!is_protmode(vcpu)) cpl = 
/*
 * NOTE(review): tail of svm_update_cpl() — the function header precedes
 * this chunk and is not visible here.  Derives the CPL from RFLAGS.VM
 * or the CS selector RPL and stores it in the VMCB save area.
 */
0; else if (svm->vmcb->save.rflags & X86_EFLAGS_VM)
	cpl = 3;
else
	cpl = svm->vmcb->save.cs.selector & 0x3;

	svm->vmcb->save.cpl = cpl;
}

/* Read the guest RFLAGS image from the VMCB save area. */
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

/*
 * Write guest RFLAGS.  If the VM86 bit toggled, the CPL derivation
 * changes too, so recompute the cached CPL.
 */
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags;

	to_svm(vcpu)->vmcb->save.rflags = rflags;
	if ((old_rflags ^ rflags) & X86_EFLAGS_VM)
		svm_update_cpl(vcpu);
}

/* Refresh a lazily-cached register; only the PDPTRs are handled here. */
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

/* Enable the virtual-interrupt-window (VINTR) intercept. */
static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

/* Map a VCPU_SREG_* index to the corresponding VMCB segment register. */
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

/*
 * Translate the VMCB segment descriptor cache into KVM's generic
 * kvm_segment representation, papering over AMD/Intel differences so
 * that cross-vendor migration works.
 */
static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		break;
	}
}

/* Current privilege level as tracked in the VMCB save area. */
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

/* Nothing to decache on SVM: these are read directly from the VMCB. */
static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

/*
 * Decide whether CR0 accesses must be intercepted: only needed while
 * the guest view (gcr0) and host view (hcr0) of CR0 differ or the FPU
 * is deactivated (lazy TS handling).
 */
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	if (!svm->vcpu.fpu_active)
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

/* Load a new guest CR0, handling long-mode (LMA) transitions. */
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	if (!vcpu->fpu_active)
		cr0 |= X86_CR0_TS;
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

/* Load a new guest CR4; VMXE is rejected (no VMX inside SVM guests). */
static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

/* Write a generic kvm_segment back into the VMCB descriptor cache. */
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	/* CS changes can change the privilege level */
	if (seg == VCPU_SREG_CS)
		svm_update_cpl(vcpu);

	mark_dirty(svm->vmcb, VMCB_SEG);
}

/*
 * Re-evaluate which #DB/#BP exceptions must be intercepted, based on
 * userspace guest-debug settings and NMI single-step emulation.
 */
static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, DB_VECTOR);
	clr_exception_intercept(svm, BP_VECTOR);

	if (svm->nmi_singlestep)
		set_exception_intercept(svm, DB_VECTOR);

	if (vcpu->guest_debug &
KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			set_exception_intercept(svm, DB_VECTOR);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

/* Hand out a fresh ASID, flushing all ASIDs when the space wraps. */
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

/*
 * #PF intercept: either a real guest page fault (fed to the MMU) or an
 * async-pf notification encoded in svm->apf_reason.
 */
static int pf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = svm->vmcb->control.exit_info_2;
	u32 error_code;
	int r = 1;

	switch (svm->apf_reason) {
	default:
		error_code = svm->vmcb->control.exit_info_1;

		trace_kvm_page_fault(fault_address, error_code);
		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			svm->vmcb->control.insn_bytes,
			svm->vmcb->control.insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}

/* #DB intercept: finish NMI single-step emulation or exit for guest-debug. */
static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		update_db_bp_intercept(&svm->vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

/* #BP always goes to userspace as a debug exit. */
static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

/* #UD: try the emulator first (may be a hypercall patch site). */
static int ud_interception(struct vcpu_svm *svm)
{
	int er;

	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

/* Give the guest the FPU: stop intercepting #NM and rebuild the CR0 view. */
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, NM_VECTOR);

	svm->vcpu.fpu_active = 1;
	update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
	svm_fpu_activate(&svm->vcpu);
	return 1;
}

/* Detect whether the current machine check matches AMD erratum 383. */
static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low = lower_32_bits(value);
		high = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

/* Intercepted #MC: erratum-383 kills the guest, otherwise re-raise on host. */
static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return;
}

static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

/* SHUTDOWN intercept: VMCB state is undefined afterwards, so reinit it. */
static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

/* IN/OUT intercept: fast path for plain OUT, emulator for string/IN forms. */
static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	svm->next_rip = svm->vmcb->control.exit_info_2;
	skip_emulated_instruction(&svm->vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

/* CR3 that L1 set up for L2 when nested paging is in use. */
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

static u64
nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	/* Read one PDPTE of L1's nested page table; 0 on read failure. */
	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
				  offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
				   unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);
	svm_flush_tlb(vcpu);
}

/* Reflect a nested-NPT fault back into L1 as an SVM_EXIT_NPF #vmexit. */
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = fault->error_code;
	svm->vmcb->control.exit_info_2 = fault->address;

	nested_svm_vmexit(svm);
}

/* Set up a shadow MMU that walks L1's nested page tables for L2. */
static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	int r;

	r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);

	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu.shadow_root_level = get_npt_level();
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;

	return r;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

/* SVM instructions require EFER.SVME, paging enabled and CPL 0. */
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

/*
 * Check whether an exception destined for L2 must instead cause a
 * #vmexit to L1 because L1 intercepts it.
 */
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;
	svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	vmexit = nested_svm_intercept(svm);
	if (vmexit == NESTED_EXIT_DONE)
		svm->nested.exit_required = true;

	return vmexit;
}

/* This function returns true if it is save to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
		return false;

	/*
	 * if vmexit was already requested (by intercepted exception
	 * for instance) do not overwrite it with "external interrupt"
	 * vmexit.
	 */
	if (svm->nested.exit_required)
		return false;

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	if (svm->nested.intercept & 1ULL) {
		/*
		 * The #vmexit can't be emulated here directly because this
		 * code path runs with irqs and preemption disabled. A
		 * #vmexit emulation might sleep. Only signal request for
		 * the #vmexit here.
		 */
		svm->nested.exit_required = true;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		return false;
	}

	return true;
}

/* This function returns true if it is save to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
		return true;

	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->nested.exit_required = true;

	return false;
}

/* kmap() a guest page by GPA; injects #GP into the guest on failure. */
static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
	struct page *page;

	might_sleep();

	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto error;

	*_page = page;

	return kmap(page);

error:
	kvm_inject_gp(&svm->vcpu, 0);

	return NULL;
}

static void nested_svm_unmap(struct page *page)
{
	kunmap(page);
	kvm_release_page_dirty(page);
}

/* Consult L1's I/O permission map to decide if an IOIO exit belongs to L1. */
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port;
	u8 val, bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	bit  = port % 8;
	val  = 0;

	/*
	 * NOTE(review): the bit mask is applied only when kvm_read_guest()
	 * returns non-zero (i.e. the read FAILED), in which case val is
	 * still 0 anyway; on a successful read the whole byte is tested.
	 * This looks inverted — verify against upstream, which later
	 * reworked this IOPM check.
	 */
	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
		val &= (1 << bit);

	return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

/* Decide if an MSR access exits to L1 based on L1's MSR permission map. */
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

/* Exits that the host must always handle, regardless of L1's intercepts. */
static int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
		nm_interception(svm);
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits)
			vmexit = NESTED_EXIT_DONE;
		/* async page fault always cause vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->apf_reason != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

/* Run the intercept check and emulate the #vmexit when L1 wants it. */
static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

/* Field-by-field copy of the VMCB control area (no guest save state). */
static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->lbr_ctl              = from->lbr_ctl;
}

/*
 * Emulate a #vmexit from L2 to L1: copy the current VMCB state into
 * L1's nested VMCB and restore the host (L1) state that was saved in
 * the hsave area at VMRUN time.
 */
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
	if (!nested_vmcb)
		return 1;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es     = vmcb->save.es;
	nested_vmcb->save.cs     = vmcb->save.cs;
	nested_vmcb->save.ss     = vmcb->save.ss;
	nested_vmcb->save.ds     = vmcb->save.ds;
	nested_vmcb->save.gdtr   = vmcb->save.gdtr;
	nested_vmcb->save.idtr   = vmcb->save.idtr;
	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2    = vmcb->save.cr2;
	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip    = vmcb->save.rip;
	nested_vmcb->save.rsp    = vmcb->save.rsp;
	nested_vmcb->save.rax    = vmcb->save.rax;
	nested_vmcb->save.dr7    = vmcb->save.dr7;
	nested_vmcb->save.dr6    = vmcb->save.dr6;
	nested_vmcb->save.cpl    = vmcb->save.cpl;

	nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector        = vmcb->control.int_vector;
	nested_vmcb->control.int_state         = vmcb->control.int_state;
	nested_vmcb->control.exit_code         = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
	nested_vmcb->control.next_rip          = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info     = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl           = 0;
	nested_vmcb->control.event_inj         = 0;
	nested_vmcb->control.event_inj_err     = 0;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	nested_svm_unmap(page);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

	return true;
}

/* Minimal consistency checks on L1's VMCB before VMRUN is emulated. */
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if (vmcb->control.nested_ctl && !npt_enabled)
		return false;

	return true;
}

/*
 * Emulate VMRUN: save host (L1) state into the hsave area, then load
 * the nested VMCB's guest state and intercepts so that L2 can run.
 */
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;
	u64 vmcb_gpa;

	vmcb_gpa = svm->vmcb->save.rax;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return false;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1  = 0;
		nested_vmcb->control.exit_info_2  = 0;

		nested_svm_unmap(page);

		return false;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl) {
		kvm_mmu_unload(&svm->vcpu);
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept            = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of the guest */
		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	clr_intercept(svm, INTERCEPT_VMMCALL);

	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	nested_svm_unmap(page);

	/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take affect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	enable_gif(svm);

	mark_all_dirty(svm->vmcb);

	return true;
}

/* Copy the state touched by VMLOAD/VMSAVE between two VMCBs. */
static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

/* Emulate VMLOAD: pull state from the VMCB addressed by RAX into the guest. */
static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	nested_svm_unmap(page);

	return 1;
}

/* Emulate VMSAVE: push guest state into the VMCB addressed by RAX. */
static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	nested_svm_unmap(page);

	return 1;
}

/* Emulate VMRUN; on msrpm merge failure synthesize an error #vmexit. */
static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	/* Save rip after vmrun instruction */
	kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);

	if (!nested_svm_vmrun(svm))
		return 1;

	if (!nested_svm_vmrun_msrpm(svm))
		goto failed;

	return 1;

failed:

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

	return 1;
}

/* STGI: set GIF; pending events may now be injected. */
static int stgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	enable_gif(svm);

	return 1;
}

/* CLGI: clear GIF and tear down the virtual interrupt window. */
static int clgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	mark_dirty(svm->vmcb, VMCB_INTR);

	return 1;
}

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
			  vcpu->arch.regs[VCPU_REGS_RAX]);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

/* SKINIT is not supported; raise #UD in the guest. */
static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

/* XSETBV: forward the ECX/EDX:EAX operands to the generic handler. */
static int xsetbv_interception(struct vcpu_svm *svm)
{
	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
		skip_emulated_instruction(&svm->vcpu);
	}

	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

/* Decode an intercepted task switch and hand it to the generic emulator. */
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
				has_error_code, error_code) == EMULATE_FAIL) {
		svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		svm->vcpu.run->internal.ndata = 0;
		return 0;
	}
	return 1;
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

/* IRET intercept marks the end of the NMI-blocked window. */
static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!static_cpu_has(X86_FEATURE_NRIPS))
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

/*
 * NOTE(review): head of check_selective_cr0_intercepted() — the body
 * continues past this chunk and is cut off mid-statement below.
 * Tests whether a selective-CR0-write intercept by L1 applies.
 */
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm)
== NESTED_EXIT_DONE);
	}

	return ret;
}

#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	/* Without decode assists the instruction must be re-emulated. */
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	/* Exit codes are laid out as READ_CR0..CR15, then WRITE_CR0..CR15. */
	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
	}
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;
	int err;

	/*
	 * NOTE(review): uses boot_cpu_has() where cr_interception uses
	 * static_cpu_has() for the same feature — presumably equivalent
	 * here; confirm whether the difference is intentional.
	 */
	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	/* Exit codes are laid out as READ_DR0..DR15, then WRITE_DR0..DR15. */
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	if (dr >= 16) { /* mov to DRn */
		val = kvm_register_read(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		err = kvm_get_dr(&svm->vcpu, dr, &val);
		if (!err)
			kvm_register_write(&svm->vcpu, reg, val);
	}

	skip_emulated_instruction(&svm->vcpu);

	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;
	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);

	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return r;
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	/* TPR was lowered: let userspace (which owns the irqchip) know. */
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

/* L1's view of the TSC: host TSC scaled plus the L1 vmcb's tsc_offset. */
u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
	return vmcb->control.tsc_offset + svm_scale_tsc(vcpu, host_tsc);
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		*data = svm->vmcb->control.tsc_offset +
			svm_scale_tsc(vcpu, native_read_tsc());

		break;
	}
	case MSR_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		*data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_read(ecx, data);

		/* RDMSR returns its result split across EDX:EAX. */
		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	/* Once SVM_DIS is set, the LOCK and DIS bits become read-only. */
	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u32 ecx = msr->index;
	u64 data = msr->data;

	switch (ecx) {
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr);
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		/* Bit 0 of DEBUGCTL toggles LBR recording. */
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	struct msr_data msr;
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	/* WRMSR takes its operand split across EDX:EAX. */
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, &msr)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int msr_interception(struct vcpu_svm *svm)
{
	/* exit_info_1 distinguishes WRMSR (1) from RDMSR (0). */
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	/* The window is open now; stop requesting a virtual interrupt. */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

/* Dispatch table: one handler per SVM exit code. */
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
[SVM_EXIT_RDPMC] = rdpmc_interception, [SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_IRET] = iret_interception, [SVM_EXIT_INVD] = emulate_on_interception, [SVM_EXIT_PAUSE] = pause_interception, [SVM_EXIT_HLT] = halt_interception, [SVM_EXIT_INVLPG] = invlpg_interception, [SVM_EXIT_INVLPGA] = invlpga_interception, [SVM_EXIT_IOIO] = io_interception, [SVM_EXIT_MSR] = msr_interception, [SVM_EXIT_TASK_SWITCH] = task_switch_interception, [SVM_EXIT_SHUTDOWN] = shutdown_interception, [SVM_EXIT_VMRUN] = vmrun_interception, [SVM_EXIT_VMMCALL] = vmmcall_interception, [SVM_EXIT_VMLOAD] = vmload_interception, [SVM_EXIT_VMSAVE] = vmsave_interception, [SVM_EXIT_STGI] = stgi_interception, [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = skinit_interception, [SVM_EXIT_WBINVD] = emulate_on_interception, [SVM_EXIT_MONITOR] = invalid_op_interception, [SVM_EXIT_MWAIT] = invalid_op_interception, [SVM_EXIT_XSETBV] = xsetbv_interception, [SVM_EXIT_NPF] = pf_interception, }; static void dump_vmcb(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; pr_err("VMCB Control Area:\n"); pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff); pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16); pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff); pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16); pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions); pr_err("%-20s%016llx\n", "intercepts:", control->intercept); pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); pr_err("%-20s%d\n", "asid:", control->asid); pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); pr_err("%-20s%08x\n", "int_ctl:", 
control->int_ctl); pr_err("%-20s%08x\n", "int_vector:", control->int_vector); pr_err("%-20s%08x\n", "int_state:", control->int_state); pr_err("%-20s%08x\n", "exit_code:", control->exit_code); pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); pr_err("%-20s%08x\n", "event_inj:", control->event_inj); pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl); pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); pr_err("VMCB State Save Area:\n"); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "es:", save->es.selector, save->es.attrib, save->es.limit, save->es.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "cs:", save->cs.selector, save->cs.attrib, save->cs.limit, save->cs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ss:", save->ss.selector, save->ss.attrib, save->ss.limit, save->ss.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ds:", save->ds.selector, save->ds.attrib, save->ds.limit, save->ds.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "fs:", save->fs.selector, save->fs.attrib, save->fs.limit, save->fs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gs:", save->gs.selector, save->gs.attrib, save->gs.limit, save->gs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gdtr:", save->gdtr.selector, save->gdtr.attrib, save->gdtr.limit, save->gdtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ldtr:", save->ldtr.selector, save->ldtr.attrib, save->ldtr.limit, save->ldtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "idtr:", save->idtr.selector, save->idtr.attrib, save->idtr.limit, 
save->idtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "tr:", save->tr.selector, save->tr.attrib, save->tr.limit, save->tr.base); pr_err("cpl: %d efer: %016llx\n", save->cpl, save->efer); pr_err("%-15s %016llx %-13s %016llx\n", "cr0:", save->cr0, "cr2:", save->cr2); pr_err("%-15s %016llx %-13s %016llx\n", "cr3:", save->cr3, "cr4:", save->cr4); pr_err("%-15s %016llx %-13s %016llx\n", "dr6:", save->dr6, "dr7:", save->dr7); pr_err("%-15s %016llx %-13s %016llx\n", "rip:", save->rip, "rflags:", save->rflags); pr_err("%-15s %016llx %-13s %016llx\n", "rsp:", save->rsp, "rax:", save->rax); pr_err("%-15s %016llx %-13s %016llx\n", "star:", save->star, "lstar:", save->lstar); pr_err("%-15s %016llx %-13s %016llx\n", "cstar:", save->cstar, "sfmask:", save->sfmask); pr_err("%-15s %016llx %-13s %016llx\n", "kernel_gs_base:", save->kernel_gs_base, "sysenter_cs:", save->sysenter_cs); pr_err("%-15s %016llx %-13s %016llx\n", "sysenter_esp:", save->sysenter_esp, "sysenter_eip:", save->sysenter_eip); pr_err("%-15s %016llx %-13s %016llx\n", "gpat:", save->g_pat, "dbgctl:", save->dbgctl); pr_err("%-15s %016llx %-13s %016llx\n", "br_from:", save->br_from, "br_to:", save->br_to); pr_err("%-15s %016llx %-13s %016llx\n", "excp_from:", save->last_excp_from, "excp_to:", save->last_excp_to); } static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; *info1 = control->exit_info_1; *info2 = control->exit_info_2; } static int handle_exit(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 exit_code = svm->vmcb->control.exit_code; if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) vcpu->arch.cr0 = svm->vmcb->save.cr0; if (npt_enabled) vcpu->arch.cr3 = svm->vmcb->save.cr3; if (unlikely(svm->nested.exit_required)) { nested_svm_vmexit(svm); svm->nested.exit_required = false; return 1; } if (is_guest_mode(vcpu)) { int vmexit; 
trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		/* Give the nested (L1) hypervisor a chance to handle the exit. */
		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	/* Block further NMIs until the guest executes IRET. */
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	/* Re-intercept CR8 writes while the pending irq is masked by TPR. */
	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

/* APICv is not available on SVM in this kernel: the ops below are stubs. */
static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	return;
}

static int svm_vm_has_apicv(struct kvm *kvm)
{
	return 0;
}

static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	return;
}

static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
{
	return;
}

static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	return;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	     (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

static int enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
	return 0;
}

static int enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return 0; /* IRET will cause a vm exit */

	/*
	 * Something prevents NMI from being injected. Single step over
	 * possible problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_bp_intercept(vcpu);
	return 0;
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Prefer a by-ASID flush; otherwise force a new ASID. */
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
*/ if ((svm->vcpu.arch.hflags & HF_IRET_MASK) && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); } svm->vcpu.arch.nmi_injected = false; kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); if (!(exitintinfo & SVM_EXITINTINFO_VALID)) return; kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; switch (type) { case SVM_EXITINTINFO_TYPE_NMI: svm->vcpu.arch.nmi_injected = true; break; case SVM_EXITINTINFO_TYPE_EXEPT: /* * In case of software exceptions, do not reinject the vector, * but re-execute the instruction instead. Rewind RIP first * if we emulated INT3 before. */ if (kvm_exception_is_soft(vector)) { if (vector == BP_VECTOR && int3_injected && kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) - int3_injected); break; } if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { u32 err = svm->vmcb->control.exit_int_info_err; kvm_requeue_exception_e(&svm->vcpu, vector, err); } else kvm_requeue_exception(&svm->vcpu, vector); break; case SVM_EXITINTINFO_TYPE_INTR: kvm_queue_interrupt(&svm->vcpu, vector, false); break; default: break; } } static void svm_cancel_injection(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; control->exit_int_info = control->event_inj; control->exit_int_info_err = control->event_inj_err; control->event_inj = 0; svm_complete_interrupts(svm); } static void svm_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; /* * A vmexit emulation is required before the vcpu can be executed * again. 
*/ if (unlikely(svm->nested.exit_required)) return; pre_svm_run(svm); sync_lapic_to_cr8(vcpu); svm->vmcb->save.cr2 = vcpu->arch.cr2; clgi(); local_irq_enable(); asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t" "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t" "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t" "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t" "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%[svm]), %%r8 \n\t" "mov %c[r9](%[svm]), %%r9 \n\t" "mov %c[r10](%[svm]), %%r10 \n\t" "mov %c[r11](%[svm]), %%r11 \n\t" "mov %c[r12](%[svm]), %%r12 \n\t" "mov %c[r13](%[svm]), %%r13 \n\t" "mov %c[r14](%[svm]), %%r14 \n\t" "mov %c[r15](%[svm]), %%r15 \n\t" #endif /* Enter guest mode */ "push %%" _ASM_AX " \n\t" "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t" __ex(SVM_VMLOAD) "\n\t" __ex(SVM_VMRUN) "\n\t" __ex(SVM_VMSAVE) "\n\t" "pop %%" _ASM_AX " \n\t" /* Save guest registers, load host registers */ "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t" "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t" "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t" "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t" "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t" "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%[svm]) \n\t" "mov %%r9, %c[r9](%[svm]) \n\t" "mov %%r10, %c[r10](%[svm]) \n\t" "mov %%r11, %c[r11](%[svm]) \n\t" "mov %%r12, %c[r12](%[svm]) \n\t" "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" #endif "pop %%" _ASM_BP : : [svm]"a"(svm), [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_svm, 
vcpu.arch.regs[VCPU_REGS_RBP])) #ifdef CONFIG_X86_64 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])), [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])), [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])), [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) #endif : "cc", "memory" #ifdef CONFIG_X86_64 , "rbx", "rcx", "rdx", "rsi", "rdi" , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15" #else , "ebx", "ecx", "edx", "esi", "edi" #endif ); #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, svm->host.gs_base); #else loadsegment(fs, svm->host.fs); #ifndef CONFIG_X86_32_LAZY_GS loadsegment(gs, svm->host.gs); #endif #endif reload_tss(vcpu); local_irq_disable(); vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM); if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_handle_nmi(&svm->vcpu); stgi(); /* Any pending NMI will happen here */ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_after_handle_nmi(&svm->vcpu); sync_cr8_to_lapic(vcpu); svm->next_rip = 0; svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; /* if exit due to PF check for async PF */ if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) svm->apf_reason = kvm_read_and_reset_pf_reason(); if (npt_enabled) { vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); } /* * We need to handle MC intercepts here before the vcpu has a chance to * change the physical cpu */ if 
(unlikely(svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + MC_VECTOR)) svm_handle_mce(svm); mark_all_clean(svm->vmcb); } static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.cr3 = root; mark_dirty(svm->vmcb, VMCB_CR); svm_flush_tlb(vcpu); } static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.nested_cr3 = root; mark_dirty(svm->vmcb, VMCB_NPT); /* Also sync guest cr3 here in case we live migrate */ svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); mark_dirty(svm->vmcb, VMCB_CR); svm_flush_tlb(vcpu); } static int is_disabled(void) { u64 vm_cr; rdmsrl(MSR_VM_CR, vm_cr); if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) return 1; return 0; } static void svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xd9; } static void svm_check_processor_compat(void *rtn) { *(int *)rtn = 0; } static bool svm_cpu_has_accelerated_tpr(void) { return false; } static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { return 0; } static void svm_cpuid_update(struct kvm_vcpu *vcpu) { } static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { switch (func) { case 0x80000001: if (nested) entry->ecx |= (1 << 2); /* Set SVM bit */ break; case 0x8000000A: entry->eax = 1; /* SVM revision 1 */ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper ASID emulation to nested SVM */ entry->ecx = 0; /* Reserved */ entry->edx = 0; /* Per default do not support any additional features */ /* Support next_rip if host supports it */ if (boot_cpu_has(X86_FEATURE_NRIPS)) entry->edx |= SVM_FEATURE_NRIP; /* Support NPT for the guest if enabled */ if (npt_enabled) entry->edx |= SVM_FEATURE_NPT; break; } } static int svm_get_lpage_level(void) { return PT_PDPE_LEVEL; } static bool svm_rdtscp_supported(void) { return 
false; } static bool svm_invpcid_supported(void) { return false; } static bool svm_has_wbinvd_exit(void) { return true; } static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); set_exception_intercept(svm, NM_VECTOR); update_cr0_intercept(svm); } #define PRE_EX(exit) { .exit_code = (exit), \ .stage = X86_ICPT_PRE_EXCEPT, } #define POST_EX(exit) { .exit_code = (exit), \ .stage = X86_ICPT_POST_EXCEPT, } #define POST_MEM(exit) { .exit_code = (exit), \ .stage = X86_ICPT_POST_MEMACCESS, } static const struct __x86_intercept { u32 exit_code; enum x86_intercept_stage stage; } x86_intercept_map[] = { [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), 
[x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), }; #undef PRE_EX #undef POST_EX #undef POST_MEM static int svm_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { struct vcpu_svm *svm = to_svm(vcpu); int vmexit, ret = X86EMUL_CONTINUE; struct __x86_intercept icpt_info; struct vmcb *vmcb = svm->vmcb; if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) goto out; icpt_info = x86_intercept_map[info->intercept]; if (stage != icpt_info.stage) goto out; switch (icpt_info.exit_code) { case SVM_EXIT_READ_CR0: if (info->intercept == x86_intercept_cr_read) icpt_info.exit_code += info->modrm_reg; break; case SVM_EXIT_WRITE_CR0: { unsigned long cr0, val; u64 intercept; if (info->intercept == x86_intercept_cr_write) icpt_info.exit_code += info->modrm_reg; if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0) break; intercept = svm->nested.intercept; if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))) break; cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; if (info->intercept == x86_intercept_lmsw) { 
cr0 &= 0xfUL; val &= 0xfUL; /* lmsw can't clear PE - catch this here */ if (cr0 & X86_CR0_PE) val |= X86_CR0_PE; } if (cr0 ^ val) icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; break; } case SVM_EXIT_READ_DR0: case SVM_EXIT_WRITE_DR0: icpt_info.exit_code += info->modrm_reg; break; case SVM_EXIT_MSR: if (info->intercept == x86_intercept_wrmsr) vmcb->control.exit_info_1 = 1; else vmcb->control.exit_info_1 = 0; break; case SVM_EXIT_PAUSE: /* * We get this for NOP only, but pause * is rep not, check this here */ if (info->rep_prefix != REPE_PREFIX) goto out; case SVM_EXIT_IOIO: { u64 exit_info; u32 bytes; exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16; if (info->intercept == x86_intercept_in || info->intercept == x86_intercept_ins) { exit_info |= SVM_IOIO_TYPE_MASK; bytes = info->src_bytes; } else { bytes = info->dst_bytes; } if (info->intercept == x86_intercept_outs || info->intercept == x86_intercept_ins) exit_info |= SVM_IOIO_STR_MASK; if (info->rep_prefix) exit_info |= SVM_IOIO_REP_MASK; bytes = min(bytes, 4u); exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); vmcb->control.exit_info_1 = exit_info; vmcb->control.exit_info_2 = info->next_rip; break; } default: break; } vmcb->control.next_rip = info->next_rip; vmcb->control.exit_code = icpt_info.exit_code; vmexit = nested_svm_exit_handled(svm); ret = (vmexit == NESTED_EXIT_DONE) ? 
X86EMUL_INTERCEPTED : X86EMUL_CONTINUE; out: return ret; } static void svm_handle_external_intr(struct kvm_vcpu *vcpu) { local_irq_enable(); } static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, .hardware_unsetup = svm_hardware_unsetup, .check_processor_compatibility = svm_check_processor_compat, .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, .vcpu_create = svm_create_vcpu, .vcpu_free = svm_free_vcpu, .vcpu_reset = svm_vcpu_reset, .prepare_guest_switch = svm_prepare_guest_switch, .vcpu_load = svm_vcpu_load, .vcpu_put = svm_vcpu_put, .update_db_bp_intercept = update_db_bp_intercept, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, .get_segment = svm_get_segment, .set_segment = svm_set_segment, .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .decache_cr0_guest_bits = svm_decache_cr0_guest_bits, .decache_cr3 = svm_decache_cr3, .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, .set_cr0 = svm_set_cr0, .set_cr3 = svm_set_cr3, .set_cr4 = svm_set_cr4, .set_efer = svm_set_efer, .get_idt = svm_get_idt, .set_idt = svm_set_idt, .get_gdt = svm_get_gdt, .set_gdt = svm_set_gdt, .set_dr7 = svm_set_dr7, .cache_reg = svm_cache_reg, .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, .fpu_activate = svm_fpu_activate, .fpu_deactivate = svm_fpu_deactivate, .tlb_flush = svm_flush_tlb, .run = svm_vcpu_run, .handle_exit = handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = svm_set_interrupt_shadow, .get_interrupt_shadow = svm_get_interrupt_shadow, .patch_hypercall = svm_patch_hypercall, .set_irq = svm_set_irq, .set_nmi = svm_inject_nmi, .queue_exception = svm_queue_exception, .cancel_injection = svm_cancel_injection, .interrupt_allowed = svm_interrupt_allowed, .nmi_allowed = svm_nmi_allowed, 
.get_nmi_mask = svm_get_nmi_mask, .set_nmi_mask = svm_set_nmi_mask, .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, .vm_has_apicv = svm_vm_has_apicv, .load_eoi_exitmap = svm_load_eoi_exitmap, .hwapic_isr_update = svm_hwapic_isr_update, .sync_pir_to_irr = svm_sync_pir_to_irr, .set_tss_addr = svm_set_tss_addr, .get_tdp_level = get_npt_level, .get_mt_mask = svm_get_mt_mask, .get_exit_info = svm_get_exit_info, .get_lpage_level = svm_get_lpage_level, .cpuid_update = svm_cpuid_update, .rdtscp_supported = svm_rdtscp_supported, .invpcid_supported = svm_invpcid_supported, .set_supported_cpuid = svm_set_supported_cpuid, .has_wbinvd_exit = svm_has_wbinvd_exit, .set_tsc_khz = svm_set_tsc_khz, .read_tsc_offset = svm_read_tsc_offset, .write_tsc_offset = svm_write_tsc_offset, .adjust_tsc_offset = svm_adjust_tsc_offset, .compute_tsc_offset = svm_compute_tsc_offset, .read_l1_tsc = svm_read_l1_tsc, .set_tdp_cr3 = set_tdp_cr3, .check_intercept = svm_check_intercept, .handle_external_intr = svm_handle_external_intr, }; static int __init svm_init(void) { return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm), THIS_MODULE); } static void __exit svm_exit(void) { kvm_exit(); } module_init(svm_init) module_exit(svm_exit)
gpl-2.0
tjcgj/linux-xlnx-soc
arch/arm/mach-omap2/clockdomains7xx_data.c
1298
23060
/* * DRA7xx Clock domains framework * * Copyright (C) 2009-2013 Texas Instruments, Inc. * Copyright (C) 2009-2011 Nokia Corporation * * Generated by code originally written by: * Abhijit Pagare (abhijitpagare@ti.com) * Benoit Cousson (b-cousson@ti.com) * Paul Walmsley (paul@pwsan.com) * * This file is automatically generated from the OMAP hardware databases. * We respectfully ask that any modifications to this file be coordinated * with the public linux-omap@vger.kernel.org mailing list and the * authors above to ensure that the autogeneration scripts are kept * up-to-date with the file contents. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "cm1_7xx.h" #include "cm2_7xx.h" #include "cm-regbits-7xx.h" #include "prm7xx.h" #include "prcm44xx.h" #include "prcm_mpu7xx.h" /* Static Dependencies for DRA7xx Clock Domains */ static struct clkdm_dep cam_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { NULL }, }; static struct clkdm_dep dma_wkup_sleep_deps[] = { { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dsp1_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = 
"eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dsp2_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dss_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve1_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve2_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve3_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = 
"eve2_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep eve4_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep gmac_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { NULL }, }; static struct clkdm_dep gpu_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { NULL }, }; static struct clkdm_dep ipu1_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep ipu2_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = 
"l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep iva_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { NULL }, }; static struct clkdm_dep l3init_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep l4per2_wkup_sleep_deps[] = { { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { NULL }, }; static struct clkdm_dep l4sec_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { NULL }, }; static struct clkdm_dep mpu_wkup_sleep_deps[] = { { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { .clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "ipu2_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "pcie_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep pcie_wkup_sleep_deps[] = { { .clkdm_name = "atl_clkdm" }, { .clkdm_name = "cam_clkdm" }, { .clkdm_name = "dsp1_clkdm" }, { 
.clkdm_name = "dsp2_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "eve1_clkdm" }, { .clkdm_name = "eve2_clkdm" }, { .clkdm_name = "eve3_clkdm" }, { .clkdm_name = "eve4_clkdm" }, { .clkdm_name = "gmac_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "ipu1_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4per2_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "vpe_clkdm" }, { NULL }, }; static struct clkdm_dep vpe_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l4per3_clkdm" }, { NULL }, }; static struct clockdomain l4per3_7xx_clkdm = { .name = "l4per3_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4PER3_CDOFFS, .dep_bit = DRA7XX_L4PER3_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4per2_7xx_clkdm = { .name = "l4per2_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4PER2_CDOFFS, .dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT, .wkdep_srcs = l4per2_wkup_sleep_deps, .sleepdep_srcs = l4per2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain mpu0_7xx_clkdm = { .name = "mpu0_clkdm", .pwrdm = { .name = "cpu0_pwrdm" }, .prcm_partition = DRA7XX_MPU_PRCM_PARTITION, .cm_inst = DRA7XX_MPU_PRCM_CM_C0_INST, .clkdm_offs = DRA7XX_MPU_PRCM_CM_C0_CPU0_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain iva_7xx_clkdm = { .name = "iva_clkdm", .pwrdm = { .name = "iva_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_IVA_INST, .clkdm_offs = DRA7XX_CM_CORE_IVA_IVA_CDOFFS, .dep_bit = 
DRA7XX_IVA_STATDEP_SHIFT, .wkdep_srcs = iva_wkup_sleep_deps, .sleepdep_srcs = iva_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain coreaon_7xx_clkdm = { .name = "coreaon_clkdm", .pwrdm = { .name = "coreaon_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_COREAON_INST, .clkdm_offs = DRA7XX_CM_CORE_COREAON_COREAON_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain ipu1_7xx_clkdm = { .name = "ipu1_clkdm", .pwrdm = { .name = "ipu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU1_CDOFFS, .dep_bit = DRA7XX_IPU1_STATDEP_SHIFT, .wkdep_srcs = ipu1_wkup_sleep_deps, .sleepdep_srcs = ipu1_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain ipu2_7xx_clkdm = { .name = "ipu2_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_IPU2_CDOFFS, .dep_bit = DRA7XX_IPU2_STATDEP_SHIFT, .wkdep_srcs = ipu2_wkup_sleep_deps, .sleepdep_srcs = ipu2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l3init_7xx_clkdm = { .name = "l3init_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L3INIT_INST, .clkdm_offs = DRA7XX_CM_CORE_L3INIT_L3INIT_CDOFFS, .dep_bit = DRA7XX_L3INIT_STATDEP_SHIFT, .wkdep_srcs = l3init_wkup_sleep_deps, .sleepdep_srcs = l3init_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4sec_7xx_clkdm = { .name = "l4sec_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4SEC_CDOFFS, .dep_bit = DRA7XX_L4SEC_STATDEP_SHIFT, .wkdep_srcs = l4sec_wkup_sleep_deps, .sleepdep_srcs = l4sec_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct 
clockdomain l3main1_7xx_clkdm = { .name = "l3main1_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_L3MAIN1_CDOFFS, .dep_bit = DRA7XX_L3MAIN1_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct clockdomain vpe_7xx_clkdm = { .name = "vpe_clkdm", .pwrdm = { .name = "vpe_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_VPE_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_VPE_VPE_CDOFFS, .dep_bit = DRA7XX_VPE_STATDEP_SHIFT, .wkdep_srcs = vpe_wkup_sleep_deps, .sleepdep_srcs = vpe_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain mpu_7xx_clkdm = { .name = "mpu_clkdm", .pwrdm = { .name = "mpu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_MPU_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_MPU_MPU_CDOFFS, .wkdep_srcs = mpu_wkup_sleep_deps, .sleepdep_srcs = mpu_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain custefuse_7xx_clkdm = { .name = "custefuse_clkdm", .pwrdm = { .name = "custefuse_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CUSTEFUSE_INST, .clkdm_offs = DRA7XX_CM_CORE_CUSTEFUSE_CUSTEFUSE_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain ipu_7xx_clkdm = { .name = "ipu_clkdm", .pwrdm = { .name = "ipu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS, .dep_bit = DRA7XX_IPU_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain mpu1_7xx_clkdm = { .name = "mpu1_clkdm", .pwrdm = { .name = "cpu1_pwrdm" }, .prcm_partition = DRA7XX_MPU_PRCM_PARTITION, .cm_inst = DRA7XX_MPU_PRCM_CM_C1_INST, .clkdm_offs = DRA7XX_MPU_PRCM_CM_C1_CPU1_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain gmac_7xx_clkdm = { 
.name = "gmac_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L3INIT_INST, .clkdm_offs = DRA7XX_CM_CORE_L3INIT_GMAC_CDOFFS, .dep_bit = DRA7XX_GMAC_STATDEP_SHIFT, .wkdep_srcs = gmac_wkup_sleep_deps, .sleepdep_srcs = gmac_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4cfg_7xx_clkdm = { .name = "l4cfg_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_L4CFG_CDOFFS, .dep_bit = DRA7XX_L4CFG_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct clockdomain dma_7xx_clkdm = { .name = "dma_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_DMA_CDOFFS, .wkdep_srcs = dma_wkup_sleep_deps, .sleepdep_srcs = dma_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain rtc_7xx_clkdm = { .name = "rtc_clkdm", .pwrdm = { .name = "rtc_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_RTC_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_RTC_RTC_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain pcie_7xx_clkdm = { .name = "pcie_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L3INIT_INST, .clkdm_offs = DRA7XX_CM_CORE_L3INIT_PCIE_CDOFFS, .dep_bit = DRA7XX_PCIE_STATDEP_SHIFT, .wkdep_srcs = pcie_wkup_sleep_deps, .sleepdep_srcs = pcie_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain atl_7xx_clkdm = { .name = "atl_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_ATL_CDOFFS, .dep_bit = DRA7XX_ATL_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct 
clockdomain l3instr_7xx_clkdm = { .name = "l3instr_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_L3INSTR_CDOFFS, }; static struct clockdomain dss_7xx_clkdm = { .name = "dss_clkdm", .pwrdm = { .name = "dss_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_DSS_INST, .clkdm_offs = DRA7XX_CM_CORE_DSS_DSS_CDOFFS, .dep_bit = DRA7XX_DSS_STATDEP_SHIFT, .wkdep_srcs = dss_wkup_sleep_deps, .sleepdep_srcs = dss_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain emif_7xx_clkdm = { .name = "emif_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CORE_INST, .clkdm_offs = DRA7XX_CM_CORE_CORE_EMIF_CDOFFS, .dep_bit = DRA7XX_EMIF_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain emu_7xx_clkdm = { .name = "emu_clkdm", .pwrdm = { .name = "emu_pwrdm" }, .prcm_partition = DRA7XX_PRM_PARTITION, .cm_inst = DRA7XX_PRM_EMU_CM_INST, .clkdm_offs = DRA7XX_PRM_EMU_CM_EMU_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain dsp2_7xx_clkdm = { .name = "dsp2_clkdm", .pwrdm = { .name = "dsp2_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_DSP2_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_DSP2_DSP2_CDOFFS, .dep_bit = DRA7XX_DSP2_STATDEP_SHIFT, .wkdep_srcs = dsp2_wkup_sleep_deps, .sleepdep_srcs = dsp2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain dsp1_7xx_clkdm = { .name = "dsp1_clkdm", .pwrdm = { .name = "dsp1_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_DSP1_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_DSP1_DSP1_CDOFFS, .dep_bit = DRA7XX_DSP1_STATDEP_SHIFT, .wkdep_srcs = dsp1_wkup_sleep_deps, .sleepdep_srcs = dsp1_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain 
cam_7xx_clkdm = { .name = "cam_clkdm", .pwrdm = { .name = "cam_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_CAM_INST, .clkdm_offs = DRA7XX_CM_CORE_CAM_CAM_CDOFFS, .dep_bit = DRA7XX_CAM_STATDEP_SHIFT, .wkdep_srcs = cam_wkup_sleep_deps, .sleepdep_srcs = cam_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4per_7xx_clkdm = { .name = "l4per_clkdm", .pwrdm = { .name = "l4per_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_L4PER_INST, .clkdm_offs = DRA7XX_CM_CORE_L4PER_L4PER_CDOFFS, .dep_bit = DRA7XX_L4PER_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain gpu_7xx_clkdm = { .name = "gpu_clkdm", .pwrdm = { .name = "gpu_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_PARTITION, .cm_inst = DRA7XX_CM_CORE_GPU_INST, .clkdm_offs = DRA7XX_CM_CORE_GPU_GPU_CDOFFS, .dep_bit = DRA7XX_GPU_STATDEP_SHIFT, .wkdep_srcs = gpu_wkup_sleep_deps, .sleepdep_srcs = gpu_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain eve4_7xx_clkdm = { .name = "eve4_clkdm", .pwrdm = { .name = "eve4_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE4_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE4_EVE4_CDOFFS, .dep_bit = DRA7XX_EVE4_STATDEP_SHIFT, .wkdep_srcs = eve4_wkup_sleep_deps, .sleepdep_srcs = eve4_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain eve2_7xx_clkdm = { .name = "eve2_clkdm", .pwrdm = { .name = "eve2_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE2_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE2_EVE2_CDOFFS, .dep_bit = DRA7XX_EVE2_STATDEP_SHIFT, .wkdep_srcs = eve2_wkup_sleep_deps, .sleepdep_srcs = eve2_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain eve3_7xx_clkdm = { .name = "eve3_clkdm", .pwrdm = { .name = "eve3_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE3_INST, 
.clkdm_offs = DRA7XX_CM_CORE_AON_EVE3_EVE3_CDOFFS, .dep_bit = DRA7XX_EVE3_STATDEP_SHIFT, .wkdep_srcs = eve3_wkup_sleep_deps, .sleepdep_srcs = eve3_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain wkupaon_7xx_clkdm = { .name = "wkupaon_clkdm", .pwrdm = { .name = "wkupaon_pwrdm" }, .prcm_partition = DRA7XX_PRM_PARTITION, .cm_inst = DRA7XX_PRM_WKUPAON_CM_INST, .clkdm_offs = DRA7XX_PRM_WKUPAON_CM_WKUPAON_CDOFFS, .dep_bit = DRA7XX_WKUPAON_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain eve1_7xx_clkdm = { .name = "eve1_clkdm", .pwrdm = { .name = "eve1_pwrdm" }, .prcm_partition = DRA7XX_CM_CORE_AON_PARTITION, .cm_inst = DRA7XX_CM_CORE_AON_EVE1_INST, .clkdm_offs = DRA7XX_CM_CORE_AON_EVE1_EVE1_CDOFFS, .dep_bit = DRA7XX_EVE1_STATDEP_SHIFT, .wkdep_srcs = eve1_wkup_sleep_deps, .sleepdep_srcs = eve1_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; /* As clockdomains are added or removed above, this list must also be changed */ static struct clockdomain *clockdomains_dra7xx[] __initdata = { &l4per3_7xx_clkdm, &l4per2_7xx_clkdm, &mpu0_7xx_clkdm, &iva_7xx_clkdm, &coreaon_7xx_clkdm, &ipu1_7xx_clkdm, &ipu2_7xx_clkdm, &l3init_7xx_clkdm, &l4sec_7xx_clkdm, &l3main1_7xx_clkdm, &vpe_7xx_clkdm, &mpu_7xx_clkdm, &custefuse_7xx_clkdm, &ipu_7xx_clkdm, &mpu1_7xx_clkdm, &gmac_7xx_clkdm, &l4cfg_7xx_clkdm, &dma_7xx_clkdm, &rtc_7xx_clkdm, &pcie_7xx_clkdm, &atl_7xx_clkdm, &l3instr_7xx_clkdm, &dss_7xx_clkdm, &emif_7xx_clkdm, &emu_7xx_clkdm, &dsp2_7xx_clkdm, &dsp1_7xx_clkdm, &cam_7xx_clkdm, &l4per_7xx_clkdm, &gpu_7xx_clkdm, &eve4_7xx_clkdm, &eve2_7xx_clkdm, &eve3_7xx_clkdm, &wkupaon_7xx_clkdm, &eve1_7xx_clkdm, NULL }; void __init dra7xx_clockdomains_init(void) { clkdm_register_platform_funcs(&omap4_clkdm_operations); clkdm_register_clkdms(clockdomains_dra7xx); clkdm_complete_init(); }
gpl-2.0
Shaaan/android_kernel_samsung_u8500-common
mm/kmemleak-test.c
1554
3402
/* * mm/kmemleak-test.c * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas <catalin.marinas@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/list.h> #include <linux/percpu.h> #include <linux/fdtable.h> #include <linux/kmemleak.h> struct test_node { long header[25]; struct list_head list; long footer[25]; }; static LIST_HEAD(test_list); static DEFINE_PER_CPU(void *, kmemleak_test_pointer); /* * Some very simple testing. This function needs to be extended for * proper testing. 
*/ static int __init kmemleak_test_init(void) { struct test_node *elem; int i; printk(KERN_INFO "Kmemleak testing\n"); /* make some orphan objects */ pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); #ifndef CONFIG_MODULES pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); #endif pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); /* * Add elements to a list. They should only appear as orphan * after the module is removed. */ for (i = 0; i < 10; i++) { elem = kmalloc(sizeof(*elem), GFP_KERNEL); pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem); if (!elem) return -ENOMEM; memset(elem, 0, sizeof(*elem)); INIT_LIST_HEAD(&elem->list); list_add_tail(&elem->list, &test_list); } for_each_possible_cpu(i) { per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); pr_info("kmemleak: kmalloc(129) = %p\n", per_cpu(kmemleak_test_pointer, i)); } return 0; } module_init(kmemleak_test_init); static void __exit kmemleak_test_exit(void) { struct test_node *elem, *tmp; /* * Remove the list elements without actually freeing the * memory. 
*/ list_for_each_entry_safe(elem, tmp, &test_list, list) list_del(&elem->list); } module_exit(kmemleak_test_exit); MODULE_LICENSE("GPL");
gpl-2.0
KyLinOS/android_kernel_motorola_omap4-common
arch/mips/fw/arc/identify.c
2834
2420
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * identify.c: identify machine by looking up system identifier * * Copyright (C) 1998 Thomas Bogendoerfer * * This code is based on arch/mips/sgi/kernel/system.c, which is * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <asm/sgialib.h> #include <asm/bootinfo.h> struct smatch { char *arcname; char *liname; int flags; }; static struct smatch mach_table[] = { { .arcname = "SGI-IP22", .liname = "SGI Indy", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP27", .liname = "SGI Origin", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP28", .liname = "SGI IP28", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP30", .liname = "SGI Octane", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP32", .liname = "SGI O2", .flags = PROM_FLAG_ARCS, }, { .arcname = "Microsoft-Jazz", .liname = "Jazz MIPS_Magnum_4000", .flags = 0, }, { .arcname = "PICA-61", .liname = "Jazz Acer_PICA_61", .flags = 0, }, { .arcname = "RM200PCI", .liname = "SNI RM200_PCI", .flags = PROM_FLAG_DONT_FREE_TEMP, }, { .arcname = "RM200PCI-R5K", .liname = "SNI RM200_PCI-R5K", .flags = PROM_FLAG_DONT_FREE_TEMP, } }; int prom_flags; static struct smatch * __init string_to_mach(const char *s) { int i; for (i = 0; i < ARRAY_SIZE(mach_table); i++) { if (!strcmp(s, mach_table[i].arcname)) return &mach_table[i]; } panic("Yeee, could not determine architecture type <%s>", s); } char *system_type; const char *get_system_type(void) { return system_type; } void __init prom_identify_arch(void) { pcomponent *p; struct smatch *mach; const char *iname; /* * The root component tells us what machine architecture we have here. 
*/ p = ArcGetChild(PROM_NULL_COMPONENT); if (p == NULL) { #ifdef CONFIG_SGI_IP27 /* IP27 PROM misbehaves, seems to not implement ARC GetChild(). So we just assume it's an IP27. */ iname = "SGI-IP27"; #else iname = "Unknown"; #endif } else iname = (char *) (long) p->iname; printk("ARCH: %s\n", iname); mach = string_to_mach(iname); system_type = mach->liname; prom_flags = mach->flags; }
gpl-2.0
kirananto/ONEPLUS2RAZOR
sound/soc/sh/ssi.c
3090
10390
/* * Serial Sound Interface (I2S) support for SH7760/SH7780 * * Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net> * * licensed under the terms outlined in the file COPYING at the root * of the linux kernel sources. * * dont forget to set IPSEL/OMSEL register bits (in your board code) to * enable SSI output pins! */ /* * LIMITATIONS: * The SSI unit has only one physical data line, so full duplex is * impossible. This can be remedied on the SH7760 by using the * other SSI unit for recording; however the SH7780 has only 1 SSI * unit, and its pins are shared with the AC97 unit, among others. * * FEATURES: * The SSI features "compressed mode": in this mode it continuously * streams PCM data over the I2S lines and uses LRCK as a handshake * signal. Can be used to send compressed data (AC3/DTS) to a DSP. * The number of bits sent over the wire in a frame can be adjusted * and can be independent from the actual sample bit depth. This is * useful to support TDM mode codecs like the AD1939 which have a * fixed TDM slot size, regardless of sample resolution. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/io.h> #define SSICR 0x00 #define SSISR 0x04 #define CR_DMAEN (1 << 28) #define CR_CHNL_SHIFT 22 #define CR_CHNL_MASK (3 << CR_CHNL_SHIFT) #define CR_DWL_SHIFT 19 #define CR_DWL_MASK (7 << CR_DWL_SHIFT) #define CR_SWL_SHIFT 16 #define CR_SWL_MASK (7 << CR_SWL_SHIFT) #define CR_SCK_MASTER (1 << 15) /* bitclock master bit */ #define CR_SWS_MASTER (1 << 14) /* wordselect master bit */ #define CR_SCKP (1 << 13) /* I2Sclock polarity */ #define CR_SWSP (1 << 12) /* LRCK polarity */ #define CR_SPDP (1 << 11) #define CR_SDTA (1 << 10) /* i2s alignment (msb/lsb) */ #define CR_PDTA (1 << 9) /* fifo data alignment */ #define CR_DEL (1 << 8) /* delay data by 1 i2sclk */ #define CR_BREN (1 << 7) /* clock gating in burst mode */ #define CR_CKDIV_SHIFT 4 #define CR_CKDIV_MASK (7 << CR_CKDIV_SHIFT) /* bitclock divider */ #define CR_MUTE (1 << 3) /* SSI mute */ #define CR_CPEN (1 << 2) /* compressed mode */ #define CR_TRMD (1 << 1) /* transmit/receive select */ #define CR_EN (1 << 0) /* enable SSI */ #define SSIREG(reg) (*(unsigned long *)(ssi->mmio + (reg))) struct ssi_priv { unsigned long mmio; unsigned long sysclk; int inuse; } ssi_cpu_data[] = { #if defined(CONFIG_CPU_SUBTYPE_SH7760) { .mmio = 0xFE680000, }, { .mmio = 0xFE690000, }, #elif defined(CONFIG_CPU_SUBTYPE_SH7780) { .mmio = 0xFFE70000, }, #else #error "Unsupported SuperH SoC" #endif }; /* * track usage of the SSI; it is simplex-only so prevent attempts of * concurrent playback + capture. FIXME: any locking required? 
*/ static int ssi_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct ssi_priv *ssi = &ssi_cpu_data[dai->id]; if (ssi->inuse) { pr_debug("ssi: already in use!\n"); return -EBUSY; } else ssi->inuse = 1; return 0; } static void ssi_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct ssi_priv *ssi = &ssi_cpu_data[dai->id]; ssi->inuse = 0; } static int ssi_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct ssi_priv *ssi = &ssi_cpu_data[dai->id]; switch (cmd) { case SNDRV_PCM_TRIGGER_START: SSIREG(SSICR) |= CR_DMAEN | CR_EN; break; case SNDRV_PCM_TRIGGER_STOP: SSIREG(SSICR) &= ~(CR_DMAEN | CR_EN); break; default: return -EINVAL; } return 0; } static int ssi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct ssi_priv *ssi = &ssi_cpu_data[dai->id]; unsigned long ssicr = SSIREG(SSICR); unsigned int bits, channels, swl, recv, i; channels = params_channels(params); bits = params->msbits; recv = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1; pr_debug("ssi_hw_params() enter\nssicr was %08lx\n", ssicr); pr_debug("bits: %u channels: %u\n", bits, channels); ssicr &= ~(CR_TRMD | CR_CHNL_MASK | CR_DWL_MASK | CR_PDTA | CR_SWL_MASK); /* direction (send/receive) */ if (!recv) ssicr |= CR_TRMD; /* transmit */ /* channels */ if ((channels < 2) || (channels > 8) || (channels & 1)) { pr_debug("ssi: invalid number of channels\n"); return -EINVAL; } ssicr |= ((channels >> 1) - 1) << CR_CHNL_SHIFT; /* DATA WORD LENGTH (DWL): databits in audio sample */ i = 0; switch (bits) { case 32: ++i; case 24: ++i; case 22: ++i; case 20: ++i; case 18: ++i; case 16: ++i; ssicr |= i << CR_DWL_SHIFT; case 8: break; default: pr_debug("ssi: invalid sample width\n"); return -EINVAL; } /* * SYSTEM WORD LENGTH: size in bits of half a frame over the I2S * wires. This is usually bits_per_sample x channels/2; i.e. in * Stereo mode the SWL equals DWL. 
SWL can be bigger than the * product of (channels_per_slot x samplebits), e.g. for codecs * like the AD1939 which only accept 32bit wide TDM slots. For * "standard" I2S operation we set SWL = chans / 2 * DWL here. * Waiting for ASoC to get TDM support ;-) */ if ((bits > 16) && (bits <= 24)) { bits = 24; /* these are padded by the SSI */ /*ssicr |= CR_PDTA;*/ /* cpu/data endianness ? */ } i = 0; swl = (bits * channels) / 2; switch (swl) { case 256: ++i; case 128: ++i; case 64: ++i; case 48: ++i; case 32: ++i; case 16: ++i; ssicr |= i << CR_SWL_SHIFT; case 8: break; default: pr_debug("ssi: invalid system word length computed\n"); return -EINVAL; } SSIREG(SSICR) = ssicr; pr_debug("ssi_hw_params() leave\nssicr is now %08lx\n", ssicr); return 0; } static int ssi_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct ssi_priv *ssi = &ssi_cpu_data[cpu_dai->id]; ssi->sysclk = freq; return 0; } /* * This divider is used to generate the SSI_SCK (I2S bitclock) from the * clock at the HAC_BIT_CLK ("oversampling clock") pin. 
*/ static int ssi_set_clkdiv(struct snd_soc_dai *dai, int did, int div) { struct ssi_priv *ssi = &ssi_cpu_data[dai->id]; unsigned long ssicr; int i; i = 0; ssicr = SSIREG(SSICR) & ~CR_CKDIV_MASK; switch (div) { case 16: ++i; case 8: ++i; case 4: ++i; case 2: ++i; SSIREG(SSICR) = ssicr | (i << CR_CKDIV_SHIFT); case 1: break; default: pr_debug("ssi: invalid sck divider %d\n", div); return -EINVAL; } return 0; } static int ssi_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct ssi_priv *ssi = &ssi_cpu_data[dai->id]; unsigned long ssicr = SSIREG(SSICR); pr_debug("ssi_set_fmt()\nssicr was 0x%08lx\n", ssicr); ssicr &= ~(CR_DEL | CR_PDTA | CR_BREN | CR_SWSP | CR_SCKP | CR_SWS_MASTER | CR_SCK_MASTER); switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: break; case SND_SOC_DAIFMT_RIGHT_J: ssicr |= CR_DEL | CR_PDTA; break; case SND_SOC_DAIFMT_LEFT_J: ssicr |= CR_DEL; break; default: pr_debug("ssi: unsupported format\n"); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) { case SND_SOC_DAIFMT_CONT: break; case SND_SOC_DAIFMT_GATED: ssicr |= CR_BREN; break; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: ssicr |= CR_SCKP; /* sample data at low clkedge */ break; case SND_SOC_DAIFMT_NB_IF: ssicr |= CR_SCKP | CR_SWSP; break; case SND_SOC_DAIFMT_IB_NF: break; case SND_SOC_DAIFMT_IB_IF: ssicr |= CR_SWSP; /* word select starts low */ break; default: pr_debug("ssi: invalid inversion\n"); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: break; case SND_SOC_DAIFMT_CBS_CFM: ssicr |= CR_SCK_MASTER; break; case SND_SOC_DAIFMT_CBM_CFS: ssicr |= CR_SWS_MASTER; break; case SND_SOC_DAIFMT_CBS_CFS: ssicr |= CR_SWS_MASTER | CR_SCK_MASTER; break; default: pr_debug("ssi: invalid master/slave configuration\n"); return -EINVAL; } SSIREG(SSICR) = ssicr; pr_debug("ssi_set_fmt() leave\nssicr is now 0x%08lx\n", ssicr); return 0; } /* the SSI depends on an external clocksource (at HAC_BIT_CLK) even in * 
Master mode, so really this is board specific; the SSI can do any * rate with the right bitclk and divider settings. */ #define SSI_RATES \ SNDRV_PCM_RATE_8000_192000 /* the SSI can do 8-32 bit samples, with 8 possible channels */ #define SSI_FMTS \ (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | \ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | \ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_U20_3LE | \ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3LE | \ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_U32_LE) static const struct snd_soc_dai_ops ssi_dai_ops = { .startup = ssi_startup, .shutdown = ssi_shutdown, .trigger = ssi_trigger, .hw_params = ssi_hw_params, .set_sysclk = ssi_set_sysclk, .set_clkdiv = ssi_set_clkdiv, .set_fmt = ssi_set_fmt, }; static struct snd_soc_dai_driver sh4_ssi_dai[] = { { .name = "ssi-dai.0", .playback = { .rates = SSI_RATES, .formats = SSI_FMTS, .channels_min = 2, .channels_max = 8, }, .capture = { .rates = SSI_RATES, .formats = SSI_FMTS, .channels_min = 2, .channels_max = 8, }, .ops = &ssi_dai_ops, }, #ifdef CONFIG_CPU_SUBTYPE_SH7760 { .name = "ssi-dai.1", .playback = { .rates = SSI_RATES, .formats = SSI_FMTS, .channels_min = 2, .channels_max = 8, }, .capture = { .rates = SSI_RATES, .formats = SSI_FMTS, .channels_min = 2, .channels_max = 8, }, .ops = &ssi_dai_ops, }, #endif }; static const struct snd_soc_component_driver sh4_ssi_component = { .name = "sh4-ssi", }; static int sh4_soc_dai_probe(struct platform_device *pdev) { return snd_soc_register_component(&pdev->dev, &sh4_ssi_component, sh4_ssi_dai, ARRAY_SIZE(sh4_ssi_dai)); } static int sh4_soc_dai_remove(struct platform_device *pdev) { snd_soc_unregister_component(&pdev->dev); return 0; } static struct platform_driver sh4_ssi_driver = { .driver = { .name = "sh4-ssi-dai", .owner = THIS_MODULE, }, .probe = sh4_soc_dai_probe, .remove = sh4_soc_dai_remove, }; module_platform_driver(sh4_ssi_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SuperH onchip SSI (I2S) audio driver"); 
MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
gpl-2.0
venkatkamesh/android_kernel_sony_msm8994
drivers/video/auo_k1900fb.c
3090
5321
/* * auok190xfb.c -- FB driver for AUO-K1900 controllers * * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de> * * based on broadsheetfb.c * * Copyright (C) 2008, Jaya Kumar * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. * * This driver is written to be used with the AUO-K1900 display controller. * * It is intended to be architecture independent. A board specific driver * must be used to perform all the physical IO interactions. * * The controller supports different update modes: * mode0+1 16 step gray (4bit) * mode2 4 step gray (2bit) - FIXME: add strange refresh * mode3 2 step gray (1bit) - FIXME: add strange refresh * mode4 handwriting mode (strange behaviour) * mode5 automatic selection of update mode */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/firmware.h> #include <linux/gpio.h> #include <linux/pm_runtime.h> #include <video/auo_k190xfb.h> #include "auo_k190x.h" /* * AUO-K1900 specific commands */ #define AUOK1900_CMD_PARTIALDISP 0x1001 #define AUOK1900_CMD_ROTATION 0x1006 #define AUOK1900_CMD_LUT_STOP 0x1009 #define AUOK1900_INIT_TEMP_AVERAGE (1 << 13) #define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10) #define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2) static void auok1900_init(struct auok190xfb_par *par) { struct device *dev = par->info->device; struct auok190x_board *board = par->board; u16 init_param = 0; pm_runtime_get_sync(dev); init_param |= AUOK1900_INIT_TEMP_AVERAGE; init_param |= AUOK1900_INIT_ROTATE(par->rotation); init_param |= 
AUOK190X_INIT_INVERSE_WHITE; init_param |= AUOK190X_INIT_FORMAT0; init_param |= AUOK1900_INIT_RESOLUTION(par->resolution); init_param |= AUOK190X_INIT_SHIFT_RIGHT; auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param); /* let the controller finish */ board->wait_for_rdy(par); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } static void auok1900_update_region(struct auok190xfb_par *par, int mode, u16 y1, u16 y2) { struct device *dev = par->info->device; unsigned char *buf = (unsigned char *)par->info->screen_base; int xres = par->info->var.xres; int line_length = par->info->fix.line_length; u16 args[4]; pm_runtime_get_sync(dev); mutex_lock(&(par->io_lock)); /* y1 and y2 must be a multiple of 2 so drop the lowest bit */ y1 &= 0xfffe; y2 &= 0xfffe; dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n", 1, y1+1, xres, y2-y1, mode); /* to FIX handle different partial update modes */ args[0] = mode | 1; args[1] = y1 + 1; args[2] = xres; args[3] = y2 - y1; buf += y1 * line_length; auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args, ((y2 - y1) * line_length)/2, (u16 *) buf); auok190x_send_command(par, AUOK190X_CMD_DATA_STOP); par->update_cnt++; mutex_unlock(&(par->io_lock)); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par, u16 y1, u16 y2) { int mode; if (par->update_mode < 0) { mode = AUOK190X_UPDATE_MODE(1); par->last_mode = -1; } else { mode = AUOK190X_UPDATE_MODE(par->update_mode); par->last_mode = par->update_mode; } if (par->flash) mode |= AUOK190X_UPDATE_NONFLASH; auok1900_update_region(par, mode, y1, y2); } static void auok1900fb_dpy_update(struct auok190xfb_par *par) { int mode; if (par->update_mode < 0) { mode = AUOK190X_UPDATE_MODE(0); par->last_mode = -1; } else { mode = AUOK190X_UPDATE_MODE(par->update_mode); par->last_mode = par->update_mode; } if (par->flash) mode |= AUOK190X_UPDATE_NONFLASH; auok1900_update_region(par, mode, 0, 
par->info->var.yres); par->update_cnt = 0; } static bool auok1900fb_need_refresh(struct auok190xfb_par *par) { return (par->update_cnt > 10); } static int auok1900fb_probe(struct platform_device *pdev) { struct auok190x_init_data init; struct auok190x_board *board; /* pick up board specific routines */ board = pdev->dev.platform_data; if (!board) return -EINVAL; /* fill temporary init struct for common init */ init.id = "auo_k1900fb"; init.board = board; init.update_partial = auok1900fb_dpy_update_pages; init.update_all = auok1900fb_dpy_update; init.need_refresh = auok1900fb_need_refresh; init.init = auok1900_init; return auok190x_common_probe(pdev, &init); } static int auok1900fb_remove(struct platform_device *pdev) { return auok190x_common_remove(pdev); } static struct platform_driver auok1900fb_driver = { .probe = auok1900fb_probe, .remove = auok1900fb_remove, .driver = { .owner = THIS_MODULE, .name = "auo_k1900fb", .pm = &auok190x_pm, }, }; module_platform_driver(auok1900fb_driver); MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller"); MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); MODULE_LICENSE("GPL");
gpl-2.0
gallagth/keecker_kernel
crypto/xts.c
3090
7256
/* XTS: as defined in IEEE1619/D16 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf * (sector sizes which are not a multiple of 16 bytes are, * however currently unsupported) * * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org> * * Based om ecb.c * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <crypto/b128ops.h> #include <crypto/gf128mul.h> struct priv { struct crypto_cipher *child; struct crypto_cipher *tweak; }; static int setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { struct priv *ctx = crypto_tfm_ctx(parent); struct crypto_cipher *child = ctx->tweak; u32 *flags = &parent->crt_flags; int err; /* key consists of keys of equal size concatenated, therefore * the length must be even */ if (keylen % 2) { /* tell the user why there was an error */ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } /* we need two cipher instances: one to compute the initial 'tweak' * by encrypting the IV (usually the 'plain' iv) and the other * one to encrypt and decrypt the data */ /* tweak cipher, uses Key2 i.e. the second half of *key */ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); if (err) return err; crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & CRYPTO_TFM_RES_MASK); child = ctx->child; /* data cipher, uses Key1 i.e. 
the first half of *key */ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(child, key, keylen/2); if (err) return err; crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & CRYPTO_TFM_RES_MASK); return 0; } struct sinfo { be128 *t; struct crypto_tfm *tfm; void (*fn)(struct crypto_tfm *, u8 *, const u8 *); }; static inline void xts_round(struct sinfo *s, void *dst, const void *src) { be128_xor(dst, s->t, src); /* PP <- T xor P */ s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ be128_xor(dst, dst, s->t); /* C <- T xor CC */ } static int crypt(struct blkcipher_desc *d, struct blkcipher_walk *w, struct priv *ctx, void (*tw)(struct crypto_tfm *, u8 *, const u8 *), void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { int err; unsigned int avail; const int bs = crypto_cipher_blocksize(ctx->child); struct sinfo s = { .tfm = crypto_cipher_tfm(ctx->child), .fn = fn }; u8 *wsrc; u8 *wdst; err = blkcipher_walk_virt(d, w); if (!w->nbytes) return err; s.t = (be128 *)w->iv; avail = w->nbytes; wsrc = w->src.virt.addr; wdst = w->dst.virt.addr; /* calculate first value of T */ tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv); goto first; for (;;) { do { gf128mul_x_ble(s.t, s.t); first: xts_round(&s, wdst, wsrc); wsrc += bs; wdst += bs; } while ((avail -= bs) >= bs); err = blkcipher_walk_done(d, w, avail); if (!w->nbytes) break; avail = w->nbytes; wsrc = w->src.virt.addr; wdst = w->dst.virt.addr; } return err; } static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk w; blkcipher_walk_init(&w, dst, src, nbytes); return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, crypto_cipher_alg(ctx->child)->cia_encrypt); } static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, 
unsigned int nbytes) { struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk w; blkcipher_walk_init(&w, dst, src, nbytes); return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, crypto_cipher_alg(ctx->child)->cia_decrypt); } static int init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct priv *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); if (crypto_cipher_blocksize(cipher) != 16) { *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; crypto_free_cipher(cipher); return -EINVAL; } ctx->child = cipher; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) { crypto_free_cipher(ctx->child); return PTR_ERR(cipher); } /* this check isn't really needed, leave it here just in case */ if (crypto_cipher_blocksize(cipher) != 16) { crypto_free_cipher(cipher); crypto_free_cipher(ctx->child); *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; return -EINVAL; } ctx->tweak = cipher; return 0; } static void exit_tfm(struct crypto_tfm *tfm) { struct priv *ctx = crypto_tfm_ctx(tfm); crypto_free_cipher(ctx->child); crypto_free_cipher(ctx->tweak); } static struct crypto_instance *alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); if (err) return ERR_PTR(err); alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_CAST(alg); inst = crypto_alloc_instance("xts", alg); if (IS_ERR(inst)) goto out_put_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = alg->cra_blocksize; if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; else inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; 
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = 2 * alg->cra_cipher.cia_min_keysize; inst->alg.cra_blkcipher.max_keysize = 2 * alg->cra_cipher.cia_max_keysize; inst->alg.cra_ctxsize = sizeof(struct priv); inst->alg.cra_init = init_tfm; inst->alg.cra_exit = exit_tfm; inst->alg.cra_blkcipher.setkey = setkey; inst->alg.cra_blkcipher.encrypt = encrypt; inst->alg.cra_blkcipher.decrypt = decrypt; out_put_alg: crypto_mod_put(alg); return inst; } static void free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } static struct crypto_template crypto_tmpl = { .name = "xts", .alloc = alloc, .free = free, .module = THIS_MODULE, }; static int __init crypto_module_init(void) { return crypto_register_template(&crypto_tmpl); } static void __exit crypto_module_exit(void) { crypto_unregister_template(&crypto_tmpl); } module_init(crypto_module_init); module_exit(crypto_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode");
gpl-2.0
mfd10000/Itab1011
drivers/net/rrunner.c
3346
42744
/* * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board. * * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>. * * Thanks to Essential Communication for providing us with hardware * and very comprehensive documentation without which I would not have * been able to write this driver. A special thank you to John Gibbon * for sorting out the legal issues, with the NDA, allowing the code to * be released under the GPL. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the * stupid bugs in my code. * * Softnet support and various other patches from Val Henson of * ODS/Essential. * * PCI DMA mapping code partly based on work by Francois Romieu. */ #define DEBUG 1 #define RX_DMA_SKBUFF 1 #define PKT_COPY_THRESHOLD 512 #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/hippidevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <net/sock.h> #include <asm/system.h> #include <asm/cache.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #define rr_if_busy(dev) netif_queue_stopped(dev) #define rr_if_running(dev) netif_running(dev) #include "rrunner.h" #define RUN_AT(x) (jiffies + (x)) MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>"); MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver"); MODULE_LICENSE("GPL"); static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n"; static const struct net_device_ops rr_netdev_ops = { .ndo_open = rr_open, .ndo_stop = 
rr_close, .ndo_do_ioctl = rr_ioctl, .ndo_start_xmit = rr_start_xmit, .ndo_change_mtu = hippi_change_mtu, .ndo_set_mac_address = hippi_mac_addr, }; /* * Implementation notes: * * The DMA engine only allows for DMA within physical 64KB chunks of * memory. The current approach of the driver (and stack) is to use * linear blocks of memory for the skbuffs. However, as the data block * is always the first part of the skb and skbs are 2^n aligned so we * are guarantted to get the whole block within one 64KB align 64KB * chunk. * * On the long term, relying on being able to allocate 64KB linear * chunks of memory is not feasible and the skb handling code and the * stack will need to know about I/O vectors or something similar. */ static int __devinit rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; static int version_disp; u8 pci_latency; struct rr_private *rrpriv; void *tmpptr; dma_addr_t ring_dma; int ret = -ENOMEM; dev = alloc_hippi_dev(sizeof(struct rr_private)); if (!dev) goto out3; ret = pci_enable_device(pdev); if (ret) { ret = -ENODEV; goto out2; } rrpriv = netdev_priv(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, "rrunner")) { ret = -EIO; goto out; } pci_set_drvdata(pdev, dev); rrpriv->pci_dev = pdev; spin_lock_init(&rrpriv->lock); dev->irq = pdev->irq; dev->netdev_ops = &rr_netdev_ops; dev->base_addr = pci_resource_start(pdev, 0); /* display version info if adapter is found */ if (!version_disp) { /* set display flag to TRUE so that */ /* we only display this string ONCE */ version_disp = 1; printk(version); } pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency <= 0x58){ pci_latency = 0x58; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency); } pci_set_master(pdev); printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI " "at 0x%08lx, irq %i, PCI latency %i\n", dev->name, dev->base_addr, dev->irq, pci_latency); /* * Remap the regs into kernel space. 
*/ rrpriv->regs = ioremap(dev->base_addr, 0x1000); if (!rrpriv->regs){ printk(KERN_ERR "%s: Unable to map I/O register, " "RoadRunner will be disabled.\n", dev->name); ret = -EIO; goto out; } tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); rrpriv->tx_ring = tmpptr; rrpriv->tx_ring_dma = ring_dma; if (!tmpptr) { ret = -ENOMEM; goto out; } tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); rrpriv->rx_ring = tmpptr; rrpriv->rx_ring_dma = ring_dma; if (!tmpptr) { ret = -ENOMEM; goto out; } tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma); rrpriv->evt_ring = tmpptr; rrpriv->evt_ring_dma = ring_dma; if (!tmpptr) { ret = -ENOMEM; goto out; } /* * Don't access any register before this point! */ #ifdef __BIG_ENDIAN writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP, &rrpriv->regs->HostCtrl); #endif /* * Need to add a case for little-endian 64-bit hosts here. */ rr_init(dev); dev->base_addr = 0; ret = register_netdev(dev); if (ret) goto out; return 0; out: if (rrpriv->rx_ring) pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, rrpriv->rx_ring_dma); if (rrpriv->tx_ring) pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring, rrpriv->tx_ring_dma); if (rrpriv->regs) iounmap(rrpriv->regs); if (pdev) { pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); } out2: free_netdev(dev); out3: return ret; } static void __devexit rr_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct rr_private *rr = netdev_priv(dev); if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){ printk(KERN_ERR "%s: trying to unload running NIC\n", dev->name); writel(HALT_NIC, &rr->regs->HostCtrl); } pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring, rr->evt_ring_dma); pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring, rr->rx_ring_dma); pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring, rr->tx_ring_dma); unregister_netdev(dev); iounmap(rr->regs); free_netdev(dev); pci_release_regions(pdev); 
pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } /* * Commands are considered to be slow, thus there is no reason to * inline this. */ static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd) { struct rr_regs __iomem *regs; u32 idx; regs = rrpriv->regs; /* * This is temporary - it will go away in the final version. * We probably also want to make this function inline. */ if (readl(&regs->HostCtrl) & NIC_HALTED){ printk("issuing command for halted NIC, code 0x%x, " "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl)); if (readl(&regs->Mode) & FATAL_ERR) printk("error codes Fail1 %02x, Fail2 %02x\n", readl(&regs->Fail1), readl(&regs->Fail2)); } idx = rrpriv->info->cmd_ctrl.pi; writel(*(u32*)(cmd), &regs->CmdRing[idx]); wmb(); idx = (idx - 1) % CMD_RING_ENTRIES; rrpriv->info->cmd_ctrl.pi = idx; wmb(); if (readl(&regs->Mode) & FATAL_ERR) printk("error code %02x\n", readl(&regs->Fail1)); } /* * Reset the board in a sensible manner. The NIC is already halted * when we get here and a spin-lock is held. 
*/ static int rr_reset(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 start_pc; int i; rrpriv = netdev_priv(dev); regs = rrpriv->regs; rr_load_firmware(dev); writel(0x01000000, &regs->TX_state); writel(0xff800000, &regs->RX_state); writel(0, &regs->AssistState); writel(CLEAR_INTA, &regs->LocalCtrl); writel(0x01, &regs->BrkPt); writel(0, &regs->Timer); writel(0, &regs->TimerRef); writel(RESET_DMA, &regs->DmaReadState); writel(RESET_DMA, &regs->DmaWriteState); writel(0, &regs->DmaWriteHostHi); writel(0, &regs->DmaWriteHostLo); writel(0, &regs->DmaReadHostHi); writel(0, &regs->DmaReadHostLo); writel(0, &regs->DmaReadLen); writel(0, &regs->DmaWriteLen); writel(0, &regs->DmaWriteLcl); writel(0, &regs->DmaWriteIPchecksum); writel(0, &regs->DmaReadLcl); writel(0, &regs->DmaReadIPchecksum); writel(0, &regs->PciState); #if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode); #elif (BITS_PER_LONG == 64) writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode); #else writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode); #endif #if 0 /* * Don't worry, this is just black magic. 
*/ writel(0xdf000, &regs->RxBase); writel(0xdf000, &regs->RxPrd); writel(0xdf000, &regs->RxCon); writel(0xce000, &regs->TxBase); writel(0xce000, &regs->TxPrd); writel(0xce000, &regs->TxCon); writel(0, &regs->RxIndPro); writel(0, &regs->RxIndCon); writel(0, &regs->RxIndRef); writel(0, &regs->TxIndPro); writel(0, &regs->TxIndCon); writel(0, &regs->TxIndRef); writel(0xcc000, &regs->pad10[0]); writel(0, &regs->DrCmndPro); writel(0, &regs->DrCmndCon); writel(0, &regs->DwCmndPro); writel(0, &regs->DwCmndCon); writel(0, &regs->DwCmndRef); writel(0, &regs->DrDataPro); writel(0, &regs->DrDataCon); writel(0, &regs->DrDataRef); writel(0, &regs->DwDataPro); writel(0, &regs->DwDataCon); writel(0, &regs->DwDataRef); #endif writel(0xffffffff, &regs->MbEvent); writel(0, &regs->Event); writel(0, &regs->TxPi); writel(0, &regs->IpRxPi); writel(0, &regs->EvtCon); writel(0, &regs->EvtPrd); rrpriv->info->evt_ctrl.pi = 0; for (i = 0; i < CMD_RING_ENTRIES; i++) writel(0, &regs->CmdRing[i]); /* * Why 32 ? is this not cache line size dependent? */ writel(RBURST_64|WBURST_64, &regs->PciState); wmb(); start_pc = rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, rncd_info.FwStart)); #if (DEBUG > 1) printk("%s: Executing firmware at address 0x%06x\n", dev->name, start_pc); #endif writel(start_pc + 0x800, &regs->Pc); wmb(); udelay(5); writel(start_pc, &regs->Pc); wmb(); return 0; } /* * Read a string from the EEPROM. 
*/ static unsigned int rr_read_eeprom(struct rr_private *rrpriv, unsigned long offset, unsigned char *buf, unsigned long length) { struct rr_regs __iomem *regs = rrpriv->regs; u32 misc, io, host, i; io = readl(&regs->ExtIo); writel(0, &regs->ExtIo); misc = readl(&regs->LocalCtrl); writel(0, &regs->LocalCtrl); host = readl(&regs->HostCtrl); writel(host | HALT_NIC, &regs->HostCtrl); mb(); for (i = 0; i < length; i++){ writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase); mb(); buf[i] = (readl(&regs->WinData) >> 24) & 0xff; mb(); } writel(host, &regs->HostCtrl); writel(misc, &regs->LocalCtrl); writel(io, &regs->ExtIo); mb(); return i; } /* * Shortcut to read one word (4 bytes) out of the EEPROM and convert * it to our CPU byte-order. */ static u32 rr_read_eeprom_word(struct rr_private *rrpriv, size_t offset) { __be32 word; if ((rr_read_eeprom(rrpriv, offset, (unsigned char *)&word, 4) == 4)) return be32_to_cpu(word); return 0; } /* * Write a string to the EEPROM. * * This is only called when the firmware is not running. */ static unsigned int write_eeprom(struct rr_private *rrpriv, unsigned long offset, unsigned char *buf, unsigned long length) { struct rr_regs __iomem *regs = rrpriv->regs; u32 misc, io, data, i, j, ready, error = 0; io = readl(&regs->ExtIo); writel(0, &regs->ExtIo); misc = readl(&regs->LocalCtrl); writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl); mb(); for (i = 0; i < length; i++){ writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase); mb(); data = buf[i] << 24; /* * Only try to write the data if it is not the same * value already. 
*/ if ((readl(&regs->WinData) & 0xff000000) != data){ writel(data, &regs->WinData); ready = 0; j = 0; mb(); while(!ready){ udelay(20); if ((readl(&regs->WinData) & 0xff000000) == data) ready = 1; mb(); if (j++ > 5000){ printk("data mismatch: %08x, " "WinData %08x\n", data, readl(&regs->WinData)); ready = 1; error = 1; } } } } writel(misc, &regs->LocalCtrl); writel(io, &regs->ExtIo); mb(); return error; } static int __devinit rr_init(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 sram_size, rev; rrpriv = netdev_priv(dev); regs = rrpriv->regs; rev = readl(&regs->FwRev); rrpriv->fw_rev = rev; if (rev > 0x00020024) printk(" Firmware revision: %i.%i.%i\n", (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff)); else if (rev >= 0x00020000) { printk(" Firmware revision: %i.%i.%i (2.0.37 or " "later is recommended)\n", (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff)); }else{ printk(" Firmware revision too old: %i.%i.%i, please " "upgrade to 2.0.37 or later.\n", (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff)); } #if (DEBUG > 2) printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng)); #endif /* * Read the hardware address from the eeprom. The HW address * is not really necessary for HIPPI but awfully convenient. * The pointer arithmetic to put it in dev_addr is ugly, but * Donald Becker does it this way for the GigE version of this * card and it's shorter and more portable than any * other method I've seen. 
-VAL */ *(__be16 *)(dev->dev_addr) = htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA))); *(__be32 *)(dev->dev_addr+2) = htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4]))); printk(" MAC: %pM\n", dev->dev_addr); sram_size = rr_read_eeprom_word(rrpriv, 8); printk(" SRAM size 0x%06x\n", sram_size); return 0; } static int rr_init1(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; unsigned long myjif, flags; struct cmd cmd; u32 hostctrl; int ecode = 0; short i; rrpriv = netdev_priv(dev); regs = rrpriv->regs; spin_lock_irqsave(&rrpriv->lock, flags); hostctrl = readl(&regs->HostCtrl); writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl); wmb(); if (hostctrl & PARITY_ERR){ printk("%s: Parity error halting NIC - this is serious!\n", dev->name); spin_unlock_irqrestore(&rrpriv->lock, flags); ecode = -EFAULT; goto error; } set_rxaddr(regs, rrpriv->rx_ctrl_dma); set_infoaddr(regs, rrpriv->info_dma); rrpriv->info->evt_ctrl.entry_size = sizeof(struct event); rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES; rrpriv->info->evt_ctrl.mode = 0; rrpriv->info->evt_ctrl.pi = 0; set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma); rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd); rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES; rrpriv->info->cmd_ctrl.mode = 0; rrpriv->info->cmd_ctrl.pi = 15; for (i = 0; i < CMD_RING_ENTRIES; i++) { writel(0, &regs->CmdRing[i]); } for (i = 0; i < TX_RING_ENTRIES; i++) { rrpriv->tx_ring[i].size = 0; set_rraddr(&rrpriv->tx_ring[i].addr, 0); rrpriv->tx_skbuff[i] = NULL; } rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc); rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES; rrpriv->info->tx_ctrl.mode = 0; rrpriv->info->tx_ctrl.pi = 0; set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma); /* * Set dirty_tx before we start receiving interrupts, otherwise * the interrupt handler might think it is supposed to process * tx ints before we 
are up and running, which may cause a null * pointer access in the int handler. */ rrpriv->tx_full = 0; rrpriv->cur_rx = 0; rrpriv->dirty_rx = rrpriv->dirty_tx = 0; rr_reset(dev); /* Tuning values */ writel(0x5000, &regs->ConRetry); writel(0x100, &regs->ConRetryTmr); writel(0x500000, &regs->ConTmout); writel(0x60, &regs->IntrTmr); writel(0x500000, &regs->TxDataMvTimeout); writel(0x200000, &regs->RxDataMvTimeout); writel(0x80, &regs->WriteDmaThresh); writel(0x80, &regs->ReadDmaThresh); rrpriv->fw_running = 0; wmb(); hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR); writel(hostctrl, &regs->HostCtrl); wmb(); spin_unlock_irqrestore(&rrpriv->lock, flags); for (i = 0; i < RX_RING_ENTRIES; i++) { struct sk_buff *skb; dma_addr_t addr; rrpriv->rx_ring[i].mode = 0; skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "%s: Unable to allocate memory " "for receive ring - halting NIC\n", dev->name); ecode = -ENOMEM; goto error; } rrpriv->rx_skbuff[i] = skb; addr = pci_map_single(rrpriv->pci_dev, skb->data, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); /* * Sanity test to see if we conflict with the DMA * limitations of the Roadrunner. */ if ((((unsigned long)skb->data) & 0xfff) > ~65320) printk("skb alloc error\n"); set_rraddr(&rrpriv->rx_ring[i].addr, addr); rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN; } rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc); rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES; rrpriv->rx_ctrl[4].mode = 8; rrpriv->rx_ctrl[4].pi = 0; wmb(); set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma); udelay(1000); /* * Now start the FirmWare. */ cmd.code = C_START_FW; cmd.ring = 0; cmd.index = 0; rr_issue_cmd(rrpriv, &cmd); /* * Give the FirmWare time to chew on the `get running' command. 
*/ myjif = jiffies + 5 * HZ; while (time_before(jiffies, myjif) && !rrpriv->fw_running) cpu_relax(); netif_start_queue(dev); return ecode; error: /* * We might have gotten here because we are out of memory, * make sure we release everything we allocated before failing */ for (i = 0; i < RX_RING_ENTRIES; i++) { struct sk_buff *skb = rrpriv->rx_skbuff[i]; if (skb) { pci_unmap_single(rrpriv->pci_dev, rrpriv->rx_ring[i].addr.addrlo, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); rrpriv->rx_ring[i].size = 0; set_rraddr(&rrpriv->rx_ring[i].addr, 0); dev_kfree_skb(skb); rrpriv->rx_skbuff[i] = NULL; } } return ecode; } /* * All events are considered to be slow (RX/TX ints do not generate * events) and are handled here, outside the main interrupt handler, * to reduce the size of the handler. */ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 tmp; rrpriv = netdev_priv(dev); regs = rrpriv->regs; while (prodidx != eidx){ switch (rrpriv->evt_ring[eidx].code){ case E_NIC_UP: tmp = readl(&regs->FwRev); printk(KERN_INFO "%s: Firmware revision %i.%i.%i " "up and running\n", dev->name, (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff)); rrpriv->fw_running = 1; writel(RX_RING_ENTRIES - 1, &regs->IpRxPi); wmb(); break; case E_LINK_ON: printk(KERN_INFO "%s: Optical link ON\n", dev->name); break; case E_LINK_OFF: printk(KERN_INFO "%s: Optical link OFF\n", dev->name); break; case E_RX_IDLE: printk(KERN_WARNING "%s: RX data not moving\n", dev->name); goto drop; case E_WATCHDOG: printk(KERN_INFO "%s: The watchdog is here to see " "us\n", dev->name); break; case E_INTERN_ERR: printk(KERN_ERR "%s: HIPPI Internal NIC error\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_HOST_ERR: printk(KERN_ERR "%s: Host software error\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; /* * TX events. 
*/ case E_CON_REJ: printk(KERN_WARNING "%s: Connection rejected\n", dev->name); dev->stats.tx_aborted_errors++; break; case E_CON_TMOUT: printk(KERN_WARNING "%s: Connection timeout\n", dev->name); break; case E_DISC_ERR: printk(KERN_WARNING "%s: HIPPI disconnect error\n", dev->name); dev->stats.tx_aborted_errors++; break; case E_INT_PRTY: printk(KERN_ERR "%s: HIPPI Internal Parity error\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_IDLE: printk(KERN_WARNING "%s: Transmitter idle\n", dev->name); break; case E_TX_LINK_DROP: printk(KERN_WARNING "%s: Link lost during transmit\n", dev->name); dev->stats.tx_aborted_errors++; writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_INV_RNG: printk(KERN_ERR "%s: Invalid send ring block\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_INV_BUF: printk(KERN_ERR "%s: Invalid send buffer address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_TX_INV_DSC: printk(KERN_ERR "%s: Invalid descriptor address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; /* * RX events. */ case E_RX_RNG_OUT: printk(KERN_INFO "%s: Receive ring full\n", dev->name); break; case E_RX_PAR_ERR: printk(KERN_WARNING "%s: Receive parity error\n", dev->name); goto drop; case E_RX_LLRC_ERR: printk(KERN_WARNING "%s: Receive LLRC error\n", dev->name); goto drop; case E_PKT_LN_ERR: printk(KERN_WARNING "%s: Receive packet length " "error\n", dev->name); goto drop; case E_DTA_CKSM_ERR: printk(KERN_WARNING "%s: Data checksum error\n", dev->name); goto drop; case E_SHT_BST: printk(KERN_WARNING "%s: Unexpected short burst " "error\n", dev->name); goto drop; case E_STATE_ERR: printk(KERN_WARNING "%s: Recv. 
state transition" " error\n", dev->name); goto drop; case E_UNEXP_DATA: printk(KERN_WARNING "%s: Unexpected data error\n", dev->name); goto drop; case E_LST_LNK_ERR: printk(KERN_WARNING "%s: Link lost error\n", dev->name); goto drop; case E_FRM_ERR: printk(KERN_WARNING "%s: Framming Error\n", dev->name); goto drop; case E_FLG_SYN_ERR: printk(KERN_WARNING "%s: Flag sync. lost during " "packet\n", dev->name); goto drop; case E_RX_INV_BUF: printk(KERN_ERR "%s: Invalid receive buffer " "address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_RX_INV_DSC: printk(KERN_ERR "%s: Invalid receive descriptor " "address\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; case E_RNG_BLK: printk(KERN_ERR "%s: Invalid ring block\n", dev->name); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); break; drop: /* Label packet to be dropped. * Actual dropping occurs in rx * handling. * * The index of packet we get to drop is * the index of the packet following * the bad packet. 
-kbf */ { u16 index = rrpriv->evt_ring[eidx].index; index = (index + (RX_RING_ENTRIES - 1)) % RX_RING_ENTRIES; rrpriv->rx_ring[index].mode |= (PACKET_BAD | PACKET_END); } break; default: printk(KERN_WARNING "%s: Unhandled event 0x%02x\n", dev->name, rrpriv->evt_ring[eidx].code); } eidx = (eidx + 1) % EVT_RING_ENTRIES; } rrpriv->info->evt_ctrl.pi = eidx; wmb(); return eidx; } static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) { struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; do { struct rx_desc *desc; u32 pkt_len; desc = &(rrpriv->rx_ring[index]); pkt_len = desc->size; #if (DEBUG > 2) printk("index %i, rxlimit %i\n", index, rxlimit); printk("len %x, mode %x\n", pkt_len, desc->mode); #endif if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ dev->stats.rx_dropped++; goto defer; } if (pkt_len > 0){ struct sk_buff *skb, *rx_skb; rx_skb = rrpriv->rx_skbuff[index]; if (pkt_len < PKT_COPY_THRESHOLD) { skb = alloc_skb(pkt_len, GFP_ATOMIC); if (skb == NULL){ printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len); dev->stats.rx_dropped++; goto defer; } else { pci_dma_sync_single_for_cpu(rrpriv->pci_dev, desc->addr.addrlo, pkt_len, PCI_DMA_FROMDEVICE); memcpy(skb_put(skb, pkt_len), rx_skb->data, pkt_len); pci_dma_sync_single_for_device(rrpriv->pci_dev, desc->addr.addrlo, pkt_len, PCI_DMA_FROMDEVICE); } }else{ struct sk_buff *newskb; newskb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC); if (newskb){ dma_addr_t addr; pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); skb = rx_skb; skb_put(skb, pkt_len); rrpriv->rx_skbuff[index] = newskb; addr = pci_map_single(rrpriv->pci_dev, newskb->data, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); set_rraddr(&desc->addr, addr); } else { printk("%s: Out of memory, deferring " "packet\n", dev->name); dev->stats.rx_dropped++; goto defer; } } skb->protocol = hippi_type_trans(skb, dev); 
netif_rx(skb); /* send it up */ dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } defer: desc->mode = 0; desc->size = dev->mtu + HIPPI_HLEN; if ((index & 7) == 7) writel(index, &regs->IpRxPi); index = (index + 1) % RX_RING_ENTRIES; } while(index != rxlimit); rrpriv->cur_rx = index; wmb(); } static irqreturn_t rr_interrupt(int irq, void *dev_id) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; struct net_device *dev = (struct net_device *)dev_id; u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon; rrpriv = netdev_priv(dev); regs = rrpriv->regs; if (!(readl(&regs->HostCtrl) & RR_INT)) return IRQ_NONE; spin_lock(&rrpriv->lock); prodidx = readl(&regs->EvtPrd); txcsmr = (prodidx >> 8) & 0xff; rxlimit = (prodidx >> 16) & 0xff; prodidx &= 0xff; #if (DEBUG > 2) printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name, prodidx, rrpriv->info->evt_ctrl.pi); #endif /* * Order here is important. We must handle events * before doing anything else in order to catch * such things as LLRC errors, etc -kbf */ eidx = rrpriv->info->evt_ctrl.pi; if (prodidx != eidx) eidx = rr_handle_event(dev, prodidx, eidx); rxindex = rrpriv->cur_rx; if (rxindex != rxlimit) rx_int(dev, rxlimit, rxindex); txcon = rrpriv->dirty_tx; if (txcsmr != txcon) { do { /* Due to occational firmware TX producer/consumer out * of sync. 
error need to check entry in ring -kbf */ if(rrpriv->tx_skbuff[txcon]){ struct tx_desc *desc; struct sk_buff *skb; desc = &(rrpriv->tx_ring[txcon]); skb = rrpriv->tx_skbuff[txcon]; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); rrpriv->tx_skbuff[txcon] = NULL; desc->size = 0; set_rraddr(&rrpriv->tx_ring[txcon].addr, 0); desc->mode = 0; } txcon = (txcon + 1) % TX_RING_ENTRIES; } while (txcsmr != txcon); wmb(); rrpriv->dirty_tx = txcon; if (rrpriv->tx_full && rr_if_busy(dev) && (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES) != rrpriv->dirty_tx)){ rrpriv->tx_full = 0; netif_wake_queue(dev); } } eidx |= ((txcsmr << 8) | (rxlimit << 16)); writel(eidx, &regs->EvtCon); wmb(); spin_unlock(&rrpriv->lock); return IRQ_HANDLED; } static inline void rr_raz_tx(struct rr_private *rrpriv, struct net_device *dev) { int i; for (i = 0; i < TX_RING_ENTRIES; i++) { struct sk_buff *skb = rrpriv->tx_skbuff[i]; if (skb) { struct tx_desc *desc = &(rrpriv->tx_ring[i]); pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, skb->len, PCI_DMA_TODEVICE); desc->size = 0; set_rraddr(&desc->addr, 0); dev_kfree_skb(skb); rrpriv->tx_skbuff[i] = NULL; } } } static inline void rr_raz_rx(struct rr_private *rrpriv, struct net_device *dev) { int i; for (i = 0; i < RX_RING_ENTRIES; i++) { struct sk_buff *skb = rrpriv->rx_skbuff[i]; if (skb) { struct rx_desc *desc = &(rrpriv->rx_ring[i]); pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE); desc->size = 0; set_rraddr(&desc->addr, 0); dev_kfree_skb(skb); rrpriv->rx_skbuff[i] = NULL; } } } static void rr_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; unsigned long flags; if (readl(&regs->HostCtrl) & NIC_HALTED){ printk("%s: Restarting nic\n", dev->name); 
memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl)); memset(rrpriv->info, 0, sizeof(struct rr_info)); wmb(); rr_raz_tx(rrpriv, dev); rr_raz_rx(rrpriv, dev); if (rr_init1(dev)) { spin_lock_irqsave(&rrpriv->lock, flags); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); } } rrpriv->timer.expires = RUN_AT(5*HZ); add_timer(&rrpriv->timer); } static int rr_open(struct net_device *dev) { struct rr_private *rrpriv = netdev_priv(dev); struct pci_dev *pdev = rrpriv->pci_dev; struct rr_regs __iomem *regs; int ecode = 0; unsigned long flags; dma_addr_t dma_addr; regs = rrpriv->regs; if (rrpriv->fw_rev < 0x00020000) { printk(KERN_WARNING "%s: trying to configure device with " "obsolete firmware\n", dev->name); ecode = -EBUSY; goto error; } rrpriv->rx_ctrl = pci_alloc_consistent(pdev, 256 * sizeof(struct ring_ctrl), &dma_addr); if (!rrpriv->rx_ctrl) { ecode = -ENOMEM; goto error; } rrpriv->rx_ctrl_dma = dma_addr; memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl)); rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info), &dma_addr); if (!rrpriv->info) { ecode = -ENOMEM; goto error; } rrpriv->info_dma = dma_addr; memset(rrpriv->info, 0, sizeof(struct rr_info)); wmb(); spin_lock_irqsave(&rrpriv->lock, flags); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); readl(&regs->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", dev->name, dev->irq); ecode = -EAGAIN; goto error; } if ((ecode = rr_init1(dev))) goto error; /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ init_timer(&rrpriv->timer); rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. 
watchdog */ rrpriv->timer.data = (unsigned long)dev; rrpriv->timer.function = rr_timer; /* timer handler */ add_timer(&rrpriv->timer); netif_start_queue(dev); return ecode; error: spin_lock_irqsave(&rrpriv->lock, flags); writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); if (rrpriv->info) { pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info, rrpriv->info_dma); rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { pci_free_consistent(pdev, sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } netif_stop_queue(dev); return ecode; } static void rr_dump(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 index, cons; short i; int len; rrpriv = netdev_priv(dev); regs = rrpriv->regs; printk("%s: dumping NIC TX rings\n", dev->name); printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n", readl(&regs->RxPrd), readl(&regs->TxPrd), readl(&regs->EvtPrd), readl(&regs->TxPi), rrpriv->info->tx_ctrl.pi); printk("Error code 0x%x\n", readl(&regs->Fail1)); index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES; cons = rrpriv->dirty_tx; printk("TX ring index %i, TX consumer %i\n", index, cons); if (rrpriv->tx_skbuff[index]){ len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len); printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size); for (i = 0; i < len; i++){ if (!(i & 7)) printk("\n"); printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]); } printk("\n"); } if (rrpriv->tx_skbuff[cons]){ len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len); printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len); printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n", rrpriv->tx_ring[cons].mode, rrpriv->tx_ring[cons].size, (unsigned long long) 
rrpriv->tx_ring[cons].addr.addrlo, (unsigned long)rrpriv->tx_skbuff[cons]->data, (unsigned int)rrpriv->tx_skbuff[cons]->truesize); for (i = 0; i < len; i++){ if (!(i & 7)) printk("\n"); printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size); } printk("\n"); } printk("dumping TX ring info:\n"); for (i = 0; i < TX_RING_ENTRIES; i++) printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n", rrpriv->tx_ring[i].mode, rrpriv->tx_ring[i].size, (unsigned long long) rrpriv->tx_ring[i].addr.addrlo); } static int rr_close(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; unsigned long flags; u32 tmp; short i; netif_stop_queue(dev); rrpriv = netdev_priv(dev); regs = rrpriv->regs; /* * Lock to make sure we are not cleaning up while another CPU * is handling interrupts. */ spin_lock_irqsave(&rrpriv->lock, flags); tmp = readl(&regs->HostCtrl); if (tmp & NIC_HALTED){ printk("%s: NIC already halted\n", dev->name); rr_dump(dev); }else{ tmp |= HALT_NIC | RR_CLEAR_INT; writel(tmp, &regs->HostCtrl); readl(&regs->HostCtrl); } rrpriv->fw_running = 0; del_timer_sync(&rrpriv->timer); writel(0, &regs->TxPi); writel(0, &regs->IpRxPi); writel(0, &regs->EvtCon); writel(0, &regs->EvtPrd); for (i = 0; i < CMD_RING_ENTRIES; i++) writel(0, &regs->CmdRing[i]); rrpriv->info->tx_ctrl.entries = 0; rrpriv->info->cmd_ctrl.pi = 0; rrpriv->info->evt_ctrl.pi = 0; rrpriv->rx_ctrl[4].entries = 0; rr_raz_tx(rrpriv, dev); rr_raz_rx(rrpriv, dev); pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info), rrpriv->info, rrpriv->info_dma); rrpriv->info = NULL; free_irq(dev->irq, dev); spin_unlock_irqrestore(&rrpriv->lock, flags); return 0; } static netdev_tx_t rr_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; struct hippi_cb *hcb = (struct hippi_cb *) 
skb->cb; struct ring_ctrl *txctrl; unsigned long flags; u32 index, len = skb->len; u32 *ifield; struct sk_buff *new_skb; if (readl(&regs->Mode) & FATAL_ERR) printk("error codes Fail1 %02x, Fail2 %02x\n", readl(&regs->Fail1), readl(&regs->Fail2)); /* * We probably need to deal with tbusy here to prevent overruns. */ if (skb_headroom(skb) < 8){ printk("incoming skb too small - reallocating\n"); if (!(new_skb = dev_alloc_skb(len + 8))) { dev_kfree_skb(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } skb_reserve(new_skb, 8); skb_put(new_skb, len); skb_copy_from_linear_data(skb, new_skb->data, len); dev_kfree_skb(skb); skb = new_skb; } ifield = (u32 *)skb_push(skb, 8); ifield[0] = 0; ifield[1] = hcb->ifield; /* * We don't need the lock before we are actually going to start * fiddling with the control blocks. */ spin_lock_irqsave(&rrpriv->lock, flags); txctrl = &rrpriv->info->tx_ctrl; index = txctrl->pi; rrpriv->tx_skbuff[index] = skb; set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single( rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE)); rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */ rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END; txctrl->pi = (index + 1) % TX_RING_ENTRIES; wmb(); writel(txctrl->pi, &regs->TxPi); if (txctrl->pi == rrpriv->dirty_tx){ rrpriv->tx_full = 1; netif_stop_queue(dev); } spin_unlock_irqrestore(&rrpriv->lock, flags); return NETDEV_TX_OK; } /* * Read the firmware out of the EEPROM and put it into the SRAM * (or from user space - later) * * This operation requires the NIC to be halted and is performed with * interrupts disabled and with the spinlock hold. 
*/ static int rr_load_firmware(struct net_device *dev) { struct rr_private *rrpriv; struct rr_regs __iomem *regs; size_t eptr, segptr; int i, j; u32 localctrl, sptr, len, tmp; u32 p2len, p2size, nr_seg, revision, io, sram_size; rrpriv = netdev_priv(dev); regs = rrpriv->regs; if (dev->flags & IFF_UP) return -EBUSY; if (!(readl(&regs->HostCtrl) & NIC_HALTED)){ printk("%s: Trying to load firmware to a running NIC.\n", dev->name); return -EBUSY; } localctrl = readl(&regs->LocalCtrl); writel(0, &regs->LocalCtrl); writel(0, &regs->EvtPrd); writel(0, &regs->RxPrd); writel(0, &regs->TxPrd); /* * First wipe the entire SRAM, otherwise we might run into all * kinds of trouble ... sigh, this took almost all afternoon * to track down ;-( */ io = readl(&regs->ExtIo); writel(0, &regs->ExtIo); sram_size = rr_read_eeprom_word(rrpriv, 8); for (i = 200; i < sram_size / 4; i++){ writel(i * 4, &regs->WinBase); mb(); writel(0, &regs->WinData); mb(); } writel(io, &regs->ExtIo); mb(); eptr = rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, rncd_info.AddrRunCodeSegs)); eptr = ((eptr & 0x1fffff) >> 3); p2len = rr_read_eeprom_word(rrpriv, 0x83*4); p2len = (p2len << 2); p2size = rr_read_eeprom_word(rrpriv, 0x84*4); p2size = ((p2size & 0x1fffff) >> 3); if ((eptr < p2size) || (eptr > (p2size + p2len))){ printk("%s: eptr is invalid\n", dev->name); goto out; } revision = rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.HeaderFmt)); if (revision != 1){ printk("%s: invalid firmware format (%i)\n", dev->name, revision); goto out; } nr_seg = rr_read_eeprom_word(rrpriv, eptr); eptr +=4; #if (DEBUG > 1) printk("%s: nr_seg %i\n", dev->name, nr_seg); #endif for (i = 0; i < nr_seg; i++){ sptr = rr_read_eeprom_word(rrpriv, eptr); eptr += 4; len = rr_read_eeprom_word(rrpriv, eptr); eptr += 4; segptr = rr_read_eeprom_word(rrpriv, eptr); segptr = ((segptr & 0x1fffff) >> 3); eptr += 4; #if (DEBUG > 1) printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n", dev->name, i, sptr, len, 
segptr); #endif for (j = 0; j < len; j++){ tmp = rr_read_eeprom_word(rrpriv, segptr); writel(sptr, &regs->WinBase); mb(); writel(tmp, &regs->WinData); mb(); segptr += 4; sptr += 4; } } out: writel(localctrl, &regs->LocalCtrl); mb(); return 0; } static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct rr_private *rrpriv; unsigned char *image, *oldimage; unsigned long flags; unsigned int i; int error = -EOPNOTSUPP; rrpriv = netdev_priv(dev); switch(cmd){ case SIOCRRGFW: if (!capable(CAP_SYS_RAWIO)){ return -EPERM; } image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); if (!image){ printk(KERN_ERR "%s: Unable to allocate memory " "for EEPROM image\n", dev->name); return -ENOMEM; } if (rrpriv->fw_running){ printk("%s: Firmware already running\n", dev->name); error = -EPERM; goto gf_out; } spin_lock_irqsave(&rrpriv->lock, flags); i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES); spin_unlock_irqrestore(&rrpriv->lock, flags); if (i != EEPROM_BYTES){ printk(KERN_ERR "%s: Error reading EEPROM\n", dev->name); error = -EFAULT; goto gf_out; } error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES); if (error) error = -EFAULT; gf_out: kfree(image); return error; case SIOCRRPFW: if (!capable(CAP_SYS_RAWIO)){ return -EPERM; } image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); if (!image || !oldimage) { printk(KERN_ERR "%s: Unable to allocate memory " "for EEPROM image\n", dev->name); error = -ENOMEM; goto wf_out; } error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES); if (error) { error = -EFAULT; goto wf_out; } if (rrpriv->fw_running){ printk("%s: Firmware already running\n", dev->name); error = -EPERM; goto wf_out; } printk("%s: Updating EEPROM firmware\n", dev->name); spin_lock_irqsave(&rrpriv->lock, flags); error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES); if (error) printk(KERN_ERR "%s: Error writing EEPROM\n", dev->name); i = rr_read_eeprom(rrpriv, 0, oldimage, 
EEPROM_BYTES); spin_unlock_irqrestore(&rrpriv->lock, flags); if (i != EEPROM_BYTES) printk(KERN_ERR "%s: Error reading back EEPROM " "image\n", dev->name); error = memcmp(image, oldimage, EEPROM_BYTES); if (error){ printk(KERN_ERR "%s: Error verifying EEPROM image\n", dev->name); error = -EFAULT; } wf_out: kfree(oldimage); kfree(image); return error; case SIOCRRID: return put_user(0x52523032, (int __user *)rq->ifr_data); default: return error; } } static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = { { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} }; MODULE_DEVICE_TABLE(pci, rr_pci_tbl); static struct pci_driver rr_driver = { .name = "rrunner", .id_table = rr_pci_tbl, .probe = rr_init_one, .remove = __devexit_p(rr_remove_one), }; static int __init rr_init_module(void) { return pci_register_driver(&rr_driver); } static void __exit rr_cleanup_module(void) { pci_unregister_driver(&rr_driver); } module_init(rr_init_module); module_exit(rr_cleanup_module);
gpl-2.0
zephiK/android_kernel_moto_shamu
drivers/net/wireless/ath/ath5k/sysfs.c
4882
3589
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/pci.h> #include "ath5k.h" #include "reg.h" #define SIMPLE_SHOW_STORE(name, get, set) \ static ssize_t ath5k_attr_show_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct ieee80211_hw *hw = dev_get_drvdata(dev); \ struct ath5k_hw *ah = hw->priv; \ return snprintf(buf, PAGE_SIZE, "%d\n", get); \ } \ \ static ssize_t ath5k_attr_store_##name(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct ieee80211_hw *hw = dev_get_drvdata(dev); \ struct ath5k_hw *ah = hw->priv; \ int val, ret; \ \ ret = kstrtoint(buf, 10, &val); \ if (ret < 0) \ return ret; \ set(ah, val); \ return count; \ } \ static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \ ath5k_attr_show_##name, ath5k_attr_store_##name) #define SIMPLE_SHOW(name, get) \ static ssize_t ath5k_attr_show_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct ieee80211_hw *hw = dev_get_drvdata(dev); \ struct ath5k_hw *ah = hw->priv; \ return snprintf(buf, PAGE_SIZE, "%d\n", get); \ } \ static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL) /*** ANI ***/ SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init); SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level, ath5k_ani_set_noise_immunity_level); SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level, ath5k_ani_set_spur_immunity_level); SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level, ath5k_ani_set_firstep_level); SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig, ath5k_ani_set_ofdm_weak_signal_detection); SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig, ath5k_ani_set_cck_weak_signal_detection); SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level); static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, 
PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL); } static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO, ath5k_attr_show_noise_immunity_level_max, NULL); static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL); } static DEVICE_ATTR(firstep_level_max, S_IRUGO, ath5k_attr_show_firstep_level_max, NULL); static struct attribute *ath5k_sysfs_entries_ani[] = { &dev_attr_ani_mode.attr, &dev_attr_noise_immunity_level.attr, &dev_attr_spur_level.attr, &dev_attr_firstep_level.attr, &dev_attr_ofdm_weak_signal_detection.attr, &dev_attr_cck_weak_signal_detection.attr, &dev_attr_noise_immunity_level_max.attr, &dev_attr_spur_level_max.attr, &dev_attr_firstep_level_max.attr, NULL }; static struct attribute_group ath5k_attribute_group_ani = { .name = "ani", .attrs = ath5k_sysfs_entries_ani, }; /*** register / unregister ***/ int ath5k_sysfs_register(struct ath5k_hw *ah) { struct device *dev = ah->dev; int err; err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani); if (err) { ATH5K_ERR(ah, "failed to create sysfs group\n"); return err; } return 0; } void ath5k_sysfs_unregister(struct ath5k_hw *ah) { struct device *dev = ah->dev; sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani); }
gpl-2.0
jbott/android_kernel_rpi_rpi
drivers/input/joystick/iforce/iforce-packets.c
4882
8008
/* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include "iforce.h" static struct { __s32 x; __s32 y; } iforce_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}}; void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data) { int i; printk(KERN_DEBUG __FILE__ ": %s cmd = %04x, data = ", msg, cmd); for (i = 0; i < LO(cmd); i++) printk("%02x ", data[i]); printk("\n"); } /* * Send a packet of bytes to the device */ int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data) { /* Copy data to buffer */ int n = LO(cmd); int c; int empty; int head, tail; unsigned long flags; /* * Update head and tail of xmit buffer */ spin_lock_irqsave(&iforce->xmit_lock, flags); head = iforce->xmit.head; tail = iforce->xmit.tail; if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { dev_warn(&iforce->dev->dev, "not enough space in xmit buffer to send new packet\n"); spin_unlock_irqrestore(&iforce->xmit_lock, 
flags); return -1; } empty = head == tail; XMIT_INC(iforce->xmit.head, n+2); /* * Store packet in xmit buffer */ iforce->xmit.buf[head] = HI(cmd); XMIT_INC(head, 1); iforce->xmit.buf[head] = LO(cmd); XMIT_INC(head, 1); c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); if (n < c) c=n; memcpy(&iforce->xmit.buf[head], data, c); if (n != c) { memcpy(&iforce->xmit.buf[0], data + c, n - c); } XMIT_INC(head, n); spin_unlock_irqrestore(&iforce->xmit_lock, flags); /* * If necessary, start the transmission */ switch (iforce->bus) { #ifdef CONFIG_JOYSTICK_IFORCE_232 case IFORCE_232: if (empty) iforce_serial_xmit(iforce); break; #endif #ifdef CONFIG_JOYSTICK_IFORCE_USB case IFORCE_USB: if (iforce->usbdev && empty && !test_and_set_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)) { iforce_usb_xmit(iforce); } break; #endif } return 0; } /* Start or stop an effect */ int iforce_control_playback(struct iforce* iforce, u16 id, unsigned int value) { unsigned char data[3]; data[0] = LO(id); data[1] = (value > 0) ? ((value > 1) ? 0x41 : 0x01) : 0; data[2] = LO(value); return iforce_send_packet(iforce, FF_CMD_PLAY, data); } /* Mark an effect that was being updated as ready. 
That means it can be updated * again */ static int mark_core_as_ready(struct iforce *iforce, unsigned short addr) { int i; if (!iforce->dev->ff) return 0; for (i = 0; i < iforce->dev->ff->max_effects; ++i) { if (test_bit(FF_CORE_IS_USED, iforce->core_effects[i].flags) && (iforce->core_effects[i].mod1_chunk.start == addr || iforce->core_effects[i].mod2_chunk.start == addr)) { clear_bit(FF_CORE_UPDATE, iforce->core_effects[i].flags); return 0; } } dev_warn(&iforce->dev->dev, "unused effect %04x updated !!!\n", addr); return -1; } void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data) { struct input_dev *dev = iforce->dev; int i; static int being_used = 0; if (being_used) dev_warn(&iforce->dev->dev, "re-entrant call to iforce_process %d\n", being_used); being_used++; #ifdef CONFIG_JOYSTICK_IFORCE_232 if (HI(iforce->expect_packet) == HI(cmd)) { iforce->expect_packet = 0; iforce->ecmd = cmd; memcpy(iforce->edata, data, IFORCE_MAX_LENGTH); } #endif wake_up(&iforce->wait); if (!iforce->type) { being_used--; return; } switch (HI(cmd)) { case 0x01: /* joystick position data */ case 0x03: /* wheel position data */ if (HI(cmd) == 1) { input_report_abs(dev, ABS_X, (__s16) (((__s16)data[1] << 8) | data[0])); input_report_abs(dev, ABS_Y, (__s16) (((__s16)data[3] << 8) | data[2])); input_report_abs(dev, ABS_THROTTLE, 255 - data[4]); if (LO(cmd) >= 8 && test_bit(ABS_RUDDER ,dev->absbit)) input_report_abs(dev, ABS_RUDDER, (__s8)data[7]); } else { input_report_abs(dev, ABS_WHEEL, (__s16) (((__s16)data[1] << 8) | data[0])); input_report_abs(dev, ABS_GAS, 255 - data[2]); input_report_abs(dev, ABS_BRAKE, 255 - data[3]); } input_report_abs(dev, ABS_HAT0X, iforce_hat_to_axis[data[6] >> 4].x); input_report_abs(dev, ABS_HAT0Y, iforce_hat_to_axis[data[6] >> 4].y); for (i = 0; iforce->type->btn[i] >= 0; i++) input_report_key(dev, iforce->type->btn[i], data[(i >> 3) + 5] & (1 << (i & 7))); /* If there are untouched bits left, interpret them as the second hat */ if (i 
<= 8) { int btns = data[6]; if (test_bit(ABS_HAT1X, dev->absbit)) { if (btns & 8) input_report_abs(dev, ABS_HAT1X, -1); else if (btns & 2) input_report_abs(dev, ABS_HAT1X, 1); else input_report_abs(dev, ABS_HAT1X, 0); } if (test_bit(ABS_HAT1Y, dev->absbit)) { if (btns & 1) input_report_abs(dev, ABS_HAT1Y, -1); else if (btns & 4) input_report_abs(dev, ABS_HAT1Y, 1); else input_report_abs(dev, ABS_HAT1Y, 0); } } input_sync(dev); break; case 0x02: /* status report */ input_report_key(dev, BTN_DEAD, data[0] & 0x02); input_sync(dev); /* Check if an effect was just started or stopped */ i = data[1] & 0x7f; if (data[1] & 0x80) { if (!test_and_set_bit(FF_CORE_IS_PLAYED, iforce->core_effects[i].flags)) { /* Report play event */ input_report_ff_status(dev, i, FF_STATUS_PLAYING); } } else if (test_and_clear_bit(FF_CORE_IS_PLAYED, iforce->core_effects[i].flags)) { /* Report stop event */ input_report_ff_status(dev, i, FF_STATUS_STOPPED); } if (LO(cmd) > 3) { int j; for (j = 3; j < LO(cmd); j += 2) mark_core_as_ready(iforce, data[j] | (data[j+1]<<8)); } break; } being_used--; } int iforce_get_id_packet(struct iforce *iforce, char *packet) { switch (iforce->bus) { case IFORCE_USB: { #ifdef CONFIG_JOYSTICK_IFORCE_USB int status; iforce->cr.bRequest = packet[0]; iforce->ctrl->dev = iforce->usbdev; status = usb_submit_urb(iforce->ctrl, GFP_ATOMIC); if (status) { dev_err(&iforce->intf->dev, "usb_submit_urb failed %d\n", status); return -1; } wait_event_interruptible_timeout(iforce->wait, iforce->ctrl->status != -EINPROGRESS, HZ); if (iforce->ctrl->status) { dev_dbg(&iforce->intf->dev, "iforce->ctrl->status = %d\n", iforce->ctrl->status); usb_unlink_urb(iforce->ctrl); return -1; } #else printk(KERN_DEBUG "iforce_get_id_packet: iforce->bus = USB!\n"); #endif } break; case IFORCE_232: #ifdef CONFIG_JOYSTICK_IFORCE_232 iforce->expect_packet = FF_CMD_QUERY; iforce_send_packet(iforce, FF_CMD_QUERY, packet); wait_event_interruptible_timeout(iforce->wait, !iforce->expect_packet, HZ); if 
(iforce->expect_packet) { iforce->expect_packet = 0; return -1; } #else dev_err(&iforce->dev->dev, "iforce_get_id_packet: iforce->bus = SERIO!\n"); #endif break; default: dev_err(&iforce->dev->dev, "iforce_get_id_packet: iforce->bus = %d\n", iforce->bus); break; } return -(iforce->edata[0] != packet[0]); }
gpl-2.0
ramosian-glider/kasan
drivers/input/joystick/iforce/iforce-packets.c
4882
8008
/* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include "iforce.h" static struct { __s32 x; __s32 y; } iforce_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}}; void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data) { int i; printk(KERN_DEBUG __FILE__ ": %s cmd = %04x, data = ", msg, cmd); for (i = 0; i < LO(cmd); i++) printk("%02x ", data[i]); printk("\n"); } /* * Send a packet of bytes to the device */ int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data) { /* Copy data to buffer */ int n = LO(cmd); int c; int empty; int head, tail; unsigned long flags; /* * Update head and tail of xmit buffer */ spin_lock_irqsave(&iforce->xmit_lock, flags); head = iforce->xmit.head; tail = iforce->xmit.tail; if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { dev_warn(&iforce->dev->dev, "not enough space in xmit buffer to send new packet\n"); spin_unlock_irqrestore(&iforce->xmit_lock, 
flags); return -1; } empty = head == tail; XMIT_INC(iforce->xmit.head, n+2); /* * Store packet in xmit buffer */ iforce->xmit.buf[head] = HI(cmd); XMIT_INC(head, 1); iforce->xmit.buf[head] = LO(cmd); XMIT_INC(head, 1); c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); if (n < c) c=n; memcpy(&iforce->xmit.buf[head], data, c); if (n != c) { memcpy(&iforce->xmit.buf[0], data + c, n - c); } XMIT_INC(head, n); spin_unlock_irqrestore(&iforce->xmit_lock, flags); /* * If necessary, start the transmission */ switch (iforce->bus) { #ifdef CONFIG_JOYSTICK_IFORCE_232 case IFORCE_232: if (empty) iforce_serial_xmit(iforce); break; #endif #ifdef CONFIG_JOYSTICK_IFORCE_USB case IFORCE_USB: if (iforce->usbdev && empty && !test_and_set_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)) { iforce_usb_xmit(iforce); } break; #endif } return 0; } /* Start or stop an effect */ int iforce_control_playback(struct iforce* iforce, u16 id, unsigned int value) { unsigned char data[3]; data[0] = LO(id); data[1] = (value > 0) ? ((value > 1) ? 0x41 : 0x01) : 0; data[2] = LO(value); return iforce_send_packet(iforce, FF_CMD_PLAY, data); } /* Mark an effect that was being updated as ready. 
That means it can be updated * again */ static int mark_core_as_ready(struct iforce *iforce, unsigned short addr) { int i; if (!iforce->dev->ff) return 0; for (i = 0; i < iforce->dev->ff->max_effects; ++i) { if (test_bit(FF_CORE_IS_USED, iforce->core_effects[i].flags) && (iforce->core_effects[i].mod1_chunk.start == addr || iforce->core_effects[i].mod2_chunk.start == addr)) { clear_bit(FF_CORE_UPDATE, iforce->core_effects[i].flags); return 0; } } dev_warn(&iforce->dev->dev, "unused effect %04x updated !!!\n", addr); return -1; } void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data) { struct input_dev *dev = iforce->dev; int i; static int being_used = 0; if (being_used) dev_warn(&iforce->dev->dev, "re-entrant call to iforce_process %d\n", being_used); being_used++; #ifdef CONFIG_JOYSTICK_IFORCE_232 if (HI(iforce->expect_packet) == HI(cmd)) { iforce->expect_packet = 0; iforce->ecmd = cmd; memcpy(iforce->edata, data, IFORCE_MAX_LENGTH); } #endif wake_up(&iforce->wait); if (!iforce->type) { being_used--; return; } switch (HI(cmd)) { case 0x01: /* joystick position data */ case 0x03: /* wheel position data */ if (HI(cmd) == 1) { input_report_abs(dev, ABS_X, (__s16) (((__s16)data[1] << 8) | data[0])); input_report_abs(dev, ABS_Y, (__s16) (((__s16)data[3] << 8) | data[2])); input_report_abs(dev, ABS_THROTTLE, 255 - data[4]); if (LO(cmd) >= 8 && test_bit(ABS_RUDDER ,dev->absbit)) input_report_abs(dev, ABS_RUDDER, (__s8)data[7]); } else { input_report_abs(dev, ABS_WHEEL, (__s16) (((__s16)data[1] << 8) | data[0])); input_report_abs(dev, ABS_GAS, 255 - data[2]); input_report_abs(dev, ABS_BRAKE, 255 - data[3]); } input_report_abs(dev, ABS_HAT0X, iforce_hat_to_axis[data[6] >> 4].x); input_report_abs(dev, ABS_HAT0Y, iforce_hat_to_axis[data[6] >> 4].y); for (i = 0; iforce->type->btn[i] >= 0; i++) input_report_key(dev, iforce->type->btn[i], data[(i >> 3) + 5] & (1 << (i & 7))); /* If there are untouched bits left, interpret them as the second hat */ if (i 
<= 8) { int btns = data[6]; if (test_bit(ABS_HAT1X, dev->absbit)) { if (btns & 8) input_report_abs(dev, ABS_HAT1X, -1); else if (btns & 2) input_report_abs(dev, ABS_HAT1X, 1); else input_report_abs(dev, ABS_HAT1X, 0); } if (test_bit(ABS_HAT1Y, dev->absbit)) { if (btns & 1) input_report_abs(dev, ABS_HAT1Y, -1); else if (btns & 4) input_report_abs(dev, ABS_HAT1Y, 1); else input_report_abs(dev, ABS_HAT1Y, 0); } } input_sync(dev); break; case 0x02: /* status report */ input_report_key(dev, BTN_DEAD, data[0] & 0x02); input_sync(dev); /* Check if an effect was just started or stopped */ i = data[1] & 0x7f; if (data[1] & 0x80) { if (!test_and_set_bit(FF_CORE_IS_PLAYED, iforce->core_effects[i].flags)) { /* Report play event */ input_report_ff_status(dev, i, FF_STATUS_PLAYING); } } else if (test_and_clear_bit(FF_CORE_IS_PLAYED, iforce->core_effects[i].flags)) { /* Report stop event */ input_report_ff_status(dev, i, FF_STATUS_STOPPED); } if (LO(cmd) > 3) { int j; for (j = 3; j < LO(cmd); j += 2) mark_core_as_ready(iforce, data[j] | (data[j+1]<<8)); } break; } being_used--; } int iforce_get_id_packet(struct iforce *iforce, char *packet) { switch (iforce->bus) { case IFORCE_USB: { #ifdef CONFIG_JOYSTICK_IFORCE_USB int status; iforce->cr.bRequest = packet[0]; iforce->ctrl->dev = iforce->usbdev; status = usb_submit_urb(iforce->ctrl, GFP_ATOMIC); if (status) { dev_err(&iforce->intf->dev, "usb_submit_urb failed %d\n", status); return -1; } wait_event_interruptible_timeout(iforce->wait, iforce->ctrl->status != -EINPROGRESS, HZ); if (iforce->ctrl->status) { dev_dbg(&iforce->intf->dev, "iforce->ctrl->status = %d\n", iforce->ctrl->status); usb_unlink_urb(iforce->ctrl); return -1; } #else printk(KERN_DEBUG "iforce_get_id_packet: iforce->bus = USB!\n"); #endif } break; case IFORCE_232: #ifdef CONFIG_JOYSTICK_IFORCE_232 iforce->expect_packet = FF_CMD_QUERY; iforce_send_packet(iforce, FF_CMD_QUERY, packet); wait_event_interruptible_timeout(iforce->wait, !iforce->expect_packet, HZ); if 
(iforce->expect_packet) { iforce->expect_packet = 0; return -1; } #else dev_err(&iforce->dev->dev, "iforce_get_id_packet: iforce->bus = SERIO!\n"); #endif break; default: dev_err(&iforce->dev->dev, "iforce_get_id_packet: iforce->bus = %d\n", iforce->bus); break; } return -(iforce->edata[0] != packet[0]); }
gpl-2.0
Schischu/android_kernel_samsung_lt03lte
net/bridge/netfilter/ebtable_nat.c
9234
3080
/* * ebtable_nat * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * April, 2002 * */ #include <linux/netfilter_bridge/ebtables.h> #include <linux/module.h> #define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \ (1 << NF_BR_POST_ROUTING)) static struct ebt_entries initial_chains[] = { { .name = "PREROUTING", .policy = EBT_ACCEPT, }, { .name = "OUTPUT", .policy = EBT_ACCEPT, }, { .name = "POSTROUTING", .policy = EBT_ACCEPT, } }; static struct ebt_replace_kernel initial_table = { .name = "nat", .valid_hooks = NAT_VALID_HOOKS, .entries_size = 3 * sizeof(struct ebt_entries), .hook_entry = { [NF_BR_PRE_ROUTING] = &initial_chains[0], [NF_BR_LOCAL_OUT] = &initial_chains[1], [NF_BR_POST_ROUTING] = &initial_chains[2], }, .entries = (char *)initial_chains, }; static int check(const struct ebt_table_info *info, unsigned int valid_hooks) { if (valid_hooks & ~NAT_VALID_HOOKS) return -EINVAL; return 0; } static struct ebt_table frame_nat = { .name = "nat", .table = &initial_table, .valid_hooks = NAT_VALID_HOOKS, .check = check, .me = THIS_MODULE, }; static unsigned int ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in , const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat); } static unsigned int ebt_nat_out(unsigned int hook, struct sk_buff *skb, const struct net_device *in , const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_nat); } static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { { .hook = ebt_nat_out, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_OUT, .priority = NF_BR_PRI_NAT_DST_OTHER, }, { .hook = ebt_nat_out, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_POST_ROUTING, .priority = NF_BR_PRI_NAT_SRC, }, { .hook = ebt_nat_in, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_PRE_ROUTING, .priority = 
NF_BR_PRI_NAT_DST_BRIDGED, }, }; static int __net_init frame_nat_net_init(struct net *net) { net->xt.frame_nat = ebt_register_table(net, &frame_nat); if (IS_ERR(net->xt.frame_nat)) return PTR_ERR(net->xt.frame_nat); return 0; } static void __net_exit frame_nat_net_exit(struct net *net) { ebt_unregister_table(net, net->xt.frame_nat); } static struct pernet_operations frame_nat_net_ops = { .init = frame_nat_net_init, .exit = frame_nat_net_exit, }; static int __init ebtable_nat_init(void) { int ret; ret = register_pernet_subsys(&frame_nat_net_ops); if (ret < 0) return ret; ret = nf_register_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat)); if (ret < 0) unregister_pernet_subsys(&frame_nat_net_ops); return ret; } static void __exit ebtable_nat_fini(void) { nf_unregister_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat)); unregister_pernet_subsys(&frame_nat_net_ops); } module_init(ebtable_nat_init); module_exit(ebtable_nat_fini); MODULE_LICENSE("GPL");
gpl-2.0
tilaksidduram/android_kernel_samsung_smdk4412
arch/mips/mti-malta/malta-amon.c
9234
2176
/* * Copyright (C) 2007 MIPS Technologies, Inc. * All rights reserved. * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Arbitrary Monitor interface */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/addrspace.h> #include <asm/mips-boards/launch.h> #include <asm/mipsmtregs.h> int amon_cpu_avail(int cpu) { struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); if (cpu < 0 || cpu >= NCPULAUNCH) { pr_debug("avail: cpu%d is out of range\n", cpu); return 0; } launch += cpu; if (!(launch->flags & LAUNCH_FREADY)) { pr_debug("avail: cpu%d is not ready\n", cpu); return 0; } if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) { pr_debug("avail: too late.. 
cpu%d is already gone\n", cpu); return 0; } return 1; } void amon_cpu_start(int cpu, unsigned long pc, unsigned long sp, unsigned long gp, unsigned long a0) { volatile struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); if (!amon_cpu_avail(cpu)) return; if (cpu == smp_processor_id()) { pr_debug("launch: I am cpu%d!\n", cpu); return; } launch += cpu; pr_debug("launch: starting cpu%d\n", cpu); launch->pc = pc; launch->gp = gp; launch->sp = sp; launch->a0 = a0; smp_wmb(); /* Target must see parameters before go */ launch->flags |= LAUNCH_FGO; smp_wmb(); /* Target must see go before we poll */ while ((launch->flags & LAUNCH_FGONE) == 0) ; smp_rmb(); /* Target will be updating flags soon */ pr_debug("launch: cpu%d gone!\n", cpu); }
gpl-2.0
wolverine2k/android_kernel_oppo_n1
net/bridge/netfilter/ebtable_filter.c
9234
3158
/* * ebtable_filter * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * April, 2002 * */ #include <linux/netfilter_bridge/ebtables.h> #include <linux/module.h> #define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \ (1 << NF_BR_LOCAL_OUT)) static struct ebt_entries initial_chains[] = { { .name = "INPUT", .policy = EBT_ACCEPT, }, { .name = "FORWARD", .policy = EBT_ACCEPT, }, { .name = "OUTPUT", .policy = EBT_ACCEPT, }, }; static struct ebt_replace_kernel initial_table = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .entries_size = 3 * sizeof(struct ebt_entries), .hook_entry = { [NF_BR_LOCAL_IN] = &initial_chains[0], [NF_BR_FORWARD] = &initial_chains[1], [NF_BR_LOCAL_OUT] = &initial_chains[2], }, .entries = (char *)initial_chains, }; static int check(const struct ebt_table_info *info, unsigned int valid_hooks) { if (valid_hooks & ~FILTER_VALID_HOOKS) return -EINVAL; return 0; } static const struct ebt_table frame_filter = { .name = "filter", .table = &initial_table, .valid_hooks = FILTER_VALID_HOOKS, .check = check, .me = THIS_MODULE, }; static unsigned int ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter); } static unsigned int ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter); } static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { { .hook = ebt_in_hook, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_IN, .priority = NF_BR_PRI_FILTER_BRIDGED, }, { .hook = ebt_in_hook, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_FILTER_BRIDGED, }, { .hook = ebt_out_hook, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_OUT, .priority 
= NF_BR_PRI_FILTER_OTHER, }, }; static int __net_init frame_filter_net_init(struct net *net) { net->xt.frame_filter = ebt_register_table(net, &frame_filter); if (IS_ERR(net->xt.frame_filter)) return PTR_ERR(net->xt.frame_filter); return 0; } static void __net_exit frame_filter_net_exit(struct net *net) { ebt_unregister_table(net, net->xt.frame_filter); } static struct pernet_operations frame_filter_net_ops = { .init = frame_filter_net_init, .exit = frame_filter_net_exit, }; static int __init ebtable_filter_init(void) { int ret; ret = register_pernet_subsys(&frame_filter_net_ops); if (ret < 0) return ret; ret = nf_register_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter)); if (ret < 0) unregister_pernet_subsys(&frame_filter_net_ops); return ret; } static void __exit ebtable_filter_fini(void) { nf_unregister_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter)); unregister_pernet_subsys(&frame_filter_net_ops); } module_init(ebtable_filter_init); module_exit(ebtable_filter_fini); MODULE_LICENSE("GPL");
gpl-2.0
3nids/QGIS
src/app/qgsappcoordinateoperationhandlers.cpp
19
19217
/*************************************************************************** qgsappcoordinateoperationhandlers.cpp ------------------------- begin : May 2019 copyright : (C) 2019 by Nyall Dawson email : nyall dot dawson at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsappcoordinateoperationhandlers.h" #include "qgscoordinatetransform.h" #include "qgisapp.h" #include "qgsmessagebar.h" #include "qgsmessagebaritem.h" #include "qgsmessageoutput.h" #include "qgsproject.h" #include "qgsinstallgridshiftdialog.h" // // QgsAppMissingRequiredGridHandler // QgsAppMissingGridHandler::QgsAppMissingGridHandler( QObject *parent ) : QObject( parent ) { QgsCoordinateTransform::setCustomMissingRequiredGridHandler( [ = ]( const QgsCoordinateReferenceSystem & sourceCrs, const QgsCoordinateReferenceSystem & destinationCrs, const QgsDatumTransform::GridDetails & grid ) { emit missingRequiredGrid( sourceCrs, destinationCrs, grid ); } ); QgsCoordinateTransform::setCustomMissingPreferredGridHandler( [ = ]( const QgsCoordinateReferenceSystem & sourceCrs, const QgsCoordinateReferenceSystem & destinationCrs, const QgsDatumTransform::TransformDetails & preferredOperation, const QgsDatumTransform::TransformDetails & availableOperation ) { emit missingPreferredGrid( sourceCrs, destinationCrs, preferredOperation, availableOperation ); } ); QgsCoordinateTransform::setCustomCoordinateOperationCreationErrorHandler( [ = ]( const QgsCoordinateReferenceSystem & sourceCrs, const QgsCoordinateReferenceSystem & destinationCrs, const QString & error ) { emit coordinateOperationCreationError( sourceCrs, 
destinationCrs, error ); } ); QgsCoordinateTransform::setCustomMissingGridUsedByContextHandler( [ = ]( const QgsCoordinateReferenceSystem & sourceCrs, const QgsCoordinateReferenceSystem & destinationCrs, const QgsDatumTransform::TransformDetails & desired ) { emit missingGridUsedByContextHandler( sourceCrs, destinationCrs, desired ); } ); QgsCoordinateTransform::setFallbackOperationOccurredHandler( [ = ]( const QgsCoordinateReferenceSystem & sourceCrs, const QgsCoordinateReferenceSystem & destinationCrs, const QString & desired ) { emit fallbackOperationOccurred( sourceCrs, destinationCrs, desired ); } ); QgsCoordinateTransform::setDynamicCrsToDynamicCrsWarningHandler( [ = ]( const QgsCoordinateReferenceSystem & sourceCrs, const QgsCoordinateReferenceSystem & destinationCrs ) { emit dynamicToDynamicWarning( sourceCrs, destinationCrs ); } ); connect( this, &QgsAppMissingGridHandler::missingRequiredGrid, this, &QgsAppMissingGridHandler::onMissingRequiredGrid, Qt::QueuedConnection ); connect( this, &QgsAppMissingGridHandler::missingPreferredGrid, this, &QgsAppMissingGridHandler::onMissingPreferredGrid, Qt::QueuedConnection ); connect( this, &QgsAppMissingGridHandler::coordinateOperationCreationError, this, &QgsAppMissingGridHandler::onCoordinateOperationCreationError, Qt::QueuedConnection ); connect( this, &QgsAppMissingGridHandler::missingGridUsedByContextHandler, this, &QgsAppMissingGridHandler::onMissingGridUsedByContextHandler, Qt::QueuedConnection ); connect( this, &QgsAppMissingGridHandler::fallbackOperationOccurred, this, &QgsAppMissingGridHandler::onFallbackOperationOccurred, Qt::QueuedConnection ); connect( this, &QgsAppMissingGridHandler::dynamicToDynamicWarning, this, &QgsAppMissingGridHandler::onDynamicToDynamicWarning, Qt::QueuedConnection ); connect( QgsProject::instance(), &QgsProject::cleared, this, [ = ] { mAlreadyWarnedPairsForProject.clear(); mAlreadyWarnedBallparkPairsForProject.clear(); } ); } void QgsAppMissingGridHandler::onMissingRequiredGrid( 
const QgsCoordinateReferenceSystem &sourceCrs, const QgsCoordinateReferenceSystem &destinationCrs, const QgsDatumTransform::GridDetails &grid ) { if ( !shouldWarnAboutPair( sourceCrs, destinationCrs ) ) return; const QString shortMessage = tr( "No transform available between %1 and %2" ).arg( sourceCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ), destinationCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ) ); QString downloadMessage; const QString gridName = grid.shortName; if ( !grid.url.isEmpty() ) { if ( !grid.packageName.isEmpty() ) { downloadMessage = tr( "This grid is part of the “<i>%1</i>” package, available for download from <a href=\"%2\">%2</a>." ).arg( grid.packageName, grid.url ); } else { downloadMessage = tr( "This grid is available for download from <a href=\"%1\">%1</a>." ).arg( grid.url ); } } const QString longMessage = tr( "<p>No transform is available between <i>%1</i> and <i>%2</i>.</p>" "<p>This transformation requires the grid file “%3”, which is not available for use on the system.</p>" ).arg( sourceCrs.userFriendlyIdentifier(), destinationCrs.userFriendlyIdentifier(), grid.shortName ); QgsMessageBar *bar = QgisApp::instance()->messageBar(); QgsMessageBarItem *widget = QgsMessageBar::createMessage( QString(), shortMessage ); QPushButton *detailsButton = new QPushButton( tr( "Details" ) ); connect( detailsButton, &QPushButton::clicked, this, [longMessage, downloadMessage, bar, widget, gridName] { QgsInstallGridShiftFileDialog *dlg = new QgsInstallGridShiftFileDialog( gridName, QgisApp::instance() ); dlg->setAttribute( Qt::WA_DeleteOnClose ); dlg->setWindowTitle( tr( "No Transformations Available" ) ); dlg->setDescription( longMessage ); dlg->setDownloadMessage( downloadMessage ); if ( dlg->exec() ) { bar->popWidget( widget ); } } ); widget->layout()->addWidget( detailsButton ); bar->pushWidget( widget, Qgis::MessageLevel::Critical, 0 ); } void QgsAppMissingGridHandler::onMissingPreferredGrid( 
const QgsCoordinateReferenceSystem &sourceCrs, const QgsCoordinateReferenceSystem &destinationCrs, const QgsDatumTransform::TransformDetails &preferredOperation, const QgsDatumTransform::TransformDetails &availableOperation ) { if ( !shouldWarnAboutPair( sourceCrs, destinationCrs ) ) return; const QString shortMessage = tr( "Cannot use preferred transform between %1 and %2" ).arg( sourceCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ), destinationCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ) ); QString gridMessage; QString downloadMessage; QString gridName; for ( const QgsDatumTransform::GridDetails &grid : preferredOperation.grids ) { if ( !grid.isAvailable ) { const QString m = tr( "This transformation requires the grid file “%1”, which is not available for use on the system." ).arg( grid.shortName ); gridName = grid.shortName; if ( !grid.url.isEmpty() ) { if ( !grid.packageName.isEmpty() ) { downloadMessage = tr( "This grid is part of the <i>%1</i> package, available for download from <a href=\"%2\">%2</a>." ).arg( grid.packageName, grid.url ); } else { downloadMessage = tr( "This grid is available for download from <a href=\"%1\">%1</a>." 
).arg( grid.url ); } } gridMessage += QStringLiteral( "<li>%1</li>" ).arg( m ); } } if ( !gridMessage.isEmpty() ) { gridMessage = "<ul>" + gridMessage + "</ul>"; } QString accuracyMessage; if ( availableOperation.accuracy >= 0 && preferredOperation.accuracy >= 0 ) accuracyMessage = tr( "<p>Current transform “<i>%1</i>” has an accuracy of %2 meters, while the preferred transformation “<i>%3</i>” has accuracy %4 meters.</p>" ).arg( availableOperation.name ) .arg( availableOperation.accuracy ).arg( preferredOperation.name ).arg( preferredOperation.accuracy ); else if ( preferredOperation.accuracy >= 0 ) accuracyMessage = tr( "<p>Current transform “<i>%1</i>” has an unknown accuracy, while the preferred transformation “<i>%2</i>” has accuracy %3 meters.</p>" ).arg( availableOperation.name ) .arg( preferredOperation.name ).arg( preferredOperation.accuracy ); const QString longMessage = tr( "<p>The preferred transform between <i>%1</i> and <i>%2</i> is not available for use on the system.</p>" ).arg( sourceCrs.userFriendlyIdentifier(), destinationCrs.userFriendlyIdentifier() ) + gridMessage + accuracyMessage; QgsMessageBar *bar = QgisApp::instance()->messageBar(); QgsMessageBarItem *widget = QgsMessageBar::createMessage( QString(), shortMessage ); QPushButton *detailsButton = new QPushButton( tr( "Details" ) ); connect( detailsButton, &QPushButton::clicked, this, [longMessage, downloadMessage, gridName, widget, bar] { QgsInstallGridShiftFileDialog *dlg = new QgsInstallGridShiftFileDialog( gridName, QgisApp::instance() ); dlg->setAttribute( Qt::WA_DeleteOnClose ); dlg->setWindowTitle( tr( "Preferred Transformation Not Available" ) ); dlg->setDescription( longMessage ); dlg->setDownloadMessage( downloadMessage ); if ( dlg->exec() ) { bar->popWidget( widget ); } } ); widget->layout()->addWidget( detailsButton ); bar->pushWidget( widget, Qgis::MessageLevel::Warning, 0 ); } void QgsAppMissingGridHandler::onCoordinateOperationCreationError( const QgsCoordinateReferenceSystem 
&sourceCrs, const QgsCoordinateReferenceSystem &destinationCrs, const QString &error ) { if ( !shouldWarnAboutPairForCurrentProject( sourceCrs, destinationCrs ) ) return; const QString shortMessage = tr( "No transform available between %1 and %2" ).arg( sourceCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ), destinationCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ) ); const QString longMessage = tr( "<p>No transform is available between <i>%1</i> and <i>%2</i>.</p><p style=\"color: red\">%3</p>" ).arg( sourceCrs.userFriendlyIdentifier(), destinationCrs.userFriendlyIdentifier(), error ); QgsMessageBar *bar = QgisApp::instance()->messageBar(); QgsMessageBarItem *widget = QgsMessageBar::createMessage( QString(), shortMessage ); QPushButton *detailsButton = new QPushButton( tr( "Details" ) ); connect( detailsButton, &QPushButton::clicked, this, [longMessage] { // dlg has deleted on close QgsMessageOutput * dlg( QgsMessageOutput::createMessageOutput() ); dlg->setTitle( tr( "No Transformations Available" ) ); dlg->setMessage( longMessage, QgsMessageOutput::MessageHtml ); dlg->showMessage(); } ); widget->layout()->addWidget( detailsButton ); bar->pushWidget( widget, Qgis::MessageLevel::Critical, 0 ); } void QgsAppMissingGridHandler::onMissingGridUsedByContextHandler( const QgsCoordinateReferenceSystem &sourceCrs, const QgsCoordinateReferenceSystem &destinationCrs, const QgsDatumTransform::TransformDetails &desired ) { if ( !shouldWarnAboutPairForCurrentProject( sourceCrs, destinationCrs ) ) return; const QString shortMessage = tr( "Cannot use project transform between %1 and %2" ).arg( sourceCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ), destinationCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ) ); QString gridMessage; QString downloadMessage; QString gridName; for ( const QgsDatumTransform::GridDetails &grid : desired.grids ) { if ( !grid.isAvailable ) { gridName = 
grid.shortName; const QString m = tr( "This transformation requires the grid file “%1”, which is not available for use on the system." ).arg( grid.shortName ); if ( !grid.url.isEmpty() ) { if ( !grid.packageName.isEmpty() ) { downloadMessage = tr( "This grid is part of the <i>%1</i> package, available for download from <a href=\"%2\">%2</a>." ).arg( grid.packageName, grid.url ); } else { downloadMessage = tr( "This grid is available for download from <a href=\"%1\">%1</a>." ).arg( grid.url ); } } gridMessage += QStringLiteral( "<li>%1</li>" ).arg( m ); } } if ( !gridMessage.isEmpty() ) { gridMessage = "<ul>" + gridMessage + "</ul>"; } const QString longMessage = tr( "<p>This project specifies a preset transform between <i>%1</i> and <i>%2</i>, which is not available for use on the system.</p>" ).arg( sourceCrs.userFriendlyIdentifier(), destinationCrs.userFriendlyIdentifier() ) + gridMessage + tr( "<p>The operation specified for use in the project is:</p><p><code>%1</code></p>" ).arg( desired.proj ) ; QgsMessageBar *bar = QgisApp::instance()->messageBar(); QgsMessageBarItem *widget = QgsMessageBar::createMessage( QString(), shortMessage ); QPushButton *detailsButton = new QPushButton( tr( "Details" ) ); connect( detailsButton, &QPushButton::clicked, this, [longMessage, gridName, downloadMessage, bar, widget] { QgsInstallGridShiftFileDialog *dlg = new QgsInstallGridShiftFileDialog( gridName, QgisApp::instance() ); dlg->setAttribute( Qt::WA_DeleteOnClose ); dlg->setWindowTitle( tr( "Project Transformation Not Available" ) ); dlg->setDescription( longMessage ); dlg->setDownloadMessage( downloadMessage ); if ( dlg->exec() ) { bar->popWidget( widget ); } } ); widget->layout()->addWidget( detailsButton ); bar->pushWidget( widget, Qgis::MessageLevel::Critical, 0 ); } void QgsAppMissingGridHandler::onFallbackOperationOccurred( const QgsCoordinateReferenceSystem &sourceCrs, const QgsCoordinateReferenceSystem &destinationCrs, const QString &desired ) { if ( 
!shouldWarnAboutBallparkPairForCurrentProject( sourceCrs, destinationCrs ) ) return; const QString shortMessage = tr( "Used a ballpark transform from %1 to %2" ).arg( sourceCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ), destinationCrs.userFriendlyIdentifier( QgsCoordinateReferenceSystem::ShortString ) ); const QString longMessage = tr( "<p>An alternative, ballpark-only transform was used when transforming coordinates between <i>%1</i> and <i>%2</i>. The results may not match those obtained by using the preferred operation:</p><code>%3</code><p style=\"font-weight: bold\">Possibly an incorrect choice of operation was made for transformations between these reference systems. Check the Project Properties and ensure that the selected transform operations are applicable over the whole extent of the current project." ).arg( sourceCrs.userFriendlyIdentifier(), destinationCrs.userFriendlyIdentifier(), desired ); QgsMessageBar *bar = QgisApp::instance()->messageBar(); QgsMessageBarItem *widget = QgsMessageBar::createMessage( QString(), shortMessage ); QPushButton *detailsButton = new QPushButton( tr( "Details" ) ); connect( detailsButton, &QPushButton::clicked, this, [longMessage] { // dlg has deleted on close QgsMessageOutput * dlg( QgsMessageOutput::createMessageOutput() ); dlg->setTitle( tr( "Ballpark Transform Occurred" ) ); dlg->setMessage( longMessage, QgsMessageOutput::MessageHtml ); dlg->showMessage(); } ); widget->layout()->addWidget( detailsButton ); bar->pushWidget( widget, Qgis::MessageLevel::Warning, 0 ); } void QgsAppMissingGridHandler::onDynamicToDynamicWarning( const QgsCoordinateReferenceSystem &sourceCrs, const QgsCoordinateReferenceSystem &destinationCrs ) { if ( !shouldWarnAboutDynamicCrsForCurrentProject( sourceCrs, destinationCrs ) ) return; const QString shortMessage = tr( "Cannot transform between dynamic CRS at difference coordinate epochs" ); const QString longMessage = tr( "<p>Transformation between %1 and %2 is not 
currently supported.</p><p><b>The results will be unpredictable and should not be used for high accuracy work.</b>" ).arg( sourceCrs.userFriendlyIdentifier(), destinationCrs.userFriendlyIdentifier() ); QgsMessageBar *bar = QgisApp::instance()->messageBar(); QgsMessageBarItem *widget = QgsMessageBar::createMessage( QString(), shortMessage ); QPushButton *detailsButton = new QPushButton( tr( "Details" ) ); connect( detailsButton, &QPushButton::clicked, this, [longMessage] { // dlg has deleted on close QgsMessageOutput * dlg( QgsMessageOutput::createMessageOutput() ); dlg->setTitle( tr( "Unsupported Transformation" ) ); dlg->setMessage( longMessage, QgsMessageOutput::MessageHtml ); dlg->showMessage(); } ); widget->layout()->addWidget( detailsButton ); bar->pushWidget( widget, Qgis::MessageLevel::Critical, 0 ); } bool QgsAppMissingGridHandler::shouldWarnAboutPair( const QgsCoordinateReferenceSystem &source, const QgsCoordinateReferenceSystem &dest ) { if ( mAlreadyWarnedPairs.contains( qMakePair( source, dest ) ) || mAlreadyWarnedPairs.contains( qMakePair( dest, source ) ) ) { return false; } mAlreadyWarnedPairs.append( qMakePair( source, dest ) ); return true; } bool QgsAppMissingGridHandler::shouldWarnAboutPairForCurrentProject( const QgsCoordinateReferenceSystem &source, const QgsCoordinateReferenceSystem &dest ) { if ( mAlreadyWarnedPairsForProject.contains( qMakePair( source, dest ) ) || mAlreadyWarnedPairsForProject.contains( qMakePair( dest, source ) ) ) { return false; } mAlreadyWarnedPairsForProject.append( qMakePair( source, dest ) ); return true; } bool QgsAppMissingGridHandler::shouldWarnAboutBallparkPairForCurrentProject( const QgsCoordinateReferenceSystem &source, const QgsCoordinateReferenceSystem &dest ) { if ( mAlreadyWarnedBallparkPairsForProject.contains( qMakePair( source, dest ) ) || mAlreadyWarnedBallparkPairsForProject.contains( qMakePair( dest, source ) ) ) { return false; } mAlreadyWarnedBallparkPairsForProject.append( qMakePair( source, dest ) 
); return true; } bool QgsAppMissingGridHandler::shouldWarnAboutDynamicCrsForCurrentProject( const QgsCoordinateReferenceSystem &source, const QgsCoordinateReferenceSystem &dest ) { if ( mAlreadyWarnedDynamicCrsForProject.contains( qMakePair( source, dest ) ) || mAlreadyWarnedDynamicCrsForProject.contains( qMakePair( dest, source ) ) ) { return false; } mAlreadyWarnedDynamicCrsForProject.append( qMakePair( source, dest ) ); return true; }
gpl-2.0
djwong/linux-xfs-dev
sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
19
43385
/* * Mediatek ALSA SoC AFE platform driver for 2701 * * Copyright (c) 2016 MediaTek Inc. * Author: Garlic Tseng <garlic.tseng@mediatek.com> * Ir Lian <ir.lian@mediatek.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/module.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pm_runtime.h> #include "mt2701-afe-common.h" #include "mt2701-afe-clock-ctrl.h" #include "../common/mtk-afe-platform-driver.h" #include "../common/mtk-afe-fe-dai.h" static const struct snd_pcm_hardware mt2701_afe_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, .period_bytes_min = 1024, .period_bytes_max = 1024 * 256, .periods_min = 4, .periods_max = 1024, .buffer_bytes_max = 1024 * 1024 * 16, .fifo_size = 0, }; struct mt2701_afe_rate { unsigned int rate; unsigned int regvalue; }; static const struct mt2701_afe_rate mt2701_afe_i2s_rates[] = { { .rate = 8000, .regvalue = 0 }, { .rate = 12000, .regvalue = 1 }, { .rate = 16000, .regvalue = 2 }, { .rate = 24000, .regvalue = 3 }, { .rate = 32000, .regvalue = 4 }, { .rate = 48000, .regvalue = 5 }, { .rate = 96000, .regvalue = 6 }, { .rate = 192000, .regvalue = 7 }, { .rate = 384000, .regvalue = 8 }, { .rate = 7350, .regvalue = 16 }, { .rate = 11025, .regvalue = 17 }, { .rate = 14700, .regvalue = 18 }, { .rate = 22050, .regvalue = 19 }, { .rate = 29400, .regvalue = 20 }, { .rate = 44100, .regvalue = 21 }, 
{ .rate = 88200, .regvalue = 22 }, { .rate = 176400, .regvalue = 23 }, { .rate = 352800, .regvalue = 24 }, }; static int mt2701_dai_num_to_i2s(struct mtk_base_afe *afe, int num) { int val = num - MT2701_IO_I2S; if (val < 0 || val >= MT2701_I2S_NUM) { dev_err(afe->dev, "%s, num not available, num %d, val %d\n", __func__, num, val); return -EINVAL; } return val; } static int mt2701_afe_i2s_fs(unsigned int sample_rate) { int i; for (i = 0; i < ARRAY_SIZE(mt2701_afe_i2s_rates); i++) if (mt2701_afe_i2s_rates[i].rate == sample_rate) return mt2701_afe_i2s_rates[i].regvalue; return -EINVAL; } static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id); if (i2s_num < 0) return i2s_num; return mt2701_afe_enable_mclk(afe, i2s_num); } static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai, int i2s_num, int dir_invert) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mt2701_afe_private *afe_priv = afe->platform_priv; struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num]; const struct mt2701_i2s_data *i2s_data; int stream_dir = substream->stream; if (dir_invert) { if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) stream_dir = SNDRV_PCM_STREAM_CAPTURE; else stream_dir = SNDRV_PCM_STREAM_PLAYBACK; } i2s_data = i2s_path->i2s_data[stream_dir]; i2s_path->on[stream_dir]--; if (i2s_path->on[stream_dir] < 0) { dev_warn(afe->dev, "i2s_path->on: %d, dir: %d\n", i2s_path->on[stream_dir], stream_dir); i2s_path->on[stream_dir] = 0; } if (i2s_path->on[stream_dir]) return 0; /* disable i2s */ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, ASYS_I2S_CON_I2S_EN, 0); mt2701_afe_disable_i2s(afe, i2s_num, stream_dir); return 0; } static 
void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mt2701_afe_private *afe_priv = afe->platform_priv; int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id); struct mt2701_i2s_path *i2s_path; if (i2s_num < 0) return; i2s_path = &afe_priv->i2s_path[i2s_num]; if (i2s_path->occupied[substream->stream]) i2s_path->occupied[substream->stream] = 0; else goto I2S_UNSTART; mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 0); /* need to disable i2s-out path when disable i2s-in */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 1); I2S_UNSTART: /* disable mclk */ mt2701_afe_disable_mclk(afe, i2s_num); } static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream, struct snd_soc_dai *dai, int i2s_num, int dir_invert) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mt2701_afe_private *afe_priv = afe->platform_priv; struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num]; const struct mt2701_i2s_data *i2s_data; struct snd_pcm_runtime * const runtime = substream->runtime; int reg, fs, w_len = 1; /* now we support bck 64bits only */ int stream_dir = substream->stream; unsigned int mask = 0, val = 0; if (dir_invert) { if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) stream_dir = SNDRV_PCM_STREAM_CAPTURE; else stream_dir = SNDRV_PCM_STREAM_PLAYBACK; } i2s_data = i2s_path->i2s_data[stream_dir]; /* no need to enable if already done */ i2s_path->on[stream_dir]++; if (i2s_path->on[stream_dir] != 1) return 0; fs = mt2701_afe_i2s_fs(runtime->rate); mask = ASYS_I2S_CON_FS | ASYS_I2S_CON_I2S_COUPLE_MODE | /* 0 */ ASYS_I2S_CON_I2S_MODE | ASYS_I2S_CON_WIDE_MODE; val = ASYS_I2S_CON_FS_SET(fs) | ASYS_I2S_CON_I2S_MODE | 
ASYS_I2S_CON_WIDE_MODE_SET(w_len); if (stream_dir == SNDRV_PCM_STREAM_CAPTURE) { mask |= ASYS_I2S_IN_PHASE_FIX; val |= ASYS_I2S_IN_PHASE_FIX; } regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, mask, val); if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) reg = ASMO_TIMING_CON1; else reg = ASMI_TIMING_CON1; regmap_update_bits(afe->regmap, reg, i2s_data->i2s_asrc_fs_mask << i2s_data->i2s_asrc_fs_shift, fs << i2s_data->i2s_asrc_fs_shift); /* enable i2s */ mt2701_afe_enable_i2s(afe, i2s_num, stream_dir); /* reset i2s hw status before enable */ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, ASYS_I2S_CON_RESET, ASYS_I2S_CON_RESET); udelay(1); regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, ASYS_I2S_CON_RESET, 0); udelay(1); regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, ASYS_I2S_CON_I2S_EN, ASYS_I2S_CON_I2S_EN); return 0; } static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { int clk_domain; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mt2701_afe_private *afe_priv = afe->platform_priv; int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id); struct mt2701_i2s_path *i2s_path; int mclk_rate; if (i2s_num < 0) return i2s_num; i2s_path = &afe_priv->i2s_path[i2s_num]; mclk_rate = i2s_path->mclk_rate; if (i2s_path->occupied[substream->stream]) return -EBUSY; i2s_path->occupied[substream->stream] = 1; if (MT2701_PLL_DOMAIN_0_RATE % mclk_rate == 0) { clk_domain = 0; } else if (MT2701_PLL_DOMAIN_1_RATE % mclk_rate == 0) { clk_domain = 1; } else { dev_err(dai->dev, "%s() bad mclk rate %d\n", __func__, mclk_rate); return -EINVAL; } mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0); } else { /* need to enable i2s-out path when enable i2s-in */ /* prepare for another direction "out" */ 
mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 1); /* prepare for "in" */ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0); } return 0; } static int mt2701_afe_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct mtk_base_afe *afe = dev_get_drvdata(dai->dev); struct mt2701_afe_private *afe_priv = afe->platform_priv; int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id); if (i2s_num < 0) return i2s_num; /* mclk */ if (dir == SND_SOC_CLOCK_IN) { dev_warn(dai->dev, "%s() warning: mt2701 doesn't support mclk input\n", __func__); return -EINVAL; } afe_priv->i2s_path[i2s_num].mclk_rate = freq; return 0; } static int mt2701_btmrg_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mt2701_afe_private *afe_priv = afe->platform_priv; int ret; ret = mt2701_enable_btmrg_clk(afe); if (ret) return ret; afe_priv->mrg_enable[substream->stream] = 1; return 0; } static int mt2701_btmrg_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); int stream_fs; u32 val, msk; stream_fs = params_rate(params); if ((stream_fs != 8000) && (stream_fs != 16000)) { dev_err(afe->dev, "%s() btmgr not supprt this stream_fs %d\n", __func__, stream_fs); return -EINVAL; } regmap_update_bits(afe->regmap, AFE_MRGIF_CON, AFE_MRGIF_CON_I2S_MODE_MASK, AFE_MRGIF_CON_I2S_MODE_32K); val = AFE_DAIBT_CON0_BT_FUNC_EN | AFE_DAIBT_CON0_BT_FUNC_RDY | AFE_DAIBT_CON0_MRG_USE; msk = val; if (stream_fs == 16000) val |= AFE_DAIBT_CON0_BT_WIDE_MODE_EN; msk |= AFE_DAIBT_CON0_BT_WIDE_MODE_EN; regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, msk, val); regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, AFE_DAIBT_CON0_DAIBT_EN, 
AFE_DAIBT_CON0_DAIBT_EN); regmap_update_bits(afe->regmap, AFE_MRGIF_CON, AFE_MRGIF_CON_MRG_I2S_EN, AFE_MRGIF_CON_MRG_I2S_EN); regmap_update_bits(afe->regmap, AFE_MRGIF_CON, AFE_MRGIF_CON_MRG_EN, AFE_MRGIF_CON_MRG_EN); return 0; } static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mt2701_afe_private *afe_priv = afe->platform_priv; /* if the other direction stream is not occupied */ if (!afe_priv->mrg_enable[!substream->stream]) { regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, AFE_DAIBT_CON0_DAIBT_EN, 0); regmap_update_bits(afe->regmap, AFE_MRGIF_CON, AFE_MRGIF_CON_MRG_EN, 0); regmap_update_bits(afe->regmap, AFE_MRGIF_CON, AFE_MRGIF_CON_MRG_I2S_EN, 0); mt2701_disable_btmrg_clk(afe); } afe_priv->mrg_enable[substream->stream] = 0; } static int mt2701_simple_fe_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); int stream_dir = substream->stream; int memif_num = rtd->cpu_dai->id; struct mtk_base_afe_memif *memif_tmp; /* can't run single DL & DLM at the same time */ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) { memif_tmp = &afe->memif[MT2701_MEMIF_DLM]; if (memif_tmp->substream) { dev_warn(afe->dev, "%s memif is not available, stream_dir %d, memif_num %d\n", __func__, stream_dir, memif_num); return -EBUSY; } } return mtk_afe_fe_startup(substream, dai); } static int mt2701_simple_fe_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); int stream_dir = substream->stream; /* single DL use PAIR_INTERLEAVE */ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) { 
regmap_update_bits(afe->regmap, AFE_MEMIF_PBUF_SIZE, AFE_MEMIF_PBUF_SIZE_DLM_MASK, AFE_MEMIF_PBUF_SIZE_PAIR_INTERLEAVE); } return mtk_afe_fe_hw_params(substream, params, dai); } static int mt2701_dlm_fe_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mtk_base_afe_memif *memif_tmp; const struct mtk_base_memif_data *memif_data; int i; for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) { memif_tmp = &afe->memif[i]; if (memif_tmp->substream) return -EBUSY; } /* enable agent for all signal DL (due to hw design) */ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) { memif_data = afe->memif[i].data; regmap_update_bits(afe->regmap, memif_data->agent_disable_reg, 1 << memif_data->agent_disable_shift, 0 << memif_data->agent_disable_shift); } return mtk_afe_fe_startup(substream, dai); } static void mt2701_dlm_fe_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); const struct mtk_base_memif_data *memif_data; int i; for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) { memif_data = afe->memif[i].data; regmap_update_bits(afe->regmap, memif_data->agent_disable_reg, 1 << memif_data->agent_disable_shift, 1 << memif_data->agent_disable_shift); } return mtk_afe_fe_shutdown(substream, dai); } static int mt2701_dlm_fe_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); int channels = params_channels(params); regmap_update_bits(afe->regmap, AFE_MEMIF_PBUF_SIZE, AFE_MEMIF_PBUF_SIZE_DLM_MASK, AFE_MEMIF_PBUF_SIZE_FULL_INTERLEAVE); 
regmap_update_bits(afe->regmap, AFE_MEMIF_PBUF_SIZE, AFE_MEMIF_PBUF_SIZE_DLM_BYTE_MASK, AFE_MEMIF_PBUF_SIZE_DLM_32BYTES); regmap_update_bits(afe->regmap, AFE_MEMIF_PBUF_SIZE, AFE_MEMIF_PBUF_SIZE_DLM_CH_MASK, AFE_MEMIF_PBUF_SIZE_DLM_CH(channels)); return mtk_afe_fe_hw_params(substream, params, dai); } static int mt2701_dlm_fe_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform); struct mtk_base_afe_memif *memif_tmp = &afe->memif[MT2701_MEMIF_DL1]; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: regmap_update_bits(afe->regmap, memif_tmp->data->enable_reg, 1 << memif_tmp->data->enable_shift, 1 << memif_tmp->data->enable_shift); mtk_afe_fe_trigger(substream, cmd, dai); return 0; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: mtk_afe_fe_trigger(substream, cmd, dai); regmap_update_bits(afe->regmap, memif_tmp->data->enable_reg, 1 << memif_tmp->data->enable_shift, 0); return 0; default: return -EINVAL; } } static int mt2701_memif_fs(struct snd_pcm_substream *substream, unsigned int rate) { struct snd_soc_pcm_runtime *rtd = substream->private_data; int fs; if (rtd->cpu_dai->id != MT2701_MEMIF_ULBT) fs = mt2701_afe_i2s_fs(rate); else fs = (rate == 16000 ? 
1 : 0); return fs; } static int mt2701_irq_fs(struct snd_pcm_substream *substream, unsigned int rate) { return mt2701_afe_i2s_fs(rate); } /* FE DAIs */ static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = { .startup = mt2701_simple_fe_startup, .shutdown = mtk_afe_fe_shutdown, .hw_params = mt2701_simple_fe_hw_params, .hw_free = mtk_afe_fe_hw_free, .prepare = mtk_afe_fe_prepare, .trigger = mtk_afe_fe_trigger, }; static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = { .startup = mt2701_dlm_fe_startup, .shutdown = mt2701_dlm_fe_shutdown, .hw_params = mt2701_dlm_fe_hw_params, .hw_free = mtk_afe_fe_hw_free, .prepare = mtk_afe_fe_prepare, .trigger = mt2701_dlm_fe_trigger, }; /* I2S BE DAIs */ static const struct snd_soc_dai_ops mt2701_afe_i2s_ops = { .startup = mt2701_afe_i2s_startup, .shutdown = mt2701_afe_i2s_shutdown, .prepare = mt2701_afe_i2s_prepare, .set_sysclk = mt2701_afe_i2s_set_sysclk, }; /* MRG BE DAIs */ static const struct snd_soc_dai_ops mt2701_btmrg_ops = { .startup = mt2701_btmrg_startup, .shutdown = mt2701_btmrg_shutdown, .hw_params = mt2701_btmrg_hw_params, }; static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = { /* FE DAIs: memory intefaces to CPU */ { .name = "PCMO0", .id = MT2701_MEMIF_DL1, .suspend = mtk_afe_dai_suspend, .resume = mtk_afe_dai_resume, .playback = { .stream_name = "DL1", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_single_memif_dai_ops, }, { .name = "PCM_multi", .id = MT2701_MEMIF_DLM, .suspend = mtk_afe_dai_suspend, .resume = mtk_afe_dai_resume, .playback = { .stream_name = "DLM", .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_dlm_memif_dai_ops, }, { .name = "PCM0", .id = MT2701_MEMIF_UL1, .suspend = mtk_afe_dai_suspend, .resume = 
mtk_afe_dai_resume, .capture = { .stream_name = "UL1", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_single_memif_dai_ops, }, { .name = "PCM1", .id = MT2701_MEMIF_UL2, .suspend = mtk_afe_dai_suspend, .resume = mtk_afe_dai_resume, .capture = { .stream_name = "UL2", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_single_memif_dai_ops, }, { .name = "PCM_BT_DL", .id = MT2701_MEMIF_DLBT, .suspend = mtk_afe_dai_suspend, .resume = mtk_afe_dai_resume, .playback = { .stream_name = "DLBT", .channels_min = 1, .channels_max = 1, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &mt2701_single_memif_dai_ops, }, { .name = "PCM_BT_UL", .id = MT2701_MEMIF_ULBT, .suspend = mtk_afe_dai_suspend, .resume = mtk_afe_dai_resume, .capture = { .stream_name = "ULBT", .channels_min = 1, .channels_max = 1, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &mt2701_single_memif_dai_ops, }, /* BE DAIs */ { .name = "I2S0", .id = MT2701_IO_I2S, .playback = { .stream_name = "I2S0 Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .capture = { .stream_name = "I2S0 Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_afe_i2s_ops, .symmetric_rates = 1, }, { .name = "I2S1", .id = MT2701_IO_2ND_I2S, .playback = { .stream_name = "I2S1 Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE 
| SNDRV_PCM_FMTBIT_S32_LE) }, .capture = { .stream_name = "I2S1 Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_afe_i2s_ops, .symmetric_rates = 1, }, { .name = "I2S2", .id = MT2701_IO_3RD_I2S, .playback = { .stream_name = "I2S2 Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .capture = { .stream_name = "I2S2 Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_afe_i2s_ops, .symmetric_rates = 1, }, { .name = "I2S3", .id = MT2701_IO_4TH_I2S, .playback = { .stream_name = "I2S3 Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .capture = { .stream_name = "I2S3 Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) }, .ops = &mt2701_afe_i2s_ops, .symmetric_rates = 1, }, { .name = "MRG BT", .id = MT2701_IO_MRG, .playback = { .stream_name = "BT Playback", .channels_min = 1, .channels_max = 1, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "BT Capture", .channels_min = 1, .channels_max = 1, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &mt2701_btmrg_ops, .symmetric_rates = 1, } }; static const struct snd_kcontrol_new mt2701_afe_o00_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I00 Switch", AFE_CONN0, 0, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o01_mix[] = { 
SOC_DAPM_SINGLE_AUTODISABLE("I01 Switch", AFE_CONN1, 1, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o02_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I02 Switch", AFE_CONN2, 2, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o03_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 3, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o14_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I26 Switch", AFE_CONN14, 26, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o15_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I12 Switch", AFE_CONN15, 12, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o16_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I13 Switch", AFE_CONN16, 13, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o17_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I14 Switch", AFE_CONN17, 14, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o18_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I15 Switch", AFE_CONN18, 15, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o19_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I16 Switch", AFE_CONN19, 16, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o20_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN20, 17, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o21_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN21, 18, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o22_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I19 Switch", AFE_CONN22, 19, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o23_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I20 Switch", AFE_CONN23, 20, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o24_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I21 Switch", AFE_CONN24, 21, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_o31_mix[] = { SOC_DAPM_SINGLE_AUTODISABLE("I35 Switch", AFE_CONN41, 9, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_i02_mix[] = { SOC_DAPM_SINGLE("I2S0 Switch", SND_SOC_NOPM, 0, 1, 0), 
}; static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s0[] = { SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S0 Out Switch", ASYS_I2SO1_CON, 26, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s1[] = { SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S1 Out Switch", ASYS_I2SO2_CON, 26, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s2[] = { SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S2 Out Switch", PWR2_TOP_CON, 17, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s3[] = { SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S3 Out Switch", PWR2_TOP_CON, 18, 1, 0), }; static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = { SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S4 Out Switch", PWR2_TOP_CON, 19, 1, 0), }; static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = { /* inter-connections */ SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I01", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I02", SND_SOC_NOPM, 0, 0, mt2701_afe_i02_mix, ARRAY_SIZE(mt2701_afe_i02_mix)), SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I12", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I13", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I14", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I15", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I16", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I17", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I19", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I26", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I35", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("O00", SND_SOC_NOPM, 0, 0, mt2701_afe_o00_mix, ARRAY_SIZE(mt2701_afe_o00_mix)), SND_SOC_DAPM_MIXER("O01", SND_SOC_NOPM, 0, 0, mt2701_afe_o01_mix, ARRAY_SIZE(mt2701_afe_o01_mix)), SND_SOC_DAPM_MIXER("O02", SND_SOC_NOPM, 0, 0, mt2701_afe_o02_mix, ARRAY_SIZE(mt2701_afe_o02_mix)), 
SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0, mt2701_afe_o03_mix, ARRAY_SIZE(mt2701_afe_o03_mix)), SND_SOC_DAPM_MIXER("O14", SND_SOC_NOPM, 0, 0, mt2701_afe_o14_mix, ARRAY_SIZE(mt2701_afe_o14_mix)), SND_SOC_DAPM_MIXER("O15", SND_SOC_NOPM, 0, 0, mt2701_afe_o15_mix, ARRAY_SIZE(mt2701_afe_o15_mix)), SND_SOC_DAPM_MIXER("O16", SND_SOC_NOPM, 0, 0, mt2701_afe_o16_mix, ARRAY_SIZE(mt2701_afe_o16_mix)), SND_SOC_DAPM_MIXER("O17", SND_SOC_NOPM, 0, 0, mt2701_afe_o17_mix, ARRAY_SIZE(mt2701_afe_o17_mix)), SND_SOC_DAPM_MIXER("O18", SND_SOC_NOPM, 0, 0, mt2701_afe_o18_mix, ARRAY_SIZE(mt2701_afe_o18_mix)), SND_SOC_DAPM_MIXER("O19", SND_SOC_NOPM, 0, 0, mt2701_afe_o19_mix, ARRAY_SIZE(mt2701_afe_o19_mix)), SND_SOC_DAPM_MIXER("O20", SND_SOC_NOPM, 0, 0, mt2701_afe_o20_mix, ARRAY_SIZE(mt2701_afe_o20_mix)), SND_SOC_DAPM_MIXER("O21", SND_SOC_NOPM, 0, 0, mt2701_afe_o21_mix, ARRAY_SIZE(mt2701_afe_o21_mix)), SND_SOC_DAPM_MIXER("O22", SND_SOC_NOPM, 0, 0, mt2701_afe_o22_mix, ARRAY_SIZE(mt2701_afe_o22_mix)), SND_SOC_DAPM_MIXER("O31", SND_SOC_NOPM, 0, 0, mt2701_afe_o31_mix, ARRAY_SIZE(mt2701_afe_o31_mix)), SND_SOC_DAPM_MIXER("I12I13", SND_SOC_NOPM, 0, 0, mt2701_afe_multi_ch_out_i2s0, ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s0)), SND_SOC_DAPM_MIXER("I14I15", SND_SOC_NOPM, 0, 0, mt2701_afe_multi_ch_out_i2s1, ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s1)), SND_SOC_DAPM_MIXER("I16I17", SND_SOC_NOPM, 0, 0, mt2701_afe_multi_ch_out_i2s2, ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s2)), SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0, mt2701_afe_multi_ch_out_i2s3, ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)), }; static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = { {"I12", NULL, "DL1"}, {"I13", NULL, "DL1"}, {"I35", NULL, "DLBT"}, {"I2S0 Playback", NULL, "O15"}, {"I2S0 Playback", NULL, "O16"}, {"I2S1 Playback", NULL, "O17"}, {"I2S1 Playback", NULL, "O18"}, {"I2S2 Playback", NULL, "O19"}, {"I2S2 Playback", NULL, "O20"}, {"I2S3 Playback", NULL, "O21"}, {"I2S3 Playback", NULL, "O22"}, {"BT Playback", NULL, "O31"}, 
{"UL1", NULL, "O00"}, {"UL1", NULL, "O01"}, {"UL2", NULL, "O02"}, {"UL2", NULL, "O03"}, {"ULBT", NULL, "O14"}, {"I00", NULL, "I2S0 Capture"}, {"I01", NULL, "I2S0 Capture"}, {"I02", NULL, "I2S1 Capture"}, {"I03", NULL, "I2S1 Capture"}, /* I02,03 link to UL2, also need to open I2S0 */ {"I02", "I2S0 Switch", "I2S0 Capture"}, {"I26", NULL, "BT Capture"}, {"I12I13", "Multich I2S0 Out Switch", "DLM"}, {"I14I15", "Multich I2S1 Out Switch", "DLM"}, {"I16I17", "Multich I2S2 Out Switch", "DLM"}, {"I18I19", "Multich I2S3 Out Switch", "DLM"}, { "I12", NULL, "I12I13" }, { "I13", NULL, "I12I13" }, { "I14", NULL, "I14I15" }, { "I15", NULL, "I14I15" }, { "I16", NULL, "I16I17" }, { "I17", NULL, "I16I17" }, { "I18", NULL, "I18I19" }, { "I19", NULL, "I18I19" }, { "O00", "I00 Switch", "I00" }, { "O01", "I01 Switch", "I01" }, { "O02", "I02 Switch", "I02" }, { "O03", "I03 Switch", "I03" }, { "O14", "I26 Switch", "I26" }, { "O15", "I12 Switch", "I12" }, { "O16", "I13 Switch", "I13" }, { "O17", "I14 Switch", "I14" }, { "O18", "I15 Switch", "I15" }, { "O19", "I16 Switch", "I16" }, { "O20", "I17 Switch", "I17" }, { "O21", "I18 Switch", "I18" }, { "O22", "I19 Switch", "I19" }, { "O31", "I35 Switch", "I35" }, }; static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = { .name = "mt2701-afe-pcm-dai", .dapm_widgets = mt2701_afe_pcm_widgets, .num_dapm_widgets = ARRAY_SIZE(mt2701_afe_pcm_widgets), .dapm_routes = mt2701_afe_pcm_routes, .num_dapm_routes = ARRAY_SIZE(mt2701_afe_pcm_routes), }; static const struct mtk_base_memif_data memif_data[MT2701_MEMIF_NUM] = { { .name = "DL1", .id = MT2701_MEMIF_DL1, .reg_ofs_base = AFE_DL1_BASE, .reg_ofs_cur = AFE_DL1_CUR, .fs_reg = AFE_DAC_CON1, .fs_shift = 0, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON3, .mono_shift = 16, .enable_reg = AFE_DAC_CON0, .enable_shift = 1, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 0, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 6, .msb_reg = -1, .msb_shift = -1, }, { .name = "DL2", .id = 
MT2701_MEMIF_DL2, .reg_ofs_base = AFE_DL2_BASE, .reg_ofs_cur = AFE_DL2_CUR, .fs_reg = AFE_DAC_CON1, .fs_shift = 5, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON3, .mono_shift = 17, .enable_reg = AFE_DAC_CON0, .enable_shift = 2, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 2, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 7, .msb_reg = -1, .msb_shift = -1, }, { .name = "DL3", .id = MT2701_MEMIF_DL3, .reg_ofs_base = AFE_DL3_BASE, .reg_ofs_cur = AFE_DL3_CUR, .fs_reg = AFE_DAC_CON1, .fs_shift = 10, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON3, .mono_shift = 18, .enable_reg = AFE_DAC_CON0, .enable_shift = 3, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 4, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 8, .msb_reg = -1, .msb_shift = -1, }, { .name = "DL4", .id = MT2701_MEMIF_DL4, .reg_ofs_base = AFE_DL4_BASE, .reg_ofs_cur = AFE_DL4_CUR, .fs_reg = AFE_DAC_CON1, .fs_shift = 15, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON3, .mono_shift = 19, .enable_reg = AFE_DAC_CON0, .enable_shift = 4, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 6, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 9, .msb_reg = -1, .msb_shift = -1, }, { .name = "DL5", .id = MT2701_MEMIF_DL5, .reg_ofs_base = AFE_DL5_BASE, .reg_ofs_cur = AFE_DL5_CUR, .fs_reg = AFE_DAC_CON1, .fs_shift = 20, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON3, .mono_shift = 20, .enable_reg = AFE_DAC_CON0, .enable_shift = 5, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 8, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 10, .msb_reg = -1, .msb_shift = -1, }, { .name = "DLM", .id = MT2701_MEMIF_DLM, .reg_ofs_base = AFE_DLMCH_BASE, .reg_ofs_cur = AFE_DLMCH_CUR, .fs_reg = AFE_DAC_CON1, .fs_shift = 0, .fs_maskbit = 0x1f, .mono_reg = -1, .mono_shift = -1, .enable_reg = AFE_DAC_CON0, .enable_shift = 7, .hd_reg = AFE_MEMIF_PBUF_SIZE, .hd_shift = 28, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 12, .msb_reg = -1, .msb_shift = -1, }, { .name = "UL1", .id = MT2701_MEMIF_UL1, .reg_ofs_base = 
AFE_VUL_BASE, .reg_ofs_cur = AFE_VUL_CUR, .fs_reg = AFE_DAC_CON2, .fs_shift = 0, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON4, .mono_shift = 0, .enable_reg = AFE_DAC_CON0, .enable_shift = 10, .hd_reg = AFE_MEMIF_HD_CON1, .hd_shift = 0, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 0, .msb_reg = -1, .msb_shift = -1, }, { .name = "UL2", .id = MT2701_MEMIF_UL2, .reg_ofs_base = AFE_UL2_BASE, .reg_ofs_cur = AFE_UL2_CUR, .fs_reg = AFE_DAC_CON2, .fs_shift = 5, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON4, .mono_shift = 2, .enable_reg = AFE_DAC_CON0, .enable_shift = 11, .hd_reg = AFE_MEMIF_HD_CON1, .hd_shift = 2, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 1, .msb_reg = -1, .msb_shift = -1, }, { .name = "UL3", .id = MT2701_MEMIF_UL3, .reg_ofs_base = AFE_UL3_BASE, .reg_ofs_cur = AFE_UL3_CUR, .fs_reg = AFE_DAC_CON2, .fs_shift = 10, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON4, .mono_shift = 4, .enable_reg = AFE_DAC_CON0, .enable_shift = 12, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 0, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 2, .msb_reg = -1, .msb_shift = -1, }, { .name = "UL4", .id = MT2701_MEMIF_UL4, .reg_ofs_base = AFE_UL4_BASE, .reg_ofs_cur = AFE_UL4_CUR, .fs_reg = AFE_DAC_CON2, .fs_shift = 15, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON4, .mono_shift = 6, .enable_reg = AFE_DAC_CON0, .enable_shift = 13, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 6, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 3, .msb_reg = -1, .msb_shift = -1, }, { .name = "UL5", .id = MT2701_MEMIF_UL5, .reg_ofs_base = AFE_UL5_BASE, .reg_ofs_cur = AFE_UL5_CUR, .fs_reg = AFE_DAC_CON2, .fs_shift = 20, .mono_reg = AFE_DAC_CON4, .mono_shift = 8, .fs_maskbit = 0x1f, .enable_reg = AFE_DAC_CON0, .enable_shift = 14, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 8, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 4, .msb_reg = -1, .msb_shift = -1, }, { .name = "DLBT", .id = MT2701_MEMIF_DLBT, .reg_ofs_base = AFE_ARB1_BASE, .reg_ofs_cur = 
AFE_ARB1_CUR, .fs_reg = AFE_DAC_CON3, .fs_shift = 10, .fs_maskbit = 0x1f, .mono_reg = AFE_DAC_CON3, .mono_shift = 22, .enable_reg = AFE_DAC_CON0, .enable_shift = 8, .hd_reg = AFE_MEMIF_HD_CON0, .hd_shift = 14, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 13, .msb_reg = -1, .msb_shift = -1, }, { .name = "ULBT", .id = MT2701_MEMIF_ULBT, .reg_ofs_base = AFE_DAI_BASE, .reg_ofs_cur = AFE_DAI_CUR, .fs_reg = AFE_DAC_CON2, .fs_shift = 30, .fs_maskbit = 0x1, .mono_reg = -1, .mono_shift = -1, .enable_reg = AFE_DAC_CON0, .enable_shift = 17, .hd_reg = AFE_MEMIF_HD_CON1, .hd_shift = 20, .agent_disable_reg = AUDIO_TOP_CON5, .agent_disable_shift = 16, .msb_reg = -1, .msb_shift = -1, }, }; static const struct mtk_base_irq_data irq_data[MT2701_IRQ_ASYS_END] = { { .id = MT2701_IRQ_ASYS_IRQ1, .irq_cnt_reg = ASYS_IRQ1_CON, .irq_cnt_shift = 0, .irq_cnt_maskbit = 0xffffff, .irq_fs_reg = ASYS_IRQ1_CON, .irq_fs_shift = 24, .irq_fs_maskbit = 0x1f, .irq_en_reg = ASYS_IRQ1_CON, .irq_en_shift = 31, .irq_clr_reg = ASYS_IRQ_CLR, .irq_clr_shift = 0, }, { .id = MT2701_IRQ_ASYS_IRQ2, .irq_cnt_reg = ASYS_IRQ2_CON, .irq_cnt_shift = 0, .irq_cnt_maskbit = 0xffffff, .irq_fs_reg = ASYS_IRQ2_CON, .irq_fs_shift = 24, .irq_fs_maskbit = 0x1f, .irq_en_reg = ASYS_IRQ2_CON, .irq_en_shift = 31, .irq_clr_reg = ASYS_IRQ_CLR, .irq_clr_shift = 1, }, { .id = MT2701_IRQ_ASYS_IRQ3, .irq_cnt_reg = ASYS_IRQ3_CON, .irq_cnt_shift = 0, .irq_cnt_maskbit = 0xffffff, .irq_fs_reg = ASYS_IRQ3_CON, .irq_fs_shift = 24, .irq_fs_maskbit = 0x1f, .irq_en_reg = ASYS_IRQ3_CON, .irq_en_shift = 31, .irq_clr_reg = ASYS_IRQ_CLR, .irq_clr_shift = 2, } }; static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = { { { .i2s_ctrl_reg = ASYS_I2SO1_CON, .i2s_asrc_fs_shift = 0, .i2s_asrc_fs_mask = 0x1f, }, { .i2s_ctrl_reg = ASYS_I2SIN1_CON, .i2s_asrc_fs_shift = 0, .i2s_asrc_fs_mask = 0x1f, }, }, { { .i2s_ctrl_reg = ASYS_I2SO2_CON, .i2s_asrc_fs_shift = 5, .i2s_asrc_fs_mask = 0x1f, }, { .i2s_ctrl_reg = ASYS_I2SIN2_CON, 
.i2s_asrc_fs_shift = 5, .i2s_asrc_fs_mask = 0x1f, }, }, { { .i2s_ctrl_reg = ASYS_I2SO3_CON, .i2s_asrc_fs_shift = 10, .i2s_asrc_fs_mask = 0x1f, }, { .i2s_ctrl_reg = ASYS_I2SIN3_CON, .i2s_asrc_fs_shift = 10, .i2s_asrc_fs_mask = 0x1f, }, }, { { .i2s_ctrl_reg = ASYS_I2SO4_CON, .i2s_asrc_fs_shift = 15, .i2s_asrc_fs_mask = 0x1f, }, { .i2s_ctrl_reg = ASYS_I2SIN4_CON, .i2s_asrc_fs_shift = 15, .i2s_asrc_fs_mask = 0x1f, }, }, }; static irqreturn_t mt2701_asys_isr(int irq_id, void *dev) { int id; struct mtk_base_afe *afe = dev; struct mtk_base_afe_memif *memif; struct mtk_base_afe_irq *irq; u32 status; regmap_read(afe->regmap, ASYS_IRQ_STATUS, &status); regmap_write(afe->regmap, ASYS_IRQ_CLR, status); for (id = 0; id < MT2701_MEMIF_NUM; ++id) { memif = &afe->memif[id]; if (memif->irq_usage < 0) continue; irq = &afe->irqs[memif->irq_usage]; if (status & 1 << (irq->irq_data->irq_clr_shift)) snd_pcm_period_elapsed(memif->substream); } return IRQ_HANDLED; } static int mt2701_afe_runtime_suspend(struct device *dev) { struct mtk_base_afe *afe = dev_get_drvdata(dev); return mt2701_afe_disable_clock(afe); } static int mt2701_afe_runtime_resume(struct device *dev) { struct mtk_base_afe *afe = dev_get_drvdata(dev); return mt2701_afe_enable_clock(afe); } static int mt2701_afe_add_component(struct mtk_base_afe *afe) { struct snd_soc_component *component; component = kzalloc(sizeof(*component), GFP_KERNEL); if (!component) return -ENOMEM; component->regmap = afe->regmap; return snd_soc_add_component(afe->dev, component, &mt2701_afe_pcm_dai_component, mt2701_afe_pcm_dais, ARRAY_SIZE(mt2701_afe_pcm_dais)); } static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev) { struct mtk_base_afe *afe; struct mt2701_afe_private *afe_priv; struct device *dev; int i, irq_id, ret; afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL); if (!afe) return -ENOMEM; afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv), GFP_KERNEL); if (!afe->platform_priv) return -ENOMEM; afe_priv = 
afe->platform_priv; afe->dev = &pdev->dev; dev = afe->dev; irq_id = platform_get_irq_byname(pdev, "asys"); if (irq_id < 0) { dev_err(dev, "unable to get ASYS IRQ\n"); return irq_id; } ret = devm_request_irq(dev, irq_id, mt2701_asys_isr, IRQF_TRIGGER_NONE, "asys-isr", (void *)afe); if (ret) { dev_err(dev, "could not request_irq for asys-isr\n"); return ret; } afe->regmap = syscon_node_to_regmap(dev->parent->of_node); if (IS_ERR(afe->regmap)) { dev_err(dev, "could not get regmap from parent\n"); return PTR_ERR(afe->regmap); } mutex_init(&afe->irq_alloc_lock); /* memif initialize */ afe->memif_size = MT2701_MEMIF_NUM; afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif), GFP_KERNEL); if (!afe->memif) return -ENOMEM; for (i = 0; i < afe->memif_size; i++) { afe->memif[i].data = &memif_data[i]; afe->memif[i].irq_usage = -1; } /* irq initialize */ afe->irqs_size = MT2701_IRQ_ASYS_END; afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs), GFP_KERNEL); if (!afe->irqs) return -ENOMEM; for (i = 0; i < afe->irqs_size; i++) afe->irqs[i].irq_data = &irq_data[i]; /* I2S initialize */ for (i = 0; i < MT2701_I2S_NUM; i++) { afe_priv->i2s_path[i].i2s_data[I2S_OUT] = &mt2701_i2s_data[i][I2S_OUT]; afe_priv->i2s_path[i].i2s_data[I2S_IN] = &mt2701_i2s_data[i][I2S_IN]; } afe->mtk_afe_hardware = &mt2701_afe_hardware; afe->memif_fs = mt2701_memif_fs; afe->irq_fs = mt2701_irq_fs; afe->reg_back_up_list = mt2701_afe_backup_list; afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list); afe->runtime_resume = mt2701_afe_runtime_resume; afe->runtime_suspend = mt2701_afe_runtime_suspend; /* initial audio related clock */ ret = mt2701_init_clock(afe); if (ret) { dev_err(dev, "init clock error\n"); return ret; } platform_set_drvdata(pdev, afe); pm_runtime_enable(dev); if (!pm_runtime_enabled(dev)) { ret = mt2701_afe_runtime_resume(dev); if (ret) goto err_pm_disable; } pm_runtime_get_sync(dev); ret = snd_soc_register_platform(dev, &mtk_afe_pcm_platform); if (ret) { 
dev_warn(dev, "err_platform\n"); goto err_platform; } ret = mt2701_afe_add_component(afe); if (ret) { dev_warn(dev, "err_dai_component\n"); goto err_dai_component; } return 0; err_dai_component: snd_soc_unregister_platform(dev); err_platform: pm_runtime_put_sync(dev); err_pm_disable: pm_runtime_disable(dev); return ret; } static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev) { pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mt2701_afe_runtime_suspend(&pdev->dev); snd_soc_unregister_component(&pdev->dev); snd_soc_unregister_platform(&pdev->dev); return 0; } static const struct of_device_id mt2701_afe_pcm_dt_match[] = { { .compatible = "mediatek,mt2701-audio", }, {}, }; MODULE_DEVICE_TABLE(of, mt2701_afe_pcm_dt_match); static const struct dev_pm_ops mt2701_afe_pm_ops = { SET_RUNTIME_PM_OPS(mt2701_afe_runtime_suspend, mt2701_afe_runtime_resume, NULL) }; static struct platform_driver mt2701_afe_pcm_driver = { .driver = { .name = "mt2701-audio", .of_match_table = mt2701_afe_pcm_dt_match, #ifdef CONFIG_PM .pm = &mt2701_afe_pm_ops, #endif }, .probe = mt2701_afe_pcm_dev_probe, .remove = mt2701_afe_pcm_dev_remove, }; module_platform_driver(mt2701_afe_pcm_driver); MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 2701"); MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
alexbevi/scummvm
engines/titanic/moves/exit_bridge.cpp
19
1765
/* ScummVM - Graphic Adventure Engine
 *
 * ScummVM is the legal property of its developers, whose names
 * are too numerous to list here. Please refer to the COPYRIGHT
 * file distributed with this source distribution.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */

// CExitBridge: hotspot object that moves the player off the Titanic's bridge.
// It specializes CMovePlayerTo by also switching to a fixed follow-up view
// (_viewName) and playing a transition sound on click.

#include "titanic/moves/exit_bridge.h"
#include "titanic/translation.h"

namespace Titanic {

// Route only mouse-down messages to this class; everything else falls
// through to CMovePlayerTo's handlers.
BEGIN_MESSAGE_MAP(CExitBridge, CMovePlayerTo)
	ON_MESSAGE(MouseButtonDownMsg)
END_MESSAGE_MAP()

// Default target view after leaving the bridge is Titania's node.
CExitBridge::CExitBridge() : CMovePlayerTo(), _viewName("Titania.Node 1.S") {
}

// Serialize this object: a version marker (1), the follow-up view name,
// then the base-class state. Field order must mirror load() exactly.
void CExitBridge::save(SimpleFile *file, int indent) {
	file->writeNumberLine(1, indent);
	file->writeQuotedLine(_viewName, indent);
	CMovePlayerTo::save(file, indent);
}

// Deserialize in the same order as save(): version number (discarded),
// follow-up view name, then base-class state.
void CExitBridge::load(SimpleFile *file) {
	file->readNumber();
	_viewName = file->readString();
	CMovePlayerTo::load(file);
}

// Click handler: jump to the movement destination, play the exit sound
// (German build uses a different clip via TRANSLATE), then switch to the
// stored follow-up view. Always reports the click as consumed.
// NOTE(review): changeView() is called twice (first _destination, then
// _viewName) — presumably an intermediate-then-final transition; confirm
// against the base class before "simplifying".
bool CExitBridge::MouseButtonDownMsg(CMouseButtonDownMsg *msg) {
	if (getGameManager()) {
		changeView(_destination);
		playSound(TRANSLATE("a#53.wav", "a#46.wav"));
		changeView(_viewName);
	}
	return true;
}

} // End of namespace Titanic
gpl-2.0
alephzain/archos-gpl-gen8-kernel
arch/x86/kernel/bootflag.c
787
1698
/* * Implement 'Simple Boot Flag Specification 2.0' */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/acpi.h> #include <asm/io.h> #include <linux/mc146818rtc.h> #define SBF_RESERVED (0x78) #define SBF_PNPOS (1<<0) #define SBF_BOOTING (1<<1) #define SBF_DIAG (1<<2) #define SBF_PARITY (1<<7) int sbf_port __initdata = -1; /* set via acpi_boot_init() */ static int __init parity(u8 v) { int x = 0; int i; for (i = 0; i < 8; i++) { x ^= (v & 1); v >>= 1; } return x; } static void __init sbf_write(u8 v) { unsigned long flags; if (sbf_port != -1) { v &= ~SBF_PARITY; if (!parity(v)) v |= SBF_PARITY; printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n", sbf_port, v); spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(v, sbf_port); spin_unlock_irqrestore(&rtc_lock, flags); } } static u8 __init sbf_read(void) { unsigned long flags; u8 v; if (sbf_port == -1) return 0; spin_lock_irqsave(&rtc_lock, flags); v = CMOS_READ(sbf_port); spin_unlock_irqrestore(&rtc_lock, flags); return v; } static int __init sbf_value_valid(u8 v) { if (v & SBF_RESERVED) /* Reserved bits */ return 0; if (!parity(v)) return 0; return 1; } static int __init sbf_init(void) { u8 v; if (sbf_port == -1) return 0; v = sbf_read(); if (!sbf_value_valid(v)) { printk(KERN_WARNING "Simple Boot Flag value 0x%x read from " "CMOS RAM was invalid\n", v); } v &= ~SBF_RESERVED; v &= ~SBF_BOOTING; v &= ~SBF_DIAG; #if defined(CONFIG_ISAPNP) v |= SBF_PNPOS; #endif sbf_write(v); return 0; } module_init(sbf_init);
gpl-2.0
GrandPrime/stock_kernel_grandprime
net/ieee802154/6lowpan.c
1555
38506
/* * Copyright 2011, Siemens AG * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ /* * Based on patches from Jon Smirl <jonsmirl@gmail.com> * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* Jon's code is based on 6lowpan implementation for Contiki which is: * Copyright (c) 2008, Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <linux/bitops.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <net/af_ieee802154.h> #include <net/ieee802154.h> #include <net/ieee802154_netdev.h> #include <net/ipv6.h> #include "6lowpan.h" /* TTL uncompression values */ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255}; static LIST_HEAD(lowpan_devices); /* * Uncompression of linklocal: * 0 -> 16 bytes from packet * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet * 2 -> 2 bytes from prefix - zeroes + 2 from packet * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr * * NOTE: => the uncompress function does change 0xf to 0x10 * NOTE: 0x00 => no-autoconfig => unspecified */ static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20}; /* * Uncompression of ctx-based: * 0 -> 0 bits from packet [unspecified / reserved] * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet * 2 -> 8 bytes from prefix - zeroes + 2 from packet * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr */ static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80}; /* * Uncompression of ctx-base * 0 -> 0 bits from packet * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet * 2 -> 2 bytes from prefix - zeroes + 3 from packet * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr */ static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21}; /* Link local prefix */ static const u8 lowpan_llprefix[] = 
{0xfe, 0x80}; /* private device info */ struct lowpan_dev_info { struct net_device *real_dev; /* real WPAN device ptr */ struct mutex dev_list_mtx; /* mutex for list ops */ unsigned short fragment_tag; }; struct lowpan_dev_record { struct net_device *ldev; struct list_head list; }; struct lowpan_fragment { struct sk_buff *skb; /* skb to be assembled */ u16 length; /* length to be assemled */ u32 bytes_rcv; /* bytes received */ u16 tag; /* current fragment tag */ struct timer_list timer; /* assembling timer */ struct list_head list; /* fragments list */ }; static LIST_HEAD(lowpan_fragments); static DEFINE_SPINLOCK(flist_lock); static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { return netdev_priv(dev); } static inline void lowpan_address_flip(u8 *src, u8 *dest) { int i; for (i = 0; i < IEEE802154_ADDR_LEN; i++) (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i]; } /* list of all 6lowpan devices, uses for package delivering */ /* print data in line */ static inline void lowpan_raw_dump_inline(const char *caller, char *msg, unsigned char *buf, int len) { #ifdef DEBUG if (msg) pr_debug("(%s) %s: ", caller, msg); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1, buf, len, false); #endif /* DEBUG */ } /* * print data in a table format: * * addr: xx xx xx xx xx xx * addr: xx xx xx xx xx xx * ... 
*/ static inline void lowpan_raw_dump_table(const char *caller, char *msg, unsigned char *buf, int len) { #ifdef DEBUG if (msg) pr_debug("(%s) %s:\n", caller, msg); print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); #endif /* DEBUG */ } static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr, const unsigned char *lladdr) { u8 val = 0; if (is_addr_mac_addr_based(ipaddr, lladdr)) val = 3; /* 0-bits */ else if (lowpan_is_iid_16_bit_compressable(ipaddr)) { /* compress IID to 16 bits xxxx::XXXX */ memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2); *hc06_ptr += 2; val = 2; /* 16-bits */ } else { /* do not compress IID => xxxx::IID */ memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8); *hc06_ptr += 8; val = 1; /* 64-bits */ } return rol8(val, shift); } static void lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) { memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN); /* second bit-flip (Universe/Local) is done according RFC2464 */ ipaddr->s6_addr[8] ^= 0x02; } /* * Uncompress addresses based on a prefix and a postfix with zeroes in * between. If the postfix is zero in length it will use the link address * to configure the IP address (autoconf style). * pref_post_count takes a byte where the first nibble specify prefix count * and the second postfix count (NOTE: 15/0xf => 16 bytes copy). */ static int lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, u8 const *prefix, u8 pref_post_count, unsigned char *lladdr) { u8 prefcount = pref_post_count >> 4; u8 postcount = pref_post_count & 0x0f; /* full nibble 15 => 16 */ prefcount = (prefcount == 15 ? 16 : prefcount); postcount = (postcount == 15 ? 
16 : postcount); if (lladdr) lowpan_raw_dump_inline(__func__, "linklocal address", lladdr, IEEE802154_ADDR_LEN); if (prefcount > 0) memcpy(ipaddr, prefix, prefcount); if (prefcount + postcount < 16) memset(&ipaddr->s6_addr[prefcount], 0, 16 - (prefcount + postcount)); if (postcount > 0) { memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount); skb_pull(skb, postcount); } else if (prefcount > 0) { if (lladdr == NULL) return -EINVAL; /* no IID based configuration if no prefix and no data */ lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr); } pr_debug("uncompressing %d + %d => ", prefcount, postcount); lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16); return 0; } static void lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) { struct udphdr *uh = udp_hdr(skb); if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT) && ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT)) { pr_debug("UDP header: both ports compression to 4 bits\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; **(hc06_ptr + 1) = /* subtraction is faster */ (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4)); *hc06_ptr += 2; } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT) { pr_debug("UDP header: remove 8 bits of dest\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; memcpy(*hc06_ptr + 1, &uh->source, 2); **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); *hc06_ptr += 4; } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT) { pr_debug("UDP header: remove 8 bits of source\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; memcpy(*hc06_ptr + 1, &uh->dest, 2); **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); *hc06_ptr += 4; } else { pr_debug("UDP header: can't compress\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00; memcpy(*hc06_ptr + 1, &uh->source, 2); memcpy(*hc06_ptr + 3, &uh->dest, 2); *hc06_ptr += 5; } /* checksum is always inline */ memcpy(*hc06_ptr, 
&uh->check, 2); *hc06_ptr += 2; /* skip the UDP header */ skb_pull(skb, sizeof(struct udphdr)); } static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val) { if (unlikely(!pskb_may_pull(skb, 1))) return -EINVAL; *val = skb->data[0]; skb_pull(skb, 1); return 0; } static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val) { if (unlikely(!pskb_may_pull(skb, 2))) return -EINVAL; *val = (skb->data[0] << 8) | skb->data[1]; skb_pull(skb, 2); return 0; } static int lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh) { u8 tmp; if (!uh) goto err; if (lowpan_fetch_skb_u8(skb, &tmp)) goto err; if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { pr_debug("UDP header uncompression\n"); switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { case LOWPAN_NHC_UDP_CS_P_00: memcpy(&uh->source, &skb->data[0], 2); memcpy(&uh->dest, &skb->data[2], 2); skb_pull(skb, 4); break; case LOWPAN_NHC_UDP_CS_P_01: memcpy(&uh->source, &skb->data[0], 2); uh->dest = skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT; skb_pull(skb, 3); break; case LOWPAN_NHC_UDP_CS_P_10: uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT; memcpy(&uh->dest, &skb->data[1], 2); skb_pull(skb, 3); break; case LOWPAN_NHC_UDP_CS_P_11: uh->source = LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4); uh->dest = LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f); skb_pull(skb, 1); break; default: pr_debug("ERROR: unknown UDP format\n"); goto err; break; } pr_debug("uncompressed UDP ports: src = %d, dst = %d\n", uh->source, uh->dest); /* copy checksum */ memcpy(&uh->check, &skb->data[0], 2); skb_pull(skb, 2); /* * UDP lenght needs to be infered from the lower layers * here, we obtain the hint from the remaining size of the * frame */ uh->len = htons(skb->len + sizeof(struct udphdr)); pr_debug("uncompressed UDP length: src = %d", uh->len); } else { pr_debug("ERROR: unsupported NH format\n"); goto err; } return 0; err: return -EINVAL; } static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, 
unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) { u8 tmp, iphc0, iphc1, *hc06_ptr; struct ipv6hdr *hdr; const u8 *saddr = _saddr; const u8 *daddr = _daddr; u8 head[100]; struct ieee802154_addr sa, da; /* TODO: * if this package isn't ipv6 one, where should it be routed? */ if (type != ETH_P_IPV6) return 0; hdr = ipv6_hdr(skb); hc06_ptr = head + 2; pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version, ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit); lowpan_raw_dump_table(__func__, "raw skb network header dump", skb_network_header(skb), sizeof(struct ipv6hdr)); if (!saddr) saddr = dev->dev_addr; lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8); /* * As we copy some bit-length fields, in the IPHC encoding bytes, * we sometimes use |= * If the field is 0, and the current bit value in memory is 1, * this does not work. We therefore reset the IPHC encoding here */ iphc0 = LOWPAN_DISPATCH_IPHC; iphc1 = 0; /* TODO: context lookup */ lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8); /* * Traffic class, flow label * If flow label is 0, compress it. 
If traffic class is 0, compress it * We have to process both in the same time as the offset of traffic * class depends on the presence of version and flow label */ /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */ tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4); tmp = ((tmp & 0x03) << 6) | (tmp >> 2); if (((hdr->flow_lbl[0] & 0x0F) == 0) && (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) { /* flow label can be compressed */ iphc0 |= LOWPAN_IPHC_FL_C; if ((hdr->priority == 0) && ((hdr->flow_lbl[0] & 0xF0) == 0)) { /* compress (elide) all */ iphc0 |= LOWPAN_IPHC_TC_C; } else { /* compress only the flow label */ *hc06_ptr = tmp; hc06_ptr += 1; } } else { /* Flow label cannot be compressed */ if ((hdr->priority == 0) && ((hdr->flow_lbl[0] & 0xF0) == 0)) { /* compress only traffic class */ iphc0 |= LOWPAN_IPHC_TC_C; *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F); memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2); hc06_ptr += 3; } else { /* compress nothing */ memcpy(hc06_ptr, &hdr, 4); /* replace the top byte with new ECN | DSCP format */ *hc06_ptr = tmp; hc06_ptr += 4; } } /* NOTE: payload length is always compressed */ /* Next Header is compress if UDP */ if (hdr->nexthdr == UIP_PROTO_UDP) iphc0 |= LOWPAN_IPHC_NH_C; if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { *hc06_ptr = hdr->nexthdr; hc06_ptr += 1; } /* * Hop limit * if 1: compress, encoding is 01 * if 64: compress, encoding is 10 * if 255: compress, encoding is 11 * else do not compress */ switch (hdr->hop_limit) { case 1: iphc0 |= LOWPAN_IPHC_TTL_1; break; case 64: iphc0 |= LOWPAN_IPHC_TTL_64; break; case 255: iphc0 |= LOWPAN_IPHC_TTL_255; break; default: *hc06_ptr = hdr->hop_limit; hc06_ptr += 1; break; } /* source address compression */ if (is_addr_unspecified(&hdr->saddr)) { pr_debug("source address is unspecified, setting SAC\n"); iphc1 |= LOWPAN_IPHC_SAC; /* TODO: context lookup */ } else if (is_addr_link_local(&hdr->saddr)) { pr_debug("source address is link-local\n"); iphc1 |= 
lowpan_compress_addr_64(&hc06_ptr, LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr); } else { pr_debug("send the full source address\n"); memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16); hc06_ptr += 16; } /* destination address compression */ if (is_addr_mcast(&hdr->daddr)) { pr_debug("destination address is multicast: "); iphc1 |= LOWPAN_IPHC_M; if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { pr_debug("compressed to 1 octet\n"); iphc1 |= LOWPAN_IPHC_DAM_11; /* use last byte */ *hc06_ptr = hdr->daddr.s6_addr[15]; hc06_ptr += 1; } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) { pr_debug("compressed to 4 octets\n"); iphc1 |= LOWPAN_IPHC_DAM_10; /* second byte + the last three */ *hc06_ptr = hdr->daddr.s6_addr[1]; memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3); hc06_ptr += 4; } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) { pr_debug("compressed to 6 octets\n"); iphc1 |= LOWPAN_IPHC_DAM_01; /* second byte + the last five */ *hc06_ptr = hdr->daddr.s6_addr[1]; memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5); hc06_ptr += 6; } else { pr_debug("using full address\n"); iphc1 |= LOWPAN_IPHC_DAM_00; memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16); hc06_ptr += 16; } } else { /* TODO: context lookup */ if (is_addr_link_local(&hdr->daddr)) { pr_debug("dest address is unicast and link-local\n"); iphc1 |= lowpan_compress_addr_64(&hc06_ptr, LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr); } else { pr_debug("dest address is unicast: using full one\n"); memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16); hc06_ptr += 16; } } /* UDP header compression */ if (hdr->nexthdr == UIP_PROTO_UDP) lowpan_compress_udp_header(&hc06_ptr, skb); head[0] = iphc0; head[1] = iphc1; skb_pull(skb, sizeof(struct ipv6hdr)); memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head); lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* * NOTE1: I'm still unsure about the fact that compression and WPAN * header are created here and not later in the xmit. 
So wait for * an opinion of net maintainers. */ /* * NOTE2: to be absolutely correct, we must derive PANid information * from MAC subif of the 'dev' and 'real_dev' network devices, but * this isn't implemented in mainline yet, so currently we assign 0xff */ { mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); /* prepare wpan address data */ sa.addr_type = IEEE802154_ADDR_LONG; sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); memcpy(&(sa.hwaddr), saddr, 8); /* intra-PAN communications */ da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); /* * if the destination address is the broadcast address, use the * corresponding short address */ if (lowpan_is_addr_broadcast(daddr)) { da.addr_type = IEEE802154_ADDR_SHORT; da.short_addr = IEEE802154_ADDR_BROADCAST; } else { da.addr_type = IEEE802154_ADDR_LONG; memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN); /* request acknowledgment */ mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; } return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, type, (void *)&da, (void *)&sa, skb->len); } } static int lowpan_give_skb_to_devices(struct sk_buff *skb) { struct lowpan_dev_record *entry; struct sk_buff *skb_cp; int stat = NET_RX_SUCCESS; rcu_read_lock(); list_for_each_entry_rcu(entry, &lowpan_devices, list) if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { skb_cp = skb_copy(skb, GFP_ATOMIC); if (!skb_cp) { stat = -ENOMEM; break; } skb_cp->dev = entry->ldev; stat = netif_rx(skb_cp); } rcu_read_unlock(); return stat; } static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr) { struct sk_buff *new; int stat = NET_RX_SUCCESS; new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb), GFP_ATOMIC); kfree_skb(skb); if (!new) return -ENOMEM; skb_push(new, sizeof(struct ipv6hdr)); skb_reset_network_header(new); skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr)); new->protocol = htons(ETH_P_IPV6); new->pkt_type = PACKET_HOST; stat = 
lowpan_give_skb_to_devices(new); kfree_skb(new); return stat; } static void lowpan_fragment_timer_expired(unsigned long entry_addr) { struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; pr_debug("timer expired for frame with tag %d\n", entry->tag); list_del(&entry->list); dev_kfree_skb(entry->skb); kfree(entry); } static struct lowpan_fragment * lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag) { struct lowpan_fragment *frame; frame = kzalloc(sizeof(struct lowpan_fragment), GFP_ATOMIC); if (!frame) goto frame_err; INIT_LIST_HEAD(&frame->list); frame->length = len; frame->tag = tag; /* allocate buffer for frame assembling */ frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length + sizeof(struct ipv6hdr)); if (!frame->skb) goto skb_err; frame->skb->priority = skb->priority; frame->skb->dev = skb->dev; /* reserve headroom for uncompressed ipv6 header */ skb_reserve(frame->skb, sizeof(struct ipv6hdr)); skb_put(frame->skb, frame->length); init_timer(&frame->timer); /* time out is the same as for ipv6 - 60 sec */ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; frame->timer.data = (unsigned long)frame; frame->timer.function = lowpan_fragment_timer_expired; add_timer(&frame->timer); list_add_tail(&frame->list, &lowpan_fragments); return frame; skb_err: kfree(frame); frame_err: return NULL; } static int lowpan_process_data(struct sk_buff *skb) { struct ipv6hdr hdr; u8 tmp, iphc0, iphc1, num_context = 0; u8 *_saddr, *_daddr; int err; lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* at least two bytes will be used for the encoding */ if (skb->len < 2) goto drop; if (lowpan_fetch_skb_u8(skb, &iphc0)) goto drop; /* fragments assembling */ switch (iphc0 & LOWPAN_DISPATCH_MASK) { case LOWPAN_DISPATCH_FRAG1: case LOWPAN_DISPATCH_FRAGN: { struct lowpan_fragment *frame; /* slen stores the rightmost 8 bits of the 11 bits length */ u8 slen, offset = 0; u16 len, tag; bool found = false; if 
(lowpan_fetch_skb_u8(skb, &slen) || /* frame length */ lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */ goto drop; /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */ len = ((iphc0 & 7) << 8) | slen; if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) { pr_debug("%s received a FRAG1 packet (tag: %d, " "size of the entire IP packet: %d)", __func__, tag, len); } else { /* FRAGN */ if (lowpan_fetch_skb_u8(skb, &offset)) goto unlock_and_drop; pr_debug("%s received a FRAGN packet (tag: %d, " "size of the entire IP packet: %d, " "offset: %d)", __func__, tag, len, offset * 8); } /* * check if frame assembling with the same tag is * already in progress */ spin_lock_bh(&flist_lock); list_for_each_entry(frame, &lowpan_fragments, list) if (frame->tag == tag) { found = true; break; } /* alloc new frame structure */ if (!found) { pr_debug("%s first fragment received for tag %d, " "begin packet reassembly", __func__, tag); frame = lowpan_alloc_new_frame(skb, len, tag); if (!frame) goto unlock_and_drop; } /* if payload fits buffer, copy it */ if (likely((offset * 8 + skb->len) <= frame->length)) skb_copy_to_linear_data_offset(frame->skb, offset * 8, skb->data, skb->len); else goto unlock_and_drop; frame->bytes_rcv += skb->len; /* frame assembling complete */ if ((frame->bytes_rcv == frame->length) && frame->timer.expires > jiffies) { /* if timer haven't expired - first of all delete it */ del_timer_sync(&frame->timer); list_del(&frame->list); spin_unlock_bh(&flist_lock); pr_debug("%s successfully reassembled fragment " "(tag %d)", __func__, tag); dev_kfree_skb(skb); skb = frame->skb; kfree(frame); if (lowpan_fetch_skb_u8(skb, &iphc0)) goto drop; break; } spin_unlock_bh(&flist_lock); return kfree_skb(skb), 0; } default: break; } if (lowpan_fetch_skb_u8(skb, &iphc1)) goto drop; _saddr = mac_cb(skb)->sa.hwaddr; _daddr = mac_cb(skb)->da.hwaddr; pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1); /* another if the CID flag is set */ if (iphc1 & 
LOWPAN_IPHC_CID) { pr_debug("CID flag is set, increase header with one\n"); if (lowpan_fetch_skb_u8(skb, &num_context)) goto drop; } hdr.version = 6; /* Traffic Class and Flow Label */ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) { /* * Traffic Class and FLow Label carried in-line * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */ case 0: /* 00b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; memcpy(&hdr.flow_lbl, &skb->data[0], 3); skb_pull(skb, 3); hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) | (hdr.flow_lbl[0] & 0x0f); break; /* * Traffic class carried in-line * ECN + DSCP (1 byte), Flow Label is elided */ case 2: /* 10b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); hdr.flow_lbl[1] = 0; hdr.flow_lbl[2] = 0; break; /* * Flow Label carried in-line * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided */ case 1: /* 01b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); skb_pull(skb, 2); break; /* Traffic Class and Flow Label are elided */ case 3: /* 11b */ hdr.priority = 0; hdr.flow_lbl[0] = 0; hdr.flow_lbl[1] = 0; hdr.flow_lbl[2] = 0; break; default: break; } /* Next Header */ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { /* Next header is carried inline */ if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr))) goto drop; pr_debug("NH flag is set, next header carried inline: %02x\n", hdr.nexthdr); } /* Hop Limit */ if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; else { if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit))) goto drop; } /* Extract SAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03; /* Source address uncompression */ pr_debug("source address stateless compression\n"); err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix, 
lowpan_unc_llconf[tmp], skb->data); if (err) goto drop; /* Extract DAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03; /* check for Multicast Compression */ if (iphc1 & LOWPAN_IPHC_M) { if (iphc1 & LOWPAN_IPHC_DAC) { pr_debug("dest: context-based mcast compression\n"); /* TODO: implement this */ } else { u8 prefix[] = {0xff, 0x02}; pr_debug("dest: non context-based mcast compression\n"); if (0 < tmp && tmp < 3) { if (lowpan_fetch_skb_u8(skb, &prefix[1])) goto drop; } err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix, lowpan_unc_mxconf[tmp], NULL); if (err) goto drop; } } else { pr_debug("dest: stateless compression\n"); err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix, lowpan_unc_llconf[tmp], skb->data); if (err) goto drop; } /* UDP data uncompression */ if (iphc0 & LOWPAN_IPHC_NH_C) { struct udphdr uh; struct sk_buff *new; if (lowpan_uncompress_udp_header(skb, &uh)) goto drop; /* * replace the compressed UDP head by the uncompressed UDP * header */ new = skb_copy_expand(skb, sizeof(struct udphdr), skb_tailroom(skb), GFP_ATOMIC); kfree_skb(skb); if (!new) return -ENOMEM; skb = new; skb_push(skb, sizeof(struct udphdr)); skb_reset_transport_header(skb); skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr)); lowpan_raw_dump_table(__func__, "raw UDP header dump", (u8 *)&uh, sizeof(uh)); hdr.nexthdr = UIP_PROTO_UDP; } /* Not fragmented package */ hdr.payload_len = htons(skb->len); pr_debug("skb headroom size = %d, data length = %d\n", skb_headroom(skb), skb->len); pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t" "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version, ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit); lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); return lowpan_skb_deliver(skb, &hdr); unlock_and_drop: spin_unlock_bh(&flist_lock); drop: kfree_skb(skb); return -EINVAL; } static int lowpan_set_address(struct net_device *dev, void *p) { struct sockaddr *sa 
= p; if (netif_running(dev)) return -EBUSY; /* TODO: validate addr */ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); return 0; } static int lowpan_get_mac_header_length(struct sk_buff *skb) { /* * Currently long addressing mode is supported only, so the overall * header size is 21: * FC SeqNum DPAN DA SA Sec * 2 + 1 + 2 + 8 + 8 + 0 = 21 */ return 21; } static int lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, int mlen, int plen, int offset, int type) { struct sk_buff *frag; int hlen, ret; hlen = (type == LOWPAN_DISPATCH_FRAG1) ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE; lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE); if (!frag) return -ENOMEM; frag->priority = skb->priority; frag->dev = skb->dev; /* copy header, MFR and payload */ memcpy(skb_put(frag, mlen), skb->data, mlen); memcpy(skb_put(frag, hlen), head, hlen); if (plen) skb_copy_from_linear_data_offset(skb, offset + mlen, skb_put(frag, plen), plen); lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len); ret = dev_queue_xmit(frag); return ret; } static int lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev) { int err, header_length, payload_length, tag, offset = 0; u8 head[5]; header_length = lowpan_get_mac_header_length(skb); payload_length = skb->len - header_length; tag = lowpan_dev_info(dev)->fragment_tag++; /* first fragment header */ head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7); head[1] = payload_length & 0xff; head[2] = tag >> 8; head[3] = tag & 0xff; err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE, 0, LOWPAN_DISPATCH_FRAG1); if (err) { pr_debug("%s unable to send FRAG1 packet (tag: %d)", __func__, tag); goto exit; } offset = LOWPAN_FRAG_SIZE; /* next fragment header */ head[0] &= ~LOWPAN_DISPATCH_FRAG1; head[0] |= LOWPAN_DISPATCH_FRAGN; while ((payload_length - offset > 0) && (err >= 0)) { int len = LOWPAN_FRAG_SIZE; 
head[4] = offset / 8; if (payload_length - offset < len) len = payload_length - offset; err = lowpan_fragment_xmit(skb, head, header_length, len, offset, LOWPAN_DISPATCH_FRAGN); if (err) { pr_debug("%s unable to send a subsequent FRAGN packet " "(tag: %d, offset: %d", __func__, tag, offset); goto exit; } offset += len; } exit: return err; } static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) { int err = -1; pr_debug("package xmit\n"); skb->dev = lowpan_dev_info(dev)->real_dev; if (skb->dev == NULL) { pr_debug("ERROR: no real wpan device found\n"); goto error; } /* Send directly if less than the MTU minus the 2 checksum bytes. */ if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) { err = dev_queue_xmit(skb); goto out; } pr_debug("frame is too big, fragmentation is needed\n"); err = lowpan_skb_fragmentation(skb, dev); error: dev_kfree_skb(skb); out: if (err) pr_debug("ERROR: xmit failed\n"); return (err < 0) ? NET_XMIT_DROP : err; } static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_phy(real_dev); } static u16 lowpan_get_pan_id(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); } static u16 lowpan_get_short_addr(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); } static u8 lowpan_get_dsn(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev); } static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; static struct lock_class_key lowpan_tx_busylock; static struct lock_class_key lowpan_netdev_xmit_lock_key; static void lowpan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, 
void *_unused) { lockdep_set_class(&txq->_xmit_lock, &lowpan_netdev_xmit_lock_key); } static int lowpan_dev_init(struct net_device *dev) { netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL); dev->qdisc_tx_busylock = &lowpan_tx_busylock; return 0; } static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, .ndo_set_mac_address = lowpan_set_address, }; static struct ieee802154_mlme_ops lowpan_mlme = { .get_pan_id = lowpan_get_pan_id, .get_phy = lowpan_get_phy, .get_short_addr = lowpan_get_short_addr, .get_dsn = lowpan_get_dsn, }; static void lowpan_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); dev->type = ARPHRD_IEEE802154; /* Frame Control + Sequence Number + Address fields + Security Header */ dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 1281; dev->tx_queue_len = 0; dev->flags = IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = 0; dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; dev->ml_priv = &lowpan_mlme; dev->destructor = free_netdev; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) return -EINVAL; } return 0; } static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sk_buff *local_skb; if (!netif_running(dev)) goto drop; if (dev->type != ARPHRD_IEEE802154) goto drop; /* check that it's our buffer */ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { /* Copy the packet so that the IPv6 header is * properly aligned. */ local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1, skb_tailroom(skb), GFP_ATOMIC); if (!local_skb) goto drop; local_skb->protocol = htons(ETH_P_IPV6); local_skb->pkt_type = PACKET_HOST; /* Pull off the 1-byte of 6lowpan header. 
*/ skb_pull(local_skb, 1); skb_reset_network_header(local_skb); skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); lowpan_give_skb_to_devices(local_skb); kfree_skb(local_skb); kfree_skb(skb); } else { switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ local_skb = skb_clone(skb, GFP_ATOMIC); if (!local_skb) goto drop; lowpan_process_data(local_skb); kfree_skb(skb); break; default: break; } } return NET_RX_SUCCESS; drop: kfree_skb(skb); return NET_RX_DROP; } static int lowpan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *real_dev; struct lowpan_dev_record *entry; pr_debug("adding new link\n"); if (!tb[IFLA_LINK]) return -EINVAL; /* find and hold real wpan device */ real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; lowpan_dev_info(dev)->real_dev = real_dev; lowpan_dev_info(dev)->fragment_tag = 0; mutex_init(&lowpan_dev_info(dev)->dev_list_mtx); entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL); if (!entry) { dev_put(real_dev); lowpan_dev_info(dev)->real_dev = NULL; return -ENOMEM; } entry->ldev = dev; mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); INIT_LIST_HEAD(&entry->list); list_add_tail(&entry->list, &lowpan_devices); mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); register_netdevice(dev); return 0; } static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; struct lowpan_dev_record *entry, *tmp; ASSERT_RTNL(); mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { list_del(&entry->list); kfree(entry); } } mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); 
mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx); unregister_netdevice_queue(dev, head); dev_put(real_dev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", .priv_size = sizeof(struct lowpan_dev_info), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, .validate = lowpan_validate, }; static inline int __init lowpan_netlink_init(void) { return rtnl_link_register(&lowpan_link_ops); } static inline void lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } static int lowpan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; LIST_HEAD(del_list); struct lowpan_dev_record *entry, *tmp; if (dev->type != ARPHRD_IEEE802154) goto out; if (event == NETDEV_UNREGISTER) { list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (lowpan_dev_info(entry->ldev)->real_dev == dev) lowpan_dellink(entry->ldev, &del_list); } unregister_netdevice_many(&del_list); } out: return NOTIFY_DONE; } static struct notifier_block lowpan_dev_notifier = { .notifier_call = lowpan_device_event, }; static struct packet_type lowpan_packet_type = { .type = __constant_htons(ETH_P_IEEE802154), .func = lowpan_rcv, }; static int __init lowpan_init_module(void) { int err = 0; err = lowpan_netlink_init(); if (err < 0) goto out; dev_add_pack(&lowpan_packet_type); err = register_netdevice_notifier(&lowpan_dev_notifier); if (err < 0) { dev_remove_pack(&lowpan_packet_type); lowpan_netlink_fini(); } out: return err; } static void __exit lowpan_cleanup_module(void) { struct lowpan_fragment *frame, *tframe; lowpan_netlink_fini(); dev_remove_pack(&lowpan_packet_type); unregister_netdevice_notifier(&lowpan_dev_notifier); /* Now 6lowpan packet_type is removed, so no new fragments are * expected on RX, therefore that's the time to clean incomplete * fragments. 
*/ spin_lock_bh(&flist_lock); list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) { del_timer_sync(&frame->timer); list_del(&frame->list); dev_kfree_skb(frame->skb); kfree(frame); } spin_unlock_bh(&flist_lock); } module_init(lowpan_init_module); module_exit(lowpan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("lowpan");
gpl-2.0
StelixROM/kernel_htc_msm8974
arch/arm/mach-msm/clock-rpm.c
1811
8664
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/mutex.h> #include <mach/clk-provider.h> #include "rpm_resources.h" #include "clock-rpm.h" #define __clk_rpmrs_set_rate(r, value, ctx) \ ((r)->rpmrs_data->set_rate_fn((r), (value), (ctx))) #define clk_rpmrs_set_rate_sleep(r, value) \ __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id) #define clk_rpmrs_set_rate_active(r, value) \ __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id) static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value, uint32_t context) { struct msm_rpm_iv_pair iv = { .id = r->rpm_clk_id, .value = value, }; return msm_rpmrs_set(context, &iv, 1); } static int clk_rpmrs_get_rate(struct rpm_clk *r) { int rc; struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, }; rc = msm_rpm_get_status(&iv, 1); return (rc < 0) ? 
rc : iv.value * 1000; } static int clk_rpmrs_handoff(struct rpm_clk *r) { struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, }; int rc = msm_rpm_get_status(&iv, 1); if (rc < 0) return rc; if (!r->branch) r->c.rate = iv.value * 1000; return 0; } static int clk_rpmrs_is_enabled(struct rpm_clk *r) { return !!clk_rpmrs_get_rate(r); } static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value, uint32_t context) { struct msm_rpm_kvp kvp = { .key = r->rpm_key, .data = (void *)&value, .length = sizeof(value), }; return msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id, &kvp, 1); } static int clk_rpmrs_handoff_smd(struct rpm_clk *r) { if (!r->branch) r->c.rate = INT_MAX; return 0; } static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r) { return !!r->c.prepare_count; } struct clk_rpmrs_data { int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context); int (*get_rate_fn)(struct rpm_clk *r); int (*handoff_fn)(struct rpm_clk *r); int (*is_enabled)(struct rpm_clk *r); int ctx_active_id; int ctx_sleep_id; }; struct clk_rpmrs_data clk_rpmrs_data = { .set_rate_fn = clk_rpmrs_set_rate, .get_rate_fn = clk_rpmrs_get_rate, .handoff_fn = clk_rpmrs_handoff, .is_enabled = clk_rpmrs_is_enabled, .ctx_active_id = MSM_RPM_CTX_SET_0, .ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP, }; struct clk_rpmrs_data clk_rpmrs_data_smd = { .set_rate_fn = clk_rpmrs_set_rate_smd, .handoff_fn = clk_rpmrs_handoff_smd, .is_enabled = clk_rpmrs_is_enabled_smd, .ctx_active_id = MSM_RPM_CTX_ACTIVE_SET, .ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET, }; static DEFINE_MUTEX(rpm_clock_lock); static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate, unsigned long *active_khz, unsigned long *sleep_khz) { /* Convert the rate (hz) to khz */ *active_khz = DIV_ROUND_UP(rate, 1000); /* * Active-only clocks don't care what the rate is during sleep. So, * they vote for zero. 
*/ if (r->active_only) *sleep_khz = 0; else *sleep_khz = *active_khz; } static int rpm_clk_prepare(struct clk *clk) { struct rpm_clk *r = to_rpm_clk(clk); uint32_t value; int rc = 0; unsigned long this_khz, this_sleep_khz; unsigned long peer_khz = 0, peer_sleep_khz = 0; struct rpm_clk *peer = r->peer; mutex_lock(&rpm_clock_lock); to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz); /* Don't send requests to the RPM if the rate has not been set. */ if (this_khz == 0) goto out; /* Take peer clock's rate into account only if it's enabled. */ if (peer->enabled) to_active_sleep_khz(peer, peer->c.rate, &peer_khz, &peer_sleep_khz); value = max(this_khz, peer_khz); if (r->branch) value = !!value; rc = clk_rpmrs_set_rate_active(r, value); if (rc) goto out; value = max(this_sleep_khz, peer_sleep_khz); if (r->branch) value = !!value; rc = clk_rpmrs_set_rate_sleep(r, value); if (rc) { /* Undo the active set vote and restore it to peer_khz */ value = peer_khz; rc = clk_rpmrs_set_rate_active(r, value); } out: if (!rc) r->enabled = true; mutex_unlock(&rpm_clock_lock); return rc; } static void rpm_clk_unprepare(struct clk *clk) { struct rpm_clk *r = to_rpm_clk(clk); mutex_lock(&rpm_clock_lock); if (r->c.rate) { uint32_t value; struct rpm_clk *peer = r->peer; unsigned long peer_khz = 0, peer_sleep_khz = 0; int rc; /* Take peer clock's rate into account only if it's enabled. */ if (peer->enabled) to_active_sleep_khz(peer, peer->c.rate, &peer_khz, &peer_sleep_khz); value = r->branch ? !!peer_khz : peer_khz; rc = clk_rpmrs_set_rate_active(r, value); if (rc) goto out; value = r->branch ? 
!!peer_sleep_khz : peer_sleep_khz; rc = clk_rpmrs_set_rate_sleep(r, value); } r->enabled = false; out: mutex_unlock(&rpm_clock_lock); return; } static int rpm_clk_set_rate(struct clk *clk, unsigned long rate) { struct rpm_clk *r = to_rpm_clk(clk); unsigned long this_khz, this_sleep_khz; int rc = 0; mutex_lock(&rpm_clock_lock); if (r->enabled) { uint32_t value; struct rpm_clk *peer = r->peer; unsigned long peer_khz = 0, peer_sleep_khz = 0; to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz); /* Take peer clock's rate into account only if it's enabled. */ if (peer->enabled) to_active_sleep_khz(peer, peer->c.rate, &peer_khz, &peer_sleep_khz); value = max(this_khz, peer_khz); rc = clk_rpmrs_set_rate_active(r, value); if (rc) goto out; value = max(this_sleep_khz, peer_sleep_khz); rc = clk_rpmrs_set_rate_sleep(r, value); } out: mutex_unlock(&rpm_clock_lock); return rc; } static int rpm_branch_clk_set_rate(struct clk *clk, unsigned long rate) { if (rate == clk->rate) return 0; return -EPERM; } static unsigned long rpm_clk_get_rate(struct clk *clk) { struct rpm_clk *r = to_rpm_clk(clk); if (r->rpmrs_data->get_rate_fn) return r->rpmrs_data->get_rate_fn(r); else return clk->rate; } static int rpm_clk_is_enabled(struct clk *clk) { struct rpm_clk *r = to_rpm_clk(clk); return r->rpmrs_data->is_enabled(r); } static long rpm_clk_round_rate(struct clk *clk, unsigned long rate) { /* Not supported. */ return rate; } static bool rpm_clk_is_local(struct clk *clk) { return false; } static enum handoff rpm_clk_handoff(struct clk *clk) { struct rpm_clk *r = to_rpm_clk(clk); int rc; /* * Querying an RPM clock's status will return 0 unless the clock's * rate has previously been set through the RPM. When handing off, * assume these clocks are enabled (unless the RPM call fails) so * child clocks of these RPM clocks can still be handed off. 
*/ rc = r->rpmrs_data->handoff_fn(r); if (rc < 0) return HANDOFF_DISABLED_CLK; /* * Since RPM handoff code may update the software rate of the clock by * querying the RPM, we need to make sure our request to RPM now * matches the software rate of the clock. When we send the request * to RPM, we also need to update any other state info we would * normally update. So, call the appropriate clock function instead * of directly using the RPM driver APIs. */ rc = rpm_clk_prepare(clk); if (rc < 0) return HANDOFF_DISABLED_CLK; return HANDOFF_ENABLED_CLK; } #define RPM_MISC_CLK_TYPE 0x306b6c63 #define RPM_SCALING_ENABLE_ID 0x2 void enable_rpm_scaling(void) { int rc, value = 0x1; struct msm_rpm_kvp kvp = { .key = RPM_SMD_KEY_ENABLE, .data = (void *)&value, .length = sizeof(value), }; rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET, RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1); WARN(rc < 0, "RPM clock scaling (sleep set) did not enable!\n"); rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET, RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1); WARN(rc < 0, "RPM clock scaling (active set) did not enable!\n"); } struct clk_ops clk_ops_rpm = { .prepare = rpm_clk_prepare, .unprepare = rpm_clk_unprepare, .set_rate = rpm_clk_set_rate, .get_rate = rpm_clk_get_rate, .is_enabled = rpm_clk_is_enabled, .round_rate = rpm_clk_round_rate, .is_local = rpm_clk_is_local, .handoff = rpm_clk_handoff, }; struct clk_ops clk_ops_rpm_branch = { .prepare = rpm_clk_prepare, .unprepare = rpm_clk_unprepare, .set_rate = rpm_branch_clk_set_rate, .is_local = rpm_clk_is_local, .handoff = rpm_clk_handoff, };
gpl-2.0
Cardinal97/android_kernel_msm8939
fs/exofs/ore_raid.c
2579
19507
/* * Copyright (C) 2011 * Boaz Harrosh <bharrosh@panasas.com> * * This file is part of the objects raid engine (ore). * * It is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * You should have received a copy of the GNU General Public License * along with "ore". If not, write to the Free Software Foundation, Inc: * "Free Software Foundation <info@fsf.org>" */ #include <linux/gfp.h> #include <linux/async_tx.h> #include "ore_raid.h" #undef ORE_DBGMSG2 #define ORE_DBGMSG2 ORE_DBGMSG struct page *_raid_page_alloc(void) { return alloc_page(GFP_KERNEL); } void _raid_page_free(struct page *p) { __free_page(p); } /* This struct is forward declare in ore_io_state, but is private to here. * It is put on ios->sp2d for RAID5/6 writes only. See _gen_xor_unit. * * __stripe_pages_2d is a 2d array of pages, and it is also a corner turn. * Ascending page index access is sp2d(p-minor, c-major). But storage is * sp2d[p-minor][c-major], so it can be properlly presented to the async-xor * API. */ struct __stripe_pages_2d { /* Cache some hot path repeated calculations */ unsigned parity; unsigned data_devs; unsigned pages_in_unit; bool needed ; /* Array size is pages_in_unit (layout->stripe_unit / PAGE_SIZE) */ struct __1_page_stripe { bool alloc; unsigned write_count; struct async_submit_ctl submit; struct dma_async_tx_descriptor *tx; /* The size of this array is data_devs + parity */ struct page **pages; struct page **scribble; /* bool array, size of this array is data_devs */ char *page_is_read; } _1p_stripes[]; }; /* This can get bigger then a page. So support multiple page allocations * _sp2d_free should be called even if _sp2d_alloc fails (by returning * none-zero). 
*/ static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width, unsigned parity, struct __stripe_pages_2d **psp2d) { struct __stripe_pages_2d *sp2d; unsigned data_devs = group_width - parity; struct _alloc_all_bytes { struct __alloc_stripe_pages_2d { struct __stripe_pages_2d sp2d; struct __1_page_stripe _1p_stripes[pages_in_unit]; } __asp2d; struct __alloc_1p_arrays { struct page *pages[group_width]; struct page *scribble[group_width]; char page_is_read[data_devs]; } __a1pa[pages_in_unit]; } *_aab; struct __alloc_1p_arrays *__a1pa; struct __alloc_1p_arrays *__a1pa_end; const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]); unsigned num_a1pa, alloc_size, i; /* FIXME: check these numbers in ore_verify_layout */ BUG_ON(sizeof(_aab->__asp2d) > PAGE_SIZE); BUG_ON(sizeof__a1pa > PAGE_SIZE); if (sizeof(*_aab) > PAGE_SIZE) { num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa; alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa; } else { num_a1pa = pages_in_unit; alloc_size = sizeof(*_aab); } _aab = kzalloc(alloc_size, GFP_KERNEL); if (unlikely(!_aab)) { ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size); return -ENOMEM; } sp2d = &_aab->__asp2d.sp2d; *psp2d = sp2d; /* From here Just call _sp2d_free */ __a1pa = _aab->__a1pa; __a1pa_end = __a1pa + num_a1pa; for (i = 0; i < pages_in_unit; ++i) { if (unlikely(__a1pa >= __a1pa_end)) { num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa, pages_in_unit - i); __a1pa = kzalloc(num_a1pa * sizeof__a1pa, GFP_KERNEL); if (unlikely(!__a1pa)) { ORE_DBGMSG("!! 
Failed to _alloc_1p_arrays=%d\n", num_a1pa); return -ENOMEM; } __a1pa_end = __a1pa + num_a1pa; /* First *pages is marked for kfree of the buffer */ sp2d->_1p_stripes[i].alloc = true; } sp2d->_1p_stripes[i].pages = __a1pa->pages; sp2d->_1p_stripes[i].scribble = __a1pa->scribble ; sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read; ++__a1pa; } sp2d->parity = parity; sp2d->data_devs = data_devs; sp2d->pages_in_unit = pages_in_unit; return 0; } static void _sp2d_reset(struct __stripe_pages_2d *sp2d, const struct _ore_r4w_op *r4w, void *priv) { unsigned data_devs = sp2d->data_devs; unsigned group_width = data_devs + sp2d->parity; int p, c; if (!sp2d->needed) return; for (c = data_devs - 1; c >= 0; --c) for (p = sp2d->pages_in_unit - 1; p >= 0; --p) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; if (_1ps->page_is_read[c]) { struct page *page = _1ps->pages[c]; r4w->put_page(priv, page); _1ps->page_is_read[c] = false; } } for (p = 0; p < sp2d->pages_in_unit; p++) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages)); _1ps->write_count = 0; _1ps->tx = NULL; } sp2d->needed = false; } static void _sp2d_free(struct __stripe_pages_2d *sp2d) { unsigned i; if (!sp2d) return; for (i = 0; i < sp2d->pages_in_unit; ++i) { if (sp2d->_1p_stripes[i].alloc) kfree(sp2d->_1p_stripes[i].pages); } kfree(sp2d); } static unsigned _sp2d_min_pg(struct __stripe_pages_2d *sp2d) { unsigned p; for (p = 0; p < sp2d->pages_in_unit; p++) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; if (_1ps->write_count) return p; } return ~0; } static unsigned _sp2d_max_pg(struct __stripe_pages_2d *sp2d) { int p; for (p = sp2d->pages_in_unit - 1; p >= 0; --p) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; if (_1ps->write_count) return p; } return ~0; } static void _gen_xor_unit(struct __stripe_pages_2d *sp2d) { unsigned p; for (p = 0; p < sp2d->pages_in_unit; p++) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; if 
(!_1ps->write_count) continue; init_async_submit(&_1ps->submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK, NULL, NULL, NULL, (addr_conv_t *)_1ps->scribble); /* TODO: raid6 */ _1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], _1ps->pages, 0, sp2d->data_devs, PAGE_SIZE, &_1ps->submit); } for (p = 0; p < sp2d->pages_in_unit; p++) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; /* NOTE: We wait for HW synchronously (I don't have such HW * to test with.) Is parallelism needed with today's multi * cores? */ async_tx_issue_pending(_1ps->tx); } } void _ore_add_stripe_page(struct __stripe_pages_2d *sp2d, struct ore_striping_info *si, struct page *page) { struct __1_page_stripe *_1ps; sp2d->needed = true; _1ps = &sp2d->_1p_stripes[si->cur_pg]; _1ps->pages[si->cur_comp] = page; ++_1ps->write_count; si->cur_pg = (si->cur_pg + 1) % sp2d->pages_in_unit; /* si->cur_comp is advanced outside at main loop */ } void _ore_add_sg_seg(struct ore_per_dev_state *per_dev, unsigned cur_len, bool not_last) { struct osd_sg_entry *sge; ORE_DBGMSG("dev=%d cur_len=0x%x not_last=%d cur_sg=%d " "offset=0x%llx length=0x%x last_sgs_total=0x%x\n", per_dev->dev, cur_len, not_last, per_dev->cur_sg, _LLU(per_dev->offset), per_dev->length, per_dev->last_sgs_total); if (!per_dev->cur_sg) { sge = per_dev->sglist; /* First time we prepare two entries */ if (per_dev->length) { ++per_dev->cur_sg; sge->offset = per_dev->offset; sge->len = per_dev->length; } else { /* Here the parity is the first unit of this object. * This happens every time we reach a parity device on * the same stripe as the per_dev->offset. We need to * just skip this unit. 
*/ per_dev->offset += cur_len; return; } } else { /* finalize the last one */ sge = &per_dev->sglist[per_dev->cur_sg - 1]; sge->len = per_dev->length - per_dev->last_sgs_total; } if (not_last) { /* Partly prepare the next one */ struct osd_sg_entry *next_sge = sge + 1; ++per_dev->cur_sg; next_sge->offset = sge->offset + sge->len + cur_len; /* Save cur len so we know how mutch was added next time */ per_dev->last_sgs_total = per_dev->length; next_sge->len = 0; } else if (!sge->len) { /* Optimize for when the last unit is a parity */ --per_dev->cur_sg; } } static int _alloc_read_4_write(struct ore_io_state *ios) { struct ore_layout *layout = ios->layout; int ret; /* We want to only read those pages not in cache so worst case * is a stripe populated with every other page */ unsigned sgs_per_dev = ios->sp2d->pages_in_unit + 2; ret = _ore_get_io_state(layout, ios->oc, layout->group_width * layout->mirrors_p1, sgs_per_dev, 0, &ios->ios_read_4_write); return ret; } /* @si contains info of the to-be-inserted page. Update of @si should be * maintained by caller. Specificaly si->dev, si->obj_offset, ... 
*/ static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si, struct page *page, unsigned pg_len) { struct request_queue *q; struct ore_per_dev_state *per_dev; struct ore_io_state *read_ios; unsigned first_dev = si->dev - (si->dev % (ios->layout->group_width * ios->layout->mirrors_p1)); unsigned comp = si->dev - first_dev; unsigned added_len; if (!ios->ios_read_4_write) { int ret = _alloc_read_4_write(ios); if (unlikely(ret)) return ret; } read_ios = ios->ios_read_4_write; read_ios->numdevs = ios->layout->group_width * ios->layout->mirrors_p1; per_dev = &read_ios->per_dev[comp]; if (!per_dev->length) { per_dev->bio = bio_kmalloc(GFP_KERNEL, ios->sp2d->pages_in_unit); if (unlikely(!per_dev->bio)) { ORE_DBGMSG("Failed to allocate BIO size=%u\n", ios->sp2d->pages_in_unit); return -ENOMEM; } per_dev->offset = si->obj_offset; per_dev->dev = si->dev; } else if (si->obj_offset != (per_dev->offset + per_dev->length)) { u64 gap = si->obj_offset - (per_dev->offset + per_dev->length); _ore_add_sg_seg(per_dev, gap, true); } q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev)); added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len, si->obj_offset % PAGE_SIZE); if (unlikely(added_len != pg_len)) { ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n", per_dev->bio->bi_vcnt); return -ENOMEM; } per_dev->length += pg_len; return 0; } /* read the beginning of an unaligned first page */ static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page) { struct ore_striping_info si; unsigned pg_len; ore_calc_stripe_info(ios->layout, ios->offset, 0, &si); pg_len = si.obj_offset % PAGE_SIZE; si.obj_offset -= pg_len; ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n", _LLU(si.obj_offset), pg_len, page->index, si.dev); return _add_to_r4w(ios, &si, page, pg_len); } /* read the end of an incomplete last page */ static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset) { struct ore_striping_info si; struct page *page; unsigned 
pg_len, p, c; ore_calc_stripe_info(ios->layout, *offset, 0, &si); p = si.unit_off / PAGE_SIZE; c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1, ios->layout->mirrors_p1, si.par_dev, si.dev); page = ios->sp2d->_1p_stripes[p].pages[c]; pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE); *offset += pg_len; ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n", p, c, _LLU(*offset), pg_len, si.dev, si.par_dev); BUG_ON(!page); return _add_to_r4w(ios, &si, page, pg_len); } static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret) { struct bio_vec *bv; unsigned i, d; /* loop on all devices all pages */ for (d = 0; d < ios->numdevs; d++) { struct bio *bio = ios->per_dev[d].bio; if (!bio) continue; bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; SetPageUptodate(page); if (PageError(page)) ClearPageError(page); } } } /* read_4_write is hacked to read the start of the first stripe and/or * the end of the last stripe. If needed, with an sg-gap at each device/page. * It is assumed to be called after the to_be_written pages of the first stripe * are populating ios->sp2d[][] * * NOTE: We call ios->r4w->lock_fn for all pages needed for parity calculations * These pages are held at sp2d[p].pages[c] but with * sp2d[p].page_is_read[c] = true. At _sp2d_reset these pages are * ios->r4w->lock_fn(). The ios->r4w->lock_fn might signal that the page is * @uptodate=true, so we don't need to read it, only unlock, after IO. * * TODO: The read_4_write should calc a need_to_read_pages_count, if bigger then * to-be-written count, we should consider the xor-in-place mode. * need_to_read_pages_count is the actual number of pages not present in cache. * maybe "devs_in_group - ios->sp2d[p].write_count" is a good enough * approximation? In this mode the read pages are put in the empty places of * ios->sp2d[p][*], xor is calculated the same way. 
These pages are * allocated/freed and don't go through cache */ static int _read_4_write_first_stripe(struct ore_io_state *ios) { struct ore_striping_info read_si; struct __stripe_pages_2d *sp2d = ios->sp2d; u64 offset = ios->si.first_stripe_start; unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1; if (offset == ios->offset) /* Go to start collect $200 */ goto read_last_stripe; min_p = _sp2d_min_pg(sp2d); max_p = _sp2d_max_pg(sp2d); ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n", offset, ios->offset, min_p, max_p); for (c = 0; ; c++) { ore_calc_stripe_info(ios->layout, offset, 0, &read_si); read_si.obj_offset += min_p * PAGE_SIZE; offset += min_p * PAGE_SIZE; for (p = min_p; p <= max_p; p++) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; struct page **pp = &_1ps->pages[c]; bool uptodate; if (*pp) { if (ios->offset % PAGE_SIZE) /* Read the remainder of the page */ _add_to_r4w_first_page(ios, *pp); /* to-be-written pages start here */ goto read_last_stripe; } *pp = ios->r4w->get_page(ios->private, offset, &uptodate); if (unlikely(!*pp)) return -ENOMEM; if (!uptodate) _add_to_r4w(ios, &read_si, *pp, PAGE_SIZE); /* Mark read-pages to be cache_released */ _1ps->page_is_read[c] = true; read_si.obj_offset += PAGE_SIZE; offset += PAGE_SIZE; } offset += (sp2d->pages_in_unit - p) * PAGE_SIZE; } read_last_stripe: return 0; } static int _read_4_write_last_stripe(struct ore_io_state *ios) { struct ore_striping_info read_si; struct __stripe_pages_2d *sp2d = ios->sp2d; u64 offset; u64 last_stripe_end; unsigned bytes_in_stripe = ios->si.bytes_in_stripe; unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1; offset = ios->offset + ios->length; if (offset % PAGE_SIZE) _add_to_r4w_last_page(ios, &offset); /* offset will be aligned to next page */ last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe) * bytes_in_stripe; if (offset == last_stripe_end) /* Optimize for the aligned case */ goto read_it; 
ore_calc_stripe_info(ios->layout, offset, 0, &read_si); p = read_si.unit_off / PAGE_SIZE; c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1, ios->layout->mirrors_p1, read_si.par_dev, read_si.dev); if (min_p == sp2d->pages_in_unit) { /* Didn't do it yet */ min_p = _sp2d_min_pg(sp2d); max_p = _sp2d_max_pg(sp2d); } ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n", offset, last_stripe_end, min_p, max_p); while (offset < last_stripe_end) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; if ((min_p <= p) && (p <= max_p)) { struct page *page; bool uptodate; BUG_ON(_1ps->pages[c]); page = ios->r4w->get_page(ios->private, offset, &uptodate); if (unlikely(!page)) return -ENOMEM; _1ps->pages[c] = page; /* Mark read-pages to be cache_released */ _1ps->page_is_read[c] = true; if (!uptodate) _add_to_r4w(ios, &read_si, page, PAGE_SIZE); } offset += PAGE_SIZE; if (p == (sp2d->pages_in_unit - 1)) { ++c; p = 0; ore_calc_stripe_info(ios->layout, offset, 0, &read_si); } else { read_si.obj_offset += PAGE_SIZE; ++p; } } read_it: return 0; } static int _read_4_write_execute(struct ore_io_state *ios) { struct ore_io_state *ios_read; unsigned i; int ret; ios_read = ios->ios_read_4_write; if (!ios_read) return 0; /* FIXME: Ugly to signal _sbi_read_mirror that we have bio(s). Change * to check for per_dev->bio */ ios_read->pages = ios->pages; /* Now read these devices */ for (i = 0; i < ios_read->numdevs; i += ios_read->layout->mirrors_p1) { ret = _ore_read_mirror(ios_read, i); if (unlikely(ret)) return ret; } ret = ore_io_execute(ios_read); /* Synchronus execution */ if (unlikely(ret)) { ORE_DBGMSG("!! ore_io_execute => %d\n", ret); return ret; } _mark_read4write_pages_uptodate(ios_read, ret); ore_put_io_state(ios_read); ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */ return 0; } /* In writes @cur_len means length left. 
.i.e cur_len==0 is the last parity U */ int _ore_add_parity_unit(struct ore_io_state *ios, struct ore_striping_info *si, struct ore_per_dev_state *per_dev, unsigned cur_len) { if (ios->reading) { if (per_dev->cur_sg >= ios->sgs_per_dev) { ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n" , per_dev->cur_sg, ios->sgs_per_dev); return -ENOMEM; } _ore_add_sg_seg(per_dev, cur_len, true); } else { struct __stripe_pages_2d *sp2d = ios->sp2d; struct page **pages = ios->parity_pages + ios->cur_par_page; unsigned num_pages; unsigned array_start = 0; unsigned i; int ret; si->cur_pg = _sp2d_min_pg(sp2d); num_pages = _sp2d_max_pg(sp2d) + 1 - si->cur_pg; if (!cur_len) /* If last stripe operate on parity comp */ si->cur_comp = sp2d->data_devs; if (!per_dev->length) { per_dev->offset += si->cur_pg * PAGE_SIZE; /* If first stripe, Read in all read4write pages * (if needed) before we calculate the first parity. */ _read_4_write_first_stripe(ios); } if (!cur_len) /* If last stripe r4w pages of last stripe */ _read_4_write_last_stripe(ios); _read_4_write_execute(ios); for (i = 0; i < num_pages; i++) { pages[i] = _raid_page_alloc(); if (unlikely(!pages[i])) return -ENOMEM; ++(ios->cur_par_page); } BUG_ON(si->cur_comp != sp2d->data_devs); BUG_ON(si->cur_pg + num_pages > sp2d->pages_in_unit); ret = _ore_add_stripe_unit(ios, &array_start, 0, pages, per_dev, num_pages * PAGE_SIZE); if (unlikely(ret)) return ret; /* TODO: raid6 if (last_parity_dev) */ _gen_xor_unit(sp2d); _sp2d_reset(sp2d, ios->r4w, ios->private); } return 0; } int _ore_post_alloc_raid_stuff(struct ore_io_state *ios) { if (ios->parity_pages) { struct ore_layout *layout = ios->layout; unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE; if (_sp2d_alloc(pages_in_unit, layout->group_width, layout->parity, &ios->sp2d)) { return -ENOMEM; } } return 0; } void _ore_free_raid_stuff(struct ore_io_state *ios) { if (ios->sp2d) { /* writing and raid */ unsigned i; for (i = 0; i < ios->cur_par_page; i++) { struct page *page = 
ios->parity_pages[i]; if (page) _raid_page_free(page); } if (ios->extra_part_alloc) kfree(ios->parity_pages); /* If IO returned an error pages might need unlocking */ _sp2d_reset(ios->sp2d, ios->r4w, ios->private); _sp2d_free(ios->sp2d); } else { /* Will only be set if raid reading && sglist is big */ if (ios->extra_part_alloc) kfree(ios->per_dev[0].sglist); } if (ios->ios_read_4_write) ore_put_io_state(ios->ios_read_4_write); }
gpl-2.0
nmenon/linux-omap-ti-pm
drivers/media/video/mem2mem_testdev.c
2579
23605
/* * A virtual v4l2-mem2mem example device. * * This is a virtual device driver for testing mem-to-mem videobuf framework. * It simulates a device that uses memory buffers for both source and * destination, processes the data and issues an "irq" (simulated by a timer). * The device is capable of multi-instance, multi-buffer-per-transaction * operation (via the mem2mem framework). * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * Pawel Osciak, <pawel@osciak.com> * Marek Szyprowski, <m.szyprowski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the * License, or (at your option) any later version */ #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/version.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-vmalloc.h> #define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev" MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); MODULE_LICENSE("GPL"); #define MIN_W 32 #define MIN_H 32 #define MAX_W 640 #define MAX_H 480 #define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */ /* Flags that indicate a format can be used for capture/output */ #define MEM2MEM_CAPTURE (1 << 0) #define MEM2MEM_OUTPUT (1 << 1) #define MEM2MEM_NAME "m2m-testdev" /* Per queue */ #define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME /* In bytes, per queue */ #define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024) /* Default transaction time in msec */ #define MEM2MEM_DEF_TRANSTIME 1000 /* Default number of buffers per transaction */ #define MEM2MEM_DEF_TRANSLEN 1 #define MEM2MEM_COLOR_STEP (0xff >> 4) #define MEM2MEM_NUM_TILES 8 #define dprintk(dev, fmt, arg...) 
\ v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) void m2mtest_dev_release(struct device *dev) {} static struct platform_device m2mtest_pdev = { .name = MEM2MEM_NAME, .dev.release = m2mtest_dev_release, }; struct m2mtest_fmt { char *name; u32 fourcc; int depth; /* Types the format can be used for */ u32 types; }; static struct m2mtest_fmt formats[] = { { .name = "RGB565 (BE)", .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ .depth = 16, /* Both capture and output format */ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, /* Output-only format */ .types = MEM2MEM_OUTPUT, }, }; /* Per-queue, driver-specific private data */ struct m2mtest_q_data { unsigned int width; unsigned int height; unsigned int sizeimage; struct m2mtest_fmt *fmt; }; enum { V4L2_M2M_SRC = 0, V4L2_M2M_DST = 1, }; /* Source and destination queue data */ static struct m2mtest_q_data q_data[2]; static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: return &q_data[V4L2_M2M_SRC]; case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &q_data[V4L2_M2M_DST]; default: BUG(); } return NULL; } #define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE #define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1) static struct v4l2_queryctrl m2mtest_ctrls[] = { { .id = V4L2_CID_TRANS_TIME_MSEC, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Transaction time (msec)", .minimum = 1, .maximum = 10000, .step = 100, .default_value = 1000, .flags = 0, }, { .id = V4L2_CID_TRANS_NUM_BUFS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Buffers per transaction", .minimum = 1, .maximum = MEM2MEM_DEF_NUM_BUFS, .step = 1, .default_value = 1, .flags = 0, }, }; #define NUM_FORMATS ARRAY_SIZE(formats) static struct m2mtest_fmt *find_format(struct v4l2_format *f) { struct m2mtest_fmt *fmt; unsigned int k; for (k = 0; k < NUM_FORMATS; k++) { fmt = &formats[k]; if (fmt->fourcc == f->fmt.pix.pixelformat) 
break; } if (k == NUM_FORMATS) return NULL; return &formats[k]; } struct m2mtest_dev { struct v4l2_device v4l2_dev; struct video_device *vfd; atomic_t num_inst; struct mutex dev_mutex; spinlock_t irqlock; struct timer_list timer; struct v4l2_m2m_dev *m2m_dev; }; struct m2mtest_ctx { struct m2mtest_dev *dev; /* Processed buffers in this transaction */ u8 num_processed; /* Transaction length (i.e. how many buffers per transaction) */ u32 translen; /* Transaction time (i.e. simulated processing time) in milliseconds */ u32 transtime; /* Abort requested by m2m */ int aborting; struct v4l2_m2m_ctx *m2m_ctx; }; static struct v4l2_queryctrl *get_ctrl(int id) { int i; for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) { if (id == m2mtest_ctrls[i].id) return &m2mtest_ctrls[i]; } return NULL; } static int device_process(struct m2mtest_ctx *ctx, struct vb2_buffer *in_vb, struct vb2_buffer *out_vb) { struct m2mtest_dev *dev = ctx->dev; struct m2mtest_q_data *q_data; u8 *p_in, *p_out; int x, y, t, w; int tile_w, bytes_left; int width, height, bytesperline; q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT); width = q_data->width; height = q_data->height; bytesperline = (q_data->width * q_data->fmt->depth) >> 3; p_in = vb2_plane_vaddr(in_vb, 0); p_out = vb2_plane_vaddr(out_vb, 0); if (!p_in || !p_out) { v4l2_err(&dev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); return -EFAULT; } if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) { v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); return -EINVAL; } tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3)) / MEM2MEM_NUM_TILES; bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES; w = 0; for (y = 0; y < height; ++y) { for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { if (w & 0x1) { for (x = 0; x < tile_w; ++x) *p_out++ = *p_in++ + MEM2MEM_COLOR_STEP; } else { for (x = 0; x < tile_w; ++x) *p_out++ = *p_in++ - MEM2MEM_COLOR_STEP; } ++w; } p_in += bytes_left; p_out += bytes_left; } return 0; } static void 
schedule_irq(struct m2mtest_dev *dev, int msec_timeout) { dprintk(dev, "Scheduling a simulated irq\n"); mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout)); } /* * mem2mem callbacks */ /** * job_ready() - check whether an instance is ready to be scheduled to run */ static int job_ready(void *priv) { struct m2mtest_ctx *ctx = priv; if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) { dprintk(ctx->dev, "Not enough buffers available\n"); return 0; } return 1; } static void job_abort(void *priv) { struct m2mtest_ctx *ctx = priv; /* Will cancel the transaction in the next interrupt handler */ ctx->aborting = 1; } static void m2mtest_lock(void *priv) { struct m2mtest_ctx *ctx = priv; struct m2mtest_dev *dev = ctx->dev; mutex_lock(&dev->dev_mutex); } static void m2mtest_unlock(void *priv) { struct m2mtest_ctx *ctx = priv; struct m2mtest_dev *dev = ctx->dev; mutex_unlock(&dev->dev_mutex); } /* device_run() - prepares and starts the device * * This simulates all the immediate preparations required before starting * a device. This will be called by the framework when it decides to schedule * a particular instance. 
*/ static void device_run(void *priv) { struct m2mtest_ctx *ctx = priv; struct m2mtest_dev *dev = ctx->dev; struct vb2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); device_process(ctx, src_buf, dst_buf); /* Run a timer, which simulates a hardware irq */ schedule_irq(dev, ctx->transtime); } static void device_isr(unsigned long priv) { struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv; struct m2mtest_ctx *curr_ctx; struct vb2_buffer *src_vb, *dst_vb; unsigned long flags; curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev); if (NULL == curr_ctx) { printk(KERN_ERR "Instance released before the end of transaction\n"); return; } src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); curr_ctx->num_processed++; spin_lock_irqsave(&m2mtest_dev->irqlock, flags); v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags); if (curr_ctx->num_processed == curr_ctx->translen || curr_ctx->aborting) { dprintk(curr_ctx->dev, "Finishing transaction\n"); curr_ctx->num_processed = 0; v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx); } else { device_run(curr_ctx); } } /* * video ioctls */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->version = KERNEL_VERSION(0, 1, 0); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; return 0; } static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) { int i, num; struct m2mtest_fmt *fmt; num = 0; for (i = 0; i < NUM_FORMATS; ++i) { if (formats[i].types & type) { /* index-th format of type type found ? 
*/ if (num == f->index) break; /* Correct type but haven't reached our index yet, * just increment per-type index */ ++num; } } if (i < NUM_FORMATS) { /* Format found */ fmt = &formats[i]; strncpy(f->description, fmt->name, sizeof(f->description) - 1); f->pixelformat = fmt->fourcc; return 0; } /* Format not found */ return -EINVAL; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_CAPTURE); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_OUTPUT); } static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f) { struct vb2_queue *vq; struct m2mtest_q_data *q_data; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(f->type); f->fmt.pix.width = q_data->width; f->fmt.pix.height = q_data->height; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.pixelformat = q_data->fmt->fourcc; f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3; f->fmt.pix.sizeimage = q_data->sizeimage; return 0; } static int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(priv, f); } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(priv, f); } static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt) { enum v4l2_field field; field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) field = V4L2_FIELD_NONE; else if (V4L2_FIELD_NONE != field) return -EINVAL; /* V4L2 specification suggests the driver corrects the format struct * if any of the dimensions is unsupported */ f->fmt.pix.field = field; if (f->fmt.pix.height < MIN_H) f->fmt.pix.height = MIN_H; else if (f->fmt.pix.height > MAX_H) f->fmt.pix.height = MAX_H; if (f->fmt.pix.width < MIN_W) f->fmt.pix.width = MIN_W; else if (f->fmt.pix.width > MAX_W) f->fmt.pix.width = MAX_W; f->fmt.pix.width &= ~DIM_ALIGN_MASK; 
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct m2mtest_fmt *fmt; struct m2mtest_ctx *ctx = priv; fmt = find_format(f); if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt); } static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct m2mtest_fmt *fmt; struct m2mtest_ctx *ctx = priv; fmt = find_format(f); if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt); } static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f) { struct m2mtest_q_data *q_data; struct vb2_queue *vq; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(f->type); if (!q_data) return -EINVAL; if (vb2_is_busy(vq)) { v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } q_data->fmt = find_format(f); q_data->width = f->fmt.pix.width; q_data->height = f->fmt.pix.height; q_data->sizeimage = q_data->width * q_data->height * q_data->fmt->depth >> 3; dprintk(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d\n", f->type, q_data->width, q_data->height, q_data->fmt->fourcc); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return vidioc_s_fmt(priv, f); } static int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_out(file, priv, f); if (ret) return ret; return vidioc_s_fmt(priv, f); } static int vidioc_reqbufs(struct file *file, void *priv, struct 
v4l2_requestbuffers *reqbufs) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { struct v4l2_queryctrl *c; c = get_ctrl(qc->id); if (!c) return -EINVAL; *qc = *c; return 0; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct m2mtest_ctx *ctx = priv; switch (ctrl->id) { case V4L2_CID_TRANS_TIME_MSEC: ctrl->value = ctx->transtime; break; case V4L2_CID_TRANS_NUM_BUFS: ctrl->value = ctx->translen; break; default: v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n"); return -EINVAL; } return 0; } static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl) { struct v4l2_queryctrl *c; c = get_ctrl(ctrl->id); if (!c) return -EINVAL; if (ctrl->value < c->minimum || ctrl->value > c->maximum) { v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n"); return -ERANGE; } return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct m2mtest_ctx *ctx = priv; int ret = 0; ret = check_ctrl_val(ctx, ctrl); if (ret != 0) return ret; switch 
(ctrl->id) { case V4L2_CID_TRANS_TIME_MSEC: ctx->transtime = ctrl->value; break; case V4L2_CID_TRANS_NUM_BUFS: ctx->translen = ctrl->value; break; default: v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n"); return -EINVAL; } return 0; } static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, }; /* * Queue operations */ static int m2mtest_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned long sizes[], void *alloc_ctxs[]) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq); struct m2mtest_q_data *q_data; unsigned int size, count = *nbuffers; q_data = get_q_data(vq->type); size = q_data->width * q_data->height * q_data->fmt->depth >> 3; while (size * count > MEM2MEM_VID_MEM_LIMIT) (count)--; *nplanes = 1; *nbuffers = count; sizes[0] = size; /* * videobuf2-vmalloc allocator is context-less so no need to set * alloc_ctxs array. 
*/ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); return 0; } static int m2mtest_buf_prepare(struct vb2_buffer *vb) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct m2mtest_q_data *q_data; dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); q_data = get_q_data(vb->vb2_queue->type); if (vb2_plane_size(vb, 0) < q_data->sizeimage) { dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage); return -EINVAL; } vb2_set_plane_payload(vb, 0, q_data->sizeimage); return 0; } static void m2mtest_buf_queue(struct vb2_buffer *vb) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); } static struct vb2_ops m2mtest_qops = { .queue_setup = m2mtest_queue_setup, .buf_prepare = m2mtest_buf_prepare, .buf_queue = m2mtest_buf_queue, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct m2mtest_ctx *ctx = priv; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &m2mtest_qops; src_vq->mem_ops = &vb2_vmalloc_memops; ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &m2mtest_qops; dst_vq->mem_ops = &vb2_vmalloc_memops; return vb2_queue_init(dst_vq); } /* * File operations */ static int m2mtest_open(struct file *file) { struct m2mtest_dev *dev = video_drvdata(file); struct m2mtest_ctx *ctx = NULL; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; file->private_data = ctx; ctx->dev = dev; ctx->translen = MEM2MEM_DEF_TRANSLEN; ctx->transtime = MEM2MEM_DEF_TRANSTIME; ctx->num_processed = 0; ctx->m2m_ctx = 
v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); if (IS_ERR(ctx->m2m_ctx)) { int ret = PTR_ERR(ctx->m2m_ctx); kfree(ctx); return ret; } atomic_inc(&dev->num_inst); dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx); return 0; } static int m2mtest_release(struct file *file) { struct m2mtest_dev *dev = video_drvdata(file); struct m2mtest_ctx *ctx = file->private_data; dprintk(dev, "Releasing instance %p\n", ctx); v4l2_m2m_ctx_release(ctx->m2m_ctx); kfree(ctx); atomic_dec(&dev->num_inst); return 0; } static unsigned int m2mtest_poll(struct file *file, struct poll_table_struct *wait) { struct m2mtest_ctx *ctx = file->private_data; return v4l2_m2m_poll(file, ctx->m2m_ctx, wait); } static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma) { struct m2mtest_ctx *ctx = file->private_data; return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); } static const struct v4l2_file_operations m2mtest_fops = { .owner = THIS_MODULE, .open = m2mtest_open, .release = m2mtest_release, .poll = m2mtest_poll, .unlocked_ioctl = video_ioctl2, .mmap = m2mtest_mmap, }; static struct video_device m2mtest_videodev = { .name = MEM2MEM_NAME, .fops = &m2mtest_fops, .ioctl_ops = &m2mtest_ioctl_ops, .minor = -1, .release = video_device_release, }; static struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, .job_abort = job_abort, .lock = m2mtest_lock, .unlock = m2mtest_unlock, }; static int m2mtest_probe(struct platform_device *pdev) { struct m2mtest_dev *dev; struct video_device *vfd; int ret; dev = kzalloc(sizeof *dev, GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->irqlock); ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto free_dev; atomic_set(&dev->num_inst, 0); mutex_init(&dev->dev_mutex); vfd = video_device_alloc(); if (!vfd) { v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto unreg_dev; } *vfd = m2mtest_videodev; vfd->lock = &dev->dev_mutex; ret = video_register_device(vfd, 
VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; } video_set_drvdata(vfd, dev); snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name); dev->vfd = vfd; v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME "Device registered as /dev/video%d\n", vfd->num); setup_timer(&dev->timer, device_isr, (long)dev); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); goto err_m2m; } q_data[V4L2_M2M_SRC].fmt = &formats[0]; q_data[V4L2_M2M_DST].fmt = &formats[0]; return 0; v4l2_m2m_release(dev->m2m_dev); err_m2m: video_unregister_device(dev->vfd); rel_vdev: video_device_release(vfd); unreg_dev: v4l2_device_unregister(&dev->v4l2_dev); free_dev: kfree(dev); return ret; } static int m2mtest_remove(struct platform_device *pdev) { struct m2mtest_dev *dev = (struct m2mtest_dev *)platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); v4l2_m2m_release(dev->m2m_dev); del_timer_sync(&dev->timer); video_unregister_device(dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); return 0; } static struct platform_driver m2mtest_pdrv = { .probe = m2mtest_probe, .remove = m2mtest_remove, .driver = { .name = MEM2MEM_NAME, .owner = THIS_MODULE, }, }; static void __exit m2mtest_exit(void) { platform_driver_unregister(&m2mtest_pdrv); platform_device_unregister(&m2mtest_pdev); } static int __init m2mtest_init(void) { int ret; ret = platform_device_register(&m2mtest_pdev); if (ret) return ret; ret = platform_driver_register(&m2mtest_pdrv); if (ret) platform_device_unregister(&m2mtest_pdev); return 0; } module_init(m2mtest_init); module_exit(m2mtest_exit);
gpl-2.0
Caio99BR/FalconSSKernel
arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
3347
27131
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/module.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include <mach/board.h> #include <mach/rpm.h> #include "msm_bus_core.h" #include "../rpm_resources.h" void msm_bus_rpm_set_mt_mask() { #ifdef CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED struct msm_rpm_iv_pair mt[1]; int mask = MSM_RPMRS_MASK_RPM_CTL_MULTI_TIER; mt[0].id = MSM_RPM_ID_RPM_CTL; mt[0].value = 2; msm_rpmrs_set_bits_noirq(MSM_RPM_CTX_SET_0, mt, 1, &mask); #endif } bool msm_bus_rpm_is_mem_interleaved(void) { int status = 0; struct msm_rpm_iv_pair il[2]; uint16_t id[2]; il[0].value = 0; il[1].value = 0; status = msm_bus_board_rpm_get_il_ids(id); if (status) { MSM_BUS_DBG("Dynamic check not supported, " "default: Interleaved memory\n"); goto inter; } il[0].id = id[0]; il[1].id = id[1]; status = msm_rpm_get_status(il, ARRAY_SIZE(il)); if (status) { MSM_BUS_ERR("Status read for interleaving returned: %d\n" "Using interleaved memory by default\n", status); goto inter; } /* * If the start address of EBI1-CH0 is the same as * the start address of EBI1-CH1, the memory is interleaved. 
* The start addresses are stored in the 16 MSBs of the status * register */ if ((il[0].value & 0xFFFF0000) != (il[1].value & 0xFFFF0000)) { MSM_BUS_DBG("Non-interleaved memory\n"); return false; } inter: MSM_BUS_DBG("Interleaved memory\n"); return true; } #ifndef CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED struct commit_data { uint16_t *bwsum; uint16_t *arb; unsigned long *actarb; }; /* * The following macros are used for various operations on commit data. * Commit data is an array of 32 bit integers. The size of arrays is unique * to the fabric. Commit arrays are allocated at run-time based on the number * of masters, slaves and tiered-slaves registered. */ #define MSM_BUS_GET_BW_INFO(val, type, bw) \ do { \ (type) = MSM_BUS_GET_BW_TYPE(val); \ (bw) = MSM_BUS_GET_BW(val); \ } while (0) #define MSM_BUS_GET_BW_INFO_BYTES (val, type, bw) \ do { \ (type) = MSM_BUS_GET_BW_TYPE(val); \ (bw) = msm_bus_get_bw_bytes(val); \ } while (0) #define ROUNDED_BW_VAL_FROM_BYTES(bw) \ ((((bw) >> 17) + 1) & 0x8000 ? 0x7FFF : (((bw) >> 17) + 1)) #define BW_VAL_FROM_BYTES(bw) \ ((((bw) >> 17) & 0x8000) ? 0x7FFF : ((bw) >> 17)) static uint32_t msm_bus_set_bw_bytes(unsigned long bw) { return ((((bw) & 0x1FFFF) && (((bw) >> 17) == 0)) ? ROUNDED_BW_VAL_FROM_BYTES(bw) : BW_VAL_FROM_BYTES(bw)); } uint64_t msm_bus_get_bw_bytes(unsigned long val) { return ((val) & 0x7FFF) << 17; } uint16_t msm_bus_get_bw(unsigned long val) { return (val)&0x7FFF; } static uint16_t msm_bus_create_bw_tier_pair_bytes(uint8_t type, unsigned long bw) { return ((((type) == MSM_BUS_BW_TIER1 ? 1 : 0) << 15) | (msm_bus_set_bw_bytes(bw))); }; uint16_t msm_bus_create_bw_tier_pair(uint8_t type, unsigned long bw) { return (((type) == MSM_BUS_BW_TIER1 ? 
1 : 0) << 15) | ((bw) & 0x7FFF); } void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size, void *cdata, int nmasters, int nslaves, int ntslaves) { int j, c; struct commit_data *cd = (struct commit_data *)cdata; *curr += scnprintf(buf + *curr, max_size - *curr, "BWSum:\n"); for (c = 0; c < nslaves; c++) *curr += scnprintf(buf + *curr, max_size - *curr, "0x%x\t", cd->bwsum[c]); *curr += scnprintf(buf + *curr, max_size - *curr, "\nArb:"); for (c = 0; c < ntslaves; c++) { *curr += scnprintf(buf + *curr, max_size - *curr, "\nTSlave %d:\n", c); for (j = 0; j < nmasters; j++) *curr += scnprintf(buf + *curr, max_size - *curr, " 0x%x\t", cd->arb[(c * nmasters) + j]); } } /** * allocate_commit_data() - Allocate the data for commit array in the * format specified by RPM * @fabric: Fabric device for which commit data is allocated */ static int msm_bus_rpm_allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata, void **cdata, int ctx) { struct commit_data **cd = (struct commit_data **)cdata; *cd = kzalloc(sizeof(struct commit_data), GFP_KERNEL); if (!*cd) { MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); return -ENOMEM; } (*cd)->bwsum = kzalloc((sizeof(uint16_t) * fab_pdata->nslaves), GFP_KERNEL); if (!(*cd)->bwsum) { MSM_BUS_DBG("Couldn't alloc mem for slaves\n"); kfree(*cd); return -ENOMEM; } (*cd)->arb = kzalloc(((sizeof(uint16_t *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->arb) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); kfree((*cd)->bwsum); kfree(*cd); return -ENOMEM; } (*cd)->actarb = kzalloc(((sizeof(unsigned long *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->actarb) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); kfree((*cd)->bwsum); kfree((*cd)->arb); kfree(*cd); return -ENOMEM; } return 0; } static void free_commit_data(void *cdata) { struct commit_data *cd = (struct commit_data *)cdata; kfree(cd->bwsum); kfree(cd->arb); 
kfree(cd->actarb); kfree(cd); } /** * allocate_rpm_data() - Allocate the id-value pairs to be * sent to RPM */ static void *msm_bus_rpm_allocate_rpm_data(struct platform_device *pdev, struct msm_bus_fabric_registration *fab_pdata) { struct msm_rpm_iv_pair *rpm_data; uint16_t count = ((fab_pdata->nmasters * fab_pdata->ntieredslaves) + fab_pdata->nslaves + 1)/2; rpm_data = kmalloc((sizeof(struct msm_rpm_iv_pair) * count), GFP_KERNEL); return (void *)rpm_data; } #define BWMASK 0x7FFF #define TIERMASK 0x8000 #define GET_TIER(n) (((n) & TIERMASK) >> 15) static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info, struct msm_bus_fabric_registration *fab_pdata, void *sel_cdata, int *master_tiers, int64_t add_bw) { int index, i, j, tiers, ports; struct commit_data *sel_cd = (struct commit_data *)sel_cdata; add_bw = INTERLEAVED_BW(fab_pdata, add_bw, info->node_info->num_mports); ports = INTERLEAVED_VAL(fab_pdata, info->node_info->num_mports); tiers = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_tiers); for (i = 0; i < tiers; i++) { for (j = 0; j < ports; j++) { uint16_t hop_tier; /* * For interleaved gateway ports and slave ports, * there is one-one mapping between gateway port and * the slave port */ if (info->node_info->gateway && i != j && (hop->node_info->num_sports > 1)) continue; if (!hop->node_info->tier) hop_tier = MSM_BUS_BW_TIER2 - 1; else hop_tier = hop->node_info->tier[i] - 1; index = ((hop_tier * fab_pdata->nmasters) + (info->node_info->masterp[j])); /* If there is tier, calculate arb for commit */ if (hop->node_info->tier) { uint16_t tier; unsigned long tieredbw = sel_cd->actarb[index]; if (GET_TIER(sel_cd->arb[index])) tier = MSM_BUS_BW_TIER1; else if (master_tiers) /* * By default master is only in the * tier specified by default. 
* To change the default tier, client * needs to explicitly request for a * different supported tier */ tier = master_tiers[0]; else tier = MSM_BUS_BW_TIER2; /* * Make sure gateway to slave port bandwidth * is not divided when slave is interleaved */ if (info->node_info->gateway && hop->node_info->num_sports > 1) tieredbw += add_bw; else tieredbw += INTERLEAVED_BW(fab_pdata, add_bw, hop->node_info-> num_sports); /* If bw is 0, update tier to default */ if (!tieredbw) tier = MSM_BUS_BW_TIER2; /* Update Arb for fab,get HW Mport from enum */ sel_cd->arb[index] = msm_bus_create_bw_tier_pair_bytes(tier, tieredbw); sel_cd->actarb[index] = tieredbw; MSM_BUS_DBG("tr:%d mpor:%d tbw:%ld bws: %lld\n", hop_tier, info->node_info->masterp[i], tieredbw, *hop->link_info.sel_bw); } } } /* Update bwsum for slaves on fabric */ ports = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_sports); for (i = 0; i < ports; i++) { sel_cd->bwsum[hop->node_info->slavep[i]] = (uint16_t)msm_bus_create_bw_tier_pair_bytes(0, (uint32_t)msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); MSM_BUS_DBG("slavep:%d, link_bw: %u\n", hop->node_info->slavep[i], (uint32_t) msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); } } #define RPM_SHIFT_VAL 16 #define RPM_SHIFT(n) ((n) << RPM_SHIFT_VAL) static int msm_bus_rpm_compare_cdata( struct msm_bus_fabric_registration *fab_pdata, struct commit_data *cd1, struct commit_data *cd2) { size_t n; int ret; n = sizeof(uint16_t) * fab_pdata->nslaves; ret = memcmp(cd1->bwsum, cd2->bwsum, n); if (ret) { MSM_BUS_DBG("Commit Data bwsum not equal\n"); return ret; } n = sizeof(uint16_t *) * ((fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1); ret = memcmp(cd1->arb, cd2->arb, n); if (ret) { MSM_BUS_DBG("Commit Data arb[%d] not equal\n", n); return ret; } return 0; } static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration *fab_pdata, int ctx, struct msm_rpm_iv_pair *rpm_data, struct commit_data *cd, bool valid) { int i, j, offset = 
0, status = 0, count, index = 0; /* * count is the number of 2-byte words required to commit the * data to rpm. This is calculated by the following formula. * Commit data is split into two arrays: * 1. arb[nmasters * ntieredslaves] * 2. bwsum[nslaves] */ count = ((fab_pdata->nmasters * fab_pdata->ntieredslaves) + (fab_pdata->nslaves) + 1)/2; offset = fab_pdata->offset; /* * Copy bwsum to rpm data * Since bwsum is uint16, the values need to be adjusted to * be copied to value field of rpm-data, which is 32 bits. */ for (i = 0; i < (fab_pdata->nslaves - 1); i += 2) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT(*(cd->bwsum + i + 1)) | *(cd->bwsum + i); index++; } /* Account for odd number of slaves */ if (fab_pdata->nslaves & 1) { rpm_data[index].id = offset + index; rpm_data[index].value = *(cd->arb); rpm_data[index].value = RPM_SHIFT(rpm_data[index].value) | *(cd->bwsum + i); index++; i = 1; } else i = 0; /* Copy arb values to rpm data */ for (; i < (fab_pdata->ntieredslaves * fab_pdata->nmasters); i += 2) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT(*(cd->arb + i + 1)) | *(cd->arb + i); index++; } MSM_BUS_DBG("rpm data for fab: %d\n", fab_pdata->id); for (i = 0; i < count; i++) MSM_BUS_DBG("%d %x\n", rpm_data[i].id, rpm_data[i].value); MSM_BUS_DBG("Commit Data: Fab: %d BWSum:\n", fab_pdata->id); for (i = 0; i < fab_pdata->nslaves; i++) MSM_BUS_DBG("fab_slaves:0x%x\n", cd->bwsum[i]); MSM_BUS_DBG("Commit Data: Fab: %d Arb:\n", fab_pdata->id); for (i = 0; i < fab_pdata->ntieredslaves; i++) { MSM_BUS_DBG("tiered-slave: %d\n", i); for (j = 0; j < fab_pdata->nmasters; j++) MSM_BUS_DBG(" 0x%x\n", cd->arb[(i * fab_pdata->nmasters) + j]); } MSM_BUS_DBG("calling msm_rpm_set: %d\n", status); msm_bus_dbg_commit_data(fab_pdata->name, cd, fab_pdata-> nmasters, fab_pdata->nslaves, fab_pdata->ntieredslaves, MSM_BUS_DBG_OP); if (fab_pdata->rpm_enabled) { if (valid) { if (ctx == ACTIVE_CTX) { status = 
msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_set(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } } else { if (ctx == ACTIVE_CTX) { status = msm_rpm_clear(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_clear(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } } } return status; } #else #define NUM_TIERS 2 #define RPM_SHIFT24(n) ((n) << 24) #define RPM_SHIFT16(n) ((n) << 16) #define RPM_SHIFT8(n) ((n) << 8) struct commit_data { uint16_t *bwsum; uint8_t *arb[NUM_TIERS]; unsigned long *actarb[NUM_TIERS]; }; #define MODE_BIT(val) ((val) & 0x80) #define MODE0_IMM(val) ((val) & 0xF) #define MODE0_SHIFT(val) (((val) & 0x70) >> 4) #define MODE1_STEP 48 /* 48 MB */ #define MODE1_OFFSET 512 /* 512 MB */ #define MODE1_IMM(val) ((val) & 0x7F) #define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x)) static uint8_t msm_bus_set_bw_bytes(unsigned long val) { unsigned int shift; unsigned int intVal; unsigned char result; /* Convert to MB */ intVal = (unsigned int)((val + ((1 << 20) - 1)) >> 20); /** * Divide by 2^20 and round up * A value graeter than 0x1E0 will round up to 512 and overflow * Mode 0 so it should be made Mode 1 */ if (0x1E0 > intVal) { /** * MODE 0 * Compute the shift value * Shift value is 32 - the number of leading zeroes - * 4 to save the most significant 4 bits of the value */ shift = 32 - 4 - min((uint8_t)28, (uint8_t)__CLZ(intVal)); /* Add min value - 1 to force a round up when shifting right */ intVal += (1 << shift) - 1; /* Recompute the shift value in case there was an overflow */ shift = 32 - 4 - min((uint8_t)28, (uint8_t)__CLZ(intVal)); /* Clear the mode bit (msb) and fill in the fields */ result = ((0x70 & (shift << 4)) | (0x0F & (intVal >> shift))); } else { /* MODE 1 */ result 
= (unsigned char)(0x80 | ((intVal - MODE1_OFFSET + MODE1_STEP - 1) / MODE1_STEP)); } return result; } uint64_t msm_bus_get_bw(unsigned long val) { return MODE_BIT(val) ? /* Mode 1 */ (MODE1_IMM(val) * MODE1_STEP + MODE1_OFFSET) : /* Mode 0 */ (MODE0_IMM(val) << MODE0_SHIFT(val)); } uint64_t msm_bus_get_bw_bytes(unsigned long val) { return msm_bus_get_bw(val) << 20; } static uint8_t msm_bus_create_bw_tier_pair_bytes(uint8_t type, unsigned long bw) { return msm_bus_set_bw_bytes(bw); }; uint8_t msm_bus_create_bw_tier_pair(uint8_t type, unsigned long bw) { return msm_bus_create_bw_tier_pair_bytes(type, bw); }; static int msm_bus_rpm_allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata, void **cdata, int ctx) { struct commit_data **cd = (struct commit_data **)cdata; int i; *cd = kzalloc(sizeof(struct commit_data), GFP_KERNEL); if (!*cd) { MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); goto cdata_err; } (*cd)->bwsum = kzalloc((sizeof(uint16_t) * fab_pdata->nslaves), GFP_KERNEL); if (!(*cd)->bwsum) { MSM_BUS_DBG("Couldn't alloc mem for slaves\n"); goto bwsum_err; } for (i = 0; i < NUM_TIERS; i++) { (*cd)->arb[i] = kzalloc(((sizeof(uint8_t *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->arb[i]) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); goto arb_err; } (*cd)->actarb[i] = kzalloc(((sizeof(unsigned long *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->actarb[i]) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); kfree((*cd)->arb[i]); goto arb_err; } } return 0; arb_err: for (i = i - 1; i >= 0; i--) { kfree((*cd)->arb[i]); kfree((*cd)->actarb[i]); } bwsum_err: kfree((*cd)->bwsum); cdata_err: kfree(*cd); return -ENOMEM; } static void free_commit_data(void *cdata) { int i; struct commit_data *cd = (struct commit_data *)cdata; kfree(cd->bwsum); for (i = 0; i < NUM_TIERS; i++) { kfree(cd->arb[i]); kfree(cd->actarb[i]); } kfree(cd); } static void 
*msm_bus_rpm_allocate_rpm_data(struct platform_device *pdev, struct msm_bus_fabric_registration *fab_pdata) { struct msm_rpm_iv_pair *rpm_data; uint16_t count = (((fab_pdata->nmasters * fab_pdata->ntieredslaves * NUM_TIERS)/2) + fab_pdata->nslaves + 1)/2; rpm_data = kmalloc((sizeof(struct msm_rpm_iv_pair) * count), GFP_KERNEL); return (void *)rpm_data; } static int msm_bus_rpm_compare_cdata( struct msm_bus_fabric_registration *fab_pdata, struct commit_data *cd1, struct commit_data *cd2) { size_t n; int i, ret; n = sizeof(uint16_t) * fab_pdata->nslaves; ret = memcmp(cd1->bwsum, cd2->bwsum, n); if (ret) { MSM_BUS_DBG("Commit Data bwsum not equal\n"); return ret; } n = sizeof(uint8_t *) * ((fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1); for (i = 0; i < NUM_TIERS; i++) { ret = memcmp(cd1->arb[i], cd2->arb[i], n); if (ret) { MSM_BUS_DBG("Commit Data arb[%d] not equal\n", i); return ret; } } return 0; } static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration *fab_pdata, int ctx, struct msm_rpm_iv_pair *rpm_data, struct commit_data *cd, bool valid) { int i, j, k, offset = 0, status = 0, count, index = 0; /* * count is the number of 2-byte words required to commit the * data to rpm. This is calculated by the following formula. * Commit data is split into two arrays: * 1. arb[nmasters * ntieredslaves][num_tiers] * 2. bwsum[nslaves] */ count = (((fab_pdata->nmasters * fab_pdata->ntieredslaves * NUM_TIERS) /2) + fab_pdata->nslaves + 1)/2; offset = fab_pdata->offset; /* * Copy bwsum to rpm data * Since bwsum is uint16, the values need to be adjusted to * be copied to value field of rpm-data, which is 32 bits. 
*/ for (i = 0; i < (fab_pdata->nslaves - 1); i += 2) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT16(*(cd->bwsum + i + 1)) | *(cd->bwsum + i); index++; } /* Account for odd number of slaves */ if (fab_pdata->nslaves & 1) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT8(*cd->arb[1]) | *(cd->arb[0]); rpm_data[index].value = RPM_SHIFT16(rpm_data[index].value) | *(cd->bwsum + i); index++; i = 1; } else i = 0; /* Copy arb values to rpm data */ for (; i < (fab_pdata->ntieredslaves * fab_pdata->nmasters); i += 2) { uint16_t tv1, tv0; rpm_data[index].id = offset + index; tv0 = RPM_SHIFT8(*(cd->arb[1] + i)) | (*(cd->arb[0] + i)); tv1 = RPM_SHIFT8(*(cd->arb[1] + i + 1)) | (*(cd->arb[0] + i + 1)); rpm_data[index].value = RPM_SHIFT16(tv1) | tv0; index++; } MSM_BUS_DBG("rpm data for fab: %d\n", fab_pdata->id); for (i = 0; i < count; i++) MSM_BUS_DBG("%d %x\n", rpm_data[i].id, rpm_data[i].value); MSM_BUS_DBG("Commit Data: Fab: %d BWSum:\n", fab_pdata->id); for (i = 0; i < fab_pdata->nslaves; i++) MSM_BUS_DBG("fab_slaves:0x%x\n", cd->bwsum[i]); MSM_BUS_DBG("Commit Data: Fab: %d Arb:\n", fab_pdata->id); for (k = 0; k < NUM_TIERS; k++) { MSM_BUS_DBG("Tier: %d\n", k); for (i = 0; i < fab_pdata->ntieredslaves; i++) { MSM_BUS_DBG("tiered-slave: %d\n", i); for (j = 0; j < fab_pdata->nmasters; j++) MSM_BUS_DBG(" 0x%x\n", cd->arb[k][(i * fab_pdata->nmasters) + j]); } } MSM_BUS_DBG("calling msm_rpm_set: %d\n", status); msm_bus_dbg_commit_data(fab_pdata->name, (void *)cd, fab_pdata-> nmasters, fab_pdata->nslaves, fab_pdata->ntieredslaves, MSM_BUS_DBG_OP); if (fab_pdata->rpm_enabled) { if (valid) { if (ctx == ACTIVE_CTX) { status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_set(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } } else { if (ctx == ACTIVE_CTX) { status = 
msm_rpm_clear(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_clear(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } } } return status; } #define FORMAT_BW(x) \ ((x < 0) ? \ -(msm_bus_get_bw_bytes(msm_bus_create_bw_tier_pair_bytes(0, -(x)))) : \ (msm_bus_get_bw_bytes(msm_bus_create_bw_tier_pair_bytes(0, x)))) static uint16_t msm_bus_pack_bwsum_bytes(unsigned long bw) { return (bw + ((1 << 20) - 1)) >> 20; }; static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info, struct msm_bus_fabric_registration *fab_pdata, void *sel_cdata, int *master_tiers, int64_t add_bw) { int index, i, j, tiers, ports; struct commit_data *sel_cd = (struct commit_data *)sel_cdata; add_bw = INTERLEAVED_BW(fab_pdata, add_bw, info->node_info->num_mports); ports = INTERLEAVED_VAL(fab_pdata, info->node_info->num_mports); tiers = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_tiers); for (i = 0; i < tiers; i++) { for (j = 0; j < ports; j++) { uint16_t hop_tier; /* * For interleaved gateway ports and slave ports, * there is one-one mapping between gateway port and * the slave port */ if (info->node_info->gateway && i != j && hop->node_info->num_sports > 1) continue; if (!hop->node_info->tier) hop_tier = MSM_BUS_BW_TIER2 - 1; else hop_tier = hop->node_info->tier[i] - 1; index = ((hop_tier * fab_pdata->nmasters) + (info->node_info->masterp[j])); /* If there is tier, calculate arb for commit */ if (hop->node_info->tier) { uint16_t tier; unsigned long tieredbw; if (master_tiers) tier = master_tiers[0] - 1; else tier = MSM_BUS_BW_TIER2 - 1; tieredbw = sel_cd->actarb[tier][index]; /* * Make sure gateway to slave port bandwidth * is not divided when slave is interleaved */ if (info->node_info->gateway && hop->node_info->num_sports > 1) tieredbw += add_bw; else tieredbw += INTERLEAVED_BW(fab_pdata, add_bw, hop->node_info-> 
num_sports); /* Update Arb for fab,get HW Mport from enum */ sel_cd->arb[tier][index] = msm_bus_create_bw_tier_pair_bytes(0, tieredbw); sel_cd->actarb[tier][index] = tieredbw; MSM_BUS_DBG("tr:%d mpor:%d tbw:%lu bws: %lld\n", hop_tier, info->node_info->masterp[i], tieredbw, *hop->link_info.sel_bw); } } } /* Update bwsum for slaves on fabric */ ports = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_sports); for (i = 0; i < ports; i++) { sel_cd->bwsum[hop->node_info->slavep[i]] = msm_bus_pack_bwsum_bytes((uint32_t) msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); MSM_BUS_DBG("slavep:%d, link_bw: %lld\n", hop->node_info->slavep[i], msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); } } void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size, void *cdata, int nmasters, int nslaves, int ntslaves) { int j, k, c; struct commit_data *cd = (struct commit_data *)cdata; *curr += scnprintf(buf + *curr, max_size - *curr, "BWSum:\n"); for (c = 0; c < nslaves; c++) *curr += scnprintf(buf + *curr, max_size - *curr, "0x%x\t", cd->bwsum[c]); *curr += scnprintf(buf + *curr, max_size - *curr, "\nArb:"); for (k = 0; k < NUM_TIERS; k++) { *curr += scnprintf(buf + *curr, max_size - *curr, "\nTier %d:\n", k); for (c = 0; c < ntslaves; c++) { *curr += scnprintf(buf + *curr, max_size - *curr, "TSlave %d:\n", c); for (j = 0; j < nmasters; j++) *curr += scnprintf(buf + *curr, max_size - *curr, " 0x%x\t", cd->arb[k][(c * nmasters) + j]); } } } #endif /** * msm_bus_rpm_commit() - Commit the arbitration data to RPM * @fabric: Fabric for which the data should be committed **/ static int msm_bus_rpm_commit(struct msm_bus_fabric_registration *fab_pdata, void *hw_data, void **cdata) { int ret; bool valid; struct commit_data *dual_cd, *act_cd; struct msm_rpm_iv_pair *rpm_data = (struct msm_rpm_iv_pair *)hw_data; dual_cd = (struct commit_data *)cdata[DUAL_CTX]; act_cd = (struct commit_data *)cdata[ACTIVE_CTX]; /* * If the arb data for active set 
and sleep set is * different, commit both sets. * If the arb data for active set and sleep set is * the same, invalidate the sleep set. */ ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd); if (!ret) /* Invalidate sleep set.*/ valid = false; else valid = true; ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data, dual_cd, valid); if (ret) MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n", fab_pdata->id, DUAL_CTX); valid = true; ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd, valid); if (ret) MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n", fab_pdata->id, ACTIVE_CTX); return ret; } static int msm_bus_rpm_port_halt(uint32_t haltid, uint8_t mport) { int status = 0; struct msm_bus_halt_vector hvector = {0, 0}; struct msm_rpm_iv_pair rpm_data[2]; MSM_BUS_MASTER_HALT(hvector.haltmask, hvector.haltval, mport); rpm_data[0].id = haltid; rpm_data[0].value = hvector.haltval; rpm_data[1].id = haltid + 1; rpm_data[1].value = hvector.haltmask; MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_0, rpm_data[0].id, rpm_data[0].value); MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_0, rpm_data[1].id, rpm_data[1].value); status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2); if (status) MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); return status; } static int msm_bus_rpm_port_unhalt(uint32_t haltid, uint8_t mport) { int status = 0; struct msm_bus_halt_vector hvector = {0, 0}; struct msm_rpm_iv_pair rpm_data[2]; MSM_BUS_MASTER_UNHALT(hvector.haltmask, hvector.haltval, mport); rpm_data[0].id = haltid; rpm_data[0].value = hvector.haltval; rpm_data[1].id = haltid + 1; rpm_data[1].value = hvector.haltmask; MSM_BUS_DBG("unalt: ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_SLEEP, rpm_data[0].id, rpm_data[0].value); MSM_BUS_DBG("unhalt: ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_SLEEP, rpm_data[1].id, rpm_data[1].value); status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2); if (status) MSM_BUS_DBG("msm_rpm_set returned: 
%d\n", status); return status; } int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration *fab_pdata, void *hw_data, void **cdata) { return 0; } int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata, struct msm_bus_hw_algorithm *hw_algo) { pdata->il_flag = msm_bus_rpm_is_mem_interleaved(); hw_algo->allocate_commit_data = msm_bus_rpm_allocate_commit_data; hw_algo->allocate_hw_data = msm_bus_rpm_allocate_rpm_data; hw_algo->node_init = NULL; hw_algo->free_commit_data = free_commit_data; hw_algo->update_bw = msm_bus_rpm_update_bw; hw_algo->commit = msm_bus_rpm_commit; hw_algo->port_halt = msm_bus_rpm_port_halt; hw_algo->port_unhalt = msm_bus_rpm_port_unhalt; if (!pdata->ahb) pdata->rpm_enabled = 1; return 0; }
gpl-2.0
ChaOSChriS/android_kernel_asus_flo
kernel/srcu.c
4371
11471
/* * Sleepable Read-Copy Update mechanism for mutual exclusion. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2006 * * Author: Paul McKenney <paulmck@us.ibm.com> * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU/ *.txt * */ #include <linux/export.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/delay.h> #include <linux/srcu.h> static int init_srcu_struct_fields(struct srcu_struct *sp) { sp->completed = 0; mutex_init(&sp->mutex); sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); return sp->per_cpu_ref ? 0 : -ENOMEM; } #ifdef CONFIG_DEBUG_LOCK_ALLOC int __init_srcu_struct(struct srcu_struct *sp, const char *name, struct lock_class_key *key) { /* Don't re-initialize a lock while it is held. */ debug_check_no_locks_freed((void *)sp, sizeof(*sp)); lockdep_init_map(&sp->dep_map, name, key, 0); return init_srcu_struct_fields(sp); } EXPORT_SYMBOL_GPL(__init_srcu_struct); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /** * init_srcu_struct - initialize a sleep-RCU structure * @sp: structure to initialize. * * Must invoke this on a given srcu_struct before passing that srcu_struct * to any other function. 
Each srcu_struct represents a separate domain * of SRCU protection. */ int init_srcu_struct(struct srcu_struct *sp) { return init_srcu_struct_fields(sp); } EXPORT_SYMBOL_GPL(init_srcu_struct); #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* * srcu_readers_active_idx -- returns approximate number of readers * active on the specified rank of per-CPU counters. */ static int srcu_readers_active_idx(struct srcu_struct *sp, int idx) { int cpu; int sum; sum = 0; for_each_possible_cpu(cpu) sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]; return sum; } /** * srcu_readers_active - returns approximate number of readers. * @sp: which srcu_struct to count active readers (holding srcu_read_lock). * * Note that this is not an atomic primitive, and can therefore suffer * severe errors when invoked on an active srcu_struct. That said, it * can be useful as an error check at cleanup time. */ static int srcu_readers_active(struct srcu_struct *sp) { return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1); } /** * cleanup_srcu_struct - deconstruct a sleep-RCU structure * @sp: structure to clean up. * * Must invoke this after you are finished using a given srcu_struct that * was initialized via init_srcu_struct(), else you leak memory. */ void cleanup_srcu_struct(struct srcu_struct *sp) { int sum; sum = srcu_readers_active(sp); WARN_ON(sum); /* Leakage unless caller handles error. */ if (sum != 0) return; free_percpu(sp->per_cpu_ref); sp->per_cpu_ref = NULL; } EXPORT_SYMBOL_GPL(cleanup_srcu_struct); /* * Counts the new reader in the appropriate per-CPU element of the * srcu_struct. Must be called from process context. * Returns an index that must be passed to the matching srcu_read_unlock(). */ int __srcu_read_lock(struct srcu_struct *sp) { int idx; preempt_disable(); idx = sp->completed & 0x1; barrier(); /* ensure compiler looks -once- at sp->completed. 
*/ per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++; srcu_barrier(); /* ensure compiler won't misorder critical section. */ preempt_enable(); return idx; } EXPORT_SYMBOL_GPL(__srcu_read_lock); /* * Removes the count for the old reader from the appropriate per-CPU * element of the srcu_struct. Note that this may well be a different * CPU than that which was incremented by the corresponding srcu_read_lock(). * Must be called from process context. */ void __srcu_read_unlock(struct srcu_struct *sp, int idx) { preempt_disable(); srcu_barrier(); /* ensure compiler won't misorder critical section. */ per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; preempt_enable(); } EXPORT_SYMBOL_GPL(__srcu_read_unlock); /* * We use an adaptive strategy for synchronize_srcu() and especially for * synchronize_srcu_expedited(). We spin for a fixed time period * (defined below) to allow SRCU readers to exit their read-side critical * sections. If there are still some readers after 10 microseconds, * we repeatedly block for 1-millisecond time periods. This approach * has done well in testing, so there is no need for a config parameter. */ #define SYNCHRONIZE_SRCU_READER_DELAY 10 /* * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). */ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) { int idx; rcu_lockdep_assert(!lock_is_held(&sp->dep_map) && !lock_is_held(&rcu_bh_lock_map) && !lock_is_held(&rcu_lock_map) && !lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); idx = sp->completed; mutex_lock(&sp->mutex); /* * Check to see if someone else did the work for us while we were * waiting to acquire the lock. We need -two- advances of * the counter, not just one. 
If there was but one, we might have * shown up -after- our helper's first synchronize_sched(), thus * having failed to prevent CPU-reordering races with concurrent * srcu_read_unlock()s on other CPUs (see comment below). So we * either (1) wait for two or (2) supply the second ourselves. */ if ((sp->completed - idx) >= 2) { mutex_unlock(&sp->mutex); return; } sync_func(); /* Force memory barrier on all CPUs. */ /* * The preceding synchronize_sched() ensures that any CPU that * sees the new value of sp->completed will also see any preceding * changes to data structures made by this CPU. This prevents * some other CPU from reordering the accesses in its SRCU * read-side critical section to precede the corresponding * srcu_read_lock() -- ensuring that such references will in * fact be protected. * * So it is now safe to do the flip. */ idx = sp->completed & 0x1; sp->completed++; sync_func(); /* Force memory barrier on all CPUs. */ /* * At this point, because of the preceding synchronize_sched(), * all srcu_read_lock() calls using the old counters have completed. * Their corresponding critical sections might well be still * executing, but the srcu_read_lock() primitives themselves * will have finished executing. We initially give readers * an arbitrarily chosen 10 microseconds to get out of their * SRCU read-side critical sections, then loop waiting 1/HZ * seconds per iteration. The 10-microsecond value has done * very well in testing. */ if (srcu_readers_active_idx(sp, idx)) udelay(SYNCHRONIZE_SRCU_READER_DELAY); while (srcu_readers_active_idx(sp, idx)) schedule_timeout_interruptible(1); sync_func(); /* Force memory barrier on all CPUs. */ /* * The preceding synchronize_sched() forces all srcu_read_unlock() * primitives that were executing concurrently with the preceding * for_each_possible_cpu() loop to have completed by this point. 
* More importantly, it also forces the corresponding SRCU read-side * critical sections to have also completed, and the corresponding * references to SRCU-protected data items to be dropped. * * Note: * * Despite what you might think at first glance, the * preceding synchronize_sched() -must- be within the * critical section ended by the following mutex_unlock(). * Otherwise, a task taking the early exit can race * with a srcu_read_unlock(), which might have executed * just before the preceding srcu_readers_active() check, * and whose CPU might have reordered the srcu_read_unlock() * with the preceding critical section. In this case, there * is nothing preventing the synchronize_sched() task that is * taking the early exit from freeing a data structure that * is still being referenced (out of order) by the task * doing the srcu_read_unlock(). * * Alternatively, the comparison with "2" on the early exit * could be changed to "3", but this increases synchronize_srcu() * latency for bulk loads. So the current code is preferred. */ mutex_unlock(&sp->mutex); } /** * synchronize_srcu - wait for prior SRCU read-side critical-section completion * @sp: srcu_struct with which to synchronize. * * Flip the completed counter, and wait for the old count to drain to zero. * As with classic RCU, the updater must use some separate means of * synchronizing concurrent updates. Can block; must be called from * process context. * * Note that it is illegal to call synchronize_srcu() from the corresponding * SRCU read-side critical section; doing so will result in deadlock. * However, it is perfectly legal to call synchronize_srcu() on one * srcu_struct from some other srcu_struct's read-side critical section. */ void synchronize_srcu(struct srcu_struct *sp) { __synchronize_srcu(sp, synchronize_sched); } EXPORT_SYMBOL_GPL(synchronize_srcu); /** * synchronize_srcu_expedited - Brute-force SRCU grace period * @sp: srcu_struct with which to synchronize. 
* * Wait for an SRCU grace period to elapse, but use a "big hammer" * approach to force the grace period to end quickly. This consumes * significant time on all CPUs and is unfriendly to real-time workloads, * so is thus not recommended for any sort of common-case code. In fact, * if you are using synchronize_srcu_expedited() in a loop, please * restructure your code to batch your updates, and then use a single * synchronize_srcu() instead. * * Note that it is illegal to call this function while holding any lock * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal * to call this function from a CPU-hotplug notifier. Failing to observe * these restriction will result in deadlock. It is also illegal to call * synchronize_srcu_expedited() from the corresponding SRCU read-side * critical section; doing so will result in deadlock. However, it is * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct * from some other srcu_struct's read-side critical section, as long as * the resulting graph of srcu_structs is acyclic. */ void synchronize_srcu_expedited(struct srcu_struct *sp) { __synchronize_srcu(sp, synchronize_sched_expedited); } EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); /** * srcu_batches_completed - return batches completed. * @sp: srcu_struct on which to report batch completion. * * Report the number of batches, correlated with, but not necessarily * precisely the same as, the number of grace periods that have elapsed. */ long srcu_batches_completed(struct srcu_struct *sp) { return sp->completed; } EXPORT_SYMBOL_GPL(srcu_batches_completed);
gpl-2.0
mifl/android_kernel_pantech_im-860s
arch/x86/vdso/vma.c
4371
5213
/* * Set up the VMAs to tell the VM about the vDSO. * Copyright 2007 Andi Kleen, SUSE Labs. * Subject to the GPL, v.2 */ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/random.h> #include <linux/elf.h> #include <asm/vsyscall.h> #include <asm/vgtod.h> #include <asm/proto.h> #include <asm/vdso.h> #include <asm/page.h> unsigned int __read_mostly vdso_enabled = 1; extern char vdso_start[], vdso_end[]; extern unsigned short vdso_sync_cpuid; extern struct page *vdso_pages[]; static unsigned vdso_size; #ifdef CONFIG_X86_X32_ABI extern char vdsox32_start[], vdsox32_end[]; extern struct page *vdsox32_pages[]; static unsigned vdsox32_size; static void __init patch_vdsox32(void *vdso, size_t len) { Elf32_Ehdr *hdr = vdso; Elf32_Shdr *sechdrs, *alt_sec = 0; char *secstrings; void *alt_data; int i; BUG_ON(len < sizeof(Elf32_Ehdr)); BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0); sechdrs = (void *)hdr + hdr->e_shoff; secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (i = 1; i < hdr->e_shnum; i++) { Elf32_Shdr *shdr = &sechdrs[i]; if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) { alt_sec = shdr; goto found; } } /* If we get here, it's probably a bug. 
*/ pr_warning("patch_vdsox32: .altinstructions not found\n"); return; /* nothing to patch */ found: alt_data = (void *)hdr + alt_sec->sh_offset; apply_alternatives(alt_data, alt_data + alt_sec->sh_size); } #endif static void __init patch_vdso64(void *vdso, size_t len) { Elf64_Ehdr *hdr = vdso; Elf64_Shdr *sechdrs, *alt_sec = 0; char *secstrings; void *alt_data; int i; BUG_ON(len < sizeof(Elf64_Ehdr)); BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0); sechdrs = (void *)hdr + hdr->e_shoff; secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (i = 1; i < hdr->e_shnum; i++) { Elf64_Shdr *shdr = &sechdrs[i]; if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) { alt_sec = shdr; goto found; } } /* If we get here, it's probably a bug. */ pr_warning("patch_vdso64: .altinstructions not found\n"); return; /* nothing to patch */ found: alt_data = (void *)hdr + alt_sec->sh_offset; apply_alternatives(alt_data, alt_data + alt_sec->sh_size); } static int __init init_vdso(void) { int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; int i; patch_vdso64(vdso_start, vdso_end - vdso_start); vdso_size = npages << PAGE_SHIFT; for (i = 0; i < npages; i++) vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE); #ifdef CONFIG_X86_X32_ABI patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start); npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE; vdsox32_size = npages << PAGE_SHIFT; for (i = 0; i < npages; i++) vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE); #endif return 0; } subsys_initcall(init_vdso); struct linux_binprm; /* Put the vdso above the (randomized) stack with another randomized offset. This way there is no hole in the middle of address space. To save memory make sure it is still in the same PTE as the stack top. 
This doesn't give that many random bits */ static unsigned long vdso_addr(unsigned long start, unsigned len) { unsigned long addr, end; unsigned offset; end = (start + PMD_SIZE - 1) & PMD_MASK; if (end >= TASK_SIZE_MAX) end = TASK_SIZE_MAX; end -= len; /* This loses some more bits than a modulo, but is cheaper */ offset = get_random_int() & (PTRS_PER_PTE - 1); addr = start + (offset << PAGE_SHIFT); if (addr >= end) addr = end; /* * page-align it here so that get_unmapped_area doesn't * align it wrongfully again to the next page. addr can come in 4K * unaligned here as a result of stack start randomization. */ addr = PAGE_ALIGN(addr); addr = align_addr(addr, NULL, ALIGN_VDSO); return addr; } /* Setup a VMA at program startup for the vsyscall page. Not called for compat tasks */ static int setup_additional_pages(struct linux_binprm *bprm, int uses_interp, struct page **pages, unsigned size) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, size); addr = get_unmapped_area(NULL, addr, size, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } current->mm->context.vdso = (void *)addr; ret = install_special_mapping(mm, addr, size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, pages); if (ret) { current->mm->context.vdso = NULL; goto up_fail; } up_fail: up_write(&mm->mmap_sem); return ret; } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { return setup_additional_pages(bprm, uses_interp, vdso_pages, vdso_size); } #ifdef CONFIG_X86_X32_ABI int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { return setup_additional_pages(bprm, uses_interp, vdsox32_pages, vdsox32_size); } #endif static __init int vdso_setup(char *s) { vdso_enabled = simple_strtoul(s, NULL, 0); return 0; } __setup("vdso=", vdso_setup);
gpl-2.0
crpalmer/samsung_kernels
Documentation/accounting/getdelays.c
7955
13567
/* getdelays.c * * Utility to get per-pid and per-tgid delay accounting statistics * Also illustrates usage of the taskstats interface * * Copyright (C) Shailabh Nagar, IBM Corp. 2005 * Copyright (C) Balbir Singh, IBM Corp. 2006 * Copyright (c) Jay Lan, SGI. 2006 * * Compile with * gcc -I/usr/src/linux/include getdelays.c -o getdelays */ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <unistd.h> #include <poll.h> #include <string.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/socket.h> #include <sys/wait.h> #include <signal.h> #include <linux/genetlink.h> #include <linux/taskstats.h> #include <linux/cgroupstats.h> /* * Generic macros for dealing with netlink sockets. Might be duplicated * elsewhere. It is recommended that commercial grade applications use * libnl or libnetlink and use the interfaces provided by the library */ #define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN)) #define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN) #define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN)) #define NLA_PAYLOAD(len) (len - NLA_HDRLEN) #define err(code, fmt, arg...) \ do { \ fprintf(stderr, fmt, ##arg); \ exit(code); \ } while (0) int done; int rcvbufsz; char name[100]; int dbg; int print_delays; int print_io_accounting; int print_task_context_switch_counts; __u64 stime, utime; #define PRINTF(fmt, arg...) 
{ \ if (dbg) { \ printf(fmt, ##arg); \ } \ } /* Maximum size of response requested or message sent */ #define MAX_MSG_SIZE 1024 /* Maximum number of cpus expected to be specified in a cpumask */ #define MAX_CPUS 32 struct msgtemplate { struct nlmsghdr n; struct genlmsghdr g; char buf[MAX_MSG_SIZE]; }; char cpumask[100+6*MAX_CPUS]; static void usage(void) { fprintf(stderr, "getdelays [-dilv] [-w logfile] [-r bufsize] " "[-m cpumask] [-t tgid] [-p pid]\n"); fprintf(stderr, " -d: print delayacct stats\n"); fprintf(stderr, " -i: print IO accounting (works only with -p)\n"); fprintf(stderr, " -l: listen forever\n"); fprintf(stderr, " -v: debug on\n"); fprintf(stderr, " -C: container path\n"); } /* * Create a raw netlink socket and bind */ static int create_nl_socket(int protocol) { int fd; struct sockaddr_nl local; fd = socket(AF_NETLINK, SOCK_RAW, protocol); if (fd < 0) return -1; if (rcvbufsz) if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbufsz, sizeof(rcvbufsz)) < 0) { fprintf(stderr, "Unable to set socket rcv buf size " "to %d\n", rcvbufsz); return -1; } memset(&local, 0, sizeof(local)); local.nl_family = AF_NETLINK; if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0) goto error; return fd; error: close(fd); return -1; } static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid, __u8 genl_cmd, __u16 nla_type, void *nla_data, int nla_len) { struct nlattr *na; struct sockaddr_nl nladdr; int r, buflen; char *buf; struct msgtemplate msg; msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); msg.n.nlmsg_type = nlmsg_type; msg.n.nlmsg_flags = NLM_F_REQUEST; msg.n.nlmsg_seq = 0; msg.n.nlmsg_pid = nlmsg_pid; msg.g.cmd = genl_cmd; msg.g.version = 0x1; na = (struct nlattr *) GENLMSG_DATA(&msg); na->nla_type = nla_type; na->nla_len = nla_len + 1 + NLA_HDRLEN; memcpy(NLA_DATA(na), nla_data, nla_len); msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len); buf = (char *) &msg; buflen = msg.n.nlmsg_len ; memset(&nladdr, 0, sizeof(nladdr)); nladdr.nl_family = AF_NETLINK; while ((r = 
sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr, sizeof(nladdr))) < buflen) { if (r > 0) { buf += r; buflen -= r; } else if (errno != EAGAIN) return -1; } return 0; } /* * Probe the controller in genetlink to find the family id * for the TASKSTATS family */ static int get_family_id(int sd) { struct { struct nlmsghdr n; struct genlmsghdr g; char buf[256]; } ans; int id = 0, rc; struct nlattr *na; int rep_len; strcpy(name, TASKSTATS_GENL_NAME); rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY, CTRL_ATTR_FAMILY_NAME, (void *)name, strlen(TASKSTATS_GENL_NAME)+1); if (rc < 0) return 0; /* sendto() failure? */ rep_len = recv(sd, &ans, sizeof(ans), 0); if (ans.n.nlmsg_type == NLMSG_ERROR || (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len)) return 0; na = (struct nlattr *) GENLMSG_DATA(&ans); na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len)); if (na->nla_type == CTRL_ATTR_FAMILY_ID) { id = *(__u16 *) NLA_DATA(na); } return id; } #define average_ms(t, c) (t / 1000000ULL / (c ? 
c : 1)) static void print_delayacct(struct taskstats *t) { printf("\n\nCPU %15s%15s%15s%15s%15s\n" " %15llu%15llu%15llu%15llu%15.3fms\n" "IO %15s%15s%15s\n" " %15llu%15llu%15llums\n" "SWAP %15s%15s%15s\n" " %15llu%15llu%15llums\n" "RECLAIM %12s%15s%15s\n" " %15llu%15llu%15llums\n", "count", "real total", "virtual total", "delay total", "delay average", (unsigned long long)t->cpu_count, (unsigned long long)t->cpu_run_real_total, (unsigned long long)t->cpu_run_virtual_total, (unsigned long long)t->cpu_delay_total, average_ms((double)t->cpu_delay_total, t->cpu_count), "count", "delay total", "delay average", (unsigned long long)t->blkio_count, (unsigned long long)t->blkio_delay_total, average_ms(t->blkio_delay_total, t->blkio_count), "count", "delay total", "delay average", (unsigned long long)t->swapin_count, (unsigned long long)t->swapin_delay_total, average_ms(t->swapin_delay_total, t->swapin_count), "count", "delay total", "delay average", (unsigned long long)t->freepages_count, (unsigned long long)t->freepages_delay_total, average_ms(t->freepages_delay_total, t->freepages_count)); } static void task_context_switch_counts(struct taskstats *t) { printf("\n\nTask %15s%15s\n" " %15llu%15llu\n", "voluntary", "nonvoluntary", (unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw); } static void print_cgroupstats(struct cgroupstats *c) { printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, " "uninterruptible %llu\n", (unsigned long long)c->nr_sleeping, (unsigned long long)c->nr_io_wait, (unsigned long long)c->nr_running, (unsigned long long)c->nr_stopped, (unsigned long long)c->nr_uninterruptible); } static void print_ioacct(struct taskstats *t) { printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n", t->ac_comm, (unsigned long long)t->read_bytes, (unsigned long long)t->write_bytes, (unsigned long long)t->cancelled_write_bytes); } int main(int argc, char *argv[]) { int c, rc, rep_len, aggr_len, len2; int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC; __u16 
id; __u32 mypid; struct nlattr *na; int nl_sd = -1; int len = 0; pid_t tid = 0; pid_t rtid = 0; int fd = 0; int count = 0; int write_file = 0; int maskset = 0; char *logfile = NULL; int loop = 0; int containerset = 0; char containerpath[1024]; int cfd = 0; int forking = 0; sigset_t sigset; struct msgtemplate msg; while (!forking) { c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:"); if (c < 0) break; switch (c) { case 'd': printf("print delayacct stats ON\n"); print_delays = 1; break; case 'i': printf("printing IO accounting\n"); print_io_accounting = 1; break; case 'q': printf("printing task/process context switch rates\n"); print_task_context_switch_counts = 1; break; case 'C': containerset = 1; strncpy(containerpath, optarg, strlen(optarg) + 1); break; case 'w': logfile = strdup(optarg); printf("write to file %s\n", logfile); write_file = 1; break; case 'r': rcvbufsz = atoi(optarg); printf("receive buf size %d\n", rcvbufsz); if (rcvbufsz < 0) err(1, "Invalid rcv buf size\n"); break; case 'm': strncpy(cpumask, optarg, sizeof(cpumask)); maskset = 1; printf("cpumask %s maskset %d\n", cpumask, maskset); break; case 't': tid = atoi(optarg); if (!tid) err(1, "Invalid tgid\n"); cmd_type = TASKSTATS_CMD_ATTR_TGID; break; case 'p': tid = atoi(optarg); if (!tid) err(1, "Invalid pid\n"); cmd_type = TASKSTATS_CMD_ATTR_PID; break; case 'c': /* Block SIGCHLD for sigwait() later */ if (sigemptyset(&sigset) == -1) err(1, "Failed to empty sigset"); if (sigaddset(&sigset, SIGCHLD)) err(1, "Failed to set sigchld in sigset"); sigprocmask(SIG_BLOCK, &sigset, NULL); /* fork/exec a child */ tid = fork(); if (tid < 0) err(1, "Fork failed\n"); if (tid == 0) if (execvp(argv[optind - 1], &argv[optind - 1]) < 0) exit(-1); /* Set the command type and avoid further processing */ cmd_type = TASKSTATS_CMD_ATTR_PID; forking = 1; break; case 'v': printf("debug on\n"); dbg = 1; break; case 'l': printf("listen forever\n"); loop = 1; break; default: usage(); exit(-1); } } if (write_file) { fd = 
open(logfile, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (fd == -1) { perror("Cannot open output file\n"); exit(1); } } if ((nl_sd = create_nl_socket(NETLINK_GENERIC)) < 0) err(1, "error creating Netlink socket\n"); mypid = getpid(); id = get_family_id(nl_sd); if (!id) { fprintf(stderr, "Error getting family id, errno %d\n", errno); goto err; } PRINTF("family id %d\n", id); if (maskset) { rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, &cpumask, strlen(cpumask) + 1); PRINTF("Sent register cpumask, retval %d\n", rc); if (rc < 0) { fprintf(stderr, "error sending register cpumask\n"); goto err; } } if (tid && containerset) { fprintf(stderr, "Select either -t or -C, not both\n"); goto err; } /* * If we forked a child, wait for it to exit. Cannot use waitpid() * as all the delicious data would be reaped as part of the wait */ if (tid && forking) { int sig_received; sigwait(&sigset, &sig_received); } if (tid) { rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, cmd_type, &tid, sizeof(__u32)); PRINTF("Sent pid/tgid, retval %d\n", rc); if (rc < 0) { fprintf(stderr, "error sending tid/tgid cmd\n"); goto done; } } if (containerset) { cfd = open(containerpath, O_RDONLY); if (cfd < 0) { perror("error opening container file"); goto err; } rc = send_cmd(nl_sd, id, mypid, CGROUPSTATS_CMD_GET, CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32)); if (rc < 0) { perror("error sending cgroupstats command"); goto err; } } if (!maskset && !tid && !containerset) { usage(); goto err; } do { rep_len = recv(nl_sd, &msg, sizeof(msg), 0); PRINTF("received %d bytes\n", rep_len); if (rep_len < 0) { fprintf(stderr, "nonfatal reply error: errno %d\n", errno); continue; } if (msg.n.nlmsg_type == NLMSG_ERROR || !NLMSG_OK((&msg.n), rep_len)) { struct nlmsgerr *err = NLMSG_DATA(&msg); fprintf(stderr, "fatal reply error, errno %d\n", err->error); goto done; } PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n", sizeof(struct nlmsghdr), 
msg.n.nlmsg_len, rep_len); rep_len = GENLMSG_PAYLOAD(&msg.n); na = (struct nlattr *) GENLMSG_DATA(&msg); len = 0; while (len < rep_len) { len += NLA_ALIGN(na->nla_len); switch (na->nla_type) { case TASKSTATS_TYPE_AGGR_TGID: /* Fall through */ case TASKSTATS_TYPE_AGGR_PID: aggr_len = NLA_PAYLOAD(na->nla_len); len2 = 0; /* For nested attributes, na follows */ na = (struct nlattr *) NLA_DATA(na); done = 0; while (len2 < aggr_len) { switch (na->nla_type) { case TASKSTATS_TYPE_PID: rtid = *(int *) NLA_DATA(na); if (print_delays) printf("PID\t%d\n", rtid); break; case TASKSTATS_TYPE_TGID: rtid = *(int *) NLA_DATA(na); if (print_delays) printf("TGID\t%d\n", rtid); break; case TASKSTATS_TYPE_STATS: count++; if (print_delays) print_delayacct((struct taskstats *) NLA_DATA(na)); if (print_io_accounting) print_ioacct((struct taskstats *) NLA_DATA(na)); if (print_task_context_switch_counts) task_context_switch_counts((struct taskstats *) NLA_DATA(na)); if (fd) { if (write(fd, NLA_DATA(na), na->nla_len) < 0) { err(1,"write error\n"); } } if (!loop) goto done; break; default: fprintf(stderr, "Unknown nested" " nla_type %d\n", na->nla_type); break; } len2 += NLA_ALIGN(na->nla_len); na = (struct nlattr *) ((char *) na + len2); } break; case CGROUPSTATS_TYPE_CGROUP_STATS: print_cgroupstats(NLA_DATA(na)); break; default: fprintf(stderr, "Unknown nla_type %d\n", na->nla_type); case TASKSTATS_TYPE_NULL: break; } na = (struct nlattr *) (GENLMSG_DATA(&msg) + len); } } while (loop); done: if (maskset) { rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, &cpumask, strlen(cpumask) + 1); printf("Sent deregister mask, retval %d\n", rc); if (rc < 0) err(rc, "error sending deregister cpumask\n"); } err: close(nl_sd); if (fd) close(fd); if (cfd) close(cfd); return 0; }
gpl-2.0
cwxda/android_kernel_xiaomi_armani_caf
drivers/net/mii.c
7955
12578
/*
 * mii.c: MII interface library
 *
 * Maintained by Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2001,2002 Jeff Garzik
 *
 * Various code came from myson803.c and other files by Donald Becker.
 * Copyright:
 *	Written 1998-2002 by Donald Becker.
 *
 *	This software may be used and distributed according
 *	to the terms of the GNU General Public License (GPL),
 *	incorporated herein by reference.  Drivers based on
 *	or derived from this code fall under the GPL and must
 *	retain the authorship, copyright and license notice.
 *	This file is not a complete program and may only be
 *	used when the entire operating system is licensed
 *	under the GPL.
 *
 *	The author may be reached as becker@scyld.com, or C/O
 *	Scyld Computing Corporation
 *	410 Severn Ave., Suite 210
 *	Annapolis MD 21403
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/*
 * Read an autonegotiation advertisement register (MII_ADVERTISE or
 * MII_LPA) over MDIO and convert the raw MII bits to the ethtool
 * ADVERTISED_* bit representation.
 */
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
	int advert;

	advert = mii->mdio_read(mii->dev, mii->phy_id, addr);

	return mii_lpa_to_ethtool_lpa_t(advert);
}

/**
 * mii_ethtool_gset - get settings that are specified in @ecmd
 * @mii: MII interface
 * @ecmd: requested ethtool_cmd
 *
 * The @ecmd parameter is expected to have been cleared before calling
 * mii_ethtool_gset().
 *
 * Reads BMCR/BMSR (and CTRL1000/STAT1000 on gigabit-capable PHYs) and
 * fills in supported/advertised modes, autoneg state, speed and duplex.
 * Also caches the resulting duplex in mii->full_duplex.
 *
 * Returns 0 for success, negative on error.
 */
int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
	struct net_device *dev = mii->dev;
	u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0;
	u32 nego;

	ecmd->supported =
	    (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
	     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
	     SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
	if (mii->supports_gmii)
		ecmd->supported |= SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* only supports twisted-pair */
	ecmd->port = PORT_MII;

	/* only supports internal transceiver */
	ecmd->transceiver = XCVR_INTERNAL;

	/* this isn't fully supported at higher layers */
	ecmd->phy_address = mii->phy_id;
	ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22;

	ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
	bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR);
	if (mii->supports_gmii) {
		ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
		stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
	}
	if (bmcr & BMCR_ANENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;

		ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
		if (mii->supports_gmii)
			ecmd->advertising |=
					mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

		/* link-partner abilities are only valid once autoneg
		 * has completed */
		if (bmsr & BMSR_ANEGCOMPLETE) {
			ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
			ecmd->lp_advertising |=
					mii_stat1000_to_ethtool_lpa_t(stat1000);
		} else {
			ecmd->lp_advertising = 0;
		}

		/* the negotiated result is the intersection of what we
		 * advertised and what the partner advertised */
		nego = ecmd->advertising & ecmd->lp_advertising;

		if (nego & (ADVERTISED_1000baseT_Full |
			    ADVERTISED_1000baseT_Half)) {
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full);
		} else if (nego & (ADVERTISED_100baseT_Full |
				   ADVERTISED_100baseT_Half)) {
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full);
		} else {
			ethtool_cmd_speed_set(ecmd, SPEED_10);
			ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full);
		}
	} else {
		/* autoneg disabled: speed/duplex come straight from BMCR.
		 * SPEED1000 is only honoured when SPEED100 is clear, per
		 * the BMCR speed-selection bit encoding. */
		ecmd->autoneg = AUTONEG_DISABLE;

		ethtool_cmd_speed_set(ecmd,
				      ((bmcr & BMCR_SPEED1000 &&
					(bmcr & BMCR_SPEED100) == 0) ?
				       SPEED_1000 :
				       ((bmcr & BMCR_SPEED100) ?
					SPEED_100 : SPEED_10)));
		ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	mii->full_duplex = ecmd->duplex;

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}

/**
 * mii_ethtool_sset - set settings that are specified in @ecmd
 * @mii: MII interface
 * @ecmd: requested ethtool_cmd
 *
 * Validates the request, then either programs the advertisement
 * registers and restarts autonegotiation, or forces speed/duplex
 * directly in BMCR.
 *
 * Returns 0 for success, negative on error.
 */
int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
	struct net_device *dev = mii->dev;
	u32 speed = ethtool_cmd_speed(ecmd);

	if (speed != SPEED_10 &&
	    speed != SPEED_100 &&
	    speed != SPEED_1000)
		return -EINVAL;
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != mii->phy_id)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if ((speed == SPEED_1000) && (!mii->supports_gmii))
		return -EINVAL;

	/* ignore supported, maxtxpkt, maxrxpkt */

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 bmcr, advert, tmp;
		u32 advert2 = 0, tmp2 = 0;

		/* must advertise at least one mode */
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full |
					  ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full)) == 0)
			return -EINVAL;

		/* advertise only what has been requested */
		advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
		tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (mii->supports_gmii) {
			advert2 = mii->mdio_read(dev, mii->phy_id,
						 MII_CTRL1000);
			tmp2 = advert2 &
				~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}
		tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);

		if (mii->supports_gmii)
			tmp2 |= ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
		/* only touch the registers when something changed */
		if (advert != tmp) {
			mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
			mii->advertising = tmp;
		}
		if ((mii->supports_gmii) && (advert2 != tmp2))
			mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);

		/* turn on autonegotiation, and force a renegotiate */
		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);

		mii->force_media = 0;
	} else {
		u32 bmcr, tmp;

		/* turn off auto negotiation, set speed and duplexity */
		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
		tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
			       BMCR_SPEED1000 | BMCR_FULLDPLX);
		if (speed == SPEED_1000)
			tmp |= BMCR_SPEED1000;
		else if (speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (ecmd->duplex == DUPLEX_FULL) {
			tmp |= BMCR_FULLDPLX;
			mii->full_duplex = 1;
		} else
			mii->full_duplex = 0;
		if (bmcr != tmp)
			mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);

		mii->force_media = 1;
	}
	return 0;
}

/**
 * mii_check_gmii_support - check if the MII supports Gb interfaces
 * @mii: the MII interface
 *
 * Returns 1 when the PHY's extended status register advertises a
 * 1000BASE-T mode, 0 otherwise.
 */
int mii_check_gmii_support(struct mii_if_info *mii)
{
	int reg;

	reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
	if (reg & BMSR_ESTATEN) {
		reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS);
		if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF))
			return 1;
	}

	return 0;
}

/**
 * mii_link_ok - is link status up/ok
 * @mii: the MII interface
 *
 * Returns 1 if the MII reports link status up/ok, 0 otherwise.
 */
int mii_link_ok (struct mii_if_info *mii)
{
	/* first, a dummy read, needed to latch some MII phys */
	mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
	if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
		return 1;
	return 0;
}

/**
 * mii_nway_restart - restart NWay (autonegotiation) for this interface
 * @mii: the MII interface
 *
 * Returns 0 on success, negative on error.
 */
int mii_nway_restart (struct mii_if_info *mii)
{
	int bmcr;
	int r = -EINVAL;

	/* if autoneg is off, it's an error */
	bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE) {
		bmcr |= BMCR_ANRESTART;
		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
		r = 0;
	}

	return r;
}

/**
 * mii_check_link - check MII link status
 * @mii: MII interface
 *
 * If the link status changed (previous != current), call
 * netif_carrier_on() if current link status is Up or call
 * netif_carrier_off() if current link status is Down.
 */
void mii_check_link (struct mii_if_info *mii)
{
	int cur_link = mii_link_ok(mii);
	int prev_link = netif_carrier_ok(mii->dev);

	if (cur_link && !prev_link)
		netif_carrier_on(mii->dev);
	else if (prev_link && !cur_link)
		netif_carrier_off(mii->dev);
}

/**
 * mii_check_media - check the MII interface for a duplex change
 * @mii: the MII interface
 * @ok_to_print: OK to print link up/down messages
 * @init_media: OK to save duplex mode in @mii
 *
 * Returns 1 if the duplex mode changed, 0 if not.
 * If the media type is forced, always returns 0.
 */
unsigned int mii_check_media (struct mii_if_info *mii,
			      unsigned int ok_to_print,
			      unsigned int init_media)
{
	unsigned int old_carrier, new_carrier;
	int advertise, lpa, media, duplex;
	int lpa2 = 0;

	/* if forced media, go no further */
	if (mii->force_media)
		return 0; /* duplex did not change */

	/* check current and old link status */
	old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
	new_carrier = (unsigned int) mii_link_ok(mii);

	/* if carrier state did not change, this is a "bounce",
	 * just exit as everything is already set correctly
	 */
	if ((!init_media) && (old_carrier == new_carrier))
		return 0; /* duplex did not change */

	/* no carrier, nothing much to do */
	if (!new_carrier) {
		netif_carrier_off(mii->dev);
		if (ok_to_print)
			netdev_info(mii->dev, "link down\n");
		return 0; /* duplex did not change */
	}

	/*
	 * we have carrier, see who's on the other end
	 */
	netif_carrier_on(mii->dev);

	/* get MII advertise and LPA values */
	if ((!init_media) && (mii->advertising))
		advertise = mii->advertising;
	else {
		advertise = mii->mdio_read(mii->dev, mii->phy_id,
					   MII_ADVERTISE);
		mii->advertising = advertise;
	}
	lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
	if (mii->supports_gmii)
		lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);

	/* figure out media and duplex from advertise and LPA values.
	 * NOTE(review): for gigabit only the partner's 1000FULL status
	 * bit is consulted here, not our own CTRL1000 advertisement —
	 * confirm this matches the PHY's negotiated result. */
	media = mii_nway_result(lpa & advertise);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	if (lpa2 & LPA_1000FULL)
		duplex = 1;

	if (ok_to_print)
		netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
			    lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
			    media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
			    100 : 10,
			    duplex ? "full" : "half",
			    lpa);

	if ((init_media) || (mii->full_duplex != duplex)) {
		mii->full_duplex = duplex;
		return 1; /* duplex changed */
	}

	return 0; /* duplex did not change */
}

/**
 * generic_mii_ioctl - main MII ioctl interface
 * @mii_if: the MII interface
 * @mii_data: MII ioctl data structure
 * @cmd: MII ioctl command
 * @duplex_chg_out: pointer to @duplex_changed status if there was no
 * ioctl error
 *
 * Handles SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG; on register writes it
 * shadows force_media, full_duplex and the cached advertisement so the
 * library's state tracks what userspace programmed.
 *
 * Returns 0 on success, negative on error.
 */
int generic_mii_ioctl(struct mii_if_info *mii_if,
		      struct mii_ioctl_data *mii_data, int cmd,
		      unsigned int *duplex_chg_out)
{
	int rc = 0;
	unsigned int duplex_changed = 0;

	if (duplex_chg_out)
		*duplex_chg_out = 0;

	mii_data->phy_id &= mii_if->phy_id_mask;
	mii_data->reg_num &= mii_if->reg_num_mask;

	switch(cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = mii_if->phy_id;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out =
			mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
					  mii_data->reg_num);
		break;

	case SIOCSMIIREG: {
		u16 val = mii_data->val_in;

		if (mii_data->phy_id == mii_if->phy_id) {
			switch(mii_data->reg_num) {
			case MII_BMCR: {
				unsigned int new_duplex = 0;
				if (val & (BMCR_RESET|BMCR_ANENABLE))
					mii_if->force_media = 0;
				else
					mii_if->force_media = 1;
				if (mii_if->force_media &&
				    (val & BMCR_FULLDPLX))
					new_duplex = 1;
				if (mii_if->full_duplex != new_duplex) {
					duplex_changed = 1;
					mii_if->full_duplex = new_duplex;
				}
				break;
			}
			case MII_ADVERTISE:
				mii_if->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
				   mii_data->reg_num, val);
		break;
	}

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
		*duplex_chg_out = 1;

	return rc;
}

MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION ("MII hardware support library");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mii_link_ok);
EXPORT_SYMBOL(mii_nway_restart);
EXPORT_SYMBOL(mii_ethtool_gset);
EXPORT_SYMBOL(mii_ethtool_sset);
EXPORT_SYMBOL(mii_check_link);
EXPORT_SYMBOL(mii_check_media);
EXPORT_SYMBOL(mii_check_gmii_support);
EXPORT_SYMBOL(generic_mii_ioctl);
gpl-2.0
TheEdge-/Leaping_Lemur_kernel
drivers/input/touchscreen/wm9713.c
8467
12515
/*
 * wm9713.c  --  Codec touch driver for Wolfson WM9713 AC97 Codec.
 *
 * Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC.
 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
 * Parts Copyright : Ian Molton <spyro@f2s.com>
 *                   Andrew Zabolotny <zap@homelink.ru>
 *                   Russell King <rmk@arm.linux.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/wm97xx.h>

#define TS_NAME			"wm97xx"
#define WM9713_VERSION		"1.00"
#define DEFAULT_PRESSURE	0xb0c0

/*
 * Module parameters
 */

/*
 * Set internal pull up for pen detect.
 *
 * Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
 * i.e. pull up resistance = 64k Ohms / rpu.
 *
 * Adjust this value if you are having problems with pen detect not
 * detecting any down event.
 */
static int rpu = 8;
module_param(rpu, int, 0);
MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");

/*
 * Set current used for pressure measurement.
 *
 * Set pil = 2 to use 400uA
 *     pil = 1 to use 200uA and
 *     pil = 0 to disable pressure measurement.
 *
 * This is used to increase the range of values returned by the adc
 * when measureing touchpanel pressure.
 */
static int pil;
module_param(pil, int, 0);
MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");

/*
 * Set threshold for pressure measurement.
 *
 * Pen down pressure below threshold is ignored.
 */
static int pressure = DEFAULT_PRESSURE & 0xfff;
module_param(pressure, int, 0);
MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");

/*
 * Set adc sample delay.
 *
 * For accurate touchpanel measurements, some settling time may be
 * required between the switch matrix applying a voltage across the
 * touchpanel plate and the ADC sampling the signal.
 *
 * This delay can be set by setting delay = n, where n is the array
 * position of the delay in the array delay_table below.
 * Long delays > 1ms are supported for completeness, but are not
 * recommended.
 */
static int delay = 4;
module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "Set adc sample delay.");

/*
 * Set five_wire = 1 to use a 5 wire touchscreen.
 *
 * NOTE: Five wire mode does not allow for readback of pressure.
 */
static int five_wire;
module_param(five_wire, int, 0);
MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");

/*
 * Set adc mask function.
 *
 * Sources of glitch noise, such as signals driving an LCD display, may feed
 * through to the touch screen plates and affect measurement accuracy. In
 * order to minimise this, a signal may be applied to the MASK pin to delay or
 * synchronise the sampling.
 *
 * 0 = No delay or sync
 * 1 = High on pin stops conversions
 * 2 = Edge triggered, edge on pin delays conversion by delay param (above)
 * 3 = Edge triggered, edge on pin starts conversion after delay param
 */
static int mask;
module_param(mask, int, 0);
MODULE_PARM_DESC(mask, "Set adc mask function.");

/*
 * Coordinate Polling Enable.
 *
 * Set to 1 to enable coordinate polling. e.g. x,y[,p] is sampled together
 * for every poll.
 */
static int coord;
module_param(coord, int, 0);
MODULE_PARM_DESC(coord, "Polling coordinate mode");

/*
 * ADC sample delay times in uS
 */
static const int delay_table[] = {
	21,	/* 1 AC97 Link frames */
	42,	/* 2 */
	84,	/* 4 */
	167,	/* 8 */
	333,	/* 16 */
	667,	/* 32 */
	1000,	/* 48 */
	1333,	/* 64 */
	2000,	/* 96 */
	2667,	/* 128 */
	3333,	/* 160 */
	4000,	/* 192 */
	4667,	/* 224 */
	5333,	/* 256 */
	6000,	/* 288 */
	0	/* No delay, switch matrix always on */
};

/*
 * Delay after issuing a POLL command.
 *
 * The delay is 3 AC97 link frames + the touchpanel settling delay
 */
static inline void poll_delay(int d)
{
	udelay(3 * AC97_LINK_FRAME + delay_table[d]);
}

/*
 * set up the physical settings of the WM9713: builds the three
 * digitiser configuration registers from the module parameters and
 * writes them out.
 */
static void wm9713_phy_init(struct wm97xx *wm)
{
	u16 dig1 = 0, dig2, dig3;

	/* default values */
	dig2 = WM97XX_DELAY(4) | WM97XX_SLT(5);
	dig3 = WM9712_RPU(1);

	/* rpu */
	if (rpu) {
		dig3 &= 0xffc0;
		dig3 |= WM9712_RPU(rpu);
		dev_info(wm->dev, "setting pen detect pull-up to %d Ohms\n",
			 64000 / rpu);
	}

	/* Five wire panel? */
	if (five_wire) {
		dig3 |= WM9713_45W;
		dev_info(wm->dev, "setting 5-wire touchscreen mode.");

		/* pressure readback is unavailable in 5-wire mode */
		if (pil) {
			dev_warn(wm->dev,
				 "Pressure measurement not supported in 5 "
				 "wire mode, disabling\n");
			pil = 0;
		}
	}

	/* touchpanel pressure */
	if (pil == 2) {
		dig3 |= WM9712_PIL;
		dev_info(wm->dev,
			 "setting pressure measurement current to 400uA.");
	} else if (pil)
		dev_info(wm->dev,
			 "setting pressure measurement current to 200uA.");
	if (!pil)
		pressure = 0;

	/* sample settling delay: clamp an out-of-range module param back
	 * to the default index */
	if (delay < 0 || delay > 15) {
		dev_info(wm->dev, "supplied delay out of range.");
		delay = 4;
		dev_info(wm->dev, "setting adc sample delay to %d u Secs.",
			 delay_table[delay]);
	}
	dig2 &= 0xff0f;
	dig2 |= WM97XX_DELAY(delay);

	/* mask */
	dig3 |= ((mask & 0x3) << 4);
	if (coord)
		dig3 |= WM9713_WAIT;

	wm->misc = wm97xx_reg_read(wm, 0x5a);

	wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
	wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
	wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
	wm97xx_reg_write(wm, AC97_GPIO_STICKY, 0x0);
}

/*
 * Power the digitiser up or down.  Bit 15 of AC97_EXTENDED_MID gates
 * the digitiser power; pen-detect (PRP_DET_DIG) is toggled with it.
 */
static void wm9713_dig_enable(struct wm97xx *wm, int enable)
{
	u16 val;

	if (enable) {
		val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
		wm97xx_reg_write(wm, AC97_EXTENDED_MID, val & 0x7fff);
		wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] |
				 WM97XX_PRP_DET_DIG);
		wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
	} else {
		wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] &
				 ~WM97XX_PRP_DET_DIG);
		val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
		wm97xx_reg_write(wm, AC97_EXTENDED_MID, val | 0x8000);
	}
}

/* restore the digitiser registers saved by wm9713_aux_prepare() */
static void wm9713_dig_restore(struct wm97xx *wm)
{
	wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig_save[0]);
	wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig_save[1]);
	wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig_save[2]);
}

/* save digitiser state and switch to AUX-measurement configuration */
static void wm9713_aux_prepare(struct wm97xx *wm)
{
	memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
	wm97xx_reg_write(wm, AC97_WM9713_DIG1, 0);
	wm97xx_reg_write(wm, AC97_WM9713_DIG2, 0);
	wm97xx_reg_write(wm, AC97_WM9713_DIG3, WM97XX_PRP_DET_DIG);
}

/* is pen-detect-enable (PDEN) set in the cached DIG3 register? */
static inline int is_pden(struct wm97xx *wm)
{
	return wm->dig[2] & WM9713_PDEN;
}

/*
 * Read a sample from the WM9713 adc in polling mode.
 */
static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
	u16 dig1;
	int timeout = 5 * delay;
	bool wants_pen = adcsel & WM97XX_PEN_DOWN;

	if (wants_pen && !wm->pen_probably_down) {
		u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
		if (!(data & WM97XX_PEN_DOWN))
			return RC_PENUP;
		wm->pen_probably_down = 1;
	}

	/* set up digitiser */
	dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
	dig1 &= ~WM9713_ADCSEL_MASK;
	/* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
	dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);

	if (wm->mach_ops && wm->mach_ops->pre_sample)
		wm->mach_ops->pre_sample(adcsel);
	wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);

	/* wait 3 AC97 time slots + delay for conversion */
	poll_delay(delay);

	/* wait for POLL to go low */
	while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) &&
	       timeout) {
		udelay(AC97_LINK_FRAME);
		timeout--;
	}

	if (timeout <= 0) {
		/* If PDEN is set, we can get a timeout when pen goes up */
		if (is_pden(wm))
			wm->pen_probably_down = 0;
		else
			dev_dbg(wm->dev, "adc sample timeout");
		return RC_PENUP;
	}

	*sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
	if (wm->mach_ops && wm->mach_ops->post_sample)
		wm->mach_ops->post_sample(adcsel);

	/* check we have correct sample */
	if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
		dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
			adcsel & WM97XX_ADCSEL_MASK,
			*sample & WM97XX_ADCSEL_MASK);
		return RC_PENUP;
	}

	if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
		wm->pen_probably_down = 0;
		return RC_PENUP;
	}

	return RC_VALID;
}

/*
 * Read a coordinate from the WM9713 adc in polling mode.
 */
static int wm9713_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
{
	u16 dig1;
	int timeout = 5 * delay;

	if (!wm->pen_probably_down) {
		u16 val = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
		if (!(val & WM97XX_PEN_DOWN))
			return RC_PENUP;
		wm->pen_probably_down = 1;
	}

	/* set up digitiser */
	dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
	dig1 &= ~WM9713_ADCSEL_MASK;
	if (pil)
		dig1 |= WM9713_ADCSEL_PRES;

	if (wm->mach_ops && wm->mach_ops->pre_sample)
		wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
	wm97xx_reg_write(wm, AC97_WM9713_DIG1,
			 dig1 | WM9713_POLL | WM9713_COO);

	/* wait 3 AC97 time slots + delay for conversion */
	poll_delay(delay);
	data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
	/* wait for POLL to go low */
	while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) &&
	       timeout) {
		udelay(AC97_LINK_FRAME);
		timeout--;
	}

	if (timeout <= 0) {
		/* If PDEN is set, we can get a timeout when pen goes up */
		if (is_pden(wm))
			wm->pen_probably_down = 0;
		else
			dev_dbg(wm->dev, "adc sample timeout");
		return RC_PENUP;
	}

	/* read back data */
	data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
	if (pil)
		data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
	else
		data->p = DEFAULT_PRESSURE;

	if (wm->mach_ops && wm->mach_ops->post_sample)
		wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);

	/* check we have correct sample */
	if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
		goto err;
	if (pil && !(data->p & WM97XX_ADCSEL_PRES))
		goto err;

	if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
		wm->pen_probably_down = 0;
		return RC_PENUP;
	}
	return RC_VALID;
err:
	return 0;
}

/*
 * Sample the WM9713 touchscreen in polling mode: either a single
 * coordinated x,y[,p] poll, or three separate channel polls.
 */
static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
{
	int rc;

	if (coord) {
		rc = wm9713_poll_coord(wm, data);
		if (rc != RC_VALID)
			return rc;
	} else {
		rc = wm9713_poll_sample(wm,
			WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
		if (rc != RC_VALID)
			return rc;
		rc = wm9713_poll_sample(wm,
			WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
		if (rc != RC_VALID)
			return rc;
		if (pil) {
			rc = wm9713_poll_sample(wm,
				WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
				&data->p);
			if (rc != RC_VALID)
				return rc;
		} else
			data->p = DEFAULT_PRESSURE;
	}
	return RC_VALID;
}

/*
 * Enable WM9713 continuous mode, i.e. touch data is streamed across
 * an AC97 slot
 */
static int wm9713_acc_enable(struct wm97xx *wm, int enable)
{
	u16 dig1, dig2, dig3;
	int ret = 0;

	dig1 = wm->dig[0];
	dig2 = wm->dig[1];
	dig3 = wm->dig[2];

	if (enable) {
		/* continuous mode */
		if (wm->mach_ops->acc_startup &&
		    (ret = wm->mach_ops->acc_startup(wm)) < 0)
			return ret;

		dig1 &= ~WM9713_ADCSEL_MASK;
		dig1 |= WM9713_CTC | WM9713_COO | WM9713_ADCSEL_X |
			WM9713_ADCSEL_Y;
		if (pil)
			dig1 |= WM9713_ADCSEL_PRES;
		dig2 &= ~(WM97XX_DELAY_MASK | WM97XX_SLT_MASK |
			  WM97XX_CM_RATE_MASK);
		dig2 |= WM97XX_SLEN | WM97XX_DELAY(delay) |
			WM97XX_SLT(wm->acc_slot) | WM97XX_RATE(wm->acc_rate);
		dig3 |= WM9713_PDEN;
	} else {
		dig1 &= ~(WM9713_CTC | WM9713_COO);
		dig2 &= ~WM97XX_SLEN;
		dig3 &= ~WM9713_PDEN;
		if (wm->mach_ops->acc_shutdown)
			wm->mach_ops->acc_shutdown(wm);
	}

	wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
	wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
	wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);

	return ret;
}

/* codec operations exported to the wm97xx core */
struct wm97xx_codec_drv wm9713_codec = {
	.id = WM9713_ID2,
	.name = "wm9713",
	.poll_sample = wm9713_poll_sample,
	.poll_touch = wm9713_poll_touch,
	.acc_enable = wm9713_acc_enable,
	.phy_init = wm9713_phy_init,
	.dig_enable = wm9713_dig_enable,
	.dig_restore = wm9713_dig_restore,
	.aux_prepare = wm9713_aux_prepare,
};
EXPORT_SYMBOL_GPL(wm9713_codec);

/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_DESCRIPTION("WM9713 Touch Screen Driver");
MODULE_LICENSE("GPL");
gpl-2.0
Shabbypenguin/Cayman-Island-Kernel
arch/mips/txx9/generic/setup_tx3927.c
9491
3728
/*
 * TX3927 setup routines
 * Based on linux/arch/mips/txx9/jmr3927/setup.c
 *
 * Copyright 2001 MontaVista Software Inc.
 * Copyright (C) 2000-2001 Toshiba Corporation
 * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <asm/mipsregs.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx3927.h>

/* Use on-chip timer 2 as the watchdog. */
void __init tx3927_wdt_init(void)
{
	txx9_wdt_init(TX3927_TMR_REG(2));
}

/*
 * Core TX3927 SoC bring-up: register resources, clocks, CCFG flags,
 * timers, DMA controller, PIO and D-cache mode reporting.
 */
void __init tx3927_setup(void)
{
	int i;
	unsigned int conf;

	txx9_reg_res_init(TX3927_REV_PCODE(), TX3927_REG_BASE,
			  TX3927_REG_SIZE);

	/* SDRAMC,ROMC are configured by PROM */
	for (i = 0; i < 8; i++) {
		if (!(tx3927_romcptr->cr[i] & 0x8))
			continue;	/* disabled */
		txx9_ce_res[i].start = (unsigned long)TX3927_ROMC_BA(i);
		txx9_ce_res[i].end =
			txx9_ce_res[i].start + TX3927_ROMC_SIZE(i) - 1;
		request_resource(&iomem_resource, &txx9_ce_res[i]);
	}

	/* clocks */
	txx9_gbus_clock = txx9_cpu_clock / 2;
	/* change default value to udelay/mdelay take reasonable time */
	loops_per_jiffy = txx9_cpu_clock / HZ / 2;

	/* CCFG */
	/* enable Timeout BusError */
	if (txx9_ccfg_toeon)
		tx3927_ccfgptr->ccfg |= TX3927_CCFG_TOE;

	/* clear BusErrorOnWrite flag */
	tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_BEOW;
	if (read_c0_conf() & TX39_CONF_WBON)
		/* Disable PCI snoop */
		tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_PSNP;
	else
		/* Enable PCI SNOOP - with write through only */
		tx3927_ccfgptr->ccfg |= TX3927_CCFG_PSNP;
	/* do reset on watchdog */
	tx3927_ccfgptr->ccfg |= TX3927_CCFG_WR;

	printk(KERN_INFO "TX3927 -- CRIR:%08lx CCFG:%08lx PCFG:%08lx\n",
	       tx3927_ccfgptr->crir, tx3927_ccfgptr->ccfg,
	       tx3927_ccfgptr->pcfg);

	/* TMR */
	for (i = 0; i < TX3927_NR_TMR; i++)
		txx9_tmr_init(TX3927_TMR_REG(i));

	/* DMA */
	tx3927_dmaptr->mcr = 0;
	for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
		/* reset channel */
		tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
		tx3927_dmaptr->ch[i].ccr = 0;
	}
	/* enable DMA */
#ifdef __BIG_ENDIAN
	tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN;
#else
	tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN | TX3927_DMA_MCR_LE;
#endif

	/* PIO */
	__raw_writel(0, &tx3927_pioptr->maskcpu);
	__raw_writel(0, &tx3927_pioptr->maskext);
	txx9_gpio_init(TX3927_PIO_REG, 0, 16);

	/* report the D-cache write policy selected by the c0 config */
	conf = read_c0_conf();
	if (conf & TX39_CONF_DCE) {
		if (!(conf & TX39_CONF_WBON))
			pr_info("TX3927 D-Cache WriteThrough.\n");
		else if (!(conf & TX39_CONF_CWFON))
			pr_info("TX3927 D-Cache WriteBack.\n");
		else
			pr_info("TX3927 D-Cache WriteBack (CWF) .\n");
	}
}

/* Register one on-chip timer as clockevent and one as clocksource. */
void __init tx3927_time_init(unsigned int evt_tmrnr, unsigned int src_tmrnr)
{
	txx9_clockevent_init(TX3927_TMR_REG(evt_tmrnr),
			     TXX9_IRQ_BASE + TX3927_IR_TMR(evt_tmrnr),
			     TXX9_IMCLK);
	txx9_clocksource_init(TX3927_TMR_REG(src_tmrnr), TXX9_IMCLK);
}

/* Register both serial ports; cts_mask selects which get CTS handling. */
void __init tx3927_sio_init(unsigned int sclk, unsigned int cts_mask)
{
	int i;

	for (i = 0; i < 2; i++)
		txx9_sio_init(TX3927_SIO_REG(i),
			      TXX9_IRQ_BASE + TX3927_IR_SIO(i),
			      i, sclk, (1 << i) & cts_mask);
}

/* Register a physmap MTD device for ROM channel @ch, if enabled. */
void __init tx3927_mtd_init(int ch)
{
	struct physmap_flash_data pdata = {
		.width = TX3927_ROMC_WIDTH(ch) / 8,
	};
	unsigned long start = txx9_ce_res[ch].start;
	unsigned long size = txx9_ce_res[ch].end - start + 1;

	if (!(tx3927_romcptr->cr[ch] & 0x8))
		return;	/* disabled */
	txx9_physmap_flash_init(ch, start, size, &pdata);
}
gpl-2.0
MoKee/android_kernel_samsung_t1
arch/mips/txx9/generic/setup_tx3927.c
9491
3728
/*
 * TX3927 setup routines
 * Based on linux/arch/mips/txx9/jmr3927/setup.c
 *
 * Copyright 2001 MontaVista Software Inc.
 * Copyright (C) 2000-2001 Toshiba Corporation
 * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <asm/mipsregs.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx3927.h>

/* Use on-chip timer 2 as the watchdog. */
void __init tx3927_wdt_init(void)
{
	txx9_wdt_init(TX3927_TMR_REG(2));
}

/*
 * Core TX3927 SoC bring-up: register resources, clocks, CCFG flags,
 * timers, DMA controller, PIO and D-cache mode reporting.
 */
void __init tx3927_setup(void)
{
	int i;
	unsigned int conf;

	txx9_reg_res_init(TX3927_REV_PCODE(), TX3927_REG_BASE,
			  TX3927_REG_SIZE);

	/* SDRAMC,ROMC are configured by PROM */
	for (i = 0; i < 8; i++) {
		if (!(tx3927_romcptr->cr[i] & 0x8))
			continue;	/* disabled */
		txx9_ce_res[i].start = (unsigned long)TX3927_ROMC_BA(i);
		txx9_ce_res[i].end =
			txx9_ce_res[i].start + TX3927_ROMC_SIZE(i) - 1;
		request_resource(&iomem_resource, &txx9_ce_res[i]);
	}

	/* clocks */
	txx9_gbus_clock = txx9_cpu_clock / 2;
	/* change default value to udelay/mdelay take reasonable time */
	loops_per_jiffy = txx9_cpu_clock / HZ / 2;

	/* CCFG */
	/* enable Timeout BusError */
	if (txx9_ccfg_toeon)
		tx3927_ccfgptr->ccfg |= TX3927_CCFG_TOE;

	/* clear BusErrorOnWrite flag */
	tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_BEOW;
	if (read_c0_conf() & TX39_CONF_WBON)
		/* Disable PCI snoop */
		tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_PSNP;
	else
		/* Enable PCI SNOOP - with write through only */
		tx3927_ccfgptr->ccfg |= TX3927_CCFG_PSNP;
	/* do reset on watchdog */
	tx3927_ccfgptr->ccfg |= TX3927_CCFG_WR;

	printk(KERN_INFO "TX3927 -- CRIR:%08lx CCFG:%08lx PCFG:%08lx\n",
	       tx3927_ccfgptr->crir, tx3927_ccfgptr->ccfg,
	       tx3927_ccfgptr->pcfg);

	/* TMR */
	for (i = 0; i < TX3927_NR_TMR; i++)
		txx9_tmr_init(TX3927_TMR_REG(i));

	/* DMA */
	tx3927_dmaptr->mcr = 0;
	for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
		/* reset channel */
		tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
		tx3927_dmaptr->ch[i].ccr = 0;
	}
	/* enable DMA */
#ifdef __BIG_ENDIAN
	tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN;
#else
	tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN | TX3927_DMA_MCR_LE;
#endif

	/* PIO */
	__raw_writel(0, &tx3927_pioptr->maskcpu);
	__raw_writel(0, &tx3927_pioptr->maskext);
	txx9_gpio_init(TX3927_PIO_REG, 0, 16);

	/* report the D-cache write policy selected by the c0 config */
	conf = read_c0_conf();
	if (conf & TX39_CONF_DCE) {
		if (!(conf & TX39_CONF_WBON))
			pr_info("TX3927 D-Cache WriteThrough.\n");
		else if (!(conf & TX39_CONF_CWFON))
			pr_info("TX3927 D-Cache WriteBack.\n");
		else
			pr_info("TX3927 D-Cache WriteBack (CWF) .\n");
	}
}

/* Register one on-chip timer as clockevent and one as clocksource. */
void __init tx3927_time_init(unsigned int evt_tmrnr, unsigned int src_tmrnr)
{
	txx9_clockevent_init(TX3927_TMR_REG(evt_tmrnr),
			     TXX9_IRQ_BASE + TX3927_IR_TMR(evt_tmrnr),
			     TXX9_IMCLK);
	txx9_clocksource_init(TX3927_TMR_REG(src_tmrnr), TXX9_IMCLK);
}

/* Register both serial ports; cts_mask selects which get CTS handling. */
void __init tx3927_sio_init(unsigned int sclk, unsigned int cts_mask)
{
	int i;

	for (i = 0; i < 2; i++)
		txx9_sio_init(TX3927_SIO_REG(i),
			      TXX9_IRQ_BASE + TX3927_IR_SIO(i),
			      i, sclk, (1 << i) & cts_mask);
}

/* Register a physmap MTD device for ROM channel @ch, if enabled. */
void __init tx3927_mtd_init(int ch)
{
	struct physmap_flash_data pdata = {
		.width = TX3927_ROMC_WIDTH(ch) / 8,
	};
	unsigned long start = txx9_ce_res[ch].start;
	unsigned long size = txx9_ce_res[ch].end - start + 1;

	if (!(tx3927_romcptr->cr[ch] & 0x8))
		return;	/* disabled */
	txx9_physmap_flash_init(ch, start, size, &pdata);
}
gpl-2.0
akhilnarang/ThugLife_falcon
arch/mips/dec/ioasic-irq.c
9747
1717
/*
 * DEC I/O ASIC interrupts.
 *
 * Copyright (c) 2002, 2003  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>

/* First Linux IRQ number assigned to the I/O ASIC; the bit position in
 * the SIMR register is (irq - ioasic_irq_base). */
static int ioasic_irq_base;

/* Enable an interrupt line by setting its bit in the interrupt mask. */
static void unmask_ioasic_irq(struct irq_data *d)
{
	u32 simr;

	simr = ioasic_read(IO_REG_SIMR);
	simr |= (1 << (d->irq - ioasic_irq_base));
	ioasic_write(IO_REG_SIMR, simr);
}

/* Disable an interrupt line by clearing its bit in the interrupt mask. */
static void mask_ioasic_irq(struct irq_data *d)
{
	u32 simr;

	simr = ioasic_read(IO_REG_SIMR);
	simr &= ~(1 << (d->irq - ioasic_irq_base));
	ioasic_write(IO_REG_SIMR, simr);
}

/* Acknowledge = mask + flush the write with an I/O barrier. */
static void ack_ioasic_irq(struct irq_data *d)
{
	mask_ioasic_irq(d);
	fast_iob();
}

static struct irq_chip ioasic_irq_type = {
	.name = "IO-ASIC",
	.irq_ack = ack_ioasic_irq,
	.irq_mask = mask_ioasic_irq,
	.irq_mask_ack = ack_ioasic_irq,
	.irq_unmask = unmask_ioasic_irq,
};

/* Same callbacks as above; separate chip so DMA lines are identifiable
 * by name (they get no default flow handler in init below). */
static struct irq_chip ioasic_dma_irq_type = {
	.name = "IO-ASIC-DMA",
	.irq_ack = ack_ioasic_irq,
	.irq_mask = mask_ioasic_irq,
	.irq_mask_ack = ack_ioasic_irq,
	.irq_unmask = unmask_ioasic_irq,
};

/*
 * Register the I/O ASIC interrupt lines starting at @base: ordinary
 * lines first (level-triggered), then the DMA lines.
 */
void __init init_ioasic_irqs(int base)
{
	int i;

	/* Mask interrupts. */
	ioasic_write(IO_REG_SIMR, 0);
	fast_iob();

	for (i = base; i < base + IO_INR_DMA; i++)
		irq_set_chip_and_handler(i, &ioasic_irq_type,
					 handle_level_irq);
	for (; i < base + IO_IRQ_LINES; i++)
		irq_set_chip(i, &ioasic_dma_irq_type);

	ioasic_irq_base = base;
}
gpl-2.0
pershoot/kernel-2638
drivers/hid/hid-ezkey.c
9747
2453
/*
 * HID driver for some ezkey "special" devices
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2007 Jiri Kosina
 * Copyright (c) 2007 Paul Walmsley
 * Copyright (c) 2008 Jiri Slaby
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* shorthands for mapping a HID usage to a relative axis / key event */
#define ez_map_rel(c)	hid_map_usage(hi, usage, bit, max, EV_REL, (c))
#define ez_map_key(c)	hid_map_usage(hi, usage, bit, max, EV_KEY, (c))

/*
 * Remap the vendor-specific consumer-page usages of the BTC 8193
 * keyboard.  Returns 1 when the usage was mapped here, 0 to fall back
 * to the generic mapping.
 */
static int ez_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	switch (usage->hid & HID_USAGE) {
	case 0x230:
		ez_map_key(BTN_MOUSE);
		break;
	case 0x231:
		ez_map_rel(REL_WHEEL);
		break;
	/*
	 * this keyboard has a scrollwheel implemented in
	 * totally broken way. We map this usage temporarily
	 * to HWHEEL and handle it in the event quirk handler
	 */
	case 0x232:
		ez_map_rel(REL_HWHEEL);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * Event quirk: the temporary HWHEEL mapping above is converted back to
 * a (sign-inverted) REL_WHEEL event here.  Returns 1 when the event
 * was consumed, 0 to let the core process it normally.
 */
static int ez_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type)
		return 0;

	/* handle the temporary quirky mapping to HWHEEL */
	if (usage->type == EV_REL && usage->code == REL_HWHEEL) {
		struct input_dev *input = field->hidinput->input;
		input_event(input, usage->type, REL_WHEEL, -value);
		return 1;
	}

	return 0;
}

static const struct hid_device_id ez_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
	{ }
};
MODULE_DEVICE_TABLE(hid, ez_devices);

static struct hid_driver ez_driver = {
	.name = "ezkey",
	.id_table = ez_devices,
	.input_mapping = ez_input_mapping,
	.event = ez_event,
};

static int __init ez_init(void)
{
	return hid_register_driver(&ez_driver);
}

static void __exit ez_exit(void)
{
	hid_unregister_driver(&ez_driver);
}

module_init(ez_init);
module_exit(ez_exit);
MODULE_LICENSE("GPL");
gpl-2.0
gunine/htc-rider-univ-kernel
net/netfilter/nf_sockopt.c
12307
3951
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/mutex.h>
#include <net/sock.h>

#include "nf_internals.h"

/* Sockopts only registered and called from user context, so
   net locking would be overkill. Also, [gs]etsockopt calls may
   sleep. */
static DEFINE_MUTEX(nf_sockopt_mutex);
static LIST_HEAD(nf_sockopts);

/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
	return max1 > min2 && min1 < max2;
}

/* Functions to register sockopt ranges (exclusive). */
/*
 * Add @reg to the global list, rejecting it with -EBUSY if its
 * set or get option range collides with an already-registered
 * handler for the same protocol family.
 */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
	struct nf_sockopt_ops *ops;
	int ret = 0;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each_entry(ops, &nf_sockopts, list) {
		if (ops->pf == reg->pf
		    && (overlap(ops->set_optmin, ops->set_optmax,
				reg->set_optmin, reg->set_optmax)
			|| overlap(ops->get_optmin, ops->get_optmax,
				   reg->get_optmin, reg->get_optmax))) {
			NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
				ops->set_optmin, ops->set_optmax,
				ops->get_optmin, ops->get_optmax,
				reg->set_optmin, reg->set_optmax,
				reg->get_optmin, reg->get_optmax);
			ret = -EBUSY;
			goto out;
		}
	}

	list_add(&reg->list, &nf_sockopts);
out:
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}
EXPORT_SYMBOL(nf_register_sockopt);

void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
	mutex_lock(&nf_sockopt_mutex);
	list_del(&reg->list);
	mutex_unlock(&nf_sockopt_mutex);
}
EXPORT_SYMBOL(nf_unregister_sockopt);

/*
 * Look up the handler for (@pf, @val); @get selects whether the get
 * or set range is matched.  On success a module reference on the
 * owner is held (dropped by the caller via module_put()); otherwise
 * ERR_PTR(-ENOPROTOOPT) or ERR_PTR(-EINTR) is returned.
 */
static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf,
		int val, int get)
{
	struct nf_sockopt_ops *ops;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(ops, &nf_sockopts, list) {
		if (ops->pf == pf) {
			if (!try_module_get(ops->owner))
				goto out_nosup;

			if (get) {
				if (val >= ops->get_optmin &&
				    val < ops->get_optmax)
					goto out;
			} else {
				if (val >= ops->set_optmin &&
				    val < ops->set_optmax)
					goto out;
			}
			/* range mismatch: drop the ref taken above */
			module_put(ops->owner);
		}
	}
out_nosup:
	ops = ERR_PTR(-ENOPROTOOPT);
out:
	mutex_unlock(&nf_sockopt_mutex);
	return ops;
}

/* Call get/setsockopt() */
static int nf_sockopt(struct sock *sk, u_int8_t pf, int val,
		      char __user *opt, int *len, int get)
{
	struct nf_sockopt_ops *ops;
	int ret;

	ops = nf_sockopt_find(sk, pf, val, get);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	if (get)
		ret = ops->get(sk, val, opt, len);
	else
		ret = ops->set(sk, val, opt, *len);

	module_put(ops->owner);
	return ret;
}

int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
		  unsigned int len)
{
	return nf_sockopt(sk, pf, val, opt, &len, 0);
}
EXPORT_SYMBOL(nf_setsockopt);

int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
		  int *len)
{
	return nf_sockopt(sk, pf, val, opt, len, 1);
}
EXPORT_SYMBOL(nf_getsockopt);

#ifdef CONFIG_COMPAT
/* Compat variant: prefers the handler's compat_{get,set} hook if present. */
static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val,
			     char __user *opt, int *len, int get)
{
	struct nf_sockopt_ops *ops;
	int ret;

	ops = nf_sockopt_find(sk, pf, val, get);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	if (get) {
		if (ops->compat_get)
			ret = ops->compat_get(sk, val, opt, len);
		else
			ret = ops->get(sk, val, opt, len);
	} else {
		if (ops->compat_set)
			ret = ops->compat_set(sk, val, opt, *len);
		else
			ret = ops->set(sk, val, opt, *len);
	}

	module_put(ops->owner);
	return ret;
}

int compat_nf_setsockopt(struct sock *sk, u_int8_t pf,
		int val, char __user *opt, unsigned int len)
{
	return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
}
EXPORT_SYMBOL(compat_nf_setsockopt);

int compat_nf_getsockopt(struct sock *sk, u_int8_t pf,
		int val, char __user *opt, int *len)
{
	return compat_nf_sockopt(sk, pf, val, opt, len, 1);
}
EXPORT_SYMBOL(compat_nf_getsockopt);
#endif
gpl-2.0
SlimRoms/kernel_samsung_aries
drivers/char/scx200_gpio.c
12819
3128
/* linux/drivers/char/scx200_gpio.c

   National Semiconductor SCx200 GPIO driver.  Allows a user space
   process to play with the GPIO pins.

   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/types.h>
#include <linux/cdev.h>

#include <linux/scx200_gpio.h>
#include <linux/nsc_gpio.h>

#define DRVNAME "scx200_gpio"

static struct platform_device *pdev;

MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver");
MODULE_LICENSE("GPL");

static int major = 0;		/* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");

#define MAX_PINS 32		/* 64 later, when known ok */

/* Pin-level accessors handed to the shared nsc_gpio char-dev code. */
struct nsc_gpio_ops scx200_gpio_ops = {
	.owner		= THIS_MODULE,
	.gpio_config	= scx200_gpio_configure,
	.gpio_dump	= nsc_gpio_dump,
	.gpio_get	= scx200_gpio_get,
	.gpio_set	= scx200_gpio_set,
	.gpio_change	= scx200_gpio_change,
	.gpio_current	= scx200_gpio_current
};
EXPORT_SYMBOL_GPL(scx200_gpio_ops);

/*
 * Open a per-pin device node.  The minor number selects the pin;
 * reject out-of-range minors before handing the ops table to the
 * shared nsc_gpio read/write code via private_data.
 */
static int scx200_gpio_open(struct inode *inode, struct file *file)
{
	unsigned m = iminor(inode);

	/* validate the minor before publishing any state on the file */
	if (m >= MAX_PINS)
		return -EINVAL;

	file->private_data = &scx200_gpio_ops;
	return nonseekable_open(inode, file);
}

static int scx200_gpio_release(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations scx200_gpio_fileops = {
	.owner   = THIS_MODULE,
	.write   = nsc_gpio_write,
	.read    = nsc_gpio_read,
	.open    = scx200_gpio_open,
	.release = scx200_gpio_release,
	.llseek  = no_llseek,
};

static struct cdev scx200_gpio_cdev;  /* use 1 cdev for all pins */

/*
 * Probe for the SCx200 GPIO block, register a platform device (for
 * dev_dbg), claim a chrdev region (static or dynamic major) and add
 * the cdev.  All failures unwind fully and return a negative errno.
 */
static int __init scx200_gpio_init(void)
{
	int rc;
	dev_t devid;

	if (!scx200_gpio_present()) {
		printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n");
		return -ENODEV;
	}

	/* support dev_dbg() with pdev->dev */
	pdev = platform_device_alloc(DRVNAME, 0);
	if (!pdev)
		return -ENOMEM;

	rc = platform_device_add(pdev);
	if (rc)
		goto undo_malloc;

	/* nsc_gpio uses dev_dbg(), so needs this */
	scx200_gpio_ops.dev = &pdev->dev;

	if (major) {
		devid = MKDEV(major, 0);
		rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio");
	} else {
		rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio");
		major = MAJOR(devid);
	}
	if (rc < 0) {
		dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
		goto undo_platform_device_add;
	}

	cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops);
	/* cdev_add() can fail; without the cdev no device node works,
	   so propagate the error instead of reporting bogus success */
	rc = cdev_add(&scx200_gpio_cdev, devid, MAX_PINS);
	if (rc) {
		dev_err(&pdev->dev, "SCx200 cdev_add err: %d\n", rc);
		goto undo_chrdev_region;
	}

	return 0; /* succeed */

undo_chrdev_region:
	unregister_chrdev_region(devid, MAX_PINS);
undo_platform_device_add:
	platform_device_del(pdev);
undo_malloc:
	platform_device_put(pdev);

	return rc;
}

static void __exit scx200_gpio_cleanup(void)
{
	cdev_del(&scx200_gpio_cdev);
	/* cdev_put(&scx200_gpio_cdev); */
	unregister_chrdev_region(MKDEV(major, 0), MAX_PINS);
	platform_device_unregister(pdev);
}

module_init(scx200_gpio_init);
module_exit(scx200_gpio_cleanup);
gpl-2.0
StarKissed/android_kernel_htc_mecha
arch/mips/sgi-ip32/crime.c
13843
2833
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001, 2003 Keith M Wesolowski
 * Copyright (C) 2005 Ilya A. Volynets <ilya@total-knowledge.com>
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/mace.h>

/* Global MMIO mappings of the CRIME and MACE ASICs, set up in crime_init(). */
struct sgi_crime __iomem *crime;
struct sgi_mace __iomem *mace;
EXPORT_SYMBOL_GPL(mace);

/*
 * Map the CRIME/MACE register blocks and the MACE PCI I/O window,
 * then report the CRIME chip id and revision.
 */
void __init crime_init(void)
{
	unsigned int id, rev;
	/* field width (hex digits) of an unsigned long for the printk below */
	const int field = 2 * sizeof(unsigned long);

	set_io_port_base((unsigned long) ioremap(MACEPCI_LOW_IO, 0x2000000));
	crime = ioremap(CRIME_BASE, sizeof(struct sgi_crime));
	mace = ioremap(MACE_BASE, sizeof(struct sgi_mace));

	id = crime->id;
	rev = id & CRIME_ID_REV;
	id = (id & CRIME_ID_IDBITS) >> 4;
	printk(KERN_INFO "CRIME id %1x rev %d at 0x%0*lx\n",
	       id, rev, field, (unsigned long) CRIME_BASE);
}

/*
 * Memory-error interrupt: decode the CRIME error status into a flag
 * list on the console, clear the latch, and panic on fatal classes
 * (multiple/hard errors).
 */
irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
{
	unsigned long stat, addr;
	int fatal = 0;

	stat = crime->mem_error_stat & CRIME_MEM_ERROR_STAT_MASK;
	addr = crime->mem_error_addr & CRIME_MEM_ERROR_ADDR_MASK;

	printk("CRIME memory error at 0x%08lx ST 0x%08lx<", addr, stat);

	if (stat & CRIME_MEM_ERROR_INV)
		printk("INV,");
	if (stat & CRIME_MEM_ERROR_ECC) {
		unsigned long ecc_syn =
			crime->mem_ecc_syn & CRIME_MEM_ERROR_ECC_SYN_MASK;
		unsigned long ecc_gen =
			crime->mem_ecc_chk & CRIME_MEM_ERROR_ECC_CHK_MASK;
		printk("ECC,SYN=0x%08lx,GEN=0x%08lx,", ecc_syn, ecc_gen);
	}
	if (stat & CRIME_MEM_ERROR_MULTIPLE) {
		fatal = 1;
		printk("MULTIPLE,");
	}
	if (stat & CRIME_MEM_ERROR_HARD_ERR) {
		fatal = 1;
		printk("HARD,");
	}
	if (stat & CRIME_MEM_ERROR_SOFT_ERR)
		printk("SOFT,");
	if (stat & CRIME_MEM_ERROR_CPU_ACCESS)
		printk("CPU,");
	if (stat & CRIME_MEM_ERROR_VICE_ACCESS)
		printk("VICE,");
	if (stat & CRIME_MEM_ERROR_GBE_ACCESS)
		printk("GBE,");
	if (stat & CRIME_MEM_ERROR_RE_ACCESS)
		printk("RE,REID=0x%02lx,", (stat & CRIME_MEM_ERROR_RE_ID)>>8);
	if (stat & CRIME_MEM_ERROR_MACE_ACCESS)
		printk("MACE,MACEID=0x%02lx,", stat & CRIME_MEM_ERROR_MACE_ID);

	/* writing 0 clears the latched error status */
	crime->mem_error_stat = 0;

	if (fatal) {
		printk("FATAL>\n");
		panic("Fatal memory error.");
	} else
		printk("NONFATAL>\n");

	return IRQ_HANDLED;
}

/*
 * CPU-error interrupt: report the faulting address (hardware stores
 * it shifted right by 2) and status, then clear the latch.
 */
irqreturn_t crime_cpuerr_intr(unsigned int irq, void *dev_id)
{
	unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
	unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;

	addr <<= 2;
	printk("CRIME CPU error at 0x%09lx status 0x%08lx\n", addr, stat);
	crime->cpu_error_stat = 0;

	return IRQ_HANDLED;
}
gpl-2.0
HRTKernel/samsung_exynos_7420
arch/mips/sgi-ip32/crime.c
13843
2833
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001, 2003 Keith M Wesolowski
 * Copyright (C) 2005 Ilya A. Volynets <ilya@total-knowledge.com>
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/mace.h>

/* MMIO mappings of the CRIME and MACE chips, established in crime_init(). */
struct sgi_crime __iomem *crime;
struct sgi_mace __iomem *mace;
EXPORT_SYMBOL_GPL(mace);

/*
 * Map CRIME/MACE registers plus the MACE PCI I/O window and print
 * the detected CRIME id/revision.
 */
void __init crime_init(void)
{
	unsigned int id, rev;
	/* hex-digit width of an unsigned long, for the %0*lx below */
	const int field = 2 * sizeof(unsigned long);

	set_io_port_base((unsigned long) ioremap(MACEPCI_LOW_IO, 0x2000000));
	crime = ioremap(CRIME_BASE, sizeof(struct sgi_crime));
	mace = ioremap(MACE_BASE, sizeof(struct sgi_mace));

	id = crime->id;
	rev = id & CRIME_ID_REV;
	id = (id & CRIME_ID_IDBITS) >> 4;
	printk(KERN_INFO "CRIME id %1x rev %d at 0x%0*lx\n",
	       id, rev, field, (unsigned long) CRIME_BASE);
}

/*
 * Memory-error handler: print decoded error flags, clear the latched
 * status, and panic when a fatal (multiple/hard) error is flagged.
 */
irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
{
	unsigned long stat, addr;
	int fatal = 0;

	stat = crime->mem_error_stat & CRIME_MEM_ERROR_STAT_MASK;
	addr = crime->mem_error_addr & CRIME_MEM_ERROR_ADDR_MASK;

	printk("CRIME memory error at 0x%08lx ST 0x%08lx<", addr, stat);

	if (stat & CRIME_MEM_ERROR_INV)
		printk("INV,");
	if (stat & CRIME_MEM_ERROR_ECC) {
		unsigned long ecc_syn =
			crime->mem_ecc_syn & CRIME_MEM_ERROR_ECC_SYN_MASK;
		unsigned long ecc_gen =
			crime->mem_ecc_chk & CRIME_MEM_ERROR_ECC_CHK_MASK;
		printk("ECC,SYN=0x%08lx,GEN=0x%08lx,", ecc_syn, ecc_gen);
	}
	if (stat & CRIME_MEM_ERROR_MULTIPLE) {
		fatal = 1;
		printk("MULTIPLE,");
	}
	if (stat & CRIME_MEM_ERROR_HARD_ERR) {
		fatal = 1;
		printk("HARD,");
	}
	if (stat & CRIME_MEM_ERROR_SOFT_ERR)
		printk("SOFT,");
	if (stat & CRIME_MEM_ERROR_CPU_ACCESS)
		printk("CPU,");
	if (stat & CRIME_MEM_ERROR_VICE_ACCESS)
		printk("VICE,");
	if (stat & CRIME_MEM_ERROR_GBE_ACCESS)
		printk("GBE,");
	if (stat & CRIME_MEM_ERROR_RE_ACCESS)
		printk("RE,REID=0x%02lx,", (stat & CRIME_MEM_ERROR_RE_ID)>>8);
	if (stat & CRIME_MEM_ERROR_MACE_ACCESS)
		printk("MACE,MACEID=0x%02lx,", stat & CRIME_MEM_ERROR_MACE_ID);

	/* clear the latched error so the next fault can be captured */
	crime->mem_error_stat = 0;

	if (fatal) {
		printk("FATAL>\n");
		panic("Fatal memory error.");
	} else
		printk("NONFATAL>\n");

	return IRQ_HANDLED;
}

/*
 * CPU-error handler: the hardware records the address shifted right
 * by two bits, hence the <<= 2 before reporting; latch is cleared.
 */
irqreturn_t crime_cpuerr_intr(unsigned int irq, void *dev_id)
{
	unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
	unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;

	addr <<= 2;
	printk("CRIME CPU error at 0x%09lx status 0x%08lx\n", addr, stat);
	crime->cpu_error_stat = 0;

	return IRQ_HANDLED;
}
gpl-2.0
Technux/linux
drivers/parisc/wax.c
14611
3227
/* * WAX Device Driver * * (c) Copyright 2000 The Puffin Group Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * by Helge Deller <deller@gmx.de> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> #include <asm/hardware.h> #include "gsc.h" #define WAX_GSC_IRQ 7 /* Hardcoded Interrupt for GSC */ static void wax_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; switch (dev->id.sversion) { case 0x73: irq = 1; break; /* i8042 General */ case 0x8c: irq = 6; break; /* Serial */ case 0x90: irq = 10; break; /* EISA */ default: return; /* Unknown */ } gsc_asic_assign_irq(ctrl, irq, &dev->irq); switch (dev->id.sversion) { case 0x73: irq = 2; break; /* i8042 High-priority */ case 0x90: irq = 0; break; /* EISA NMI */ default: return; /* No secondary IRQ */ } gsc_asic_assign_irq(ctrl, irq, &dev->aux_irq); } static void __init wax_init_irq(struct gsc_asic *wax) { unsigned long base = wax->hpa; /* Wax-off */ gsc_writel(0x00000000, base+OFFSET_IMR); /* clear pending interrupts */ gsc_readl(base+OFFSET_IRR); /* We're not really convinced we want to reset the onboard * devices. Firmware does it for us... 
*/ /* Resets */ // gsc_writel(0xFFFFFFFF, base+0x1000); /* HIL */ // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ } static int __init wax_init_chip(struct parisc_device *dev) { struct gsc_asic *wax; struct parisc_device *parent; struct gsc_irq gsc_irq; int ret; wax = kzalloc(sizeof(*wax), GFP_KERNEL); if (!wax) return -ENOMEM; wax->name = "wax"; wax->hpa = dev->hpa.start; wax->version = 0; /* gsc_readb(wax->hpa+WAX_VER); */ printk(KERN_INFO "%s at 0x%lx found.\n", wax->name, wax->hpa); /* Stop wax hissing for a bit */ wax_init_irq(wax); /* the IRQ wax should use */ dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); kfree(wax); return -EBUSY; } wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); if (ret < 0) { kfree(wax); return ret; } /* enable IRQ's for devices below WAX */ gsc_writel(wax->eim, wax->hpa + OFFSET_IAR); /* Done init'ing, register this driver */ ret = gsc_common_setup(dev, wax); if (ret) { kfree(wax); return ret; } gsc_fixup_irqs(dev, wax, wax_choose_irq); /* On 715-class machines, Wax EISA is a sibling of Wax, not a child. */ parent = parisc_parent(dev); if (parent->id.hw_type != HPHW_IOA) { gsc_fixup_irqs(parent, wax, wax_choose_irq); } return ret; } static struct parisc_device_id wax_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008e }, { 0, } }; MODULE_DEVICE_TABLE(parisc, wax_tbl); struct parisc_driver wax_driver = { .name = "wax", .id_table = wax_tbl, .probe = wax_init_chip, };
gpl-2.0
ericli1989/ali_kernel
drivers/scsi/aic94xx/aic94xx_reg.c
14867
10895
/*
 * Aic94xx SAS/SATA driver register access.
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/pci.h>
#include "aic94xx_reg.h"
#include "aic94xx.h"

/* Writing to device address space.
 * Offset comes before value to remind that the operation of
 * this function is *offs = val.
 */
/* Each write goes through either port I/O or MMIO depending on how the
 * adapter was mapped (asd_ha->iospace); the wmb() orders it before any
 * subsequent access. */
static void asd_write_byte(struct asd_ha_struct *asd_ha,
			   unsigned long offs, u8 val)
{
	if (unlikely(asd_ha->iospace))
		outb(val,
		     (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
	else
		writeb(val, asd_ha->io_handle[0].addr + offs);
	wmb();
}

static void asd_write_word(struct asd_ha_struct *asd_ha,
			   unsigned long offs, u16 val)
{
	if (unlikely(asd_ha->iospace))
		outw(val,
		     (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
	else
		writew(val, asd_ha->io_handle[0].addr + offs);
	wmb();
}

static void asd_write_dword(struct asd_ha_struct *asd_ha,
			    unsigned long offs, u32 val)
{
	if (unlikely(asd_ha->iospace))
		outl(val,
		     (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
	else
		writel(val, asd_ha->io_handle[0].addr + offs);
	wmb();
}

/* Reading from device address space.
 */
/* Mirror image of the writers above; rmb() orders the read. */
static u8 asd_read_byte(struct asd_ha_struct *asd_ha,
			unsigned long offs)
{
	u8 val;
	if (unlikely(asd_ha->iospace))
		val = inb((unsigned long) asd_ha->io_handle[0].addr
			  + (offs & 0xFF));
	else
		val = readb(asd_ha->io_handle[0].addr + offs);
	rmb();
	return val;
}

static u16 asd_read_word(struct asd_ha_struct *asd_ha,
			 unsigned long offs)
{
	u16 val;
	if (unlikely(asd_ha->iospace))
		val = inw((unsigned long)asd_ha->io_handle[0].addr
			  + (offs & 0xFF));
	else
		val = readw(asd_ha->io_handle[0].addr + offs);
	rmb();
	return val;
}

static u32 asd_read_dword(struct asd_ha_struct *asd_ha,
			  unsigned long offs)
{
	u32 val;
	if (unlikely(asd_ha->iospace))
		val = inl((unsigned long) asd_ha->io_handle[0].addr
			  + (offs & 0xFF));
	else
		val = readl(asd_ha->io_handle[0].addr + offs);
	rmb();
	return val;
}

/* Offsets of the three sliding windows inside MBAR0; see the layout
 * comment further down. */
static inline u32 asd_mem_offs_swa(void)
{
	return 0;
}

static inline u32 asd_mem_offs_swc(void)
{
	return asd_mem_offs_swa() + MBAR0_SWA_SIZE;
}

static inline u32 asd_mem_offs_swb(void)
{
	return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20;
}

/* We know that the register wanted is in the range
 * of the sliding window.
 */
/* Generate asd_read_<win>_<size>(): translate a device register address
 * into an MBAR0 offset relative to the window's current base. */
#define ASD_READ_SW(ww, type, ord)					\
static type asd_read_##ww##_##ord(struct asd_ha_struct *asd_ha,		\
				  u32 reg)				\
{									\
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];	\
	u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\
	return asd_read_##ord(asd_ha, (unsigned long)map_offs);	\
}

#define ASD_WRITE_SW(ww, type, ord)					\
static void asd_write_##ww##_##ord(struct asd_ha_struct *asd_ha,	\
				   u32 reg, type val)			\
{									\
	struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];	\
	u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\
	asd_write_##ord(asd_ha, (unsigned long)map_offs, val);	\
}

ASD_READ_SW(swa, u8, byte);
ASD_READ_SW(swa, u16, word);
ASD_READ_SW(swa, u32, dword);

ASD_READ_SW(swb, u8, byte);
ASD_READ_SW(swb, u16, word);
ASD_READ_SW(swb, u32, dword);

ASD_READ_SW(swc, u8, byte);
ASD_READ_SW(swc, u16, word);
ASD_READ_SW(swc, u32, dword);

ASD_WRITE_SW(swa, u8, byte);
ASD_WRITE_SW(swa, u16, word);
ASD_WRITE_SW(swa, u32, dword);

ASD_WRITE_SW(swb, u8, byte);
ASD_WRITE_SW(swb, u16, word);
ASD_WRITE_SW(swb, u32, dword);

ASD_WRITE_SW(swc, u8, byte);
ASD_WRITE_SW(swc, u16, word);
ASD_WRITE_SW(swc, u32, dword);

/*
 * A word about sliding windows:
 * MBAR0 is divided into sliding windows A, C and B, in that order.
 * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes.
 * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes.
 * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F.
 * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0.
 * See asd_init_sw() in aic94xx_hwi.c
 *
 * We map the most common registers we'd access of the internal 4GB
 * host adapter memory space.  If a register/internal memory location
 * is wanted which is not mapped, we slide SWB, by paging it,
 * see asd_move_swb() in aic94xx_reg.c.
 */

/**
 * asd_move_swb -- move sliding window B
 * @asd_ha: pointer to host adapter structure
 * @reg: register desired to be within range of the new window
 */
static void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
{
	/* align the new base to the window size */
	u32 base = reg & ~(MBAR0_SWB_SIZE-1);
	pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base);
	asd_ha->io_handle[0].swb_base = base;
}

/* Lock-free byte write: dispatch to whichever window covers @reg,
 * sliding SWB when none does.  Caller must hold asd_ha->iolock. */
static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val)
{
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
	if (io_handle->swa_base <= reg
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
		asd_write_swa_byte (asd_ha, reg,val);
	else if (io_handle->swb_base <= reg
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
		asd_write_swb_byte (asd_ha, reg, val);
	else if (io_handle->swc_base <= reg
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
		asd_write_swc_byte (asd_ha, reg, val);
	else {
		/* Ok, we have to move SWB */
		asd_move_swb(asd_ha, reg);
		asd_write_swb_byte (asd_ha, reg, val);
	}
}

/* Generate the public asd_write_reg_{byte,word,dword}() which take the
 * iolock so that window sliding is atomic with the access. */
#define ASD_WRITE_REG(type, ord)					\
void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\
{									\
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];	\
	unsigned long flags;						\
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);		\
	spin_lock_irqsave(&asd_ha->iolock, flags);			\
	if (io_handle->swa_base <= reg					\
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE)		\
		asd_write_swa_##ord (asd_ha, reg,val);			\
	else if (io_handle->swb_base <= reg				\
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)	\
		asd_write_swb_##ord (asd_ha, reg, val);		\
	else if (io_handle->swc_base <= reg				\
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)	\
		asd_write_swc_##ord (asd_ha, reg, val);		\
	else {								\
		/* Ok, we have to move SWB */				\
		asd_move_swb(asd_ha, reg);				\
		asd_write_swb_##ord (asd_ha, reg, val);		\
	}								\
	spin_unlock_irqrestore(&asd_ha->iolock, flags);		\
}

ASD_WRITE_REG(u8, byte);
ASD_WRITE_REG(u16,word);
ASD_WRITE_REG(u32,dword);

/* Lock-free byte read; same window dispatch as the write path above.
 * Caller must hold asd_ha->iolock. */
static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg)
{
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
	u8 val;
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
	if (io_handle->swa_base <= reg
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
		val = asd_read_swa_byte (asd_ha, reg);
	else if (io_handle->swb_base <= reg
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
		val = asd_read_swb_byte (asd_ha, reg);
	else if (io_handle->swc_base <= reg
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
		val = asd_read_swc_byte (asd_ha, reg);
	else {
		/* Ok, we have to move SWB */
		asd_move_swb(asd_ha, reg);
		val = asd_read_swb_byte (asd_ha, reg);
	}
	return val;
}

/* Generate the public asd_read_reg_{byte,word,dword}() (locked). */
#define ASD_READ_REG(type, ord)					\
type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg)	\
{									\
	struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];	\
	type val;							\
	unsigned long flags;						\
	BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);		\
	spin_lock_irqsave(&asd_ha->iolock, flags);			\
	if (io_handle->swa_base <= reg					\
	    && reg < io_handle->swa_base + MBAR0_SWA_SIZE)		\
		val = asd_read_swa_##ord (asd_ha, reg);		\
	else if (io_handle->swb_base <= reg				\
		 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)	\
		val = asd_read_swb_##ord (asd_ha, reg);		\
	else if (io_handle->swc_base <= reg				\
		 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)	\
		val = asd_read_swc_##ord (asd_ha, reg);		\
	else {								\
		/* Ok, we have to move SWB */				\
		asd_move_swb(asd_ha, reg);				\
		val = asd_read_swb_##ord (asd_ha, reg);		\
	}								\
	spin_unlock_irqrestore(&asd_ha->iolock, flags);		\
	return val;							\
}

ASD_READ_REG(u8, byte);
ASD_READ_REG(u16,word);
ASD_READ_REG(u32,dword);

/**
 * asd_read_reg_string -- read a string of bytes from io space memory
 * @asd_ha: pointer to host adapter structure
 * @dst: pointer to a destination buffer where data will be written to
 * @offs: start offset (register) to read from
 * @count: number of bytes to read
 */
void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
			 u32 offs, int count)
{
	u8 *p = dst;
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->iolock, flags);
	for ( ; count > 0; count--, offs++, p++)
		*p = __asd_read_reg_byte(asd_ha, offs);
	spin_unlock_irqrestore(&asd_ha->iolock, flags);
}

/**
 * asd_write_reg_string -- write a string of bytes to io space memory
 * @asd_ha: pointer to host adapter structure
 * @src: pointer to source buffer where data will be read from
 * @offs: start offset (register) to write to
 * @count: number of bytes to write
 */
void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
			  u32 offs, int count)
{
	u8 *p = src;
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->iolock, flags);
	for ( ; count > 0; count--, offs++, p++)
		__asd_write_reg_byte(asd_ha, offs, *p);
	spin_unlock_irqrestore(&asd_ha->iolock, flags);
}
gpl-2.0
DooMLoRD/android_kernel_htc_tegra3
drivers/tty/serial/8250_early.c
20
7236
/* * Early serial console for 8250/16550 devices * * (c) Copyright 2004 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on the 8250.c serial driver, Copyright (C) 2001 Russell King, * and on early_printk.c by Andi Kleen. * * This is for use before the serial driver has initialized, in * particular, before the UARTs have been discovered and named. * Instead of specifying the console device as, e.g., "ttyS0", * we locate the device directly by its MMIO or I/O port address. * * The user can specify the device directly, e.g., * earlycon=uart8250,io,0x3f8,9600n8 * earlycon=uart8250,mmio,0xff5e0000,115200n8 * earlycon=uart8250,mmio32,0xff5e0000,115200n8 * or * console=uart8250,io,0x3f8,9600n8 * console=uart8250,mmio,0xff5e0000,115200n8 * console=uart8250,mmio32,0xff5e0000,115200n8 */ #include <linux/tty.h> #include <linux/init.h> #include <linux/console.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <asm/io.h> #include <asm/serial.h> #ifdef CONFIG_FIX_EARLYCON_MEM #include <asm/pgtable.h> #include <asm/fixmap.h> #endif struct early_serial8250_device { struct uart_port port; char options[16]; /* e.g., 115200n8 */ unsigned int baud; }; static struct early_serial8250_device early_device; static unsigned int __init serial_in(struct uart_port *port, int offset) { switch (port->iotype) { case UPIO_MEM: return readb(port->membase + offset); case UPIO_MEM32: return readl(port->membase + (offset << 2)); case UPIO_PORT: return inb(port->iobase + offset); default: return 0; } } static void __init serial_out(struct uart_port *port, int offset, int value) { switch (port->iotype) { case UPIO_MEM: writeb(value, port->membase + offset); break; case UPIO_MEM32: writel(value, 
port->membase + (offset << 2)); break; case UPIO_PORT: outb(value, port->iobase + offset); break; } } #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) static void __init wait_for_xmitr(struct uart_port *port) { unsigned int status; for (;;) { status = serial_in(port, UART_LSR); if ((status & BOTH_EMPTY) == BOTH_EMPTY) return; cpu_relax(); } } static void __init serial_putc(struct uart_port *port, int c) { wait_for_xmitr(port); serial_out(port, UART_TX, c); } static void __init early_serial8250_write(struct console *console, const char *s, unsigned int count) { struct uart_port *port = &early_device.port; unsigned int ier; /* Save the IER and disable interrupts */ ier = serial_in(port, UART_IER); serial_out(port, UART_IER, 0); uart_console_write(port, s, count, serial_putc); /* Wait for transmitter to become empty and restore the IER */ wait_for_xmitr(port); serial_out(port, UART_IER, ier); } static unsigned int __init probe_baud(struct uart_port *port) { unsigned char lcr, dll, dlm; unsigned int quot; lcr = serial_in(port, UART_LCR); serial_out(port, UART_LCR, lcr | UART_LCR_DLAB); dll = serial_in(port, UART_DLL); dlm = serial_in(port, UART_DLM); serial_out(port, UART_LCR, lcr); quot = (dlm << 8) | dll; return (port->uartclk / 16) / quot; } static void __init init_port(struct early_serial8250_device *device) { struct uart_port *port = &device->port; unsigned int divisor; unsigned char c; serial_out(port, UART_LCR, 0x3); /* 8n1 */ serial_out(port, UART_IER, 0); /* no interrupt */ serial_out(port, UART_FCR, 0); /* no fifo */ serial_out(port, UART_MCR, 0x3); /* DTR + RTS */ divisor = port->uartclk / (16 * device->baud); c = serial_in(port, UART_LCR); serial_out(port, UART_LCR, c | UART_LCR_DLAB); serial_out(port, UART_DLL, divisor & 0xff); serial_out(port, UART_DLM, (divisor >> 8) & 0xff); serial_out(port, UART_LCR, c & ~UART_LCR_DLAB); } static int __init parse_options(struct early_serial8250_device *device, char *options) { struct uart_port *port = &device->port; 
int mmio, mmio32, length; if (!options) return -ENODEV; port->uartclk = BASE_BAUD * 16; mmio = !strncmp(options, "mmio,", 5); mmio32 = !strncmp(options, "mmio32,", 7); if (mmio || mmio32) { port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32); port->mapbase = simple_strtoul(options + (mmio ? 5 : 7), &options, 0); if (mmio32) port->regshift = 2; #ifdef CONFIG_FIX_EARLYCON_MEM set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, port->mapbase & PAGE_MASK); port->membase = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); port->membase += port->mapbase & ~PAGE_MASK; #else port->membase = ioremap_nocache(port->mapbase, 64); if (!port->membase) { printk(KERN_ERR "[SER] %s: Couldn't ioremap 0x%llx\n", __func__, (unsigned long long) port->mapbase); return -ENOMEM; } #endif } else if (!strncmp(options, "io,", 3)) { port->iotype = UPIO_PORT; port->iobase = simple_strtoul(options + 3, &options, 0); mmio = 0; } else return -EINVAL; options = strchr(options, ','); if (options) { options++; device->baud = simple_strtoul(options, NULL, 0); length = min(strcspn(options, " "), sizeof(device->options)); strncpy(device->options, options, length); } else { device->baud = probe_baud(port); snprintf(device->options, sizeof(device->options), "%u", device->baud); } if (mmio || mmio32) printk(KERN_INFO "[SER] Early serial console at MMIO%s 0x%llx (options '%s')\n", mmio32 ? 
"32" : "", (unsigned long long)port->mapbase, device->options); else printk(KERN_INFO "[SER] Early serial console at I/O port 0x%lx (options '%s')\n", port->iobase, device->options); return 0; } static struct console early_serial8250_console __initdata = { .name = "uart", .write = early_serial8250_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; static int __init early_serial8250_setup(char *options) { struct early_serial8250_device *device = &early_device; int err; if (device->port.membase || device->port.iobase) return 0; err = parse_options(device, options); if (err < 0) return err; init_port(device); return 0; } int __init setup_early_serial8250_console(char *cmdline) { char *options; int err; options = strstr(cmdline, "uart8250,"); if (!options) { options = strstr(cmdline, "uart,"); if (!options) return 0; } options = strchr(cmdline, ',') + 1; err = early_serial8250_setup(options); if (err < 0) return err; register_console(&early_serial8250_console); return 0; } int serial8250_find_port_for_earlycon(void) { struct early_serial8250_device *device = &early_device; struct uart_port *port = &device->port; int line; int ret; if (!device->port.membase && !device->port.iobase) return -ENODEV; line = serial8250_find_port(port); if (line < 0) return -ENODEV; ret = update_console_cmdline("uart", 8250, "ttyS", line, device->options); if (ret < 0) ret = update_console_cmdline("uart", 0, "ttyS", line, device->options); return ret; } early_param("earlycon", setup_early_serial8250_console);
gpl-2.0
thrill007/ijkplayer
ijkmedia/ijksdl/android/ijksdl_aout_android_audiotrack.c
20
8781
/***************************************************************************** * ijksdl_aout_android_audiotrack.c ***************************************************************************** * * copyright (c) 2013 Zhang Rui <bbcallen@gmail.com> * * This file is part of ijkPlayer. * * ijkPlayer is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * ijkPlayer is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with ijkPlayer; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "ijksdl_aout_android_audiotrack.h" #include <stdbool.h> #include <assert.h> #include <jni.h> #include "../ijksdl_inc_internal.h" #include "../ijksdl_thread.h" #include "../ijksdl_aout_internal.h" #include "ijksdl_android_jni.h" #include "android_audiotrack.h" #ifdef SDLTRACE #undef SDLTRACE #define SDLTRACE(...) 
#endif static SDL_Class g_audiotrack_class = { .name = "AudioTrack", }; typedef struct SDL_Aout_Opaque { SDL_cond *wakeup_cond; SDL_mutex *wakeup_mutex; SDL_AudioSpec spec; SDL_Android_AudioTrack* atrack; uint8_t *buffer; int buffer_size; volatile bool need_flush; volatile bool pause_on; volatile bool abort_request; volatile bool need_set_volume; volatile float left_volume; volatile float right_volume; SDL_Thread *audio_tid; SDL_Thread _audio_tid; } SDL_Aout_Opaque; static int aout_thread_n(JNIEnv *env, SDL_Aout *aout) { SDL_Aout_Opaque *opaque = aout->opaque; SDL_Android_AudioTrack *atrack = opaque->atrack; SDL_AudioCallback audio_cblk = opaque->spec.callback; void *userdata = opaque->spec.userdata; uint8_t *buffer = opaque->buffer; int copy_size = 256; assert(atrack); assert(buffer); SDL_SetThreadPriority(SDL_THREAD_PRIORITY_HIGH); if (!opaque->abort_request && !opaque->pause_on) SDL_Android_AudioTrack_play(env, atrack); while (!opaque->abort_request) { SDL_LockMutex(opaque->wakeup_mutex); if (!opaque->abort_request && opaque->pause_on) { SDL_Android_AudioTrack_pause(env, atrack); while (!opaque->abort_request && opaque->pause_on) { SDL_CondWaitTimeout(opaque->wakeup_cond, opaque->wakeup_mutex, 1000); } if (!opaque->abort_request && !opaque->pause_on) SDL_Android_AudioTrack_play(env, atrack); } if (opaque->need_flush) { opaque->need_flush = 0; SDL_Android_AudioTrack_flush(env, atrack); } if (opaque->need_set_volume) { opaque->need_set_volume = 0; SDL_Android_AudioTrack_set_volume(env, atrack, opaque->left_volume, opaque->right_volume); } SDL_UnlockMutex(opaque->wakeup_mutex); audio_cblk(userdata, buffer, copy_size); if (opaque->need_flush) { SDL_Android_AudioTrack_flush(env, atrack); opaque->need_flush = false; } if (opaque->need_flush) { opaque->need_flush = 0; SDL_Android_AudioTrack_flush(env, atrack); } else { int written = SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size); if (written != copy_size) { ALOGW("AudioTrack: not all data copied %d/%d", 
(int)written, (int)copy_size); } } // TODO: 1 if callback return -1 or 0 } SDL_Android_AudioTrack_free(env, atrack); return 0; } static int aout_thread(void *arg) { SDL_Aout *aout = arg; // SDL_Aout_Opaque *opaque = aout->opaque; JNIEnv *env = NULL; if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) { ALOGE("aout_thread: SDL_AndroidJni_SetupEnv: failed"); return -1; } return aout_thread_n(env, aout); } static int aout_open_audio_n(JNIEnv *env, SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained) { assert(desired); SDL_Aout_Opaque *opaque = aout->opaque; opaque->spec = *desired; opaque->atrack = SDL_Android_AudioTrack_new_from_sdl_spec(env, desired); if (!opaque->atrack) { ALOGE("aout_open_audio_n: failed to new AudioTrcak()"); return -1; } opaque->buffer_size = SDL_Android_AudioTrack_get_min_buffer_size(opaque->atrack); if (opaque->buffer_size <= 0) { ALOGE("aout_open_audio_n: failed to getMinBufferSize()"); SDL_Android_AudioTrack_free(env, opaque->atrack); opaque->atrack = NULL; return -1; } opaque->buffer = malloc(opaque->buffer_size); if (!opaque->buffer) { ALOGE("aout_open_audio_n: failed to allocate buffer"); SDL_Android_AudioTrack_free(env, opaque->atrack); opaque->atrack = NULL; return -1; } if (obtained) { SDL_Android_AudioTrack_get_target_spec(opaque->atrack, obtained); SDLTRACE("audio target format fmt:0x%x, channel:0x%x", (int)obtained->format, (int)obtained->channels); } opaque->pause_on = 1; opaque->abort_request = 0; opaque->audio_tid = SDL_CreateThreadEx(&opaque->_audio_tid, aout_thread, aout, "ff_aout_android"); if (!opaque->audio_tid) { ALOGE("aout_open_audio_n: failed to create audio thread"); SDL_Android_AudioTrack_free(env, opaque->atrack); opaque->atrack = NULL; return -1; } return 0; } static int aout_open_audio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained) { // SDL_Aout_Opaque *opaque = aout->opaque; JNIEnv *env = NULL; if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) { ALOGE("aout_open_audio: 
AttachCurrentThread: failed"); return -1; } return aout_open_audio_n(env, aout, desired, obtained); } static void aout_pause_audio(SDL_Aout *aout, int pause_on) { SDL_Aout_Opaque *opaque = aout->opaque; SDL_LockMutex(opaque->wakeup_mutex); SDLTRACE("aout_pause_audio(%d)", pause_on); opaque->pause_on = pause_on; if (!pause_on) SDL_CondSignal(opaque->wakeup_cond); SDL_UnlockMutex(opaque->wakeup_mutex); } static void aout_flush_audio(SDL_Aout *aout) { SDL_Aout_Opaque *opaque = aout->opaque; SDL_LockMutex(opaque->wakeup_mutex); SDLTRACE("aout_flush_audio()"); opaque->need_flush = 1; SDL_CondSignal(opaque->wakeup_cond); SDL_UnlockMutex(opaque->wakeup_mutex); } static void aout_set_volume(SDL_Aout *aout, float left_volume, float right_volume) { SDL_Aout_Opaque *opaque = aout->opaque; SDL_LockMutex(opaque->wakeup_mutex); SDLTRACE("aout_flush_audio()"); opaque->left_volume = left_volume; opaque->right_volume = right_volume; opaque->need_set_volume = 1; SDL_CondSignal(opaque->wakeup_cond); SDL_UnlockMutex(opaque->wakeup_mutex); } static void aout_close_audio(SDL_Aout *aout) { SDL_Aout_Opaque *opaque = aout->opaque; SDL_LockMutex(opaque->wakeup_mutex); opaque->abort_request = true; SDL_CondSignal(opaque->wakeup_cond); SDL_UnlockMutex(opaque->wakeup_mutex); SDL_WaitThread(opaque->audio_tid, NULL); opaque->audio_tid = NULL; } static void aout_free_l(SDL_Aout *aout) { if (!aout) return; aout_close_audio(aout); SDL_Aout_Opaque *opaque = aout->opaque; if (opaque) { free(opaque->buffer); opaque->buffer = NULL; opaque->buffer_size = 0; SDL_DestroyCond(opaque->wakeup_cond); SDL_DestroyMutex(opaque->wakeup_mutex); } SDL_Aout_FreeInternal(aout); } SDL_Aout *SDL_AoutAndroid_CreateForAudioTrack() { SDL_Aout *aout = SDL_Aout_CreateInternal(sizeof(SDL_Aout_Opaque)); if (!aout) return NULL; SDL_Aout_Opaque *opaque = aout->opaque; opaque->wakeup_cond = SDL_CreateCond(); opaque->wakeup_mutex = SDL_CreateMutex(); aout->opaque_class = &g_audiotrack_class; aout->free_l = aout_free_l; 
aout->open_audio = aout_open_audio; aout->pause_audio = aout_pause_audio; aout->flush_audio = aout_flush_audio; aout->set_volume = aout_set_volume; aout->close_audio = aout_close_audio; return aout; } bool SDL_AoutAndroid_IsObjectOfAudioTrack(SDL_Aout *aout) { if (aout) return false; return aout->opaque_class == &g_audiotrack_class; } void SDL_Init_AoutAndroid(JNIEnv *env) { }
gpl-2.0
shminer/kernel-msm-3.18
drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
276
56335
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/qmi_encdec.h> #include <linux/ipa_qmi_service_v01.h> #include <soc/qcom/msm_qmi_interface.h> /* Type Definitions */ static struct elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, modem_offset_start), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, modem_offset_end), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_route_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_route_tbl_info_type_v01, route_tbl_start_addr), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_route_tbl_info_type_v01, num_indices), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_modem_mem_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 
QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_modem_mem_info_type_v01, block_start_addr), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_modem_mem_info_type_v01, size), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_hdr_proc_ctx_tbl_info_type_v01, modem_offset_start), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_hdr_proc_ctx_tbl_info_type_v01, modem_offset_end), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_zip_tbl_info_type_v01, modem_offset_start), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_zip_tbl_info_type_v01, modem_offset_end), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_range_eq_16_type_v01, offset), }, { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = 
offsetof( struct ipa_ipfltr_range_eq_16_type_v01, range_low), }, { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_range_eq_16_type_v01, range_high), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_32_type_v01, offset), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_32_type_v01, mask), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_32_type_v01, value), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_eq_16_type_v01, offset), }, { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01, value), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, offset), }, { .data_type = QMI_UNSIGNED_4_BYTE, 
.elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, value), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_128_type_v01, offset), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 16, .elem_size = sizeof(uint8_t), .is_array = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_128_type_v01, mask), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 16, .elem_size = sizeof(uint8_t), .is_array = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_128_type_v01, value), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_filter_rule_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_type_v01, rule_eq_bitmap), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_type_v01, tos_eq_present), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, tos_eq), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, protocol_eq_present), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = 
sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, protocol_eq), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_ihl_offset_range_16), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, .elem_size = sizeof( struct ipa_ipfltr_range_eq_16_type_v01), .is_array = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_range_16), .ei_array = ipa_ipfltr_range_eq_16_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_offset_meq_32), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01, .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), .is_array = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, offset_meq_32), .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, tc_eq_present), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, tc_eq), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, flow_eq_present), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct 
ipa_filter_rule_type_v01, flow_eq), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_16_present), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_16), .ei_array = ipa_ipfltr_eq_16_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_32_present), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_32), .ei_array = ipa_ipfltr_eq_32_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_ihl_offset_meq_32), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01, .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), .is_array = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_meq_32), .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_offset_meq_128), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01, .elem_size = sizeof( struct ipa_ipfltr_mask_eq_128_type_v01), .is_array = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct 
ipa_filter_rule_type_v01, offset_meq_128), .ei_array = ipa_ipfltr_mask_eq_128_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, metadata_meq32_present), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, metadata_meq32), .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ipv4_frag_eq_present), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_filter_spec_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, filter_spec_identifier), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, ip_type), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_filter_rule_type_v01), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, filter_rule), .ei_array = ipa_filter_rule_type_data_v01_ei, }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, filter_action), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, 
.offset = offsetof(struct ipa_filter_spec_type_v01, is_routing_table_index_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, route_table_index), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, is_mux_id_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, mux_id), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_filter_rule_identifier_to_handle_map_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_identifier_to_handle_map_v01, filter_spec_identifier), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_identifier_to_handle_map_v01, filter_handle), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_handle_to_index_map_v01, filter_handle), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_handle_to_index_map_v01, filter_index), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = 
QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, platform_type_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, platform_type), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, hdr_tbl_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, hdr_tbl_info), .ei_array = ipa_hdr_tbl_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v4_route_tbl_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v4_route_tbl_info), .ei_array = ipa_route_tbl_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v6_route_tbl_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v6_route_tbl_info), .ei_array = ipa_route_tbl_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), 
.is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v4_filter_tbl_start_addr_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v4_filter_tbl_start_addr), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v6_filter_tbl_start_addr_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, v6_filter_tbl_start_addr), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, modem_mem_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_modem_mem_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, modem_mem_info), .ei_array = ipa_modem_mem_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, ctrl_comm_dest_end_pt_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, ctrl_comm_dest_end_pt), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, is_ssr_bootup_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x18, .offset 
= offsetof( struct ipa_init_modem_driver_req_msg_v01, is_ssr_bootup), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, hdr_proc_ctx_tbl_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof( struct ipa_hdr_proc_ctx_tbl_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, hdr_proc_ctx_tbl_info), .ei_array = ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, zip_tbl_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, zip_tbl_info), .ei_array = ipa_zip_tbl_info_type_data_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, ctrl_comm_dest_end_pt_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, ctrl_comm_dest_end_pt), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( 
struct ipa_init_modem_driver_resp_msg_v01, default_end_pt_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, default_end_pt), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, master_driver_init_complete_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, master_driver_init_complete), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, data_usage_quota_reached_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, data_usage_quota_reached), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_indication_reg_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct 
ipa_master_driver_init_complt_ind_msg_v01, master_driver_init_status), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, filter_spec_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, filter_spec_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(struct ipa_filter_spec_type_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, filter_spec_list), .ei_array = ipa_filter_spec_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, source_pipe_index_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, source_pipe_index), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, num_ipv4_filters_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, num_ipv4_filters), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, num_ipv6_filters_valid), }, { 
.data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, num_ipv6_filters), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, xlat_filter_indices_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, xlat_filter_indices_list_len), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(uint32_t), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, xlat_filter_indices_list), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, filter_handle_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, filter_handle_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof( struct ipa_filter_rule_identifier_to_handle_map_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, filter_handle_list), .ei_array = 
ipa_filter_rule_identifier_to_handle_map_data_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, source_pipe_index), }, { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, install_status), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x03, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, filter_index_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof( struct ipa_filter_handle_to_index_map_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x03, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, filter_index_list), .ei_array = ipa_filter_handle_to_index_map_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, embedded_pipe_index_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, embedded_pipe_index), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, retain_header_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, retain_header), }, { .data_type = 
QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, embedded_call_mux_id_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, embedded_call_mux_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, num_ipv4_filters_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, num_ipv4_filters), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, num_ipv6_filters_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, num_ipv6_filters), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, start_ipv4_filter_idx_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, start_ipv4_filter_idx), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, start_ipv6_filter_idx_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 
0x16, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, start_ipv6_filter_idx), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_fltr_installed_notif_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, source_pipe_bitmask), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, request_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, throttle_source_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, throttle_source), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_enable_force_clear_datapath_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, 
.tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_disable_force_clear_datapath_req_msg_v01, request_id), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_disable_force_clear_datapath_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_config_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_config_req_msg_v01, peripheral_type_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_config_req_msg_v01, peripheral_type), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_config_req_msg_v01, hw_deaggr_supported_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_config_req_msg_v01, hw_deaggr_supported), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_config_req_msg_v01, max_aggr_frame_size_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = 
offsetof( struct ipa_config_req_msg_v01, max_aggr_frame_size), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_config_req_msg_v01, ipa_ingress_pipe_mode_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_config_req_msg_v01, ipa_ingress_pipe_mode), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_config_req_msg_v01, peripheral_speed_info_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_config_req_msg_v01, peripheral_speed_info), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_config_req_msg_v01, dl_accumulation_time_limit_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_config_req_msg_v01, dl_accumulation_time_limit), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_config_req_msg_v01, dl_accumulation_pkt_limit_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_config_req_msg_v01, dl_accumulation_pkt_limit), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_config_req_msg_v01, dl_accumulation_byte_limit_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, 
.tlv_type = 0x17, .offset = offsetof( struct ipa_config_req_msg_v01, dl_accumulation_byte_limit), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_config_req_msg_v01, ul_accumulation_time_limit_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_config_req_msg_v01, ul_accumulation_time_limit), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_config_req_msg_v01, hw_control_flags_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_config_req_msg_v01, hw_control_flags), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_config_req_msg_v01, ul_msi_event_threshold_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_config_req_msg_v01, ul_msi_event_threshold), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof( struct ipa_config_req_msg_v01, dl_msi_event_threshold_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof( struct ipa_config_req_msg_v01, dl_msi_event_threshold), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_config_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = 
offsetof( struct ipa_config_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_get_data_stats_req_msg_v01, ipa_stats_type), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_req_msg_v01, reset_stats_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_req_msg_v01, reset_stats), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, pipe_index), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv4_packets), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv4_bytes), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv6_packets), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = 
offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv6_bytes), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_stats_type_filter_rule_v01, filter_rule_index), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_stats_type_filter_rule_v01, num_packets), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, ipa_stats_type_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, ipa_stats_type), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, ul_src_pipe_stats_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, ul_src_pipe_stats_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_PIPES_V01, .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), .is_array = 
VAR_LEN_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, ul_src_pipe_stats_list), .ei_array = ipa_pipe_stats_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, dl_dst_pipe_stats_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, dl_dst_pipe_stats_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_PIPES_V01, .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, dl_dst_pipe_stats_list), .ei_array = ipa_pipe_stats_info_type_data_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, dl_filter_rule_stats_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, dl_filter_rule_stats_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, dl_filter_rule_stats_list), .ei_array = ipa_stats_type_filter_rule_data_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, mux_id), }, { .data_type = 
QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, num_ul_packets), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, num_ul_bytes), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, num_dl_packets), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, num_dl_bytes), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_req_msg_v01, mux_id_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_req_msg_v01, mux_id_list_len), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_IPA_MAX_APN_V01, .elem_size = sizeof(uint32_t), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_req_msg_v01, mux_id_list), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, resp), .ei_array = 
get_qmi_response_type_v01_ei(), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, apn_data_stats_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, apn_data_stats_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_APN_V01, .elem_size = sizeof(struct ipa_apn_data_stats_info_type_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, apn_data_stats_list), .ei_array = ipa_apn_data_stats_info_type_data_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static struct elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_data_usage_quota_info_type_v01, mux_id), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_data_usage_quota_info_type_v01, num_Mbytes), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_set_data_usage_quota_req_msg_v01, apn_quota_list_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_set_data_usage_quota_req_msg_v01, apn_quota_list_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_APN_V01, .elem_size = sizeof(struct 
ipa_data_usage_quota_info_type_v01), .is_array = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_set_data_usage_quota_req_msg_v01, apn_quota_list), .ei_array = ipa_data_usage_quota_info_type_data_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_set_data_usage_quota_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_data_usage_quota_info_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_data_usage_quota_reached_ind_msg_v01, apn), .ei_array = ipa_data_usage_quota_info_type_data_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { /* ipa_stop_data_usage_quota_req_msg is empty */ { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_stop_data_usage_quota_resp_msg_v01, resp), .ei_array = get_qmi_response_type_v01_ei(), }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, };
gpl-2.0
reposte/android_kernel_xiaomi_msm8992
drivers/staging/vt6655/rf.c
532
44587
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: rf.c
 *
 * Purpose: rf function code
 *
 * Author: Jerry Chen
 *
 * Date: Feb. 19, 2004
 *
 * Functions:
 *      IFRFbWriteEmbedded      - Embedded write RF register via MAC
 *
 * Revision History:
 *
 */

#include "mac.h"
#include "srom.h"
#include "rf.h"
#include "baseband.h"

/*---------------------  Static Definitions -------------------------*/

//static int msglevel = MSG_LEVEL_INFO;

/*
 * Every table entry below is a complete MAC_REG_IFREGCTL command word:
 * the RF chip's 24-bit register payload in the upper bits, the serial
 * register length (BY_xxx_REG_LEN << 3) and the IFREGCTL_REGW write
 * command bit.  The low nibble of the payload selects the target RF
 * register; the values themselves are vendor-supplied magic numbers.
 */
#define BY_AL2230_REG_LEN            23 //24bit
#define CB_AL2230_INIT_SEQ           15
#define SWITCH_CHANNEL_DELAY_AL2230  200 //us
#define AL2230_PWR_IDX_LEN           64

#define BY_AL7230_REG_LEN            23 //24bit
#define CB_AL7230_INIT_SEQ           16
#define SWITCH_CHANNEL_DELAY_AL7230  200 //us
#define AL7230_PWR_IDX_LEN           64

/*---------------------  Static Classes  ----------------------------*/

/*---------------------  Static Variables  --------------------------*/

/* AL2230 power-up initialization sequence: one write per RF register. */
const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
    0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};

/* AL2230 synthesizer register 0, indexed by (channel - 1); 2.4GHz only. */
const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
    0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
    0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
    0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
    0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
    0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
    0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
    0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
    0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
    0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
    0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
    0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
    0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
    0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
    0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 14, Tf = 2412M
};

/* AL2230 synthesizer register 1, indexed by (channel - 1). */
const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
    0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
    0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
    0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
    0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
    0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
    0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
    0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
    0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 14, Tf = 2412M
};

/*
 * AL2230 Tx power register values, indexed by power level 0..63.
 * The power index occupies the byte at bits 12..19 (0x40..0x7F).
 */
unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
    0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
    0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};

//{{ RobertYu:20050104
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.

/* AL7230 power-up initialization sequence for 11b/g operation. */
const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
    0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
    0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
    0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 451FE2
    0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 5FDFA3
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11b/g    // Need modify for 11a
    //0x802B4500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 8D1B45
    // RoberYu:20050113, Rev0.47 Regsiter Setting Guide
    0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 8D1B55
    0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 860207
    0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: E0600A
    0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10)
    //0x00093C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 00143C
    // RoberYu:20050113, Rev0.47 Regsiter Setting Guide
    0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 00143C
    0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // Need modify for 11a: 12BACF
};

/* AL7230 power-up initialization sequence for 11a (5GHz) operation. */
const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
    0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
    0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
    0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11a      // Need modify for 11b/g
    0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g, RoberYu:20050113
    0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
    0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
    0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10)
    0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
    0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
    0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // Need modify for 11b/g
};

/*
 * AL7230 synthesizer register 0, indexed by the driver's channel index
 * (1..56) minus 1: 1-14 are 2.4GHz, 15-22 are 4.9GHz, 23-56 are 5GHz.
 */
const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
    0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
    0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
    0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
    0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
    0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
    0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
    0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
    0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
    0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49
    0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49
    0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49
    0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49
    0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49
    0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz

    // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
    0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
    0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
    0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)

    // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
    // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
    0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
    0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
    0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
    0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
    0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
    0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
    0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
    0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
    0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49
    0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
    0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
    0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
    0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
    0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
    0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
    0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
    0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
    0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
    0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
    0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
    0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
    0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
    0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
    0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
    0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
    0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
    0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
    0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
    0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
    0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
    0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
    0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
    0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
    0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
};

/* AL7230 synthesizer register 1, same index scheme as table 0. */
const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
    0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
    0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
    0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
    0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
    0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
    0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
    0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
    0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
    0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
    0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
    0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
    0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
    0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
    0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz

    // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
    0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
    0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
    0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
    0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)

    // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
    // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
    0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
    0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
    0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
    0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
    0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
    0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
    0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
    0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
    0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
    0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
    0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
    0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
    0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
    0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
    0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
};

/* AL7230 register 4 (band/filter select), same index scheme as table 0. */
const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
    0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz

    // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)

    // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
    // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
    0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
    0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
};
//}} RobertYu

/*---------------------  Static Functions  --------------------------*/

/*
 * Description: AIROHA AL7230 IFRF chip init function
 *
 * Programs the full 11b/g init sequence through the embedded 3-wire
 * interface, then runs the vendor calibration sequence (TXDCOC then RCK)
 * with the PLL on.  The MACvTimer0MicroSDelay() waits between writes are
 * part of the vendor-specified sequence; do not reorder.
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if every register write succeeded; false otherwise.
 */
bool s_bAL7230Init(unsigned long dwIoBase)
{
    int ii;
    bool bResult;

    bResult = true;

    //3-wire control for normal mode
    VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
    MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV));
    BBvPowerSaveModeOFF(dwIoBase); //RobertYu:20050106, have DC value for Calibration

    for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
        bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[ii]);

    // PLL On
    MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

    //Calibration
    MACvTimer0MicroSDelay(dwIoBase, 150);//150us
    bResult &= IFRFbWriteEmbedded(dwIoBase, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:active, RCK:disable
    MACvTimer0MicroSDelay(dwIoBase, 30);//30us
    bResult &= IFRFbWriteEmbedded(dwIoBase, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:disable, RCK:active
    MACvTimer0MicroSDelay(dwIoBase, 30);//30us
    bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); //TXDCOC:disable, RCK:disable

    MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV));

    BBvPowerSaveModeON(dwIoBase); // RobertYu:20050106

    // PE1: TX_ON, PE2: RX_ON, PE3: PLLON
    //3-wire control for power saving mode
    VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000

    return bResult;
}

/*
 * Description: Select an AL7230 channel.
 *
 * Need to pull PLLON low when writing channel registers through the
 * 3-wire interface; the caller's byChannel is the driver channel index
 * (1-based) used to index the three channel tables above.
 */
bool s_bAL7230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
{
    bool bResult;

    bResult = true;

    // PLLON Off
    MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

    bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable0[byChannel - 1]); //Reg0
    bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable1[byChannel - 1]); //Reg1
    bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable2[byChannel - 1]); //Reg4

    // PLLOn On
    MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

    // Set Channel[7] = 0 to tell H/W channel is changing now.
    VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
    MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230);
    // Set Channel[7] = 1 to tell H/W channel change is done.
    VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));

    return bResult;
}

/*
 * The UW2452 and VT3226 chip support was removed from this file; only
 * the original description stubs remain below for reference.
 */

/*
 * Description: Select channel with UW2452 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 */

//{{ RobertYu: 20041210

/*
 * Description: UW2452 IFRF chip init function
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 */

//}} RobertYu
////////////////////////////////////////////////////////////////////////////////

/*
 * Description: VT3226 IFRF chip init function
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 */

/*
 * Description: Select channel with VT3226 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 */

/*---------------------  Export Variables  --------------------------*/

/*---------------------  Export Functions  --------------------------*/

/*
 * Description: Write to IF/RF, by embedded programming
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      dwData      - data to write
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
* */ bool IFRFbWriteEmbedded(unsigned long dwIoBase, unsigned long dwData) { unsigned short ww; unsigned long dwValue; VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData); // W_MAX_TIMEOUT is the timeout period for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue); if (dwValue & IFREGCTL_DONE) break; } if (ww == W_MAX_TIMEOUT) { // DBG_PORT80_ALWAYS(0x32); return false; } return true; } /* * Description: RFMD RF2959 IFRF chip init function * * Parameters: * In: * dwIoBase - I/O base address * Out: * none * * Return Value: true if succeeded; false if failed. * */ /* * Description: Select channel with RFMD 2959 chip * * Parameters: * In: * dwIoBase - I/O base address * uChannel - Channel number * Out: * none * * Return Value: true if succeeded; false if failed. * */ /* * Description: AIROHA IFRF chip init function * * Parameters: * In: * dwIoBase - I/O base address * Out: * none * * Return Value: true if succeeded; false if failed. * */ bool RFbAL2230Init(unsigned long dwIoBase) { int ii; bool bResult; bResult = true; //3-wire control for normal mode VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV)); //2008-8-21 chester <add> // PLL Off MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); //patch abnormal AL2230 frequency output //2008-8-21 chester <add> IFRFbWriteEmbedded(dwIoBase, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[ii]); //2008-8-21 chester <add> MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us // PLL On MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); MACvTimer0MicroSDelay(dwIoBase, 150);//150us bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); MACvTimer0MicroSDelay(dwIoBase, 30);//30us bResult &= IFRFbWriteEmbedded(dwIoBase, 
(0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); MACvTimer0MicroSDelay(dwIoBase, 30);//30us bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]); MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV)); //3-wire control for power saving mode VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000 return bResult; } bool RFbAL2230SelectChannel(unsigned long dwIoBase, unsigned char byChannel) { bool bResult; bResult = true; bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable0[byChannel - 1]); bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable1[byChannel - 1]); // Set Channel[7] = 0 to tell H/W channel is changing now. VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230); // Set Channel[7] = 1 to tell H/W channel change is done. VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); return bResult; } /* * Description: UW2451 IFRF chip init function * * Parameters: * In: * dwIoBase - I/O base address * Out: * none * * Return Value: true if succeeded; false if failed. * */ /* * Description: Select channel with UW2451 chip * * Parameters: * In: * dwIoBase - I/O base address * uChannel - Channel number * Out: * none * * Return Value: true if succeeded; false if failed. * */ /* * Description: Set sleep mode to UW2451 chip * * Parameters: * In: * dwIoBase - I/O base address * uChannel - Channel number * Out: * none * * Return Value: true if succeeded; false if failed. * */ /* * Description: RF init function * * Parameters: * In: * byBBType * byRFType * Out: * none * * Return Value: true if succeeded; false if failed. 
* */ bool RFbInit( PSDevice pDevice ) { bool bResult = true; switch (pDevice->byRFType) { case RF_AIROHA: case RF_AL2230S: pDevice->byMaxPwrLevel = AL2230_PWR_IDX_LEN; bResult = RFbAL2230Init(pDevice->PortOffset); break; case RF_AIROHA7230: pDevice->byMaxPwrLevel = AL7230_PWR_IDX_LEN; bResult = s_bAL7230Init(pDevice->PortOffset); break; case RF_NOTHING: bResult = true; break; default: bResult = false; break; } return bResult; } /* * Description: RF ShutDown function * * Parameters: * In: * byBBType * byRFType * Out: * none * * Return Value: true if succeeded; false if failed. * */ bool RFbShutDown( PSDevice pDevice ) { bool bResult = true; switch (pDevice->byRFType) { case RF_AIROHA7230: bResult = IFRFbWriteEmbedded(pDevice->PortOffset, 0x1ABAEF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW); break; default: bResult = true; break; } return bResult; } /* * Description: Select channel * * Parameters: * In: * byRFType * byChannel - Channel number * Out: * none * * Return Value: true if succeeded; false if failed. * */ bool RFbSelectChannel(unsigned long dwIoBase, unsigned char byRFType, unsigned char byChannel) { bool bResult = true; switch (byRFType) { case RF_AIROHA: case RF_AL2230S: bResult = RFbAL2230SelectChannel(dwIoBase, byChannel); break; //{{ RobertYu: 20050104 case RF_AIROHA7230: bResult = s_bAL7230SelectChannel(dwIoBase, byChannel); break; //}} RobertYu case RF_NOTHING: bResult = true; break; default: bResult = false; break; } return bResult; } /* * Description: Write WakeProgSyn * * Parameters: * In: * dwIoBase - I/O base address * uChannel - channel number * bySleepCnt - SleepProgSyn count * * Return Value: None. 
* */ bool RFvWriteWakeProgSyn(unsigned long dwIoBase, unsigned char byRFType, unsigned int uChannel) { int ii; unsigned char byInitCount = 0; unsigned char bySleepCount = 0; VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0); switch (byRFType) { case RF_AIROHA: case RF_AL2230S: if (uChannel > CB_MAX_CHANNEL_24G) return false; byInitCount = CB_AL2230_INIT_SEQ + 2; // Init Reg + Channel Reg (2) bySleepCount = 0; if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount)) { return false; } for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) { MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]); } MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]); ii++; MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]); break; //{{ RobertYu: 20050104 // Need to check, PLLON need to be low for channel setting case RF_AIROHA7230: byInitCount = CB_AL7230_INIT_SEQ + 3; // Init Reg + Channel Reg (3) bySleepCount = 0; if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount)) { return false; } if (uChannel <= CB_MAX_CHANNEL_24G) { for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) { MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]); } } else { for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) { MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]); } } MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]); ii++; MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]); ii++; MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]); break; //}} RobertYu case RF_NOTHING: return true; break; default: return false; break; } MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount)); return true; } 
/* * Description: Set Tx power * * Parameters: * In: * dwIoBase - I/O base address * dwRFPowerTable - RF Tx Power Setting * Out: * none * * Return Value: true if succeeded; false if failed. * */ bool RFbSetPower( PSDevice pDevice, unsigned int uRATE, unsigned int uCH ) { bool bResult = true; unsigned char byPwr = 0; unsigned char byDec = 0; unsigned char byPwrdBm = 0; if (pDevice->dwDiagRefCount != 0) { return true; } if ((uCH < 1) || (uCH > CB_MAX_CHANNEL)) { return false; } switch (uRATE) { case RATE_1M: case RATE_2M: case RATE_5M: case RATE_11M: byPwr = pDevice->abyCCKPwrTbl[uCH]; byPwrdBm = pDevice->abyCCKDefaultPwr[uCH]; //PLICE_DEBUG-> //byPwr+=5; //PLICE_DEBUG <- break; case RATE_6M: case RATE_9M: case RATE_12M: case RATE_18M: byPwr = pDevice->abyOFDMPwrTbl[uCH]; if (pDevice->byRFType == RF_UW2452) { byDec = byPwr + 14; } else { byDec = byPwr + 10; } if (byDec >= pDevice->byMaxPwrLevel) { byDec = pDevice->byMaxPwrLevel-1; } if (pDevice->byRFType == RF_UW2452) { byPwrdBm = byDec - byPwr; byPwrdBm /= 3; } else { byPwrdBm = byDec - byPwr; byPwrdBm >>= 1; } byPwrdBm += pDevice->abyOFDMDefaultPwr[uCH]; byPwr = byDec; //PLICE_DEBUG-> //byPwr+=5; //PLICE_DEBUG<- break; case RATE_24M: case RATE_36M: case RATE_48M: case RATE_54M: byPwr = pDevice->abyOFDMPwrTbl[uCH]; byPwrdBm = pDevice->abyOFDMDefaultPwr[uCH]; //PLICE_DEBUG-> //byPwr+=5; //PLICE_DEBUG<- break; } if (pDevice->byCurPwr == byPwr) { return true; } bResult = RFbRawSetPower(pDevice, byPwr, uRATE); if (bResult == true) { pDevice->byCurPwr = byPwr; } return bResult; } /* * Description: Set Tx power * * Parameters: * In: * dwIoBase - I/O base address * dwRFPowerTable - RF Tx Power Setting * Out: * none * * Return Value: true if succeeded; false if failed. 
* */ bool RFbRawSetPower( PSDevice pDevice, unsigned char byPwr, unsigned int uRATE ) { bool bResult = true; unsigned long dwMax7230Pwr = 0; if (byPwr >= pDevice->byMaxPwrLevel) { return false; } switch (pDevice->byRFType) { case RF_AIROHA: bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]); if (uRATE <= RATE_11M) { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } else { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } break; case RF_AL2230S: bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]); if (uRATE <= RATE_11M) { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } else { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } break; case RF_AIROHA7230: // 0x080F1B00 for 3 wire control TxGain(D10) and 0x31 as TX Gain value dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) | (BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW; bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwMax7230Pwr); break; default: break; } return bResult; } /*+ * * Routine Description: * Translate RSSI to dBm * * Parameters: * In: * pDevice - The adapter to be translated * byCurrRSSI - RSSI to be translated * Out: * pdwdbm - Translated dbm number * * Return Value: none * -*/ void RFvRSSITodBm( PSDevice pDevice, unsigned char byCurrRSSI, long *pldBm ) { unsigned char byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03); long b = (byCurrRSSI & 0x3F); long a = 0; unsigned char abyAIROHARF[4] = {0, 18, 0, 40}; switch (pDevice->byRFType) { case RF_AIROHA: case RF_AL2230S: case RF_AIROHA7230: //RobertYu: 20040104 a = abyAIROHARF[byIdx]; break; default: break; } 
*pldBm = -1 * (a + b * 2); } //////////////////////////////////////////////////////////////////////////////// //{{ RobertYu: 20050104 // Post processing for the 11b/g and 11a. // for save time on changing Reg2,3,5,7,10,12,15 bool RFbAL7230SelectChannelPostProcess(unsigned long dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel) { bool bResult; bResult = true; // if change between 11 b/g and 11a need to update the following register // Channel Index 1~14 if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) { // Change from 2.4G to 5G bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[2]); //Reg2 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[3]); //Reg3 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[5]); //Reg5 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[7]); //Reg7 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[10]);//Reg10 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[12]);//Reg12 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[15]);//Reg15 } else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) { // change from 5G to 2.4G bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[2]); //Reg2 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[3]); //Reg3 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[5]); //Reg5 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[7]); //Reg7 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[10]);//Reg10 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[12]);//Reg12 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[15]);//Reg15 } return bResult; } //}} RobertYu ////////////////////////////////////////////////////////////////////////////////
gpl-2.0
GAXUSXX/GalaxyS7edge_G935F_Kernel
drivers/mmc/host/sdhci-of-hlwd.c
532
2743
/*
 * drivers/mmc/host/sdhci-of-hlwd.c
 *
 * Nintendo Wii Secure Digital Host Controller Interface.
 * Copyright (C) 2009 The GameCube Linux Team
 * Copyright (C) 2009 Albert Herranz
 *
 * Based on sdhci-of-esdhc.c
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"

/*
 * Ops and quirks for the Nintendo Wii SDHCI controllers.
 *
 * The Hollywood chipset needs a short pause after every register write,
 * or things go horribly wrong.  Each accessor below therefore wraps the
 * big-endian helper with a fixed udelay.
 */
#define SDHCI_HLWD_WRITE_DELAY	5 /* usecs */

/* 32-bit register write followed by the mandatory settle delay. */
static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
{
	sdhci_be32bs_writel(host, val, reg);
	udelay(SDHCI_HLWD_WRITE_DELAY);
}

/* 16-bit register write followed by the mandatory settle delay. */
static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
{
	sdhci_be32bs_writew(host, val, reg);
	udelay(SDHCI_HLWD_WRITE_DELAY);
}

/* 8-bit register write followed by the mandatory settle delay. */
static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
{
	sdhci_be32bs_writeb(host, val, reg);
	udelay(SDHCI_HLWD_WRITE_DELAY);
}

static const struct sdhci_ops sdhci_hlwd_ops = {
	.read_l			= sdhci_be32bs_readl,
	.read_w			= sdhci_be32bs_readw,
	.read_b			= sdhci_be32bs_readb,
	.write_l		= sdhci_hlwd_writel,
	.write_w		= sdhci_hlwd_writew,
	.write_b		= sdhci_hlwd_writeb,
	.set_clock		= sdhci_set_clock,
	.set_bus_width		= sdhci_set_bus_width,
	.reset			= sdhci_reset,
	.set_uhs_signaling	= sdhci_set_uhs_signaling,
};

static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
		  SDHCI_QUIRK_32BIT_DMA_SIZE,
	.ops = &sdhci_hlwd_ops,
};

/* Probe: delegate entirely to the generic platform helper. */
static int sdhci_hlwd_probe(struct platform_device *pdev)
{
	return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0);
}

static int sdhci_hlwd_remove(struct platform_device *pdev)
{
	return sdhci_pltfm_unregister(pdev);
}

static const struct of_device_id sdhci_hlwd_of_match[] = {
	{ .compatible = "nintendo,hollywood-sdhci" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);

static struct platform_driver sdhci_hlwd_driver = {
	.driver = {
		.name = "sdhci-hlwd",
		.of_match_table = sdhci_hlwd_of_match,
		.pm = SDHCI_PLTFM_PMOPS,
	},
	.probe = sdhci_hlwd_probe,
	.remove = sdhci_hlwd_remove,
};

module_platform_driver(sdhci_hlwd_driver);

MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
MODULE_LICENSE("GPL v2");
gpl-2.0
cameron581/kernel_msm
drivers/pci/probe.c
1044
48862
/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static LIST_HEAD(pci_host_bridges);

/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* bus_find_device() match callback: accept any device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need know if pci is initiated.
 * Basically, we think pci is not initiated when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * Walk up to dev's root bus and return the registered host bridge that
 * owns it, or NULL if none is on the pci_host_bridges list.
 */
static struct pci_host_bridge *pci_host_bridge(struct pci_dev *dev)
{
	struct pci_bus *bus;
	struct pci_host_bridge *bridge;

	bus = dev->bus;
	while (bus->parent)
		bus = bus->parent;

	list_for_each_entry(bridge, &pci_host_bridges, list) {
		if (bridge->bus == bus)
			return bridge;
	}

	return NULL;
}

/* True when res2 lies entirely inside res1. */
static bool resource_contains(struct resource *res1, struct resource *res2)
{
	return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Translate a CPU resource into the bus address space of dev's host
 * bridge by subtracting the offset of the matching bridge window
 * (offset 0 when no window matches).
 */
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_host_bridge *bridge = pci_host_bridge(dev);
	struct pci_host_bridge_window *window;
	resource_size_t offset = 0;

	list_for_each_entry(window, &bridge->windows, list) {
		if (resource_type(res) != resource_type(window->res))
			continue;

		if (resource_contains(window->res, res)) {
			offset = window->offset;
			break;
		}
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

/* True when region2 lies entirely inside region1. */
static bool region_contains(struct pci_bus_region *region1,
			    struct pci_bus_region *region2)
{
	return region1->start <= region2->start && region1->end >= region2->end;
}

/*
 * Translate a bus address region into CPU resource space by adding the
 * offset of the host-bridge window whose bus-side range contains it.
 */
void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	struct pci_host_bridge *bridge = pci_host_bridge(dev);
	struct pci_host_bridge_window *window;
	struct pci_bus_region bus_region;
	resource_size_t offset = 0;

	list_for_each_entry(window, &bridge->windows, list) {
		if (resource_type(res) != resource_type(window->res))
			continue;

		/* Convert the window itself to bus addresses for comparison. */
		bus_region.start = window->res->start - window->offset;
		bus_region.end = window->res->end - window->offset;

		if (region_contains(&bus_region, region)) {
			offset = window->offset;
			break;
		}
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_attrs	= pcibus_dev_attrs,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

/*
 * Decode the size of a BAR from the value read back after writing all
 * 1s (maxbase), masked by the relevant address bits.  Returns the size
 * minus one as an inclusive extent, or 0 for an unimplemented BAR.
 */
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.
*/
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

/*
 * Map the raw BAR register bits into IORESOURCE_* flags: I/O vs memory,
 * prefetchable, and 64-bit memory type.
 */
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		dev_warn(&dev->dev,
			 "mem unknown type %x treated as 32-bit BAR\n",
			 mem_type);
		break;
	}
	return flags;
}

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
			struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/*
	 * Disable decoding while sizing the BAR, unless the device must
	 * keep MMIO enabled (mmio_always_on).
	 */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		pci_write_config_word(dev, PCI_COMMAND,
			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
	}

	res->name = pci_name(dev);

	/* Classic BAR sizing: save, write all 1s, read back, restore. */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	if (!dev->mmio_always_on)
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		/* Size the upper half of the 64-bit BAR the same way. */
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
				pos);
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			pcibios_bus_to_resource(dev, res, &region);
		} else {
			region.start = l64;
			region.end = l64 + sz64;
			pcibios_bus_to_resource(dev, res, &region);
			dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
				   pos, res);
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
		pcibios_bus_to_resource(dev, res, &region);

		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
	}

 out:
	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
 fail:
	res->flags = 0;
	goto out;
}

/*
 * Read the first @howmany BARs of @dev (stepping past the upper half of
 * any 64-bit BAR) and, when @rom is non-zero, the expansion ROM BAR at
 * config offset @rom.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* __pci_read_base returns 1 for 64-bit BARs: skip the pair. */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

/*
 * Decode the I/O window of a PCI-to-PCI bridge into child->resource[0].
 */
static void __devinit pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res, res2;

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	/* 32-bit I/O windows carry the upper 16 bits in separate registers. */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base && base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		res2.flags = res->flags;
		region.start = base;
		region.end = limit + 0xfff;
		pcibios_bus_to_resource(dev, &res2, &region);
		/* Only fill in start/end that firmware left unset. */
		if (!res->start)
			res->start = res2.start;
		if (!res->end)
			res->end = res2.end;
		dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
	}
}

/*
 * Decode the non-prefetchable memory window of a bridge into
 * child->resource[1].
 */
static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev,
PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;

	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
	}
}

/*
 * Decode the prefetchable memory window of a bridge (possibly 64-bit)
 * into child->resource[2].
 */
static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
	}
}

/*
 * Read the three forwarding windows of the bridge above @child and, for
 * transparent bridges, inherit the parent's resources as subtractive
 * decode entries.
 */
void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
		 child->secondary, child->subordinate,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   " bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

/* Allocate and minimally initialize a new struct pci_bus. */
static struct pci_bus * pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
		INIT_LIST_HEAD(&b->slots);
		INIT_LIST_HEAD(&b->resources);
		b->max_bus_speed = PCI_SPEED_UNKNOWN;
		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	}
	return b;
}

/* PCI-X status register speed field -> enum pci_bus_speed. */
static unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* PCIe link status/capability speed field -> enum pci_bus_speed. */
static unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

/* Update the bus's current speed from a PCIe link status register value. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

/*
 * Map an AGP status/command rate field to an enum pci_bus_speed;
 * AGP3 shifts the meaning of the rate bits up by two (4X/8X).
 */
static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;	/* invalid AGP3 rate encoding */
	}

 out:
	return agp_speeds[index];
}

/*
 * Determine max/current bus speed from whichever capability the bridge
 * exposes: AGP, PCI-X, or PCI Express (checked in that order).
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;
		pci_read_config_word(bridge, pos + 2, &status);

		if (status & 0x8000) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & 0x4000) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & 0x0002) {
			if (((status >> 12) & 0x3) == 2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];

		pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

/*
 * Allocate a child bus under @parent for bus number @busnr.  When
 * @bridge is non-NULL, link the child to it and inherit its windows.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.  This device will get
	 * registered later in pci_bus_add_devices()
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	if (!bridge)
		return child;

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;
}

/*
 * Allocate a child bus and add it to the parent's children list under
 * the pci_bus_sem write lock.
 */
struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

/*
 * Widen the subordinate bus number of every ancestor bridge so that
 * @max is reachable through it.  Only done when the platform lets us
 * reassign all bus numbers.
 */
static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->subordinate < max) {
		parent->subordinate = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in one dword. */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			child->subordinate = subordinate;
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->subordinate > max)
			max = child->subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
		}
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->subordinate > max) &&
					    (parent->subordinate <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->subordinate > bus->subordinate) ||
		    (child->number > bus->subordinate) ||
		    (child->number < bus->number) ||
		    (child->subordinate < bus->number)) {
			dev_info(&child->dev, "[bus %02x-%02x] %s "
				"hidden behind%s bridge %s [bus %02x-%02x]\n",
				child->number, child->subordinate,
				(bus->number > child->subordinate &&
				 bus->subordinate < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				bus->number, bus->subordinate);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	/* PCI_INTERRUPT_PIN: 1..4 = INTA..INTD, 0 = no interrupt pin. */
	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

/*
 * Cache the PCIe capability offset, port type, and max payload size
 * supported, if the device has an Express capability.
 */
void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

/*
 * Mark the device as a hotplug bridge when its PCIe slot capability
 * advertises hot-plug capable (SLTCAP_HPC).
 */
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	u32 reg32;

	pos = pci_pcie_cap(pdev);
	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	if (!(reg16 & PCI_EXP_FLAGS_SLOT))
		return;
	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor,class,memory and IO-space addresses,IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
*/ int pci_setup_device(struct pci_dev *dev) { u32 class; u8 hdr_type; struct pci_slot *slot; int pos = 0; struct pci_bus_region region; struct resource *res; if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) return -EIO; dev->sysdata = dev->bus->sysdata; dev->dev.parent = dev->bus->bridge; dev->dev.bus = &pci_bus_type; dev->hdr_type = hdr_type & 0x7f; dev->multifunction = !!(hdr_type & 0x80); dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) dev->slot = slot; /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) set this higher, assuming the system even supports it. */ dev->dma_mask = 0xffffffff; dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); dev->revision = class & 0xff; dev->class = class >> 8; /* upper 3 bytes */ dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n", dev->vendor, dev->device, dev->hdr_type, dev->class); /* need to have dev->class ready */ dev->cfg_size = pci_cfg_space_size(dev); /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); /* device class may be changed after fixup */ class = dev->class >> 8; switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ if (class == PCI_CLASS_BRIDGE_PCI) goto bad; pci_read_irq(dev); pci_read_bases(dev, 6, PCI_ROM_ADDRESS); pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device); /* * Do the ugly legacy mode stuff here rather than broken chip * quirk code. Legacy mode ATA controllers have fixed * addresses. These are not always echoed in BAR0-3, and * BAR0-3 in a few cases contain junk! 
*/ if (class == PCI_CLASS_STORAGE_IDE) { u8 progif; pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); if ((progif & 1) == 0) { region.start = 0x1F0; region.end = 0x1F7; res = &dev->resource[0]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev, res, &region); region.start = 0x3F6; region.end = 0x3F6; res = &dev->resource[1]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev, res, &region); } if ((progif & 4) == 0) { region.start = 0x170; region.end = 0x177; res = &dev->resource[2]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev, res, &region); region.start = 0x376; region.end = 0x376; res = &dev->resource[3]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev, res, &region); } } break; case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ if (class != PCI_CLASS_BRIDGE_PCI) goto bad; /* The PCI-to-PCI bridge spec requires that subtractive decoding (i.e. transparent) bridge must have programming interface code of 0x01. */ pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); } break; case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ if (class != PCI_CLASS_BRIDGE_CARDBUS) goto bad; pci_read_irq(dev); pci_read_bases(dev, 1, 0); pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device); break; default: /* unknown header */ dev_err(&dev->dev, "unknown header type %02x, " "ignoring device\n", dev->hdr_type); return -EIO; bad: dev_err(&dev->dev, "ignoring class %#08x (doesn't match header " "type %02x)\n", dev->class, dev->hdr_type); dev->class = PCI_CLASS_NOT_DEFINED; } /* We found a fine healthy device, go go go... 
*/ return 0; } static void pci_release_capabilities(struct pci_dev *dev) { pci_vpd_release(dev); pci_iov_release(dev); pci_free_cap_save_buffers(dev); } /** * pci_release_dev - free a pci device structure when all users of it are finished. * @dev: device that's been disconnected * * Will be called only by the device core when all users of this pci device are * done. */ static void pci_release_dev(struct device *dev) { struct pci_dev *pci_dev; pci_dev = to_pci_dev(dev); pci_release_capabilities(pci_dev); pci_release_of_node(pci_dev); kfree(pci_dev); } /** * pci_cfg_space_size - get the configuration space size of the PCI device. * @dev: PCI device * * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices * have 4096 bytes. Even if the device is capable, that doesn't mean we can * access it. Maybe we don't have a way to generate extended config space * accesses, or the device is behind a reverse Express bridge. So we try * reading the dword at 0x100 which must either be 0 or a valid extended * capability header. 
*/ int pci_cfg_space_size_ext(struct pci_dev *dev) { u32 status; int pos = PCI_CFG_SPACE_SIZE; if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) goto fail; if (status == 0xffffffff) goto fail; return PCI_CFG_SPACE_EXP_SIZE; fail: return PCI_CFG_SPACE_SIZE; } int pci_cfg_space_size(struct pci_dev *dev) { int pos; u32 status; u16 class; class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_HOST) return pci_cfg_space_size_ext(dev); pos = pci_pcie_cap(dev); if (!pos) { pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) goto fail; pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))) goto fail; } return pci_cfg_space_size_ext(dev); fail: return PCI_CFG_SPACE_SIZE; } static void pci_release_bus_bridge_dev(struct device *dev) { kfree(dev); } struct pci_dev *alloc_pci_dev(void) { struct pci_dev *dev; dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); if (!dev) return NULL; INIT_LIST_HEAD(&dev->bus_list); return dev; } EXPORT_SYMBOL(alloc_pci_dev); bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, int crs_timeout) { int delay = 1; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; /* some broken boards return 0 or ~0 if a slot is empty: */ if (*l == 0xffffffff || *l == 0x00000000 || *l == 0x0000ffff || *l == 0xffff0000) return false; /* Configuration request Retry Status */ while (*l == 0xffff0001) { if (!crs_timeout) return false; msleep(delay); delay *= 2; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; /* Card hasn't responded in 60 seconds? Must be stuck. */ if (delay > crs_timeout) { printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " "responding\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); return false; } } return true; } EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); /* * Read the config data for a PCI device, sanity-check it * and fill in the dev structure... 
*/ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; u32 l; if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) return NULL; dev = alloc_pci_dev(); if (!dev) return NULL; dev->bus = bus; dev->devfn = devfn; dev->vendor = l & 0xffff; dev->device = (l >> 16) & 0xffff; pci_set_of_node(dev); if (pci_setup_device(dev)) { kfree(dev); return NULL; } return dev; } static void pci_init_capabilities(struct pci_dev *dev) { /* MSI/MSI-X list */ pci_msi_init_pci_dev(dev); /* Buffers for saving PCIe and PCI-X capabilities */ pci_allocate_cap_save_buffers(dev); /* Power Management */ pci_pm_init(dev); platform_pci_wakeup_init(dev); /* Vital Product Data */ pci_vpd_pci22_init(dev); /* Alternative Routing-ID Forwarding */ pci_enable_ari(dev); /* Single Root I/O Virtualization */ pci_iov_init(dev); /* Enable ACS P2P upstream forwarding */ pci_enable_acs(dev); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) { device_initialize(&dev->dev); dev->dev.release = pci_release_dev; pci_dev_get(dev); dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; pci_set_dma_max_seg_size(dev, 65536); pci_set_dma_seg_boundary(dev, 0xffffffff); /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); /* moved out from quirk header fixup code */ pci_reassigndev_resource_alignment(dev); /* Clear the state_saved flag. */ dev->state_saved = false; /* Initialize various capabilities */ pci_init_capabilities(dev); /* * Add the device to our list of discovered devices * and the bus list for fixup functions, etc. 
*/ down_write(&pci_bus_sem); list_add_tail(&dev->bus_list, &bus->devices); up_write(&pci_bus_sem); } struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; dev = pci_get_slot(bus, devfn); if (dev) { pci_dev_put(dev); return dev; } dev = pci_scan_device(bus, devfn); if (!dev) return NULL; pci_device_add(dev, bus); return dev; } EXPORT_SYMBOL(pci_scan_single_device); static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) { u16 cap; unsigned pos, next_fn; if (!dev) return 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); if (!pos) return 0; pci_read_config_word(dev, pos + 4, &cap); next_fn = cap >> 8; if (next_fn <= fn) return 0; return next_fn; } static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn) { return (fn + 1) % 8; } static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) { return 0; } static int only_one_child(struct pci_bus *bus) { struct pci_dev *parent = bus->self; if (!parent || !pci_is_pcie(parent)) return 0; if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT || parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) return 1; return 0; } /** * pci_scan_slot - scan a PCI slot on a bus for devices. * @bus: PCI bus to scan * @devfn: slot number to scan (must have zero function.) * * Scan a PCI slot on the specified PCI bus for devices, adding * discovered devices to the @bus->devices list. New devices * will not have is_added set. * * Returns the number of new devices found. 
*/ int pci_scan_slot(struct pci_bus *bus, int devfn) { unsigned fn, nr = 0; struct pci_dev *dev; unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; if (only_one_child(bus) && (devfn > 0)) return 0; /* Already scanned the entire slot */ dev = pci_scan_single_device(bus, devfn); if (!dev) return 0; if (!dev->is_added) nr++; if (pci_ari_enabled(bus)) next_fn = next_ari_fn; else if (dev->multifunction) next_fn = next_trad_fn; for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { dev = pci_scan_single_device(bus, devfn + fn); if (dev) { if (!dev->is_added) nr++; dev->multifunction = 1; } } /* only one slot has pcie device */ if (bus->self && nr) pcie_aspm_init_link_state(bus->self); return nr; } static int pcie_find_smpss(struct pci_dev *dev, void *data) { u8 *smpss = data; if (!pci_is_pcie(dev)) return 0; /* For PCIE hotplug enabled slots not connected directly to a * PCI-E root port, there can be problems when hotplugging * devices. This is due to the possibility of hotplugging a * device into the fabric with a smaller MPS that the devices * currently running have configured. Modifying the MPS on the * running devices could cause a fatal bus error due to an * incoming frame being larger than the newly configured MPS. * To work around this, the MPS for the entire fabric must be * set to the minimum size. Any devices hotplugged into this * fabric will have the minimum MPS set. If the PCI hotplug * slot is directly connected to the root port and there are not * other devices on the fabric (which seems to be the most * common case), then this is not an issue and MPS discovery * will occur as normal. 
*/ if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || (dev->bus->self && dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) *smpss = 0; if (*smpss > dev->pcie_mpss) *smpss = dev->pcie_mpss; return 0; } static void pcie_write_mps(struct pci_dev *dev, int mps) { int rc; if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { mps = 128 << dev->pcie_mpss; if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) /* For "Performance", the assumption is made that * downstream communication will never be larger than * the MRRS. So, the MPS only needs to be configured * for the upstream communication. This being the case, * walk from the top down and set the MPS of the child * to that of the parent bus. * * Configure the device MPS with the smaller of the * device MPSS or the bridge MPS (which is assumed to be * properly configured at this point to the largest * allowable MPS based on its parent bus). */ mps = min(mps, pcie_get_mps(dev->bus->self)); } rc = pcie_set_mps(dev, mps); if (rc) dev_err(&dev->dev, "Failed attempting to set the MPS\n"); } static void pcie_write_mrrs(struct pci_dev *dev) { int rc, mrrs; /* In the "safe" case, do not configure the MRRS. There appear to be * issues with setting MRRS to 0 on a number of devices. */ if (pcie_bus_config != PCIE_BUS_PERFORMANCE) return; /* For Max performance, the MRRS must be set to the largest supported * value. However, it cannot be configured larger than the MPS the * device or the bus can support. This should already be properly * configured by a prior call to pcie_write_mps. */ mrrs = pcie_get_mps(dev); /* MRRS is a R/W register. Invalid values can be written, but a * subsequent read will verify if the value is acceptable or not. * If the MRRS value provided is not acceptable (e.g., too large), * shrink the value until it is acceptable to the HW. 
*/ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { rc = pcie_set_readrq(dev, mrrs); if (!rc) break; dev_warn(&dev->dev, "Failed attempting to set the MRRS\n"); mrrs /= 2; } if (mrrs < 128) dev_err(&dev->dev, "MRRS was unable to be configured with a " "safe value. If problems are experienced, try running " "with pci=pcie_bus_safe.\n"); } static int pcie_bus_configure_set(struct pci_dev *dev, void *data) { int mps, orig_mps; if (!pci_is_pcie(dev)) return 0; mps = 128 << *(u8 *)data; orig_mps = pcie_get_mps(dev); pcie_write_mps(dev, mps); pcie_write_mrrs(dev); dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), " "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, orig_mps, pcie_get_readrq(dev)); return 0; } /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, * parents then children fashion. If this changes, then this code will not * work as designed. */ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) { u8 smpss; if (!pci_is_pcie(bus->self)) return; if (pcie_bus_config == PCIE_BUS_TUNE_OFF) return; /* FIXME - Peer to peer DMA is possible, though the endpoint would need * to be aware to the MPS of the destination. To work around this, * simply force the MPS of the entire system to the smallest possible. */ if (pcie_bus_config == PCIE_BUS_PEER2PEER) smpss = 0; if (pcie_bus_config == PCIE_BUS_SAFE) { smpss = mpss; pcie_find_smpss(bus->self, &smpss); pci_walk_bus(bus, pcie_find_smpss, &smpss); } pcie_bus_configure_set(bus->self, &smpss); pci_walk_bus(bus, pcie_bus_configure_set, &smpss); } EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) { unsigned int devfn, pass, max = bus->secondary; struct pci_dev *dev; dev_dbg(&bus->dev, "scanning bus\n"); /* Go find them, Rover! */ for (devfn = 0; devfn < 0x100; devfn += 8) pci_scan_slot(bus, devfn); /* Reserve buses for SR-IOV capability. 
*/ max += pci_iov_bus_range(bus); /* * After performing arch-dependent fixup of the bus, look behind * all PCI-to-PCI bridges on this bus. */ if (!bus->is_added) { dev_dbg(&bus->dev, "fixups for bus\n"); pcibios_fixup_bus(bus); if (pci_is_root_bus(bus)) bus->is_added = 1; } for (pass=0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) max = pci_scan_bridge(bus, dev, max, pass); } /* * We've scanned the bus and so we know all about what's on * the other side of any bridges that may be on this bus plus * any devices. * * Return how far we've got finding sub-buses. */ dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max); return max; } struct pci_bus *pci_create_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources) { int error; struct pci_host_bridge *bridge; struct pci_bus *b, *b2; struct device *dev; struct pci_host_bridge_window *window, *n; struct resource *res; resource_size_t offset; char bus_addr[64]; char *fmt; bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) return NULL; b = pci_alloc_bus(); if (!b) goto err_bus; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto err_dev; b->sysdata = sysdata; b->ops = ops; b2 = pci_find_bus(pci_domain_nr(b), bus); if (b2) { /* If we already got to this bus through a different bridge, ignore it */ dev_dbg(&b2->dev, "bus already known\n"); goto err_out; } dev->parent = parent; dev->release = pci_release_bus_bridge_dev; dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); error = device_register(dev); if (error) goto dev_reg_err; b->bridge = get_device(dev); device_enable_async_suspend(b->bridge); pci_set_bus_of_node(b); if (!parent) set_dev_node(b->bridge, pcibus_to_node(b)); b->dev.class = &pcibus_class; b->dev.parent = b->bridge; dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); error = device_register(&b->dev); if (error) goto 
class_dev_reg_err; /* Create legacy_io and legacy_mem files for this bus */ pci_create_legacy_files(b); b->number = b->secondary = bus; bridge->bus = b; INIT_LIST_HEAD(&bridge->windows); if (parent) dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev)); else printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); /* Add initial resources to the bus */ list_for_each_entry_safe(window, n, resources, list) { list_move_tail(&window->list, &bridge->windows); res = window->res; offset = window->offset; pci_bus_add_resource(b, res, 0); if (offset) { if (resource_type(res) == IORESOURCE_IO) fmt = " (bus address [%#06llx-%#06llx])"; else fmt = " (bus address [%#010llx-%#010llx])"; snprintf(bus_addr, sizeof(bus_addr), fmt, (unsigned long long) (res->start - offset), (unsigned long long) (res->end - offset)); } else bus_addr[0] = '\0'; dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr); } down_write(&pci_bus_sem); list_add_tail(&bridge->list, &pci_host_bridges); list_add_tail(&b->node, &pci_root_buses); up_write(&pci_bus_sem); return b; class_dev_reg_err: device_unregister(dev); dev_reg_err: down_write(&pci_bus_sem); list_del(&bridge->list); list_del(&b->node); up_write(&pci_bus_sem); err_out: kfree(dev); err_dev: kfree(b); err_bus: kfree(bridge); return NULL; } struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources) { struct pci_bus *b; b = pci_create_root_bus(parent, bus, ops, sysdata, resources); if (!b) return NULL; b->subordinate = pci_scan_child_bus(b); pci_bus_add_devices(b); return b; } EXPORT_SYMBOL(pci_scan_root_bus); /* Deprecated; use pci_scan_root_bus() instead */ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata) { LIST_HEAD(resources); struct pci_bus *b; pci_add_resource(&resources, &ioport_resource); pci_add_resource(&resources, &iomem_resource); b = 
pci_create_root_bus(parent, bus, ops, sysdata, &resources); if (b) b->subordinate = pci_scan_child_bus(b); else pci_free_resource_list(&resources); return b; } EXPORT_SYMBOL(pci_scan_bus_parented); struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) { LIST_HEAD(resources); struct pci_bus *b; pci_add_resource(&resources, &ioport_resource); pci_add_resource(&resources, &iomem_resource); b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources); if (b) { b->subordinate = pci_scan_child_bus(b); pci_bus_add_devices(b); } else { pci_free_resource_list(&resources); } return b; } EXPORT_SYMBOL(pci_scan_bus); #ifdef CONFIG_HOTPLUG /** * pci_rescan_bus_bridge_resize - scan a PCI bus for devices. * @bridge: PCI bridge for the bus to scan * * Scan a PCI bus and child buses for new devices, add them, * and enable them, resizing bridge mmio/io resource if necessary * and possible. The caller must ensure the child devices are already * removed for resizing to occur. * * Returns the max number of subordinate bus discovered. 
*/ unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge) { unsigned int max; struct pci_bus *bus = bridge->subordinate; max = pci_scan_child_bus(bus); pci_assign_unassigned_bridge_resources(bridge); pci_bus_add_devices(bus); return max; } EXPORT_SYMBOL(pci_add_new_bus); EXPORT_SYMBOL(pci_scan_slot); EXPORT_SYMBOL(pci_scan_bridge); EXPORT_SYMBOL_GPL(pci_scan_child_bus); #endif static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) { const struct pci_dev *a = to_pci_dev(d_a); const struct pci_dev *b = to_pci_dev(d_b); if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; if (a->bus->number < b->bus->number) return -1; else if (a->bus->number > b->bus->number) return 1; if (a->devfn < b->devfn) return -1; else if (a->devfn > b->devfn) return 1; return 0; } void __init pci_sort_breadthfirst(void) { bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); }
gpl-2.0
pkirchhofer/nsa325-linux-upstream
net/netfilter/nf_conntrack_ftp.c
1300
17526
/* FTP extension for connection tracking. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * (C) 2006-2012 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netfilter.h> #include <linux/ip.h> #include <linux/slab.h> #include <linux/ipv6.h> #include <linux/ctype.h> #include <linux/inet.h> #include <net/checksum.h> #include <net/tcp.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_helper.h> #include <linux/netfilter/nf_conntrack_ftp.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); MODULE_DESCRIPTION("ftp connection tracking helper"); MODULE_ALIAS("ip_conntrack_ftp"); MODULE_ALIAS_NFCT_HELPER("ftp"); /* This is slow, but it's simple. 
--RR */ static char *ftp_buffer; static DEFINE_SPINLOCK(nf_ftp_lock); #define MAX_PORTS 8 static u_int16_t ports[MAX_PORTS]; static unsigned int ports_c; module_param_array(ports, ushort, &ports_c, 0400); static bool loose; module_param(loose, bool, 0600); unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, enum nf_ct_ftp_type type, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp); EXPORT_SYMBOL_GPL(nf_nat_ftp_hook); static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *); static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *); static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *); static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *); static struct ftp_search { const char *pattern; size_t plen; char skip; char term; enum nf_ct_ftp_type ftptype; int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *); } search[IP_CT_DIR_MAX][2] = { [IP_CT_DIR_ORIGINAL] = { { .pattern = "PORT", .plen = sizeof("PORT") - 1, .skip = ' ', .term = '\r', .ftptype = NF_CT_FTP_PORT, .getnum = try_rfc959, }, { .pattern = "EPRT", .plen = sizeof("EPRT") - 1, .skip = ' ', .term = '\r', .ftptype = NF_CT_FTP_EPRT, .getnum = try_eprt, }, }, [IP_CT_DIR_REPLY] = { { .pattern = "227 ", .plen = sizeof("227 ") - 1, .ftptype = NF_CT_FTP_PASV, .getnum = try_rfc1123, }, { .pattern = "229 ", .plen = sizeof("229 ") - 1, .skip = '(', .term = ')', .ftptype = NF_CT_FTP_EPSV, .getnum = try_epsv_response, }, }, }; static int get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term) { const char *end; int ret = in6_pton(src, min_t(size_t, dlen, 0xffff), (u8 *)dst, term, &end); if (ret > 0) return (int)(end - src); return 0; } static int try_number(const char *data, size_t dlen, u_int32_t array[], int array_size, char sep, 
char term) { u_int32_t i, len; memset(array, 0, sizeof(array[0])*array_size); /* Keep data pointing at next char. */ for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) { if (*data >= '0' && *data <= '9') { array[i] = array[i]*10 + *data - '0'; } else if (*data == sep) i++; else { /* Unexpected character; true if it's the terminator (or we don't care about one) and we're finished. */ if ((*data == term || !term) && i == array_size - 1) return len; pr_debug("Char %u (got %u nums) `%u' unexpected\n", len, i, *data); return 0; } } pr_debug("Failed to fill %u numbers separated by %c\n", array_size, sep); return 0; } /* Returns 0, or length of numbers: 192,168,1,1,5,6 */ static int try_rfc959(const char *data, size_t dlen, struct nf_conntrack_man *cmd, char term, unsigned int *offset) { int length; u_int32_t array[6]; length = try_number(data, dlen, array, 6, ',', term); if (length == 0) return 0; cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) | (array[2] << 8) | array[3]); cmd->u.tcp.port = htons((array[4] << 8) | array[5]); return length; } /* * From RFC 1123: * The format of the 227 reply to a PASV command is not * well standardized. In particular, an FTP client cannot * assume that the parentheses shown on page 40 of RFC-959 * will be present (and in fact, Figure 3 on page 43 omits * them). Therefore, a User-FTP program that interprets * the PASV reply must scan the reply for the first digit * of the host and port numbers. */ static int try_rfc1123(const char *data, size_t dlen, struct nf_conntrack_man *cmd, char term, unsigned int *offset) { int i; for (i = 0; i < dlen; i++) if (isdigit(data[i])) break; if (i == dlen) return 0; *offset += i; return try_rfc959(data + i, dlen - i, cmd, 0, offset); } /* Grab port: number up to delimiter */ static int get_port(const char *data, int start, size_t dlen, char delim, __be16 *port) { u_int16_t tmp_port = 0; int i; for (i = start; i < dlen; i++) { /* Finished? 
*/ if (data[i] == delim) { if (tmp_port == 0) break; *port = htons(tmp_port); pr_debug("get_port: return %d\n", tmp_port); return i + 1; } else if (data[i] >= '0' && data[i] <= '9') tmp_port = tmp_port*10 + data[i] - '0'; else { /* Some other crap */ pr_debug("get_port: invalid char.\n"); break; } } return 0; } /* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */ static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd, char term, unsigned int *offset) { char delim; int length; /* First character is delimiter, then "1" for IPv4 or "2" for IPv6, then delimiter again. */ if (dlen <= 3) { pr_debug("EPRT: too short\n"); return 0; } delim = data[0]; if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) { pr_debug("try_eprt: invalid delimitter.\n"); return 0; } if ((cmd->l3num == PF_INET && data[1] != '1') || (cmd->l3num == PF_INET6 && data[1] != '2')) { pr_debug("EPRT: invalid protocol number.\n"); return 0; } pr_debug("EPRT: Got %c%c%c\n", delim, data[1], delim); if (data[1] == '1') { u_int32_t array[4]; /* Now we have IP address. */ length = try_number(data + 3, dlen - 3, array, 4, '.', delim); if (length != 0) cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) | (array[2] << 8) | array[3]); } else { /* Now we have IPv6 address. */ length = get_ipv6_addr(data + 3, dlen - 3, (struct in6_addr *)cmd->u3.ip6, delim); } if (length == 0) return 0; pr_debug("EPRT: Got IP address!\n"); /* Start offset includes initial "|1|", and trailing delimiter */ return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port); } /* Returns 0, or length of numbers: |||6446| */ static int try_epsv_response(const char *data, size_t dlen, struct nf_conntrack_man *cmd, char term, unsigned int *offset) { char delim; /* Three delimiters. 
*/ if (dlen <= 3) return 0; delim = data[0]; if (isdigit(delim) || delim < 33 || delim > 126 || data[1] != delim || data[2] != delim) return 0; return get_port(data, 3, dlen, delim, &cmd->u.tcp.port); } /* Return 1 for match, 0 for accept, -1 for partial. */ static int find_pattern(const char *data, size_t dlen, const char *pattern, size_t plen, char skip, char term, unsigned int *numoff, unsigned int *numlen, struct nf_conntrack_man *cmd, int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *)) { size_t i = plen; pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen); if (dlen == 0) return 0; if (dlen <= plen) { /* Short packet: try for partial? */ if (strncasecmp(data, pattern, dlen) == 0) return -1; else return 0; } if (strncasecmp(data, pattern, plen) != 0) { #if 0 size_t i; pr_debug("ftp: string mismatch\n"); for (i = 0; i < plen; i++) { pr_debug("ftp:char %u `%c'(%u) vs `%c'(%u)\n", i, data[i], data[i], pattern[i], pattern[i]); } #endif return 0; } pr_debug("Pattern matches!\n"); /* Now we've found the constant string, try to skip to the 'skip' character */ if (skip) { for (i = plen; data[i] != skip; i++) if (i == dlen - 1) return -1; /* Skip over the last character */ i++; } pr_debug("Skipped up to `%c'!\n", skip); *numoff = i; *numlen = getnum(data + i, dlen - i, cmd, term, numoff); if (!*numlen) return -1; pr_debug("Match succeeded!\n"); return 1; } /* Look up to see if we're just after a \n. */ static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir) { unsigned int i; for (i = 0; i < info->seq_aft_nl_num[dir]; i++) if (info->seq_aft_nl[dir][i] == seq) return 1; return 0; } /* We don't update if it's older than what we have. */ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, struct nf_ct_ftp_master *info, int dir, struct sk_buff *skb) { unsigned int i, oldest; /* Look for oldest: if we find exact match, we're done. 
*/ for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { if (info->seq_aft_nl[dir][i] == nl_seq) return; } if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; } else { if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1])) oldest = 0; else oldest = 1; if (after(nl_seq, info->seq_aft_nl[dir][oldest])) info->seq_aft_nl[dir][oldest] = nl_seq; } } static int help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { unsigned int dataoff, datalen; const struct tcphdr *th; struct tcphdr _tcph; const char *fb_ptr; int ret; u32 seq; int dir = CTINFO2DIR(ctinfo); unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff); struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct); struct nf_conntrack_expect *exp; union nf_inet_addr *daddr; struct nf_conntrack_man cmd = {}; unsigned int i; int found = 0, ends_in_nl; typeof(nf_nat_ftp_hook) nf_nat_ftp; /* Until there's been traffic both ways, don't look in packets. */ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) { pr_debug("ftp: Conntrackinfo = %u\n", ctinfo); return NF_ACCEPT; } th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); if (th == NULL) return NF_ACCEPT; dataoff = protoff + th->doff * 4; /* No data? */ if (dataoff >= skb->len) { pr_debug("ftp: dataoff(%u) >= skblen(%u)\n", dataoff, skb->len); return NF_ACCEPT; } datalen = skb->len - dataoff; spin_lock_bh(&nf_ftp_lock); fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer); BUG_ON(fb_ptr == NULL); ends_in_nl = (fb_ptr[datalen - 1] == '\n'); seq = ntohl(th->seq) + datalen; /* Look up to see if we're just after a \n. */ if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) { /* We're picking up this, clear flags and let it continue */ if (unlikely(ct_ftp_info->flags[dir] & NF_CT_FTP_SEQ_PICKUP)) { ct_ftp_info->flags[dir] ^= NF_CT_FTP_SEQ_PICKUP; goto skip_nl_seq; } /* Now if this ends in \n, update ftp info. 
*/ pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n", ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)", ct_ftp_info->seq_aft_nl[dir][0], ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)", ct_ftp_info->seq_aft_nl[dir][1]); ret = NF_ACCEPT; goto out_update_nl; } skip_nl_seq: /* Initialize IP/IPv6 addr to expected address (it's not mentioned in EPSV responses) */ cmd.l3num = nf_ct_l3num(ct); memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, sizeof(cmd.u3.all)); for (i = 0; i < ARRAY_SIZE(search[dir]); i++) { found = find_pattern(fb_ptr, datalen, search[dir][i].pattern, search[dir][i].plen, search[dir][i].skip, search[dir][i].term, &matchoff, &matchlen, &cmd, search[dir][i].getnum); if (found) break; } if (found == -1) { /* We don't usually drop packets. After all, this is connection tracking, not packet filtering. However, it is necessary for accurate tracking in this case. */ nf_ct_helper_log(skb, ct, "partial matching of `%s'", search[dir][i].pattern); ret = NF_DROP; goto out; } else if (found == 0) { /* No match */ ret = NF_ACCEPT; goto out_update_nl; } pr_debug("conntrack_ftp: match `%.*s' (%u bytes at %u)\n", matchlen, fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff); exp = nf_ct_expect_alloc(ct); if (exp == NULL) { nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ret = NF_DROP; goto out; } /* We refer to the reverse direction ("!dir") tuples here, * because we're expecting something in the other direction. * Doesn't matter unless NAT is happening. */ daddr = &ct->tuplehash[!dir].tuple.dst.u3; /* Update the ftp info */ if ((cmd.l3num == nf_ct_l3num(ct)) && memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, sizeof(cmd.u3.all))) { /* Enrico Scholz's passive FTP to partially RNAT'd ftp server: it really wants us to connect to a different IP address. Simply don't record it for NAT. 
*/ if (cmd.l3num == PF_INET) { pr_debug("conntrack_ftp: NOT RECORDING: %pI4 != %pI4\n", &cmd.u3.ip, &ct->tuplehash[dir].tuple.src.u3.ip); } else { pr_debug("conntrack_ftp: NOT RECORDING: %pI6 != %pI6\n", cmd.u3.ip6, ct->tuplehash[dir].tuple.src.u3.ip6); } /* Thanks to Cristiano Lincoln Mattos <lincoln@cesar.org.br> for reporting this potential problem (DMZ machines opening holes to internal networks, or the packet filter itself). */ if (!loose) { ret = NF_ACCEPT; goto out_put_expect; } daddr = &cmd.u3; } nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, cmd.l3num, &ct->tuplehash[!dir].tuple.src.u3, daddr, IPPROTO_TCP, NULL, &cmd.u.tcp.port); /* Now, NAT might want to mangle the packet, and register the * (possibly changed) expectation itself. */ nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook); if (nf_nat_ftp && ct->status & IPS_NAT_MASK) ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype, protoff, matchoff, matchlen, exp); else { /* Can't expect this? Best to drop packet now. */ if (nf_ct_expect_related(exp) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } else ret = NF_ACCEPT; } out_put_expect: nf_ct_expect_put(exp); out_update_nl: /* Now if this ends in \n, update ftp info. Seq may have been * adjusted by NAT code. */ if (ends_in_nl) update_nl_seq(ct, seq, ct_ftp_info, dir, skb); out: spin_unlock_bh(&nf_ftp_lock); return ret; } static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct) { struct nf_ct_ftp_master *ftp = nfct_help_data(ct); /* This conntrack has been injected from user-space, always pick up * sequence tracking. Otherwise, the first FTP command after the * failover breaks. 
*/ ftp->flags[IP_CT_DIR_ORIGINAL] |= NF_CT_FTP_SEQ_PICKUP; ftp->flags[IP_CT_DIR_REPLY] |= NF_CT_FTP_SEQ_PICKUP; return 0; } static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly; static const struct nf_conntrack_expect_policy ftp_exp_policy = { .max_expected = 1, .timeout = 5 * 60, }; /* don't make this __exit, since it's called from __init ! */ static void nf_conntrack_ftp_fini(void) { int i, j; for (i = 0; i < ports_c; i++) { for (j = 0; j < 2; j++) { if (ftp[i][j].me == NULL) continue; pr_debug("nf_ct_ftp: unregistering helper for pf: %d " "port: %d\n", ftp[i][j].tuple.src.l3num, ports[i]); nf_conntrack_helper_unregister(&ftp[i][j]); } } kfree(ftp_buffer); } static int __init nf_conntrack_ftp_init(void) { int i, j = -1, ret = 0; ftp_buffer = kmalloc(65536, GFP_KERNEL); if (!ftp_buffer) return -ENOMEM; if (ports_c == 0) ports[ports_c++] = FTP_PORT; /* FIXME should be configurable whether IPv4 and IPv6 FTP connections are tracked or not - YK */ for (i = 0; i < ports_c; i++) { ftp[i][0].tuple.src.l3num = PF_INET; ftp[i][1].tuple.src.l3num = PF_INET6; for (j = 0; j < 2; j++) { ftp[i][j].data_len = sizeof(struct nf_ct_ftp_master); ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]); ftp[i][j].tuple.dst.protonum = IPPROTO_TCP; ftp[i][j].expect_policy = &ftp_exp_policy; ftp[i][j].me = THIS_MODULE; ftp[i][j].help = help; ftp[i][j].from_nlattr = nf_ct_ftp_from_nlattr; if (ports[i] == FTP_PORT) sprintf(ftp[i][j].name, "ftp"); else sprintf(ftp[i][j].name, "ftp-%d", ports[i]); pr_debug("nf_ct_ftp: registering helper for pf: %d " "port: %d\n", ftp[i][j].tuple.src.l3num, ports[i]); ret = nf_conntrack_helper_register(&ftp[i][j]); if (ret) { printk(KERN_ERR "nf_ct_ftp: failed to register" " helper for pf: %d port: %d\n", ftp[i][j].tuple.src.l3num, ports[i]); nf_conntrack_ftp_fini(); return ret; } } } return 0; } module_init(nf_conntrack_ftp_init); module_exit(nf_conntrack_ftp_fini);
gpl-2.0
wenhulove333/LinuxKernelStable
arch/score/mm/tlb-score.c
2580
5478
/* * arch/score/mm/tlb-score.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/highmem.h> #include <linux/module.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/tlb.h> #define TLBSIZE 32 unsigned long asid_cache = ASID_FIRST_VERSION; EXPORT_SYMBOL(asid_cache); void local_flush_tlb_all(void) { unsigned long flags; unsigned long old_ASID; int entry; local_irq_save(flags); old_ASID = pevn_get() & ASID_MASK; pectx_set(0); /* invalid */ entry = tlblock_get(); /* skip locked entries*/ for (; entry < TLBSIZE; entry++) { tlbpt_set(entry); pevn_set(KSEG1); barrier(); tlb_write_indexed(); } pevn_set(old_ASID); local_irq_restore(flags); } /* * If mm is currently active_mm, we can't really drop it. Instead, * we will get a new one for it. 
*/ static inline void drop_mmu_context(struct mm_struct *mm) { unsigned long flags; local_irq_save(flags); get_new_mmu_context(mm); pevn_set(mm->context & ASID_MASK); local_irq_restore(flags); } void local_flush_tlb_mm(struct mm_struct *mm) { if (mm->context != 0) drop_mmu_context(mm); } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long vma_mm_context = mm->context; if (mm->context != 0) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= TLBSIZE) { int oldpid = pevn_get() & ASID_MASK; int newpid = vma_mm_context & ASID_MASK; start &= PAGE_MASK; end += (PAGE_SIZE - 1); end &= PAGE_MASK; while (start < end) { int idx; pevn_set(start | newpid); start += PAGE_SIZE; barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(0); pevn_set(KSEG1); if (idx < 0) continue; tlb_write_indexed(); } pevn_set(oldpid); } else { /* Bigger than TLBSIZE, get new ASID directly */ get_new_mmu_context(mm); if (mm == current->active_mm) pevn_set(vma_mm_context & ASID_MASK); } local_irq_restore(flags); } } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= TLBSIZE) { int pid = pevn_get(); start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { long idx; pevn_set(start); start += PAGE_SIZE; tlb_probe(); idx = tlbpt_get(); if (idx < 0) continue; pectx_set(0); pevn_set(KSEG1); barrier(); tlb_write_indexed(); } pevn_set(pid); } else { local_flush_tlb_all(); } local_irq_restore(flags); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { if (vma && vma->vm_mm->context != 0) { unsigned long flags; int oldpid, newpid, idx; unsigned long vma_ASID = vma->vm_mm->context; newpid = vma_ASID & ASID_MASK; page &= PAGE_MASK; local_irq_save(flags); oldpid 
= pevn_get() & ASID_MASK; pevn_set(page | newpid); barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(0); pevn_set(KSEG1); if (idx < 0) /* p_bit(31) - 1: miss, 0: hit*/ goto finish; barrier(); tlb_write_indexed(); finish: pevn_set(oldpid); local_irq_restore(flags); } } /* * This one is only used for pages with the global bit set so we don't care * much about the ASID. */ void local_flush_tlb_one(unsigned long page) { unsigned long flags; int oldpid, idx; local_irq_save(flags); oldpid = pevn_get(); page &= (PAGE_MASK << 1); pevn_set(page); barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(0); if (idx >= 0) { /* Make sure all entries differ. */ pevn_set(KSEG1); barrier(); tlb_write_indexed(); } pevn_set(oldpid); local_irq_restore(flags); } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long flags; int idx, pid; /* * Handle debugger faulting in for debugee. */ if (current->active_mm != vma->vm_mm) return; pid = pevn_get() & ASID_MASK; local_irq_save(flags); address &= PAGE_MASK; pevn_set(address | pid); barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(pte_val(pte)); pevn_set(address | pid); if (idx < 0) tlb_write_random(); else tlb_write_indexed(); pevn_set(pid); local_irq_restore(flags); } void tlb_init(void) { tlblock_set(0); local_flush_tlb_all(); memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100), &score7_FTLB_refill_Handler, 0xFC); flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100, EXCEPTION_VECTOR_BASE_ADDR + 0x1FC); }
gpl-2.0
ls2uper/linux
arch/score/mm/tlb-score.c
2580
5478
/* * arch/score/mm/tlb-score.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/highmem.h> #include <linux/module.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/tlb.h> #define TLBSIZE 32 unsigned long asid_cache = ASID_FIRST_VERSION; EXPORT_SYMBOL(asid_cache); void local_flush_tlb_all(void) { unsigned long flags; unsigned long old_ASID; int entry; local_irq_save(flags); old_ASID = pevn_get() & ASID_MASK; pectx_set(0); /* invalid */ entry = tlblock_get(); /* skip locked entries*/ for (; entry < TLBSIZE; entry++) { tlbpt_set(entry); pevn_set(KSEG1); barrier(); tlb_write_indexed(); } pevn_set(old_ASID); local_irq_restore(flags); } /* * If mm is currently active_mm, we can't really drop it. Instead, * we will get a new one for it. 
*/ static inline void drop_mmu_context(struct mm_struct *mm) { unsigned long flags; local_irq_save(flags); get_new_mmu_context(mm); pevn_set(mm->context & ASID_MASK); local_irq_restore(flags); } void local_flush_tlb_mm(struct mm_struct *mm) { if (mm->context != 0) drop_mmu_context(mm); } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long vma_mm_context = mm->context; if (mm->context != 0) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= TLBSIZE) { int oldpid = pevn_get() & ASID_MASK; int newpid = vma_mm_context & ASID_MASK; start &= PAGE_MASK; end += (PAGE_SIZE - 1); end &= PAGE_MASK; while (start < end) { int idx; pevn_set(start | newpid); start += PAGE_SIZE; barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(0); pevn_set(KSEG1); if (idx < 0) continue; tlb_write_indexed(); } pevn_set(oldpid); } else { /* Bigger than TLBSIZE, get new ASID directly */ get_new_mmu_context(mm); if (mm == current->active_mm) pevn_set(vma_mm_context & ASID_MASK); } local_irq_restore(flags); } } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= TLBSIZE) { int pid = pevn_get(); start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { long idx; pevn_set(start); start += PAGE_SIZE; tlb_probe(); idx = tlbpt_get(); if (idx < 0) continue; pectx_set(0); pevn_set(KSEG1); barrier(); tlb_write_indexed(); } pevn_set(pid); } else { local_flush_tlb_all(); } local_irq_restore(flags); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { if (vma && vma->vm_mm->context != 0) { unsigned long flags; int oldpid, newpid, idx; unsigned long vma_ASID = vma->vm_mm->context; newpid = vma_ASID & ASID_MASK; page &= PAGE_MASK; local_irq_save(flags); oldpid 
= pevn_get() & ASID_MASK; pevn_set(page | newpid); barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(0); pevn_set(KSEG1); if (idx < 0) /* p_bit(31) - 1: miss, 0: hit*/ goto finish; barrier(); tlb_write_indexed(); finish: pevn_set(oldpid); local_irq_restore(flags); } } /* * This one is only used for pages with the global bit set so we don't care * much about the ASID. */ void local_flush_tlb_one(unsigned long page) { unsigned long flags; int oldpid, idx; local_irq_save(flags); oldpid = pevn_get(); page &= (PAGE_MASK << 1); pevn_set(page); barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(0); if (idx >= 0) { /* Make sure all entries differ. */ pevn_set(KSEG1); barrier(); tlb_write_indexed(); } pevn_set(oldpid); local_irq_restore(flags); } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long flags; int idx, pid; /* * Handle debugger faulting in for debugee. */ if (current->active_mm != vma->vm_mm) return; pid = pevn_get() & ASID_MASK; local_irq_save(flags); address &= PAGE_MASK; pevn_set(address | pid); barrier(); tlb_probe(); idx = tlbpt_get(); pectx_set(pte_val(pte)); pevn_set(address | pid); if (idx < 0) tlb_write_random(); else tlb_write_indexed(); pevn_set(pid); local_irq_restore(flags); } void tlb_init(void) { tlblock_set(0); local_flush_tlb_all(); memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100), &score7_FTLB_refill_Handler, 0xFC); flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100, EXCEPTION_VECTOR_BASE_ADDR + 0x1FC); }
gpl-2.0
DerRomtester/android_kernel_samsung_aries
arch/mips/mm/c-tx39.c
2580
10856
/* * r2300.c: R2000 and R3000 specific mmu/cache code. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * with a lot of changes to make this thing work for R3000s * Tx39XX R4k style caches added. HK * Copyright (C) 1998, 1999, 2000 Harald Koerfgen * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <asm/cacheops.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/system.h> #include <asm/isadep.h> #include <asm/io.h> #include <asm/bootinfo.h> #include <asm/cpu.h> /* For R3000 cores with R4000 style caches */ static unsigned long icache_size, dcache_size; /* Size in bytes */ #include <asm/r4kcache.h> extern int r3k_have_wired_reg; /* in r3k-tlb.c */ /* This sequence is required to ensure icache is disabled immediately */ #define TX39_STOP_STREAMING() \ __asm__ __volatile__( \ ".set push\n\t" \ ".set noreorder\n\t" \ "b 1f\n\t" \ "nop\n\t" \ "1:\n\t" \ ".set pop" \ ) /* TX39H-style cache flush routines. 
*/ static void tx39h_flush_icache_all(void) { unsigned long flags, config; /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); blast_icache16(); write_c0_conf(config); local_irq_restore(flags); } static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ BUG_ON(size == 0); iob(); blast_inv_dcache_range(addr, addr + size); } /* TX39H2,TX39H3 */ static inline void tx39_blast_dcache_page(unsigned long addr) { if (current_cpu_type() != CPU_TX3912) blast_dcache16_page(addr); } static inline void tx39_blast_dcache_page_indexed(unsigned long addr) { blast_dcache16_page_indexed(addr); } static inline void tx39_blast_dcache(void) { blast_dcache16(); } static inline void tx39_blast_icache_page(unsigned long addr) { unsigned long flags, config; /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); blast_icache16_page(addr); write_c0_conf(config); local_irq_restore(flags); } static inline void tx39_blast_icache_page_indexed(unsigned long addr) { unsigned long flags, config; /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); blast_icache16_page_indexed(addr); write_c0_conf(config); local_irq_restore(flags); } static inline void tx39_blast_icache(void) { unsigned long flags, config; /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); blast_icache16(); write_c0_conf(config); local_irq_restore(flags); } static void tx39__flush_cache_vmap(void) { tx39_blast_dcache(); } static void tx39__flush_cache_vunmap(void) { tx39_blast_dcache(); } static inline void tx39_flush_cache_all(void) { if (!cpu_has_dc_aliases) return; tx39_blast_dcache(); } static inline void 
tx39___flush_cache_all(void) { tx39_blast_dcache(); tx39_blast_icache(); } static void tx39_flush_cache_mm(struct mm_struct *mm) { if (!cpu_has_dc_aliases) return; if (cpu_context(smp_processor_id(), mm) != 0) tx39_blast_dcache(); } static void tx39_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (!cpu_has_dc_aliases) return; if (!(cpu_context(smp_processor_id(), vma->vm_mm))) return; tx39_blast_dcache(); } static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn) { int exec = vma->vm_flags & VM_EXEC; struct mm_struct *mm = vma->vm_mm; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; /* * If ownes no valid ASID yet, cannot possibly have gotten * this page into the cache. */ if (cpu_context(smp_processor_id(), mm) == 0) return; page &= PAGE_MASK; pgdp = pgd_offset(mm, page); pudp = pud_offset(pgdp, page); pmdp = pmd_offset(pudp, page); ptep = pte_offset(pmdp, page); /* * If the page isn't marked valid, the page cannot possibly be * in the cache. */ if (!(pte_val(*ptep) & _PAGE_PRESENT)) return; /* * Doing flushes for another ASID than the current one is * too difficult since stupid R4k caches do a TLB translation * for every cache flush operation. So we do indexed flushes * in that case, which doesn't overly flush the cache too much. */ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { if (cpu_has_dc_aliases || exec) tx39_blast_dcache_page(page); if (exec) tx39_blast_icache_page(page); return; } /* * Do indexed flush, too much work to get the (possible) TLB refills * to work correctly. 
*/ if (cpu_has_dc_aliases || exec) tx39_blast_dcache_page_indexed(page); if (exec) tx39_blast_icache_page_indexed(page); } static void local_tx39_flush_data_cache_page(void * addr) { tx39_blast_dcache_page((unsigned long)addr); } static void tx39_flush_data_cache_page(unsigned long addr) { tx39_blast_dcache_page(addr); } static void tx39_flush_icache_range(unsigned long start, unsigned long end) { if (end - start > dcache_size) tx39_blast_dcache(); else protected_blast_dcache_range(start, end); if (end - start > icache_size) tx39_blast_icache(); else { unsigned long flags, config; /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); protected_blast_icache_range(start, end); write_c0_conf(config); local_irq_restore(flags); } } static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) { unsigned long end; if (((size | addr) & (PAGE_SIZE - 1)) == 0) { end = addr + size; do { tx39_blast_dcache_page(addr); addr += PAGE_SIZE; } while(addr != end); } else if (size > dcache_size) { tx39_blast_dcache(); } else { blast_dcache_range(addr, addr + size); } } static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) { unsigned long end; if (((size | addr) & (PAGE_SIZE - 1)) == 0) { end = addr + size; do { tx39_blast_dcache_page(addr); addr += PAGE_SIZE; } while(addr != end); } else if (size > dcache_size) { tx39_blast_dcache(); } else { blast_inv_dcache_range(addr, addr + size); } } static void tx39_flush_cache_sigtramp(unsigned long addr) { unsigned long ic_lsize = current_cpu_data.icache.linesz; unsigned long dc_lsize = current_cpu_data.dcache.linesz; unsigned long config; unsigned long flags; protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); /* disable icache (set ICE#) */ local_irq_save(flags); config = read_c0_conf(); write_c0_conf(config & ~TX39_CONF_ICE); TX39_STOP_STREAMING(); protected_flush_icache_line(addr & ~(ic_lsize - 1)); 
write_c0_conf(config); local_irq_restore(flags); } static __init void tx39_probe_cache(void) { unsigned long config; config = read_c0_conf(); icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >> TX39_CONF_ICS_SHIFT)); dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >> TX39_CONF_DCS_SHIFT)); current_cpu_data.icache.linesz = 16; switch (current_cpu_type()) { case CPU_TX3912: current_cpu_data.icache.ways = 1; current_cpu_data.dcache.ways = 1; current_cpu_data.dcache.linesz = 4; break; case CPU_TX3927: current_cpu_data.icache.ways = 2; current_cpu_data.dcache.ways = 2; current_cpu_data.dcache.linesz = 16; break; case CPU_TX3922: default: current_cpu_data.icache.ways = 1; current_cpu_data.dcache.ways = 1; current_cpu_data.dcache.linesz = 16; break; } } void __cpuinit tx39_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); unsigned long config; config = read_c0_conf(); config &= ~TX39_CONF_WBON; write_c0_conf(config); tx39_probe_cache(); switch (current_cpu_type()) { case CPU_TX3912: /* TX39/H core (writethru direct-map cache) */ __flush_cache_vmap = tx39__flush_cache_vmap; __flush_cache_vunmap = tx39__flush_cache_vunmap; flush_cache_all = tx39h_flush_icache_all; __flush_cache_all = tx39h_flush_icache_all; flush_cache_mm = (void *) tx39h_flush_icache_all; flush_cache_range = (void *) tx39h_flush_icache_all; flush_cache_page = (void *) tx39h_flush_icache_all; flush_icache_range = (void *) tx39h_flush_icache_all; local_flush_icache_range = (void *) tx39h_flush_icache_all; flush_cache_sigtramp = (void *) tx39h_flush_icache_all; local_flush_data_cache_page = (void *) tx39h_flush_icache_all; flush_data_cache_page = (void *) tx39h_flush_icache_all; _dma_cache_wback_inv = tx39h_dma_cache_wback_inv; shm_align_mask = PAGE_SIZE - 1; break; case CPU_TX3922: case CPU_TX3927: default: /* TX39/H2,H3 core (writeback 2way-set-associative cache) */ r3k_have_wired_reg = 1; write_c0_wired(0); /* set 8 on reset... 
*/ /* board-dependent init code may set WBON */ __flush_cache_vmap = tx39__flush_cache_vmap; __flush_cache_vunmap = tx39__flush_cache_vunmap; flush_cache_all = tx39_flush_cache_all; __flush_cache_all = tx39___flush_cache_all; flush_cache_mm = tx39_flush_cache_mm; flush_cache_range = tx39_flush_cache_range; flush_cache_page = tx39_flush_cache_page; flush_icache_range = tx39_flush_icache_range; local_flush_icache_range = tx39_flush_icache_range; flush_cache_sigtramp = tx39_flush_cache_sigtramp; local_flush_data_cache_page = local_tx39_flush_data_cache_page; flush_data_cache_page = tx39_flush_data_cache_page; _dma_cache_wback_inv = tx39_dma_cache_wback_inv; _dma_cache_wback = tx39_dma_cache_wback_inv; _dma_cache_inv = tx39_dma_cache_inv; shm_align_mask = max_t(unsigned long, (dcache_size / current_cpu_data.dcache.ways) - 1, PAGE_SIZE - 1); break; } current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways; current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways; current_cpu_data.icache.sets = current_cpu_data.icache.waysize / current_cpu_data.icache.linesz; current_cpu_data.dcache.sets = current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz; if (current_cpu_data.dcache.waysize > PAGE_SIZE) current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES; current_cpu_data.icache.waybit = 0; current_cpu_data.dcache.waybit = 0; printk("Primary instruction cache %ldkB, linesize %d bytes\n", icache_size >> 10, current_cpu_data.icache.linesz); printk("Primary data cache %ldkB, linesize %d bytes\n", dcache_size >> 10, current_cpu_data.dcache.linesz); build_clear_page(); build_copy_page(); tx39h_flush_icache_all(); }
gpl-2.0
bju2000/kernel_lge_msm8994
drivers/misc/ioc4.c
2836
14673
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005-2006 Silicon Graphics, Inc. All Rights Reserved. */ /* This file contains the master driver module for use by SGI IOC4 subdrivers. * * It allocates any resources shared between multiple subdevices, and * provides accessor functions (where needed) and the like for those * resources. It also provides a mechanism for the subdevice modules * to support loading and unloading. * * Non-shared resources (e.g. external interrupt A_INT_OUT register page * alias, serial port and UART registers) are handled by the subdevice * modules themselves. * * This is all necessary because IOC4 is not implemented as a multi-function * PCI device, but an amalgamation of disparate registers for several * types of device (ATA, serial, external interrupts). The normal * resource management in the kernel doesn't have quite the right interfaces * to handle this situation (e.g. multiple modules can't claim the same * PCI ID), thus this IOC4 master module. 
*/ #include <linux/errno.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ioc4.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/time.h> #include <asm/io.h> /*************** * Definitions * ***************/ /* Tweakable values */ /* PCI bus speed detection/calibration */ #define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */ #define IOC4_CALIBRATE_CYCLES 256 /* Average over this many cycles */ #define IOC4_CALIBRATE_DISCARD 2 /* Discard first few cycles */ #define IOC4_CALIBRATE_LOW_MHZ 25 /* Lower bound on bus speed sanity */ #define IOC4_CALIBRATE_HIGH_MHZ 75 /* Upper bound on bus speed sanity */ #define IOC4_CALIBRATE_DEFAULT_MHZ 66 /* Assumed if sanity check fails */ /************************ * Submodule management * ************************/ static DEFINE_MUTEX(ioc4_mutex); static LIST_HEAD(ioc4_devices); static LIST_HEAD(ioc4_submodules); /* Register an IOC4 submodule */ int ioc4_register_submodule(struct ioc4_submodule *is) { struct ioc4_driver_data *idd; mutex_lock(&ioc4_mutex); list_add(&is->is_list, &ioc4_submodules); /* Initialize submodule for each IOC4 */ if (!is->is_probe) goto out; list_for_each_entry(idd, &ioc4_devices, idd_list) { if (is->is_probe(idd)) { printk(KERN_WARNING "%s: IOC4 submodule %s probe failed " "for pci_dev %s", __func__, module_name(is->is_owner), pci_name(idd->idd_pdev)); } } out: mutex_unlock(&ioc4_mutex); return 0; } /* Unregister an IOC4 submodule */ void ioc4_unregister_submodule(struct ioc4_submodule *is) { struct ioc4_driver_data *idd; mutex_lock(&ioc4_mutex); list_del(&is->is_list); /* Remove submodule for each IOC4 */ if (!is->is_remove) goto out; list_for_each_entry(idd, &ioc4_devices, idd_list) { if (is->is_remove(idd)) { printk(KERN_WARNING "%s: IOC4 submodule %s remove failed " "for pci_dev %s.\n", __func__, module_name(is->is_owner), pci_name(idd->idd_pdev)); } } out: mutex_unlock(&ioc4_mutex); } /********************* * Device management * 
*********************/ #define IOC4_CALIBRATE_LOW_LIMIT \ (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ) #define IOC4_CALIBRATE_HIGH_LIMIT \ (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ) #define IOC4_CALIBRATE_DEFAULT \ (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ) #define IOC4_CALIBRATE_END \ (IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD) #define IOC4_INT_OUT_MODE_TOGGLE 0x7 /* Toggle INT_OUT every COUNT+1 ticks */ /* Determines external interrupt output clock period of the PCI bus an * IOC4 is attached to. This value can be used to determine the PCI * bus speed. * * IOC4 has a design feature that various internal timers are derived from * the PCI bus clock. This causes IOC4 device drivers to need to take the * bus speed into account when setting various register values (e.g. INT_OUT * register COUNT field, UART divisors, etc). Since this information is * needed by several subdrivers, it is determined by the main IOC4 driver, * even though the following code utilizes external interrupt registers * to perform the speed calculation. 
*/ static void ioc4_clock_calibrate(struct ioc4_driver_data *idd) { union ioc4_int_out int_out; union ioc4_gpcr gpcr; unsigned int state, last_state = 1; struct timespec start_ts, end_ts; uint64_t start, end, period; unsigned int count = 0; /* Enable output */ gpcr.raw = 0; gpcr.fields.dir = IOC4_GPCR_DIR_0; gpcr.fields.int_out_en = 1; writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw); /* Reset to power-on state */ writel(0, &idd->idd_misc_regs->int_out.raw); mmiowb(); /* Set up square wave */ int_out.raw = 0; int_out.fields.count = IOC4_CALIBRATE_COUNT; int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE; int_out.fields.diag = 0; writel(int_out.raw, &idd->idd_misc_regs->int_out.raw); mmiowb(); /* Check square wave period averaged over some number of cycles */ do { int_out.raw = readl(&idd->idd_misc_regs->int_out.raw); state = int_out.fields.int_out; if (!last_state && state) { count++; if (count == IOC4_CALIBRATE_END) { ktime_get_ts(&end_ts); break; } else if (count == IOC4_CALIBRATE_DISCARD) ktime_get_ts(&start_ts); } last_state = state; } while (1); /* Calculation rearranged to preserve intermediate precision. * Logically: * 1. "end - start" gives us the measurement period over all * the square wave cycles. * 2. Divide by number of square wave cycles to get the period * of a square wave cycle. * 3. Divide by 2*(int_out.fields.count+1), which is the formula * by which the IOC4 generates the square wave, to get the * period of an IOC4 INT_OUT count. */ end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec; start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec; period = (end - start) / (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1)); /* Bounds check the result. */ if (period > IOC4_CALIBRATE_LOW_LIMIT || period < IOC4_CALIBRATE_HIGH_LIMIT) { printk(KERN_INFO "IOC4 %s: Clock calibration failed. 
Assuming" "PCI clock is %d ns.\n", pci_name(idd->idd_pdev), IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR); period = IOC4_CALIBRATE_DEFAULT; } else { u64 ns = period; do_div(ns, IOC4_EXTINT_COUNT_DIVISOR); printk(KERN_DEBUG "IOC4 %s: PCI clock is %llu ns.\n", pci_name(idd->idd_pdev), (unsigned long long)ns); } /* Remember results. We store the extint clock period rather * than the PCI clock period so that greater precision is * retained. Divide by IOC4_EXTINT_COUNT_DIVISOR to get * PCI clock period. */ idd->count_period = period; } /* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT. * Each brings out different combinations of IOC4 signals, thus. * the IOC4 subdrivers need to know to which we're attached. * * We look for the presence of a SCSI (IO9) or SATA (IO10) controller * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. * If neither is present, it's a PCI-RT. */ static unsigned int ioc4_variant(struct ioc4_driver_data *idd) { struct pci_dev *pdev = NULL; int found = 0; /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */ do { pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160, pdev); if (pdev && idd->idd_pdev->bus->number == pdev->bus->number && 3 == PCI_SLOT(pdev->devfn)) found = 1; } while (pdev && !found); if (NULL != pdev) { pci_dev_put(pdev); return IOC4_VARIANT_IO9; } /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. 
*/ pdev = NULL; do { pdev = pci_get_device(PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174, pdev); if (pdev && idd->idd_pdev->bus->number == pdev->bus->number && 3 == PCI_SLOT(pdev->devfn)) found = 1; } while (pdev && !found); if (NULL != pdev) { pci_dev_put(pdev); return IOC4_VARIANT_IO10; } /* PCI-RT: No SCSI/SATA controller will be present */ return IOC4_VARIANT_PCI_RT; } static void ioc4_load_modules(struct work_struct *work) { request_module("sgiioc4"); } static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules); /* Adds a new instance of an IOC4 card */ static int ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct ioc4_driver_data *idd; struct ioc4_submodule *is; uint32_t pcmd; int ret; /* Enable IOC4 and take ownership of it */ if ((ret = pci_enable_device(pdev))) { printk(KERN_WARNING "%s: Failed to enable IOC4 device for pci_dev %s.\n", __func__, pci_name(pdev)); goto out; } pci_set_master(pdev); /* Set up per-IOC4 data */ idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL); if (!idd) { printk(KERN_WARNING "%s: Failed to allocate IOC4 data for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_idd; } idd->idd_pdev = pdev; idd->idd_pci_id = pci_id; /* Map IOC4 misc registers. These are shared between subdevices * so the main IOC4 module manages them. 
*/ idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0); if (!idd->idd_bar0) { printk(KERN_WARNING "%s: Unable to find IOC4 misc resource " "for pci_dev %s.\n", __func__, pci_name(idd->idd_pdev)); ret = -ENODEV; goto out_pci; } if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs), "ioc4_misc")) { printk(KERN_WARNING "%s: Unable to request IOC4 misc region " "for pci_dev %s.\n", __func__, pci_name(idd->idd_pdev)); ret = -ENODEV; goto out_pci; } idd->idd_misc_regs = ioremap(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); if (!idd->idd_misc_regs) { printk(KERN_WARNING "%s: Unable to remap IOC4 misc region " "for pci_dev %s.\n", __func__, pci_name(idd->idd_pdev)); ret = -ENODEV; goto out_misc_region; } /* Failsafe portion of per-IOC4 initialization */ /* Detect card variant */ idd->idd_variant = ioc4_variant(idd); printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev), idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" : idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" : idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown"); /* Initialize IOC4 */ pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd); pci_write_config_dword(idd->idd_pdev, PCI_COMMAND, pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); /* Determine PCI clock */ ioc4_clock_calibrate(idd); /* Disable/clear all interrupts. Need to do this here lest * one submodule request the shared IOC4 IRQ, but interrupt * is generated by a different subdevice. */ /* Disable */ writel(~0, &idd->idd_misc_regs->other_iec.raw); writel(~0, &idd->idd_misc_regs->sio_iec); /* Clear (i.e. 
acknowledge) */ writel(~0, &idd->idd_misc_regs->other_ir.raw); writel(~0, &idd->idd_misc_regs->sio_ir); /* Track PCI-device specific data */ idd->idd_serial_data = NULL; pci_set_drvdata(idd->idd_pdev, idd); mutex_lock(&ioc4_mutex); list_add_tail(&idd->idd_list, &ioc4_devices); /* Add this IOC4 to all submodules */ list_for_each_entry(is, &ioc4_submodules, is_list) { if (is->is_probe && is->is_probe(idd)) { printk(KERN_WARNING "%s: IOC4 submodule 0x%s probe failed " "for pci_dev %s.\n", __func__, module_name(is->is_owner), pci_name(idd->idd_pdev)); } } mutex_unlock(&ioc4_mutex); /* Request sgiioc4 IDE driver on boards that bring that functionality * off of IOC4. The root filesystem may be hosted on a drive connected * to IOC4, so we need to make sure the sgiioc4 driver is loaded as it * won't be picked up by modprobes due to the ioc4 module owning the * PCI device. */ if (idd->idd_variant != IOC4_VARIANT_PCI_RT) { /* Request the module from a work procedure as the modprobe * goes out to a userland helper and that will hang if done * directly from ioc4_probe(). */ printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n"); schedule_work(&ioc4_load_modules_work); } return 0; out_misc_region: release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); out_pci: kfree(idd); out_idd: pci_disable_device(pdev); out: return ret; } /* Removes a particular instance of an IOC4 card. 
*/ static void ioc4_remove(struct pci_dev *pdev) { struct ioc4_submodule *is; struct ioc4_driver_data *idd; idd = pci_get_drvdata(pdev); /* Remove this IOC4 from all submodules */ mutex_lock(&ioc4_mutex); list_for_each_entry(is, &ioc4_submodules, is_list) { if (is->is_remove && is->is_remove(idd)) { printk(KERN_WARNING "%s: IOC4 submodule 0x%s remove failed " "for pci_dev %s.\n", __func__, module_name(is->is_owner), pci_name(idd->idd_pdev)); } } mutex_unlock(&ioc4_mutex); /* Release resources */ iounmap(idd->idd_misc_regs); if (!idd->idd_bar0) { printk(KERN_WARNING "%s: Unable to get IOC4 misc mapping for pci_dev %s. " "Device removal may be incomplete.\n", __func__, pci_name(idd->idd_pdev)); } release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); /* Disable IOC4 and relinquish */ pci_disable_device(pdev); /* Remove and free driver data */ mutex_lock(&ioc4_mutex); list_del(&idd->idd_list); mutex_unlock(&ioc4_mutex); kfree(idd); } static struct pci_device_id ioc4_id_table[] = { {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID, PCI_ANY_ID, 0x0b4000, 0xFFFFFF}, {0} }; static struct pci_driver ioc4_driver = { .name = "IOC4", .id_table = ioc4_id_table, .probe = ioc4_probe, .remove = ioc4_remove, }; MODULE_DEVICE_TABLE(pci, ioc4_id_table); /********************* * Module management * *********************/ /* Module load */ static int __init ioc4_init(void) { return pci_register_driver(&ioc4_driver); } /* Module unload */ static void __exit ioc4_exit(void) { /* Ensure ioc4_load_modules() has completed before exiting */ flush_work(&ioc4_load_modules_work); pci_unregister_driver(&ioc4_driver); } module_init(ioc4_init); module_exit(ioc4_exit); MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>"); MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ioc4_register_submodule); EXPORT_SYMBOL(ioc4_unregister_submodule);
gpl-2.0
webore/lenovo
drivers/rtc/rtc-max8925.c
3092
8080
/* * RTC driver for Maxim MAX8925 * * Copyright (C) 2009-2010 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/mfd/max8925.h> enum { RTC_SEC = 0, RTC_MIN, RTC_HOUR, RTC_WEEKDAY, RTC_DATE, RTC_MONTH, RTC_YEAR1, RTC_YEAR2, }; #define MAX8925_RTC_SEC 0x00 #define MAX8925_RTC_MIN 0x01 #define MAX8925_RTC_HOUR 0x02 #define MAX8925_RTC_WEEKDAY 0x03 #define MAX8925_RTC_DATE 0x04 #define MAX8925_RTC_MONTH 0x05 #define MAX8925_RTC_YEAR1 0x06 #define MAX8925_RTC_YEAR2 0x07 #define MAX8925_ALARM0_SEC 0x08 #define MAX8925_ALARM0_MIN 0x09 #define MAX8925_ALARM0_HOUR 0x0a #define MAX8925_ALARM0_WEEKDAY 0x0b #define MAX8925_ALARM0_DATE 0x0c #define MAX8925_ALARM0_MON 0x0d #define MAX8925_ALARM0_YEAR1 0x0e #define MAX8925_ALARM0_YEAR2 0x0f #define MAX8925_ALARM1_SEC 0x10 #define MAX8925_ALARM1_MIN 0x11 #define MAX8925_ALARM1_HOUR 0x12 #define MAX8925_ALARM1_WEEKDAY 0x13 #define MAX8925_ALARM1_DATE 0x14 #define MAX8925_ALARM1_MON 0x15 #define MAX8925_ALARM1_YEAR1 0x16 #define MAX8925_ALARM1_YEAR2 0x17 #define MAX8925_RTC_CNTL 0x1b #define MAX8925_RTC_STATUS 0x20 #define TIME_NUM 8 #define ALARM_1SEC (1 << 7) #define HOUR_12 (1 << 7) #define HOUR_AM_PM (1 << 5) #define ALARM0_IRQ (1 << 3) #define ALARM1_IRQ (1 << 2) #define ALARM0_STATUS (1 << 2) #define ALARM1_STATUS (1 << 1) struct max8925_rtc_info { struct rtc_device *rtc_dev; struct max8925_chip *chip; struct i2c_client *rtc; struct device *dev; }; static irqreturn_t rtc_update_handler(int irq, void *data) { struct max8925_rtc_info *info = (struct max8925_rtc_info *)data; /* disable ALARM0 except for 1SEC alarm */ max8925_set_bits(info->rtc, MAX8925_ALARM0_CNTL, 0x7f, 0); 
rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF); return IRQ_HANDLED; } static int tm_calc(struct rtc_time *tm, unsigned char *buf, int len) { if (len < TIME_NUM) return -EINVAL; tm->tm_year = (buf[RTC_YEAR2] >> 4) * 1000 + (buf[RTC_YEAR2] & 0xf) * 100 + (buf[RTC_YEAR1] >> 4) * 10 + (buf[RTC_YEAR1] & 0xf); tm->tm_year -= 1900; tm->tm_mon = ((buf[RTC_MONTH] >> 4) & 0x01) * 10 + (buf[RTC_MONTH] & 0x0f); tm->tm_mday = ((buf[RTC_DATE] >> 4) & 0x03) * 10 + (buf[RTC_DATE] & 0x0f); tm->tm_wday = buf[RTC_WEEKDAY] & 0x07; if (buf[RTC_HOUR] & HOUR_12) { tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x1) * 10 + (buf[RTC_HOUR] & 0x0f); if (buf[RTC_HOUR] & HOUR_AM_PM) tm->tm_hour += 12; } else tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x03) * 10 + (buf[RTC_HOUR] & 0x0f); tm->tm_min = ((buf[RTC_MIN] >> 4) & 0x7) * 10 + (buf[RTC_MIN] & 0x0f); tm->tm_sec = ((buf[RTC_SEC] >> 4) & 0x7) * 10 + (buf[RTC_SEC] & 0x0f); return 0; } static int data_calc(unsigned char *buf, struct rtc_time *tm, int len) { unsigned char high, low; if (len < TIME_NUM) return -EINVAL; high = (tm->tm_year + 1900) / 1000; low = (tm->tm_year + 1900) / 100; low = low - high * 10; buf[RTC_YEAR2] = (high << 4) + low; high = (tm->tm_year + 1900) / 10; low = tm->tm_year + 1900; low = low - high * 10; high = high - (high / 10) * 10; buf[RTC_YEAR1] = (high << 4) + low; high = tm->tm_mon / 10; low = tm->tm_mon; low = low - high * 10; buf[RTC_MONTH] = (high << 4) + low; high = tm->tm_mday / 10; low = tm->tm_mday; low = low - high * 10; buf[RTC_DATE] = (high << 4) + low; buf[RTC_WEEKDAY] = tm->tm_wday; high = tm->tm_hour / 10; low = tm->tm_hour; low = low - high * 10; buf[RTC_HOUR] = (high << 4) + low; high = tm->tm_min / 10; low = tm->tm_min; low = low - high * 10; buf[RTC_MIN] = (high << 4) + low; high = tm->tm_sec / 10; low = tm->tm_sec; low = low - high * 10; buf[RTC_SEC] = (high << 4) + low; return 0; } static int max8925_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct max8925_rtc_info *info = 
dev_get_drvdata(dev); unsigned char buf[TIME_NUM]; int ret; ret = max8925_bulk_read(info->rtc, MAX8925_RTC_SEC, TIME_NUM, buf); if (ret < 0) goto out; ret = tm_calc(tm, buf, TIME_NUM); out: return ret; } static int max8925_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct max8925_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[TIME_NUM]; int ret; ret = data_calc(buf, tm, TIME_NUM); if (ret < 0) goto out; ret = max8925_bulk_write(info->rtc, MAX8925_RTC_SEC, TIME_NUM, buf); out: return ret; } static int max8925_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct max8925_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[TIME_NUM]; int ret; ret = max8925_bulk_read(info->rtc, MAX8925_ALARM0_SEC, TIME_NUM, buf); if (ret < 0) goto out; ret = tm_calc(&alrm->time, buf, TIME_NUM); if (ret < 0) goto out; ret = max8925_reg_read(info->rtc, MAX8925_RTC_IRQ_MASK); if (ret < 0) goto out; if ((ret & ALARM0_IRQ) == 0) alrm->enabled = 1; else alrm->enabled = 0; ret = max8925_reg_read(info->rtc, MAX8925_RTC_STATUS); if (ret < 0) goto out; if (ret & ALARM0_STATUS) alrm->pending = 1; else alrm->pending = 0; out: return ret; } static int max8925_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct max8925_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[TIME_NUM]; int ret; ret = data_calc(buf, &alrm->time, TIME_NUM); if (ret < 0) goto out; ret = max8925_bulk_write(info->rtc, MAX8925_ALARM0_SEC, TIME_NUM, buf); if (ret < 0) goto out; /* only enable alarm on year/month/day/hour/min/sec */ ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x77); if (ret < 0) goto out; out: return ret; } static const struct rtc_class_ops max8925_rtc_ops = { .read_time = max8925_rtc_read_time, .set_time = max8925_rtc_set_time, .read_alarm = max8925_rtc_read_alarm, .set_alarm = max8925_rtc_set_alarm, }; static int __devinit max8925_rtc_probe(struct platform_device *pdev) { struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); 
struct max8925_rtc_info *info; int irq, ret; info = kzalloc(sizeof(struct max8925_rtc_info), GFP_KERNEL); if (!info) return -ENOMEM; info->chip = chip; info->rtc = chip->rtc; info->dev = &pdev->dev; irq = chip->irq_base + MAX8925_IRQ_RTC_ALARM0; ret = request_threaded_irq(irq, NULL, rtc_update_handler, IRQF_ONESHOT, "rtc-alarm0", info); if (ret < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", irq, ret); goto out_irq; } dev_set_drvdata(&pdev->dev, info); /* XXX - isn't this redundant? */ platform_set_drvdata(pdev, info); info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, &max8925_rtc_ops, THIS_MODULE); ret = PTR_ERR(info->rtc_dev); if (IS_ERR(info->rtc_dev)) { dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); goto out_rtc; } return 0; out_rtc: platform_set_drvdata(pdev, NULL); free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); out_irq: kfree(info); return ret; } static int __devexit max8925_rtc_remove(struct platform_device *pdev) { struct max8925_rtc_info *info = platform_get_drvdata(pdev); if (info) { free_irq(info->chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); rtc_device_unregister(info->rtc_dev); kfree(info); } return 0; } static struct platform_driver max8925_rtc_driver = { .driver = { .name = "max8925-rtc", .owner = THIS_MODULE, }, .probe = max8925_rtc_probe, .remove = __devexit_p(max8925_rtc_remove), }; static int __init max8925_rtc_init(void) { return platform_driver_register(&max8925_rtc_driver); } module_init(max8925_rtc_init); static void __exit max8925_rtc_exit(void) { platform_driver_unregister(&max8925_rtc_driver); } module_exit(max8925_rtc_exit); MODULE_DESCRIPTION("Maxim MAX8925 RTC driver"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Fusion-Devices/android_kernel_samsung_klte
drivers/staging/speakup/speakup_soft.c
3092
9712
/* speakup_soft.c - speakup driver to register and make available * a user space device for software synthesizers. written by: Kirk * Reiser <kirk@braille.uwo.ca> * * Copyright (C) 2003 Kirk Reiser. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. 
*/ #include <linux/unistd.h> #include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */ #include <linux/poll.h> /* for poll_wait() */ #include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ #include "spk_priv.h" #include "speakup.h" #define DRV_VERSION "2.6" #define SOFTSYNTH_MINOR 26 /* might as well give it one more than /dev/synth */ #define PROCSPEECH 0x0d #define CLEAR_SYNTH 0x18 static int softsynth_probe(struct spk_synth *synth); static void softsynth_release(void); static int softsynth_is_alive(struct spk_synth *synth); static unsigned char get_index(void); static struct miscdevice synth_device; static int init_pos; static int misc_registered; static struct var_t vars[] = { { CAPS_START, .u.s = {"\x01+3p" } }, { CAPS_STOP, .u.s = {"\x01-3p" } }, { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } }, { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/soft. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = __ATTR(freq, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); /* * We should uncomment the following definition, when we agree on a * method of passing a language designation to the software synthesizer. * static struct kobj_attribute lang_attribute = * __ATTR(lang, USER_RW, spk_var_show, spk_var_store); */ static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &freq_attribute.attr, /* &lang_attribute.attr, */ &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_soft = { .name = "soft", .version = DRV_VERSION, .long_name = "software synth", .init = "\01@\x01\x31y\n", .procspeech = PROCSPEECH, .delay = 0, .trigger = 0, .jiffies = 0, .full = 0, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = softsynth_probe, .release = softsynth_release, .synth_immediate = NULL, .catch_up = NULL, .flush = NULL, .is_alive = softsynth_is_alive, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = get_index, .indexing = { .command = "\x01%di", .lowindex = 1, .highindex = 5, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "soft", }, }; static char *get_initstring(void) { static char buf[40]; char *cp; struct var_t *var; memset(buf, 0, sizeof(buf)); cp = buf; var = synth_soft.vars; while (var->var_id != MAXVARS) { if (var->var_id != CAPS_START && var->var_id != CAPS_STOP && var->var_id != DIRECT) cp = cp + sprintf(cp, var->u.n.synth_fmt, var->u.n.value); var++; } cp = cp + sprintf(cp, "\n"); return buf; } static int softsynth_open(struct inode *inode, struct file *fp) { unsigned long flags; /*if ((fp->f_flags & O_ACCMODE) != O_RDONLY) */ /* return -EPERM; */ spk_lock(flags); if (synth_soft.alive) { spk_unlock(flags); return -EBUSY; } synth_soft.alive = 1; spk_unlock(flags); return 0; } static int softsynth_close(struct inode *inode, struct file *fp) { unsigned long flags; spk_lock(flags); synth_soft.alive = 0; init_pos = 0; spk_unlock(flags); /* Make sure we let applications go before leaving */ 
speakup_start_ttys(); return 0; } static ssize_t softsynth_read(struct file *fp, char *buf, size_t count, loff_t *pos) { int chars_sent = 0; char *cp; char *init; char ch; int empty; unsigned long flags; DEFINE_WAIT(wait); spk_lock(flags); while (1) { prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); if (!synth_buffer_empty() || speakup_info.flushing) break; spk_unlock(flags); if (fp->f_flags & O_NONBLOCK) { finish_wait(&speakup_event, &wait); return -EAGAIN; } if (signal_pending(current)) { finish_wait(&speakup_event, &wait); return -ERESTARTSYS; } schedule(); spk_lock(flags); } finish_wait(&speakup_event, &wait); cp = buf; init = get_initstring(); while (chars_sent < count) { if (speakup_info.flushing) { speakup_info.flushing = 0; ch = '\x18'; } else if (synth_buffer_empty()) { break; } else if (init[init_pos]) { ch = init[init_pos++]; } else { ch = synth_buffer_getc(); } spk_unlock(flags); if (copy_to_user(cp, &ch, 1)) return -EFAULT; spk_lock(flags); chars_sent++; cp++; } *pos += chars_sent; empty = synth_buffer_empty(); spk_unlock(flags); if (empty) { speakup_start_ttys(); *pos = 0; } return chars_sent; } static int last_index; static ssize_t softsynth_write(struct file *fp, const char *buf, size_t count, loff_t *pos) { unsigned long supplied_index = 0; int converted; converted = kstrtoul_from_user(buf, count, 0, &supplied_index); if (converted < 0) return converted; last_index = supplied_index; return count; } static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait) { unsigned long flags; int ret = 0; poll_wait(fp, &speakup_event, wait); spk_lock(flags); if (!synth_buffer_empty() || speakup_info.flushing) ret = POLLIN | POLLRDNORM; spk_unlock(flags); return ret; } static unsigned char get_index(void) { int rv; rv = last_index; last_index = 0; return rv; } static const struct file_operations softsynth_fops = { .owner = THIS_MODULE, .poll = softsynth_poll, .read = softsynth_read, .write = softsynth_write, .open = 
softsynth_open, .release = softsynth_close, }; static int softsynth_probe(struct spk_synth *synth) { if (misc_registered != 0) return 0; memset(&synth_device, 0, sizeof(synth_device)); synth_device.minor = SOFTSYNTH_MINOR; synth_device.name = "softsynth"; synth_device.fops = &softsynth_fops; if (misc_register(&synth_device)) { pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n"); return -ENODEV; } misc_registered = 1; pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR 26)\n"); return 0; } static void softsynth_release(void) { misc_deregister(&synth_device); misc_registered = 0; pr_info("unregistered /dev/softsynth\n"); } static int softsynth_is_alive(struct spk_synth *synth) { if (synth_soft.alive) return 1; return 0; } module_param_named(start, synth_soft.startup, short, S_IRUGO); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init soft_init(void) { return synth_add(&synth_soft); } static void __exit soft_exit(void) { synth_remove(&synth_soft); } module_init(soft_init); module_exit(soft_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_DESCRIPTION("Speakup userspace software synthesizer support"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
RyanMallon/linux-ep93xx
lib/strnlen_user.c
3092
3717
#include <linux/kernel.h> #include <linux/export.h> #include <linux/uaccess.h> #include <asm/word-at-a-time.h> /* Set bits in the first 'n' bytes when loaded from memory */ #ifdef __LITTLE_ENDIAN # define aligned_byte_mask(n) ((1ul << 8*(n))-1) #else # define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n))) #endif /* * Do a strnlen, return length of string *with* final '\0'. * 'count' is the user-supplied count, while 'max' is the * address space maximum. * * Return 0 for exceptions (which includes hitting the address * space maximum), or 'count+1' if hitting the user-supplied * maximum count. * * NOTE! We can sometimes overshoot the user-supplied maximum * if it fits in a aligned 'long'. The caller needs to check * the return value against "> max". */ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; long align, res = 0; unsigned long c; /* * Truncate 'max' to the user-specified limit, so that * we only have one limit we need to check in the loop */ if (max > count) max = count; /* * Do everything aligned. But that means that we * need to also expand the maximum.. */ align = (sizeof(long) - 1) & (unsigned long)src; src -= align; max += align; if (unlikely(__get_user(c,(unsigned long __user *)src))) return 0; c |= aligned_byte_mask(align); for (;;) { unsigned long data; if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); return res + find_zero(data) + 1 - align; } res += sizeof(unsigned long); if (unlikely(max < sizeof(unsigned long))) break; max -= sizeof(unsigned long); if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) return 0; } res -= align; /* * Uhhuh. We hit 'max'. But was that the user-specified maximum * too? If so, return the marker for "too long". 
*/ if (res >= count) return count+1; /* * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's 0. */ return 0; } /** * strnlen_user: - Get the size of a user string INCLUDING final NUL. * @str: The string to measure. * @count: Maximum count (including NUL character) * * Context: User context only. This function may sleep. * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * If the string is too long, returns 'count+1'. * On exception (or invalid count), returns 0. */ long strnlen_user(const char __user *str, long count) { unsigned long max_addr, src_addr; if (unlikely(count <= 0)) return 0; max_addr = user_addr_max(); src_addr = (unsigned long)str; if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; return do_strnlen_user(str, count, max); } return 0; } EXPORT_SYMBOL(strnlen_user); /** * strlen_user: - Get the size of a user string INCLUDING final NUL. * @str: The string to measure. * * Context: User context only. This function may sleep. * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * On exception, returns 0. * * If there is a limit on the length of a valid string, you may wish to * consider using strnlen_user() instead. */ long strlen_user(const char __user *str) { unsigned long max_addr, src_addr; max_addr = user_addr_max(); src_addr = (unsigned long)str; if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; return do_strnlen_user(str, ~0ul, max); } return 0; } EXPORT_SYMBOL(strlen_user);
gpl-2.0
lujji/JXD-7800b-KK-kernel
drivers/media/video/ks0127.c
3348
21211
/* * Video Capture Driver (Video for Linux 1/2) * for the Matrox Marvel G200,G400 and Rainbow Runner-G series * * This module is an interface to the KS0127 video decoder chip. * * Copyright (C) 1999 Ryan Drake <stiletto@mediaone.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************** * * Modified and extended by * Mike Bernson <mike@mlb.org> * Gerard v.d. Horst * Leon van Stuivenberg <l.vanstuivenberg@chello.nl> * Gernot Ziegler <gz@lysator.liu.se> * * Version History: * V1.0 Ryan Drake Initial version by Ryan Drake * V1.1 Gerard v.d. 
Horst Added some debugoutput, reset the video-standard */ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include "ks0127.h" MODULE_DESCRIPTION("KS0127 video decoder driver"); MODULE_AUTHOR("Ryan Drake"); MODULE_LICENSE("GPL"); /* Addresses */ #define I2C_KS0127_ADDON 0xD8 #define I2C_KS0127_ONBOARD 0xDA /* ks0127 control registers */ #define KS_STAT 0x00 #define KS_CMDA 0x01 #define KS_CMDB 0x02 #define KS_CMDC 0x03 #define KS_CMDD 0x04 #define KS_HAVB 0x05 #define KS_HAVE 0x06 #define KS_HS1B 0x07 #define KS_HS1E 0x08 #define KS_HS2B 0x09 #define KS_HS2E 0x0a #define KS_AGC 0x0b #define KS_HXTRA 0x0c #define KS_CDEM 0x0d #define KS_PORTAB 0x0e #define KS_LUMA 0x0f #define KS_CON 0x10 #define KS_BRT 0x11 #define KS_CHROMA 0x12 #define KS_CHROMB 0x13 #define KS_DEMOD 0x14 #define KS_SAT 0x15 #define KS_HUE 0x16 #define KS_VERTIA 0x17 #define KS_VERTIB 0x18 #define KS_VERTIC 0x19 #define KS_HSCLL 0x1a #define KS_HSCLH 0x1b #define KS_VSCLL 0x1c #define KS_VSCLH 0x1d #define KS_OFMTA 0x1e #define KS_OFMTB 0x1f #define KS_VBICTL 0x20 #define KS_CCDAT2 0x21 #define KS_CCDAT1 0x22 #define KS_VBIL30 0x23 #define KS_VBIL74 0x24 #define KS_VBIL118 0x25 #define KS_VBIL1512 0x26 #define KS_TTFRAM 0x27 #define KS_TESTA 0x28 #define KS_UVOFFH 0x29 #define KS_UVOFFL 0x2a #define KS_UGAIN 0x2b #define KS_VGAIN 0x2c #define KS_VAVB 0x2d #define KS_VAVE 0x2e #define KS_CTRACK 0x2f #define KS_POLCTL 0x30 #define KS_REFCOD 0x31 #define KS_INVALY 0x32 #define KS_INVALU 0x33 #define KS_INVALV 0x34 #define KS_UNUSEY 0x35 #define KS_UNUSEU 0x36 #define KS_UNUSEV 0x37 #define KS_USRSAV 0x38 #define KS_USREAV 0x39 #define KS_SHS1A 0x3a #define KS_SHS1B 0x3b #define KS_SHS1C 0x3c #define KS_CMDE 0x3d #define KS_VSDEL 0x3e #define KS_CMDF 0x3f #define KS_GAMMA0 0x40 #define 
KS_GAMMA1 0x41 #define KS_GAMMA2 0x42 #define KS_GAMMA3 0x43 #define KS_GAMMA4 0x44 #define KS_GAMMA5 0x45 #define KS_GAMMA6 0x46 #define KS_GAMMA7 0x47 #define KS_GAMMA8 0x48 #define KS_GAMMA9 0x49 #define KS_GAMMA10 0x4a #define KS_GAMMA11 0x4b #define KS_GAMMA12 0x4c #define KS_GAMMA13 0x4d #define KS_GAMMA14 0x4e #define KS_GAMMA15 0x4f #define KS_GAMMA16 0x50 #define KS_GAMMA17 0x51 #define KS_GAMMA18 0x52 #define KS_GAMMA19 0x53 #define KS_GAMMA20 0x54 #define KS_GAMMA21 0x55 #define KS_GAMMA22 0x56 #define KS_GAMMA23 0x57 #define KS_GAMMA24 0x58 #define KS_GAMMA25 0x59 #define KS_GAMMA26 0x5a #define KS_GAMMA27 0x5b #define KS_GAMMA28 0x5c #define KS_GAMMA29 0x5d #define KS_GAMMA30 0x5e #define KS_GAMMA31 0x5f #define KS_GAMMAD0 0x60 #define KS_GAMMAD1 0x61 #define KS_GAMMAD2 0x62 #define KS_GAMMAD3 0x63 #define KS_GAMMAD4 0x64 #define KS_GAMMAD5 0x65 #define KS_GAMMAD6 0x66 #define KS_GAMMAD7 0x67 #define KS_GAMMAD8 0x68 #define KS_GAMMAD9 0x69 #define KS_GAMMAD10 0x6a #define KS_GAMMAD11 0x6b #define KS_GAMMAD12 0x6c #define KS_GAMMAD13 0x6d #define KS_GAMMAD14 0x6e #define KS_GAMMAD15 0x6f #define KS_GAMMAD16 0x70 #define KS_GAMMAD17 0x71 #define KS_GAMMAD18 0x72 #define KS_GAMMAD19 0x73 #define KS_GAMMAD20 0x74 #define KS_GAMMAD21 0x75 #define KS_GAMMAD22 0x76 #define KS_GAMMAD23 0x77 #define KS_GAMMAD24 0x78 #define KS_GAMMAD25 0x79 #define KS_GAMMAD26 0x7a #define KS_GAMMAD27 0x7b #define KS_GAMMAD28 0x7c #define KS_GAMMAD29 0x7d #define KS_GAMMAD30 0x7e #define KS_GAMMAD31 0x7f /**************************************************************************** * mga_dev : represents one ks0127 chip. 
****************************************************************************/ struct adjust { int contrast; int bright; int hue; int ugain; int vgain; }; struct ks0127 { struct v4l2_subdev sd; v4l2_std_id norm; int ident; u8 regs[256]; }; static inline struct ks0127 *to_ks0127(struct v4l2_subdev *sd) { return container_of(sd, struct ks0127, sd); } static int debug; /* insmod parameter */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug output"); static u8 reg_defaults[64]; static void init_reg_defaults(void) { static int initialized; u8 *table = reg_defaults; if (initialized) return; initialized = 1; table[KS_CMDA] = 0x2c; /* VSE=0, CCIR 601, autodetect standard */ table[KS_CMDB] = 0x12; /* VALIGN=0, AGC control and input */ table[KS_CMDC] = 0x00; /* Test options */ /* clock & input select, write 1 to PORTA */ table[KS_CMDD] = 0x01; table[KS_HAVB] = 0x00; /* HAV Start Control */ table[KS_HAVE] = 0x00; /* HAV End Control */ table[KS_HS1B] = 0x10; /* HS1 Start Control */ table[KS_HS1E] = 0x00; /* HS1 End Control */ table[KS_HS2B] = 0x00; /* HS2 Start Control */ table[KS_HS2E] = 0x00; /* HS2 End Control */ table[KS_AGC] = 0x53; /* Manual setting for AGC */ table[KS_HXTRA] = 0x00; /* Extra Bits for HAV and HS1/2 */ table[KS_CDEM] = 0x00; /* Chroma Demodulation Control */ table[KS_PORTAB] = 0x0f; /* port B is input, port A output GPPORT */ table[KS_LUMA] = 0x01; /* Luma control */ table[KS_CON] = 0x00; /* Contrast Control */ table[KS_BRT] = 0x00; /* Brightness Control */ table[KS_CHROMA] = 0x2a; /* Chroma control A */ table[KS_CHROMB] = 0x90; /* Chroma control B */ table[KS_DEMOD] = 0x00; /* Chroma Demodulation Control & Status */ table[KS_SAT] = 0x00; /* Color Saturation Control*/ table[KS_HUE] = 0x00; /* Hue Control */ table[KS_VERTIA] = 0x00; /* Vertical Processing Control A */ /* Vertical Processing Control B, luma 1 line delayed */ table[KS_VERTIB] = 0x12; table[KS_VERTIC] = 0x0b; /* Vertical Processing Control C */ table[KS_HSCLL] = 0x00; /* Horizontal 
Scaling Ratio Low */ table[KS_HSCLH] = 0x00; /* Horizontal Scaling Ratio High */ table[KS_VSCLL] = 0x00; /* Vertical Scaling Ratio Low */ table[KS_VSCLH] = 0x00; /* Vertical Scaling Ratio High */ /* 16 bit YCbCr 4:2:2 output; I can't make the bt866 like 8 bit /Sam */ table[KS_OFMTA] = 0x30; table[KS_OFMTB] = 0x00; /* Output Control B */ /* VBI Decoder Control; 4bit fmt: avoid Y overflow */ table[KS_VBICTL] = 0x5d; table[KS_CCDAT2] = 0x00; /* Read Only register */ table[KS_CCDAT1] = 0x00; /* Read Only register */ table[KS_VBIL30] = 0xa8; /* VBI data decoding options */ table[KS_VBIL74] = 0xaa; /* VBI data decoding options */ table[KS_VBIL118] = 0x2a; /* VBI data decoding options */ table[KS_VBIL1512] = 0x00; /* VBI data decoding options */ table[KS_TTFRAM] = 0x00; /* Teletext frame alignment pattern */ table[KS_TESTA] = 0x00; /* test register, shouldn't be written */ table[KS_UVOFFH] = 0x00; /* UV Offset Adjustment High */ table[KS_UVOFFL] = 0x00; /* UV Offset Adjustment Low */ table[KS_UGAIN] = 0x00; /* U Component Gain Adjustment */ table[KS_VGAIN] = 0x00; /* V Component Gain Adjustment */ table[KS_VAVB] = 0x07; /* VAV Begin */ table[KS_VAVE] = 0x00; /* VAV End */ table[KS_CTRACK] = 0x00; /* Chroma Tracking Control */ table[KS_POLCTL] = 0x41; /* Timing Signal Polarity Control */ table[KS_REFCOD] = 0x80; /* Reference Code Insertion Control */ table[KS_INVALY] = 0x10; /* Invalid Y Code */ table[KS_INVALU] = 0x80; /* Invalid U Code */ table[KS_INVALV] = 0x80; /* Invalid V Code */ table[KS_UNUSEY] = 0x10; /* Unused Y Code */ table[KS_UNUSEU] = 0x80; /* Unused U Code */ table[KS_UNUSEV] = 0x80; /* Unused V Code */ table[KS_USRSAV] = 0x00; /* reserved */ table[KS_USREAV] = 0x00; /* reserved */ table[KS_SHS1A] = 0x00; /* User Defined SHS1 A */ /* User Defined SHS1 B, ALT656=1 on 0127B */ table[KS_SHS1B] = 0x80; table[KS_SHS1C] = 0x00; /* User Defined SHS1 C */ table[KS_CMDE] = 0x00; /* Command Register E */ table[KS_VSDEL] = 0x00; /* VS Delay Control */ /* Command 
Register F, update -immediately- */ /* (there might come no vsync)*/ table[KS_CMDF] = 0x02; } /* We need to manually read because of a bug in the KS0127 chip. * * An explanation from kayork@mail.utexas.edu: * * During I2C reads, the KS0127 only samples for a stop condition * during the place where the acknowledge bit should be. Any standard * I2C implementation (correctly) throws in another clock transition * at the 9th bit, and the KS0127 will not recognize the stop condition * and will continue to clock out data. * * So we have to do the read ourself. Big deal. * workaround in i2c-algo-bit */ static u8 ks0127_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); char val = 0; struct i2c_msg msgs[] = { { client->addr, 0, sizeof(reg), &reg }, { client->addr, I2C_M_RD | I2C_M_NO_RD_ACK, sizeof(val), &val } }; int ret; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) v4l2_dbg(1, debug, sd, "read error\n"); return val; } static void ks0127_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ks0127 *ks = to_ks0127(sd); char msg[] = { reg, val }; if (i2c_master_send(client, msg, sizeof(msg)) != sizeof(msg)) v4l2_dbg(1, debug, sd, "write error\n"); ks->regs[reg] = val; } /* generic bit-twiddling */ static void ks0127_and_or(struct v4l2_subdev *sd, u8 reg, u8 and_v, u8 or_v) { struct ks0127 *ks = to_ks0127(sd); u8 val = ks->regs[reg]; val = (val & and_v) | or_v; ks0127_write(sd, reg, val); } /**************************************************************************** * ks0127 private api ****************************************************************************/ static void ks0127_init(struct v4l2_subdev *sd) { struct ks0127 *ks = to_ks0127(sd); u8 *table = reg_defaults; int i; ks->ident = V4L2_IDENT_KS0127; v4l2_dbg(1, debug, sd, "reset\n"); msleep(1); /* initialize all registers to known values */ /* (except STAT, 0x21, 0x22, TEST and 
0x38,0x39) */ for (i = 1; i < 33; i++) ks0127_write(sd, i, table[i]); for (i = 35; i < 40; i++) ks0127_write(sd, i, table[i]); for (i = 41; i < 56; i++) ks0127_write(sd, i, table[i]); for (i = 58; i < 64; i++) ks0127_write(sd, i, table[i]); if ((ks0127_read(sd, KS_STAT) & 0x80) == 0) { ks->ident = V4L2_IDENT_KS0122S; v4l2_dbg(1, debug, sd, "ks0122s found\n"); return; } switch (ks0127_read(sd, KS_CMDE) & 0x0f) { case 0: v4l2_dbg(1, debug, sd, "ks0127 found\n"); break; case 9: ks->ident = V4L2_IDENT_KS0127B; v4l2_dbg(1, debug, sd, "ks0127B Revision A found\n"); break; default: v4l2_dbg(1, debug, sd, "unknown revision\n"); break; } } static int ks0127_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct ks0127 *ks = to_ks0127(sd); switch (input) { case KS_INPUT_COMPOSITE_1: case KS_INPUT_COMPOSITE_2: case KS_INPUT_COMPOSITE_3: case KS_INPUT_COMPOSITE_4: case KS_INPUT_COMPOSITE_5: case KS_INPUT_COMPOSITE_6: v4l2_dbg(1, debug, sd, "s_routing %d: Composite\n", input); /* autodetect 50/60 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x00); /* VSE=0 */ ks0127_and_or(sd, KS_CMDA, ~0x40, 0x00); /* set input line */ ks0127_and_or(sd, KS_CMDB, 0xb0, input); /* non-freerunning mode */ ks0127_and_or(sd, KS_CMDC, 0x70, 0x0a); /* analog input */ ks0127_and_or(sd, KS_CMDD, 0x03, 0x00); /* enable chroma demodulation */ ks0127_and_or(sd, KS_CTRACK, 0xcf, 0x00); /* chroma trap, HYBWR=1 */ ks0127_and_or(sd, KS_LUMA, 0x00, (reg_defaults[KS_LUMA])|0x0c); /* scaler fullbw, luma comb off */ ks0127_and_or(sd, KS_VERTIA, 0x08, 0x81); /* manual chroma comb .25 .5 .25 */ ks0127_and_or(sd, KS_VERTIC, 0x0f, 0x90); /* chroma path delay */ ks0127_and_or(sd, KS_CHROMB, 0x0f, 0x90); ks0127_write(sd, KS_UGAIN, reg_defaults[KS_UGAIN]); ks0127_write(sd, KS_VGAIN, reg_defaults[KS_VGAIN]); ks0127_write(sd, KS_UVOFFH, reg_defaults[KS_UVOFFH]); ks0127_write(sd, KS_UVOFFL, reg_defaults[KS_UVOFFL]); break; case KS_INPUT_SVIDEO_1: case KS_INPUT_SVIDEO_2: case KS_INPUT_SVIDEO_3: v4l2_dbg(1, 
debug, sd, "s_routing %d: S-Video\n", input); /* autodetect 50/60 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x00); /* VSE=0 */ ks0127_and_or(sd, KS_CMDA, ~0x40, 0x00); /* set input line */ ks0127_and_or(sd, KS_CMDB, 0xb0, input); /* non-freerunning mode */ ks0127_and_or(sd, KS_CMDC, 0x70, 0x0a); /* analog input */ ks0127_and_or(sd, KS_CMDD, 0x03, 0x00); /* enable chroma demodulation */ ks0127_and_or(sd, KS_CTRACK, 0xcf, 0x00); ks0127_and_or(sd, KS_LUMA, 0x00, reg_defaults[KS_LUMA]); /* disable luma comb */ ks0127_and_or(sd, KS_VERTIA, 0x08, (reg_defaults[KS_VERTIA]&0xf0)|0x01); ks0127_and_or(sd, KS_VERTIC, 0x0f, reg_defaults[KS_VERTIC]&0xf0); ks0127_and_or(sd, KS_CHROMB, 0x0f, reg_defaults[KS_CHROMB]&0xf0); ks0127_write(sd, KS_UGAIN, reg_defaults[KS_UGAIN]); ks0127_write(sd, KS_VGAIN, reg_defaults[KS_VGAIN]); ks0127_write(sd, KS_UVOFFH, reg_defaults[KS_UVOFFH]); ks0127_write(sd, KS_UVOFFL, reg_defaults[KS_UVOFFL]); break; case KS_INPUT_YUV656: v4l2_dbg(1, debug, sd, "s_routing 15: YUV656\n"); if (ks->norm & V4L2_STD_525_60) /* force 60 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x03); else /* force 50 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x02); ks0127_and_or(sd, KS_CMDA, 0xff, 0x40); /* VSE=1 */ /* set input line and VALIGN */ ks0127_and_or(sd, KS_CMDB, 0xb0, (input | 0x40)); /* freerunning mode, */ /* TSTGEN = 1 TSTGFR=11 TSTGPH=0 TSTGPK=0 VMEM=1*/ ks0127_and_or(sd, KS_CMDC, 0x70, 0x87); /* digital input, SYNDIR = 0 INPSL=01 CLKDIR=0 EAV=0 */ ks0127_and_or(sd, KS_CMDD, 0x03, 0x08); /* disable chroma demodulation */ ks0127_and_or(sd, KS_CTRACK, 0xcf, 0x30); /* HYPK =01 CTRAP = 0 HYBWR=0 PED=1 RGBH=1 UNIT=1 */ ks0127_and_or(sd, KS_LUMA, 0x00, 0x71); ks0127_and_or(sd, KS_VERTIC, 0x0f, reg_defaults[KS_VERTIC]&0xf0); /* scaler fullbw, luma comb off */ ks0127_and_or(sd, KS_VERTIA, 0x08, 0x81); ks0127_and_or(sd, KS_CHROMB, 0x0f, reg_defaults[KS_CHROMB]&0xf0); ks0127_and_or(sd, KS_CON, 0x00, 0x00); ks0127_and_or(sd, KS_BRT, 0x00, 32); /* spec: 34 */ /* spec: 229 (e5) */ 
ks0127_and_or(sd, KS_SAT, 0x00, 0xe8); ks0127_and_or(sd, KS_HUE, 0x00, 0); ks0127_and_or(sd, KS_UGAIN, 0x00, 238); ks0127_and_or(sd, KS_VGAIN, 0x00, 0x00); /*UOFF:0x30, VOFF:0x30, TSTCGN=1 */ ks0127_and_or(sd, KS_UVOFFH, 0x00, 0x4f); ks0127_and_or(sd, KS_UVOFFL, 0x00, 0x00); break; default: v4l2_dbg(1, debug, sd, "s_routing: Unknown input %d\n", input); break; } /* hack: CDMLPF sometimes spontaneously switches on; */ /* force back off */ ks0127_write(sd, KS_DEMOD, reg_defaults[KS_DEMOD]); return 0; } static int ks0127_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct ks0127 *ks = to_ks0127(sd); /* Set to automatic SECAM/Fsc mode */ ks0127_and_or(sd, KS_DEMOD, 0xf0, 0x00); ks->norm = std; if (std & V4L2_STD_NTSC) { v4l2_dbg(1, debug, sd, "s_std: NTSC_M\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x20); } else if (std & V4L2_STD_PAL_N) { v4l2_dbg(1, debug, sd, "s_std: NTSC_N (fixme)\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x40); } else if (std & V4L2_STD_PAL) { v4l2_dbg(1, debug, sd, "s_std: PAL_N\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x20); } else if (std & V4L2_STD_PAL_M) { v4l2_dbg(1, debug, sd, "s_std: PAL_M (fixme)\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x40); } else if (std & V4L2_STD_SECAM) { v4l2_dbg(1, debug, sd, "s_std: SECAM\n"); /* set to secam autodetection */ ks0127_and_or(sd, KS_CHROMA, 0xdf, 0x20); ks0127_and_or(sd, KS_DEMOD, 0xf0, 0x00); schedule_timeout_interruptible(HZ/10+1); /* did it autodetect? 
*/ if (!(ks0127_read(sd, KS_DEMOD) & 0x40)) /* force to secam mode */ ks0127_and_or(sd, KS_DEMOD, 0xf0, 0x0f); } else { v4l2_dbg(1, debug, sd, "s_std: Unknown norm %llx\n", (unsigned long long)std); } return 0; } static int ks0127_s_stream(struct v4l2_subdev *sd, int enable) { v4l2_dbg(1, debug, sd, "s_stream(%d)\n", enable); if (enable) { /* All output pins on */ ks0127_and_or(sd, KS_OFMTA, 0xcf, 0x30); /* Obey the OEN pin */ ks0127_and_or(sd, KS_CDEM, 0x7f, 0x00); } else { /* Video output pins off */ ks0127_and_or(sd, KS_OFMTA, 0xcf, 0x00); /* Ignore the OEN pin */ ks0127_and_or(sd, KS_CDEM, 0x7f, 0x80); } return 0; } static int ks0127_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pstd) { int stat = V4L2_IN_ST_NO_SIGNAL; u8 status; v4l2_std_id std = V4L2_STD_ALL; status = ks0127_read(sd, KS_STAT); if (!(status & 0x20)) /* NOVID not set */ stat = 0; if (!(status & 0x01)) /* CLOCK set */ stat |= V4L2_IN_ST_NO_COLOR; if ((status & 0x08)) /* PALDET set */ std = V4L2_STD_PAL; else std = V4L2_STD_NTSC; if (pstd) *pstd = std; if (pstatus) *pstatus = stat; return 0; } static int ks0127_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { v4l2_dbg(1, debug, sd, "querystd\n"); return ks0127_status(sd, NULL, std); } static int ks0127_g_input_status(struct v4l2_subdev *sd, u32 *status) { v4l2_dbg(1, debug, sd, "g_input_status\n"); return ks0127_status(sd, status, NULL); } static int ks0127_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ks0127 *ks = to_ks0127(sd); return v4l2_chip_ident_i2c_client(client, chip, ks->ident, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops ks0127_core_ops = { .g_chip_ident = ks0127_g_chip_ident, .s_std = ks0127_s_std, }; static const struct v4l2_subdev_video_ops ks0127_video_ops = { .s_routing = ks0127_s_routing, .s_stream = ks0127_s_stream, .querystd = ks0127_querystd, 
.g_input_status = ks0127_g_input_status, }; static const struct v4l2_subdev_ops ks0127_ops = { .core = &ks0127_core_ops, .video = &ks0127_video_ops, }; /* ----------------------------------------------------------------------- */ static int ks0127_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ks0127 *ks; struct v4l2_subdev *sd; v4l_info(client, "%s chip found @ 0x%x (%s)\n", client->addr == (I2C_KS0127_ADDON >> 1) ? "addon" : "on-board", client->addr << 1, client->adapter->name); ks = kzalloc(sizeof(*ks), GFP_KERNEL); if (ks == NULL) return -ENOMEM; sd = &ks->sd; v4l2_i2c_subdev_init(sd, client, &ks0127_ops); /* power up */ init_reg_defaults(); ks0127_write(sd, KS_CMDA, 0x2c); mdelay(10); /* reset the device */ ks0127_init(sd); return 0; } static int ks0127_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); ks0127_write(sd, KS_OFMTA, 0x20); /* tristate */ ks0127_write(sd, KS_CMDA, 0x2c | 0x80); /* power down */ kfree(to_ks0127(sd)); return 0; } static const struct i2c_device_id ks0127_id[] = { { "ks0127", 0 }, { "ks0127b", 0 }, { "ks0122s", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ks0127_id); static struct i2c_driver ks0127_driver = { .driver = { .owner = THIS_MODULE, .name = "ks0127", }, .probe = ks0127_probe, .remove = ks0127_remove, .id_table = ks0127_id, }; static __init int init_ks0127(void) { return i2c_add_driver(&ks0127_driver); } static __exit void exit_ks0127(void) { i2c_del_driver(&ks0127_driver); } module_init(init_ks0127); module_exit(exit_ks0127);
gpl-2.0
m-stein/linux
net/9p/protocol.c
3348
14013
/* * net/9p/protocol.c * * 9P Protocol Support Code * * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com> * * Base on code from Anthony Liguori <aliguori@us.ibm.com> * Copyright (C) 2008 by IBM, Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/stddef.h> #include <linux/types.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "protocol.h" #include <trace/events/9p.h> static int p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); void p9stat_free(struct p9_wstat *stbuf) { kfree(stbuf->name); kfree(stbuf->uid); kfree(stbuf->gid); kfree(stbuf->muid); kfree(stbuf->extension); } EXPORT_SYMBOL(p9stat_free); size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) { size_t len = min(pdu->size - pdu->offset, size); memcpy(data, &pdu->sdata[pdu->offset], len); pdu->offset += len; return size - len; } static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size) { size_t len = min(pdu->capacity - pdu->size, size); memcpy(&pdu->sdata[pdu->size], data, len); pdu->size += len; return size - len; } static size_t pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) { size_t len = min(pdu->capacity - pdu->size, size); if 
(copy_from_user(&pdu->sdata[pdu->size], udata, len)) len = 0; pdu->size += len; return size - len; } /* b - int8_t w - int16_t d - int32_t q - int64_t s - string u - numeric uid g - numeric gid S - stat Q - qid D - data blob (int32_t size followed by void *, results are not freed) T - array of strings (int16_t count, followed by strings) R - array of qids (int16_t count, followed by qids) A - stat for 9p2000.L (p9_stat_dotl) ? - if optional = 1, continue parsing */ static int p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, va_list ap) { const char *ptr; int errcode = 0; for (ptr = fmt; *ptr; ptr++) { switch (*ptr) { case 'b':{ int8_t *val = va_arg(ap, int8_t *); if (pdu_read(pdu, val, sizeof(*val))) { errcode = -EFAULT; break; } } break; case 'w':{ int16_t *val = va_arg(ap, int16_t *); __le16 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *val = le16_to_cpu(le_val); } break; case 'd':{ int32_t *val = va_arg(ap, int32_t *); __le32 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *val = le32_to_cpu(le_val); } break; case 'q':{ int64_t *val = va_arg(ap, int64_t *); __le64 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *val = le64_to_cpu(le_val); } break; case 's':{ char **sptr = va_arg(ap, char **); uint16_t len; errcode = p9pdu_readf(pdu, proto_version, "w", &len); if (errcode) break; *sptr = kmalloc(len + 1, GFP_NOFS); if (*sptr == NULL) { errcode = -EFAULT; break; } if (pdu_read(pdu, *sptr, len)) { errcode = -EFAULT; kfree(*sptr); *sptr = NULL; } else (*sptr)[len] = 0; } break; case 'u': { kuid_t *uid = va_arg(ap, kuid_t *); __le32 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *uid = make_kuid(&init_user_ns, le32_to_cpu(le_val)); } break; case 'g': { kgid_t *gid = va_arg(ap, kgid_t *); __le32 le_val; if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } *gid = make_kgid(&init_user_ns, 
le32_to_cpu(le_val)); } break; case 'Q':{ struct p9_qid *qid = va_arg(ap, struct p9_qid *); errcode = p9pdu_readf(pdu, proto_version, "bdq", &qid->type, &qid->version, &qid->path); } break; case 'S':{ struct p9_wstat *stbuf = va_arg(ap, struct p9_wstat *); memset(stbuf, 0, sizeof(struct p9_wstat)); stbuf->n_uid = stbuf->n_muid = INVALID_UID; stbuf->n_gid = INVALID_GID; errcode = p9pdu_readf(pdu, proto_version, "wwdQdddqssss?sugu", &stbuf->size, &stbuf->type, &stbuf->dev, &stbuf->qid, &stbuf->mode, &stbuf->atime, &stbuf->mtime, &stbuf->length, &stbuf->name, &stbuf->uid, &stbuf->gid, &stbuf->muid, &stbuf->extension, &stbuf->n_uid, &stbuf->n_gid, &stbuf->n_muid); if (errcode) p9stat_free(stbuf); } break; case 'D':{ uint32_t *count = va_arg(ap, uint32_t *); void **data = va_arg(ap, void **); errcode = p9pdu_readf(pdu, proto_version, "d", count); if (!errcode) { *count = min_t(uint32_t, *count, pdu->size - pdu->offset); *data = &pdu->sdata[pdu->offset]; } } break; case 'T':{ uint16_t *nwname = va_arg(ap, uint16_t *); char ***wnames = va_arg(ap, char ***); errcode = p9pdu_readf(pdu, proto_version, "w", nwname); if (!errcode) { *wnames = kmalloc(sizeof(char *) * *nwname, GFP_NOFS); if (!*wnames) errcode = -ENOMEM; } if (!errcode) { int i; for (i = 0; i < *nwname; i++) { errcode = p9pdu_readf(pdu, proto_version, "s", &(*wnames)[i]); if (errcode) break; } } if (errcode) { if (*wnames) { int i; for (i = 0; i < *nwname; i++) kfree((*wnames)[i]); } kfree(*wnames); *wnames = NULL; } } break; case 'R':{ int16_t *nwqid = va_arg(ap, int16_t *); struct p9_qid **wqids = va_arg(ap, struct p9_qid **); *wqids = NULL; errcode = p9pdu_readf(pdu, proto_version, "w", nwqid); if (!errcode) { *wqids = kmalloc(*nwqid * sizeof(struct p9_qid), GFP_NOFS); if (*wqids == NULL) errcode = -ENOMEM; } if (!errcode) { int i; for (i = 0; i < *nwqid; i++) { errcode = p9pdu_readf(pdu, proto_version, "Q", &(*wqids)[i]); if (errcode) break; } } if (errcode) { kfree(*wqids); *wqids = NULL; } } break; case 
'A': { struct p9_stat_dotl *stbuf = va_arg(ap, struct p9_stat_dotl *); memset(stbuf, 0, sizeof(struct p9_stat_dotl)); errcode = p9pdu_readf(pdu, proto_version, "qQdugqqqqqqqqqqqqqqq", &stbuf->st_result_mask, &stbuf->qid, &stbuf->st_mode, &stbuf->st_uid, &stbuf->st_gid, &stbuf->st_nlink, &stbuf->st_rdev, &stbuf->st_size, &stbuf->st_blksize, &stbuf->st_blocks, &stbuf->st_atime_sec, &stbuf->st_atime_nsec, &stbuf->st_mtime_sec, &stbuf->st_mtime_nsec, &stbuf->st_ctime_sec, &stbuf->st_ctime_nsec, &stbuf->st_btime_sec, &stbuf->st_btime_nsec, &stbuf->st_gen, &stbuf->st_data_version); } break; case '?': if ((proto_version != p9_proto_2000u) && (proto_version != p9_proto_2000L)) return 0; break; default: BUG(); break; } if (errcode) break; } return errcode; } int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, va_list ap) { const char *ptr; int errcode = 0; for (ptr = fmt; *ptr; ptr++) { switch (*ptr) { case 'b':{ int8_t val = va_arg(ap, int); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'w':{ __le16 val = cpu_to_le16(va_arg(ap, int)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'd':{ __le32 val = cpu_to_le32(va_arg(ap, int32_t)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'q':{ __le64 val = cpu_to_le64(va_arg(ap, int64_t)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 's':{ const char *sptr = va_arg(ap, const char *); uint16_t len = 0; if (sptr) len = min_t(size_t, strlen(sptr), USHRT_MAX); errcode = p9pdu_writef(pdu, proto_version, "w", len); if (!errcode && pdu_write(pdu, sptr, len)) errcode = -EFAULT; } break; case 'u': { kuid_t uid = va_arg(ap, kuid_t); __le32 val = cpu_to_le32( from_kuid(&init_user_ns, uid)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'g': { kgid_t gid = va_arg(ap, kgid_t); __le32 val = cpu_to_le32( from_kgid(&init_user_ns, gid)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } 
break; case 'Q':{ const struct p9_qid *qid = va_arg(ap, const struct p9_qid *); errcode = p9pdu_writef(pdu, proto_version, "bdq", qid->type, qid->version, qid->path); } break; case 'S':{ const struct p9_wstat *stbuf = va_arg(ap, const struct p9_wstat *); errcode = p9pdu_writef(pdu, proto_version, "wwdQdddqssss?sugu", stbuf->size, stbuf->type, stbuf->dev, &stbuf->qid, stbuf->mode, stbuf->atime, stbuf->mtime, stbuf->length, stbuf->name, stbuf->uid, stbuf->gid, stbuf->muid, stbuf->extension, stbuf->n_uid, stbuf->n_gid, stbuf->n_muid); } break; case 'D':{ uint32_t count = va_arg(ap, uint32_t); const void *data = va_arg(ap, const void *); errcode = p9pdu_writef(pdu, proto_version, "d", count); if (!errcode && pdu_write(pdu, data, count)) errcode = -EFAULT; } break; case 'U':{ int32_t count = va_arg(ap, int32_t); const char __user *udata = va_arg(ap, const void __user *); errcode = p9pdu_writef(pdu, proto_version, "d", count); if (!errcode && pdu_write_u(pdu, udata, count)) errcode = -EFAULT; } break; case 'T':{ uint16_t nwname = va_arg(ap, int); const char **wnames = va_arg(ap, const char **); errcode = p9pdu_writef(pdu, proto_version, "w", nwname); if (!errcode) { int i; for (i = 0; i < nwname; i++) { errcode = p9pdu_writef(pdu, proto_version, "s", wnames[i]); if (errcode) break; } } } break; case 'R':{ int16_t nwqid = va_arg(ap, int); struct p9_qid *wqids = va_arg(ap, struct p9_qid *); errcode = p9pdu_writef(pdu, proto_version, "w", nwqid); if (!errcode) { int i; for (i = 0; i < nwqid; i++) { errcode = p9pdu_writef(pdu, proto_version, "Q", &wqids[i]); if (errcode) break; } } } break; case 'I':{ struct p9_iattr_dotl *p9attr = va_arg(ap, struct p9_iattr_dotl *); errcode = p9pdu_writef(pdu, proto_version, "ddugqqqqq", p9attr->valid, p9attr->mode, p9attr->uid, p9attr->gid, p9attr->size, p9attr->atime_sec, p9attr->atime_nsec, p9attr->mtime_sec, p9attr->mtime_nsec); } break; case '?': if ((proto_version != p9_proto_2000u) && (proto_version != p9_proto_2000L)) return 0; 
break; default: BUG(); break; } if (errcode) break; } return errcode; } int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = p9pdu_vreadf(pdu, proto_version, fmt, ap); va_end(ap); return ret; } static int p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = p9pdu_vwritef(pdu, proto_version, fmt, ap); va_end(ap); return ret; } int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st) { struct p9_fcall fake_pdu; int ret; fake_pdu.size = len; fake_pdu.capacity = len; fake_pdu.sdata = buf; fake_pdu.offset = 0; ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st); if (ret) { p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); trace_9p_protocol_dump(clnt, &fake_pdu); } return ret; } EXPORT_SYMBOL(p9stat_read); int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type) { pdu->id = type; return p9pdu_writef(pdu, 0, "dbw", 0, type, tag); } int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu) { int size = pdu->size; int err; pdu->size = 0; err = p9pdu_writef(pdu, 0, "d", size); pdu->size = size; trace_9p_protocol_dump(clnt, pdu); p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size, pdu->id, pdu->tag); return err; } void p9pdu_reset(struct p9_fcall *pdu) { pdu->offset = 0; pdu->size = 0; } int p9dirent_read(struct p9_client *clnt, char *buf, int len, struct p9_dirent *dirent) { struct p9_fcall fake_pdu; int ret; char *nameptr; fake_pdu.size = len; fake_pdu.capacity = len; fake_pdu.sdata = buf; fake_pdu.offset = 0; ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid, &dirent->d_off, &dirent->d_type, &nameptr); if (ret) { p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret); trace_9p_protocol_dump(clnt, &fake_pdu); goto out; } strcpy(dirent->d_name, nameptr); kfree(nameptr); out: return fake_pdu.offset; } EXPORT_SYMBOL(p9dirent_read);
gpl-2.0
bcnice20/android-kernel-common
drivers/media/video/indycam.c
3348
9407
/*
 * indycam.c - Silicon Graphics IndyCam digital camera driver
 *
 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* IndyCam decodes stream of photons into digital image representation ;-) */
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>

#include "indycam.h"

#define INDYCAM_MODULE_VERSION "0.0.5"

MODULE_DESCRIPTION("SGI IndyCam driver");
MODULE_VERSION(INDYCAM_MODULE_VERSION);
MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
MODULE_LICENSE("GPL");

/* Uncomment to get register-level debug output. */
// #define INDYCAM_DEBUG

#ifdef INDYCAM_DEBUG
#define dprintk(x...) printk("IndyCam: " x);
#define indycam_regdump(client) indycam_regdump_debug(client)
#else
#define dprintk(x...)
#define indycam_regdump(client)
#endif

/* Per-device state: embedded v4l2 subdev plus the camera hardware
 * revision read from INDYCAM_REG_VERSION at probe time. */
struct indycam {
	struct v4l2_subdev sd;
	u8 version;
};

static inline struct indycam *to_indycam(struct v4l2_subdev *sd)
{
	return container_of(sd, struct indycam, sd);
}

/* Power-on register values, written to consecutive registers starting
 * at register 0 by indycam_write_block() during probe.  The order of
 * the entries mirrors the hardware register layout. */
static const u8 initseq[] = {
	INDYCAM_CONTROL_AGCENA,		/* INDYCAM_CONTROL */
	INDYCAM_SHUTTER_60,		/* INDYCAM_SHUTTER */
	INDYCAM_GAIN_DEFAULT,		/* INDYCAM_GAIN */
	0x00,				/* INDYCAM_BRIGHTNESS (read-only) */
	INDYCAM_RED_BALANCE_DEFAULT,	/* INDYCAM_RED_BALANCE */
	INDYCAM_BLUE_BALANCE_DEFAULT,	/* INDYCAM_BLUE_BALANCE */
	INDYCAM_RED_SATURATION_DEFAULT,	/* INDYCAM_RED_SATURATION */
	INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
};

/* IndyCam register handling */

/*
 * Read one camera register over SMBus.  The write-only RESET register
 * is silently skipped (reports 0) so callers can dump all registers
 * without special-casing it.  Returns 0 on success, negative errno on
 * I2C failure.
 */
static int indycam_read_reg(struct v4l2_subdev *sd, u8 reg, u8 *value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;

	if (reg == INDYCAM_REG_RESET) {
		dprintk("indycam_read_reg(): "
			"skipping write-only register %d\n", reg);
		*value = 0;
		return 0;
	}

	ret = i2c_smbus_read_byte_data(client, reg);
	if (ret < 0) {
		printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
		       "register = 0x%02x\n", reg);
		return ret;
	}

	*value = (u8)ret;

	return 0;
}

/*
 * Write one camera register over SMBus.  Read-only registers
 * (BRIGHTNESS, VERSION) are silently skipped so a block write of
 * initseq can cover the whole register range.  Returns 0 on success,
 * negative errno on I2C failure.
 */
static int indycam_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int err;

	if (reg == INDYCAM_REG_BRIGHTNESS || reg == INDYCAM_REG_VERSION) {
		dprintk("indycam_write_reg(): "
			"skipping read-only register %d\n", reg);
		return 0;
	}

	dprintk("Writing Reg %d = 0x%02x\n", reg, value);
	err = i2c_smbus_write_byte_data(client, reg, value);

	if (err) {
		printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
		       "register = 0x%02x, value = 0x%02x\n", reg, value);
	}
	return err;
}

/* Write @length consecutive registers starting at @reg; stops at the
 * first failing register and returns its error code. */
static int indycam_write_block(struct v4l2_subdev *sd, u8 reg,
			       u8 length, u8 *data)
{
	int i, err;

	for (i = 0; i < length; i++) {
		err = indycam_write_reg(sd, reg + i, data[i]);
		if (err)
			return err;
	}

	return 0;
}

/* Helper functions */

#ifdef INDYCAM_DEBUG
/* Dump the first 9 camera registers; debug builds only. */
static void indycam_regdump_debug(struct v4l2_subdev *sd)
{
	int i;
	u8 val;

	for (i = 0; i < 9; i++) {
		indycam_read_reg(sd, i, &val);
		dprintk("Reg %d = 0x%02x\n", i, val);
	}
}
#endif

/*
 * V4L2 g_ctrl: read the current value of a control back from the
 * hardware registers.  AGC and AWB share the CONTROL register; GAMMA
 * is only a real register on the "Moose" camera revision, older
 * cameras always report the default.  Returns 0/-EIO/-EINVAL.
 */
static int indycam_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct indycam *camera = to_indycam(sd);
	u8 reg;
	int ret = 0;

	switch (ctrl->id) {
	case V4L2_CID_AUTOGAIN:
	case V4L2_CID_AUTO_WHITE_BALANCE:
		ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
		if (ret)
			return -EIO;
		if (ctrl->id == V4L2_CID_AUTOGAIN)
			ctrl->value = (reg & INDYCAM_CONTROL_AGCENA)
				? 1 : 0;
		else
			ctrl->value = (reg & INDYCAM_CONTROL_AWBCTL)
				? 1 : 0;
		break;
	case V4L2_CID_EXPOSURE:
		ret = indycam_read_reg(sd, INDYCAM_REG_SHUTTER, &reg);
		if (ret)
			return -EIO;
		/* Shutter register is offset by one with 0x00 meaning
		 * maximum exposure; undo that mapping for V4L2. */
		ctrl->value = ((s32)reg == 0x00) ? 0xff : ((s32)reg - 1);
		break;
	case V4L2_CID_GAIN:
		ret = indycam_read_reg(sd, INDYCAM_REG_GAIN, &reg);
		if (ret)
			return -EIO;
		ctrl->value = (s32)reg;
		break;
	case V4L2_CID_RED_BALANCE:
		ret = indycam_read_reg(sd, INDYCAM_REG_RED_BALANCE, &reg);
		if (ret)
			return -EIO;
		ctrl->value = (s32)reg;
		break;
	case V4L2_CID_BLUE_BALANCE:
		ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_BALANCE, &reg);
		if (ret)
			return -EIO;
		ctrl->value = (s32)reg;
		break;
	case INDYCAM_CONTROL_RED_SATURATION:
		ret = indycam_read_reg(sd, INDYCAM_REG_RED_SATURATION, &reg);
		if (ret)
			return -EIO;
		ctrl->value = (s32)reg;
		break;
	case INDYCAM_CONTROL_BLUE_SATURATION:
		ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_SATURATION, &reg);
		if (ret)
			return -EIO;
		ctrl->value = (s32)reg;
		break;
	case V4L2_CID_GAMMA:
		if (camera->version == CAMERA_VERSION_MOOSE) {
			ret = indycam_read_reg(sd, INDYCAM_REG_GAMMA, &reg);
			if (ret)
				return -EIO;
			ctrl->value = (s32)reg;
		} else {
			ctrl->value = INDYCAM_GAMMA_DEFAULT;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * V4L2 s_ctrl: program a control value into the hardware.  AGC/AWB do
 * a read-modify-write of the shared CONTROL register; GAMMA writes are
 * silently ignored on pre-Moose cameras (no such register).
 * Returns 0, a write error, or -EINVAL for unknown control ids.
 */
static int indycam_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct indycam *camera = to_indycam(sd);
	u8 reg;
	int ret = 0;

	switch (ctrl->id) {
	case V4L2_CID_AUTOGAIN:
	case V4L2_CID_AUTO_WHITE_BALANCE:
		ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
		if (ret)
			break;

		if (ctrl->id == V4L2_CID_AUTOGAIN) {
			if (ctrl->value)
				reg |= INDYCAM_CONTROL_AGCENA;
			else
				reg &= ~INDYCAM_CONTROL_AGCENA;
		} else {
			if (ctrl->value)
				reg |= INDYCAM_CONTROL_AWBCTL;
			else
				reg &= ~INDYCAM_CONTROL_AWBCTL;
		}

		ret = indycam_write_reg(sd, INDYCAM_REG_CONTROL, reg);
		break;
	case V4L2_CID_EXPOSURE:
		/* Inverse of the g_ctrl mapping: V4L2 0xff -> reg 0x00. */
		reg = (ctrl->value == 0xff) ? 0x00 : (ctrl->value + 1);
		ret = indycam_write_reg(sd, INDYCAM_REG_SHUTTER, reg);
		break;
	case V4L2_CID_GAIN:
		ret = indycam_write_reg(sd, INDYCAM_REG_GAIN, ctrl->value);
		break;
	case V4L2_CID_RED_BALANCE:
		ret = indycam_write_reg(sd, INDYCAM_REG_RED_BALANCE,
					ctrl->value);
		break;
	case V4L2_CID_BLUE_BALANCE:
		ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_BALANCE,
					ctrl->value);
		break;
	case INDYCAM_CONTROL_RED_SATURATION:
		ret = indycam_write_reg(sd, INDYCAM_REG_RED_SATURATION,
					ctrl->value);
		break;
	case INDYCAM_CONTROL_BLUE_SATURATION:
		ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_SATURATION,
					ctrl->value);
		break;
	case V4L2_CID_GAMMA:
		if (camera->version == CAMERA_VERSION_MOOSE) {
			ret = indycam_write_reg(sd, INDYCAM_REG_GAMMA,
						ctrl->value);
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* I2C-interface */

/* Report chip identity (and hardware revision) to V4L2. */
static int indycam_g_chip_ident(struct v4l2_subdev *sd,
		struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct indycam *camera = to_indycam(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_INDYCAM,
		       camera->version);
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_subdev_core_ops indycam_core_ops = {
	.g_chip_ident = indycam_g_chip_ident,
	.g_ctrl = indycam_g_ctrl,
	.s_ctrl = indycam_s_ctrl,
};

static const struct v4l2_subdev_ops indycam_ops = {
	.core = &indycam_core_ops,
};

/*
 * Probe: allocate per-device state, register the v4l2 subdev, verify
 * the hardware revision, then bring the camera up (default register
 * block, then AGC + auto white balance).  Any failure frees the state
 * and reports -ENODEV/-EIO/-ENOMEM.
 */
static int indycam_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	int err = 0;
	struct indycam *camera;
	struct v4l2_subdev *sd;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	camera = kzalloc(sizeof(struct indycam), GFP_KERNEL);
	if (!camera)
		return -ENOMEM;

	sd = &camera->sd;
	v4l2_i2c_subdev_init(sd, client, &indycam_ops);

	camera->version = i2c_smbus_read_byte_data(client,
						   INDYCAM_REG_VERSION);
	/* Unknown revision (or an I2C error truncated into u8) means
	 * this is not an IndyCam; bail out. */
	if (camera->version != CAMERA_VERSION_INDY &&
	    camera->version != CAMERA_VERSION_MOOSE) {
		kfree(camera);
		return -ENODEV;
	}

	printk(KERN_INFO "IndyCam v%d.%d detected\n",
	       INDYCAM_VERSION_MAJOR(camera->version),
	       INDYCAM_VERSION_MINOR(camera->version));

	indycam_regdump(sd);

	// initialize
	err = indycam_write_block(sd, 0, sizeof(initseq), (u8 *)&initseq);
	if (err) {
		printk(KERN_ERR "IndyCam initialization failed\n");
		kfree(camera);
		return -EIO;
	}

	indycam_regdump(sd);

	// white balance
	err = indycam_write_reg(sd, INDYCAM_REG_CONTROL,
			  INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
	if (err) {
		printk(KERN_ERR "IndyCam: White balancing camera failed\n");
		kfree(camera);
		return -EIO;
	}

	indycam_regdump(sd);

	printk(KERN_INFO "IndyCam initialized\n");

	return 0;
}

/* Remove: unregister the subdev and free the per-device state. */
static int indycam_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_device_unregister_subdev(sd);
	kfree(to_indycam(sd));
	return 0;
}

static const struct i2c_device_id indycam_id[] = {
	{ "indycam", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, indycam_id);

static struct i2c_driver indycam_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "indycam",
	},
	.probe		= indycam_probe,
	.remove		= indycam_remove,
	.id_table	= indycam_id,
};

static __init int init_indycam(void)
{
	return i2c_add_driver(&indycam_driver);
}

static __exit void exit_indycam(void)
{
	i2c_del_driver(&indycam_driver);
}

module_init(init_indycam);
module_exit(exit_indycam);
gpl-2.0
zaventh/android_kernel_lge_hammerhead
fs/gfs2/ops_fstype.c
3860
35622
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/export.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/gfs2_ondisk.h> #include <linux/quotaops.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "glops.h" #include "inode.h" #include "recovery.h" #include "rgrp.h" #include "super.h" #include "sys.h" #include "util.h" #include "log.h" #include "quota.h" #include "dir.h" #include "trace_gfs2.h" #define DO 0 #define UNDO 1 /** * gfs2_tune_init - Fill a gfs2_tune structure with default values * @gt: tune * */ static void gfs2_tune_init(struct gfs2_tune *gt) { spin_lock_init(&gt->gt_spin); gt->gt_quota_simul_sync = 64; gt->gt_quota_warn_period = 10; gt->gt_quota_scale_num = 1; gt->gt_quota_scale_den = 1; gt->gt_new_files_jdata = 0; gt->gt_max_readahead = 1 << 18; gt->gt_complain_secs = 10; } static struct gfs2_sbd *init_sbd(struct super_block *sb) { struct gfs2_sbd *sdp; sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL); if (!sdp) return NULL; sb->s_fs_info = sdp; sdp->sd_vfs = sb; sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); if (!sdp->sd_lkstats) { kfree(sdp); return NULL; } set_bit(SDF_NOJOURNALID, &sdp->sd_flags); gfs2_tune_init(&sdp->sd_tune); init_waitqueue_head(&sdp->sd_glock_wait); atomic_set(&sdp->sd_glock_disposal, 0); init_completion(&sdp->sd_locking_init); spin_lock_init(&sdp->sd_statfs_spin); spin_lock_init(&sdp->sd_rindex_spin); sdp->sd_rindex_tree.rb_node = NULL; INIT_LIST_HEAD(&sdp->sd_jindex_list); 
spin_lock_init(&sdp->sd_jindex_spin); mutex_init(&sdp->sd_jindex_mutex); INIT_LIST_HEAD(&sdp->sd_quota_list); mutex_init(&sdp->sd_quota_mutex); init_waitqueue_head(&sdp->sd_quota_wait); INIT_LIST_HEAD(&sdp->sd_trunc_list); spin_lock_init(&sdp->sd_trunc_lock); spin_lock_init(&sdp->sd_log_lock); atomic_set(&sdp->sd_log_pinned, 0); INIT_LIST_HEAD(&sdp->sd_log_le_buf); INIT_LIST_HEAD(&sdp->sd_log_le_revoke); INIT_LIST_HEAD(&sdp->sd_log_le_rg); INIT_LIST_HEAD(&sdp->sd_log_le_databuf); INIT_LIST_HEAD(&sdp->sd_log_le_ordered); init_waitqueue_head(&sdp->sd_log_waitq); init_waitqueue_head(&sdp->sd_logd_waitq); spin_lock_init(&sdp->sd_ail_lock); INIT_LIST_HEAD(&sdp->sd_ail1_list); INIT_LIST_HEAD(&sdp->sd_ail2_list); init_rwsem(&sdp->sd_log_flush_lock); atomic_set(&sdp->sd_log_in_flight, 0); init_waitqueue_head(&sdp->sd_log_flush_wait); INIT_LIST_HEAD(&sdp->sd_revoke_list); mutex_init(&sdp->sd_freeze_lock); return sdp; } /** * gfs2_check_sb - Check superblock * @sdp: the filesystem * @sb: The superblock * @silent: Don't print a message if the check fails * * Checks the version code of the FS is one that we understand how to * read and that the sizes of the various on-disk structures have not * changed. */ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) { struct gfs2_sb_host *sb = &sdp->sd_sb; if (sb->sb_magic != GFS2_MAGIC || sb->sb_type != GFS2_METATYPE_SB) { if (!silent) printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n"); return -EINVAL; } /* If format numbers match exactly, we're done. 
*/ if (sb->sb_fs_format == GFS2_FORMAT_FS && sb->sb_multihost_format == GFS2_FORMAT_MULTI) return 0; fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); return -EINVAL; } static void end_bio_io_page(struct bio *bio, int error) { struct page *page = bio->bi_private; if (!error) SetPageUptodate(page); else printk(KERN_WARNING "gfs2: error %d reading superblock\n", error); unlock_page(page); } static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf) { struct gfs2_sb_host *sb = &sdp->sd_sb; struct super_block *s = sdp->sd_vfs; const struct gfs2_sb *str = buf; sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic); sb->sb_type = be32_to_cpu(str->sb_header.mh_type); sb->sb_format = be32_to_cpu(str->sb_header.mh_format); sb->sb_fs_format = be32_to_cpu(str->sb_fs_format); sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format); sb->sb_bsize = be32_to_cpu(str->sb_bsize); sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift); sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr); sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino); sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr); sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino); memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN); memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); memcpy(s->s_uuid, str->sb_uuid, 16); } /** * gfs2_read_super - Read the gfs2 super block from disk * @sdp: The GFS2 super block * @sector: The location of the super block * @error: The error code to return * * This uses the bio functions to read the super block from disk * because we want to be 100% sure that we never read cached data. * A super block is read twice only during each GFS2 mount and is * never written to by the filesystem. The first time its read no * locks are held, and the only details which are looked at are those * relating to the locking protocol. 
Once locking is up and working, * the sb is read again under the lock to establish the location of * the master directory (contains pointers to journals etc) and the * root directory. * * Returns: 0 on success or error */ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) { struct super_block *sb = sdp->sd_vfs; struct gfs2_sb *p; struct page *page; struct bio *bio; page = alloc_page(GFP_NOFS); if (unlikely(!page)) return -ENOBUFS; ClearPageUptodate(page); ClearPageDirty(page); lock_page(page); bio = bio_alloc(GFP_NOFS, 1); bio->bi_sector = sector * (sb->s_blocksize >> 9); bio->bi_bdev = sb->s_bdev; bio_add_page(bio, page, PAGE_SIZE, 0); bio->bi_end_io = end_bio_io_page; bio->bi_private = page; submit_bio(READ_SYNC | REQ_META, bio); wait_on_page_locked(page); bio_put(bio); if (!PageUptodate(page)) { __free_page(page); return -EIO; } p = kmap(page); gfs2_sb_in(sdp, p); kunmap(page); __free_page(page); return gfs2_check_sb(sdp, silent); } /** * gfs2_read_sb - Read super block * @sdp: The GFS2 superblock * @silent: Don't print message if mount fails * */ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent) { u32 hash_blocks, ind_blocks, leaf_blocks; u32 tmp_blocks; unsigned int x; int error; error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent); if (error) { if (!silent) fs_err(sdp, "can't read superblock\n"); return error; } sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT; sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_diptrs = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) / sizeof(u64); sdp->sd_inptrs = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header); sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2; sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1; sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64); sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / 
sizeof(struct gfs2_quota_change); /* Compute maximum reservation required to add a entry to a directory */ hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH), sdp->sd_jbsize); ind_blocks = 0; for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) { tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs); ind_blocks += tmp_blocks; } leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH; sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks; sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs; for (x = 2;; x++) { u64 space, d; u32 m; space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs; d = space; m = do_div(d, sdp->sd_inptrs); if (d != sdp->sd_heightsize[x - 1] || m) break; sdp->sd_heightsize[x] = space; } sdp->sd_max_height = x; sdp->sd_heightsize[x] = ~0; gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT); sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs; for (x = 2;; x++) { u64 space, d; u32 m; space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs; d = space; m = do_div(d, sdp->sd_inptrs); if (d != sdp->sd_jheightsize[x - 1] || m) break; sdp->sd_jheightsize[x] = space; } sdp->sd_max_jheight = x; sdp->sd_jheightsize[x] = ~0; gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT); return 0; } static int init_names(struct gfs2_sbd *sdp, int silent) { char *proto, *table; int error = 0; proto = sdp->sd_args.ar_lockproto; table = sdp->sd_args.ar_locktable; /* Try to autodetect */ if (!proto[0] || !table[0]) { error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent); if (error) return error; if (!proto[0]) proto = sdp->sd_sb.sb_lockproto; if (!table[0]) table = sdp->sd_sb.sb_locktable; } if (!table[0]) table = sdp->sd_vfs->s_id; strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN); strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN); table = sdp->sd_table_name; while 
((table = strchr(table, '/'))) *table = '_'; return error; } static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, int undo) { int error = 0; if (undo) goto fail_trans; error = gfs2_glock_nq_num(sdp, GFS2_MOUNT_LOCK, &gfs2_nondisk_glops, LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE, mount_gh); if (error) { fs_err(sdp, "can't acquire mount glock: %d\n", error); goto fail; } error = gfs2_glock_nq_num(sdp, GFS2_LIVE_LOCK, &gfs2_nondisk_glops, LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT, &sdp->sd_live_gh); if (error) { fs_err(sdp, "can't acquire live glock: %d\n", error); goto fail_mount; } error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops, CREATE, &sdp->sd_rename_gl); if (error) { fs_err(sdp, "can't create rename glock: %d\n", error); goto fail_live; } error = gfs2_glock_get(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops, CREATE, &sdp->sd_trans_gl); if (error) { fs_err(sdp, "can't create transaction glock: %d\n", error); goto fail_rename; } return 0; fail_trans: gfs2_glock_put(sdp->sd_trans_gl); fail_rename: gfs2_glock_put(sdp->sd_rename_gl); fail_live: gfs2_glock_dq_uninit(&sdp->sd_live_gh); fail_mount: gfs2_glock_dq_uninit(mount_gh); fail: return error; } static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr, u64 no_addr, const char *name) { struct gfs2_sbd *sdp = sb->s_fs_info; struct dentry *dentry; struct inode *inode; inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0); if (IS_ERR(inode)) { fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); return PTR_ERR(inode); } dentry = d_make_root(inode); if (!dentry) { fs_err(sdp, "can't alloc %s dentry\n", name); return -ENOMEM; } *dptr = dentry; return 0; } static int init_sb(struct gfs2_sbd *sdp, int silent) { struct super_block *sb = sdp->sd_vfs; struct gfs2_holder sb_gh; u64 no_addr; int ret; ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops, LM_ST_SHARED, 0, &sb_gh); if (ret) { fs_err(sdp, "can't acquire superblock glock: %d\n", ret); return ret; 
} ret = gfs2_read_sb(sdp, silent); if (ret) { fs_err(sdp, "can't read superblock: %d\n", ret); goto out; } /* Set up the buffer cache and SB for real */ if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) { ret = -EINVAL; fs_err(sdp, "FS block size (%u) is too small for device " "block size (%u)\n", sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev)); goto out; } if (sdp->sd_sb.sb_bsize > PAGE_SIZE) { ret = -EINVAL; fs_err(sdp, "FS block size (%u) is too big for machine " "page size (%u)\n", sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE); goto out; } sb_set_blocksize(sb, sdp->sd_sb.sb_bsize); /* Get the root inode */ no_addr = sdp->sd_sb.sb_root_dir.no_addr; ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root"); if (ret) goto out; /* Get the master inode */ no_addr = sdp->sd_sb.sb_master_dir.no_addr; ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master"); if (ret) { dput(sdp->sd_root_dir); goto out; } sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir); out: gfs2_glock_dq_uninit(&sb_gh); return ret; } /** * map_journal_extents - create a reusable "extent" mapping from all logical * blocks to all physical blocks for the given journal. This will save * us time when writing journal blocks. Most journals will have only one * extent that maps all their logical blocks. That's because gfs2.mkfs * arranges the journal blocks sequentially to maximize performance. * So the extent would map the first block for the entire file length. * However, gfs2_jadd can happen while file activity is happening, so * those journals may not be sequential. Less likely is the case where * the users created their own journals by mounting the metafs and * laying it out. But it's still possible. These journals might have * several extents. * * TODO: This should be done in bigger chunks rather than one block at a time, * but since it's only done at mount time, I'm not worried about the * time it takes. 
*/ static int map_journal_extents(struct gfs2_sbd *sdp) { struct gfs2_jdesc *jd = sdp->sd_jdesc; unsigned int lb; u64 db, prev_db; /* logical block, disk block, prev disk block */ struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_journal_extent *jext = NULL; struct buffer_head bh; int rc = 0; prev_db = 0; for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) { bh.b_state = 0; bh.b_blocknr = 0; bh.b_size = 1 << ip->i_inode.i_blkbits; rc = gfs2_block_map(jd->jd_inode, lb, &bh, 0); db = bh.b_blocknr; if (rc || !db) { printk(KERN_INFO "GFS2 journal mapping error %d: lb=" "%u db=%llu\n", rc, lb, (unsigned long long)db); break; } if (!prev_db || db != prev_db + 1) { jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_KERNEL); if (!jext) { printk(KERN_INFO "GFS2 error: out of memory " "mapping journal extents.\n"); rc = -ENOMEM; break; } jext->dblock = db; jext->lblock = lb; jext->blocks = 1; list_add_tail(&jext->extent_list, &jd->extent_list); } else { jext->blocks++; } prev_db = db; } return rc; } static void gfs2_others_may_mount(struct gfs2_sbd *sdp) { char *message = "FIRSTMOUNT=Done"; char *envp[] = { message, NULL }; fs_info(sdp, "first mount done, others may mount\n"); if (sdp->sd_lockstruct.ls_ops->lm_first_done) sdp->sd_lockstruct.ls_ops->lm_first_done(sdp); kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp); } /** * gfs2_jindex_hold - Grab a lock on the jindex * @sdp: The GFS2 superblock * @ji_gh: the holder for the jindex glock * * Returns: errno */ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) { struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex); struct qstr name; char buf[20]; struct gfs2_jdesc *jd; int error; name.name = buf; mutex_lock(&sdp->sd_jindex_mutex); for (;;) { error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); if (error) break; name.len = sprintf(buf, "journal%u", sdp->sd_journals); name.hash = gfs2_disk_hash(name.name, name.len); error = gfs2_dir_check(sdp->sd_jindex, 
&name, NULL); if (error == -ENOENT) { error = 0; break; } gfs2_glock_dq_uninit(ji_gh); if (error) break; error = -ENOMEM; jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL); if (!jd) break; INIT_LIST_HEAD(&jd->extent_list); INIT_WORK(&jd->jd_work, gfs2_recover_func); jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { if (!jd->jd_inode) error = -ENOENT; else error = PTR_ERR(jd->jd_inode); kfree(jd); break; } spin_lock(&sdp->sd_jindex_spin); jd->jd_jid = sdp->sd_journals++; list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); spin_unlock(&sdp->sd_jindex_spin); } mutex_unlock(&sdp->sd_jindex_mutex); return error; } static int init_journal(struct gfs2_sbd *sdp, int undo) { struct inode *master = sdp->sd_master_dir->d_inode; struct gfs2_holder ji_gh; struct gfs2_inode *ip; int jindex = 1; int error = 0; if (undo) { jindex = 0; goto fail_jinode_gh; } sdp->sd_jindex = gfs2_lookup_simple(master, "jindex"); if (IS_ERR(sdp->sd_jindex)) { fs_err(sdp, "can't lookup journal index: %d\n", error); return PTR_ERR(sdp->sd_jindex); } /* Load in the journal index special file */ error = gfs2_jindex_hold(sdp, &ji_gh); if (error) { fs_err(sdp, "can't read journal index: %d\n", error); goto fail; } error = -EUSERS; if (!gfs2_jindex_size(sdp)) { fs_err(sdp, "no journals!\n"); goto fail_jindex; } if (sdp->sd_args.ar_spectator) { sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0); atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5); atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5); } else { if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) { fs_err(sdp, "can't mount journal #%u\n", sdp->sd_lockstruct.ls_jid); fs_err(sdp, "there are only %u journals (0 - %u)\n", gfs2_jindex_size(sdp), gfs2_jindex_size(sdp) - 1); goto fail_jindex; } sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid); error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid, 
&gfs2_journal_glops, LM_ST_EXCLUSIVE, LM_FLAG_NOEXP, &sdp->sd_journal_gh); if (error) { fs_err(sdp, "can't acquire journal glock: %d\n", error); goto fail_jindex; } ip = GFS2_I(sdp->sd_jdesc->jd_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE, &sdp->sd_jinode_gh); if (error) { fs_err(sdp, "can't acquire journal inode glock: %d\n", error); goto fail_journal_gh; } error = gfs2_jdesc_check(sdp->sd_jdesc); if (error) { fs_err(sdp, "my journal (%u) is bad: %d\n", sdp->sd_jdesc->jd_jid, error); goto fail_jinode_gh; } atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5); atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5); /* Map the extents for this journal's blocks */ map_journal_extents(sdp); } trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free)); if (sdp->sd_lockstruct.ls_first) { unsigned int x; for (x = 0; x < sdp->sd_journals; x++) { error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x), true); if (error) { fs_err(sdp, "error recovering journal %u: %d\n", x, error); goto fail_jinode_gh; } } gfs2_others_may_mount(sdp); } else if (!sdp->sd_args.ar_spectator) { error = gfs2_recover_journal(sdp->sd_jdesc, true); if (error) { fs_err(sdp, "error recovering my journal: %d\n", error); goto fail_jinode_gh; } } set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags); gfs2_glock_dq_uninit(&ji_gh); jindex = 0; return 0; fail_jinode_gh: if (!sdp->sd_args.ar_spectator) gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); fail_journal_gh: if (!sdp->sd_args.ar_spectator) gfs2_glock_dq_uninit(&sdp->sd_journal_gh); fail_jindex: gfs2_jindex_free(sdp); if (jindex) gfs2_glock_dq_uninit(&ji_gh); fail: iput(sdp->sd_jindex); return error; } static int init_inodes(struct gfs2_sbd *sdp, int undo) { int error = 0; struct inode *master = sdp->sd_master_dir->d_inode; if (undo) goto fail_qinode; error = init_journal(sdp, undo); if (error) goto fail; /* Read in the master 
statfs inode */ sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs"); if (IS_ERR(sdp->sd_statfs_inode)) { error = PTR_ERR(sdp->sd_statfs_inode); fs_err(sdp, "can't read in statfs inode: %d\n", error); goto fail_journal; } /* Read in the resource index inode */ sdp->sd_rindex = gfs2_lookup_simple(master, "rindex"); if (IS_ERR(sdp->sd_rindex)) { error = PTR_ERR(sdp->sd_rindex); fs_err(sdp, "can't get resource index inode: %d\n", error); goto fail_statfs; } sdp->sd_rindex_uptodate = 0; /* Read in the quota inode */ sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota"); if (IS_ERR(sdp->sd_quota_inode)) { error = PTR_ERR(sdp->sd_quota_inode); fs_err(sdp, "can't get quota file inode: %d\n", error); goto fail_rindex; } error = gfs2_rindex_update(sdp); if (error) goto fail_qinode; return 0; fail_qinode: iput(sdp->sd_quota_inode); fail_rindex: gfs2_clear_rgrpd(sdp); iput(sdp->sd_rindex); fail_statfs: iput(sdp->sd_statfs_inode); fail_journal: init_journal(sdp, UNDO); fail: return error; } static int init_per_node(struct gfs2_sbd *sdp, int undo) { struct inode *pn = NULL; char buf[30]; int error = 0; struct gfs2_inode *ip; struct inode *master = sdp->sd_master_dir->d_inode; if (sdp->sd_args.ar_spectator) return 0; if (undo) goto fail_qc_gh; pn = gfs2_lookup_simple(master, "per_node"); if (IS_ERR(pn)) { error = PTR_ERR(pn); fs_err(sdp, "can't find per_node directory: %d\n", error); return error; } sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid); sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf); if (IS_ERR(sdp->sd_sc_inode)) { error = PTR_ERR(sdp->sd_sc_inode); fs_err(sdp, "can't find local \"sc\" file: %d\n", error); goto fail; } sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid); sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf); if (IS_ERR(sdp->sd_qc_inode)) { error = PTR_ERR(sdp->sd_qc_inode); fs_err(sdp, "can't find local \"qc\" file: %d\n", error); goto fail_ut_i; } iput(pn); pn = NULL; ip = GFS2_I(sdp->sd_sc_inode); error = gfs2_glock_nq_init(ip->i_gl, 
LM_ST_EXCLUSIVE, 0, &sdp->sd_sc_gh); if (error) { fs_err(sdp, "can't lock local \"sc\" file: %d\n", error); goto fail_qc_i; } ip = GFS2_I(sdp->sd_qc_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &sdp->sd_qc_gh); if (error) { fs_err(sdp, "can't lock local \"qc\" file: %d\n", error); goto fail_ut_gh; } return 0; fail_qc_gh: gfs2_glock_dq_uninit(&sdp->sd_qc_gh); fail_ut_gh: gfs2_glock_dq_uninit(&sdp->sd_sc_gh); fail_qc_i: iput(sdp->sd_qc_inode); fail_ut_i: iput(sdp->sd_sc_inode); fail: if (pn) iput(pn); return error; } static int init_threads(struct gfs2_sbd *sdp, int undo) { struct task_struct *p; int error = 0; if (undo) goto fail_quotad; p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); error = IS_ERR(p); if (error) { fs_err(sdp, "can't start logd thread: %d\n", error); return error; } sdp->sd_logd_process = p; p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); error = IS_ERR(p); if (error) { fs_err(sdp, "can't start quotad thread: %d\n", error); goto fail; } sdp->sd_quotad_process = p; return 0; fail_quotad: kthread_stop(sdp->sd_quotad_process); fail: kthread_stop(sdp->sd_logd_process); return error; } static const match_table_t nolock_tokens = { { Opt_jid, "jid=%d\n", }, { Opt_err, NULL }, }; static const struct lm_lockops nolock_ops = { .lm_proto_name = "lock_nolock", .lm_put_lock = gfs2_glock_free, .lm_tokens = &nolock_tokens, }; /** * gfs2_lm_mount - mount a locking protocol * @sdp: the filesystem * @args: mount arguments * @silent: if 1, don't complain if the FS isn't a GFS2 fs * * Returns: errno */ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) { const struct lm_lockops *lm; struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct gfs2_args *args = &sdp->sd_args; const char *proto = sdp->sd_proto_name; const char *table = sdp->sd_table_name; char *o, *options; int ret; if (!strcmp("lock_nolock", proto)) { lm = &nolock_ops; sdp->sd_args.ar_localflocks = 1; #ifdef CONFIG_GFS2_FS_LOCKING_DLM } else if (!strcmp("lock_dlm", proto)) { lm = 
&gfs2_dlm_ops; #endif } else { printk(KERN_INFO "GFS2: can't find protocol %s\n", proto); return -ENOENT; } fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table); ls->ls_ops = lm; ls->ls_first = 1; for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) { substring_t tmp[MAX_OPT_ARGS]; int token, option; if (!o || !*o) continue; token = match_token(o, *lm->lm_tokens, tmp); switch (token) { case Opt_jid: ret = match_int(&tmp[0], &option); if (ret || option < 0) goto hostdata_error; if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags)) ls->ls_jid = option; break; case Opt_id: /* Obsolete, but left for backward compat purposes */ break; case Opt_first: ret = match_int(&tmp[0], &option); if (ret || (option != 0 && option != 1)) goto hostdata_error; ls->ls_first = option; break; case Opt_nodir: ret = match_int(&tmp[0], &option); if (ret || (option != 0 && option != 1)) goto hostdata_error; ls->ls_nodir = option; break; case Opt_err: default: hostdata_error: fs_info(sdp, "unknown hostdata (%s)\n", o); return -EINVAL; } } if (lm->lm_mount == NULL) { fs_info(sdp, "Now mounting FS...\n"); complete_all(&sdp->sd_locking_init); return 0; } ret = lm->lm_mount(sdp, table); if (ret == 0) fs_info(sdp, "Joined cluster. 
Now mounting FS...\n"); complete_all(&sdp->sd_locking_init); return ret; } void gfs2_lm_unmount(struct gfs2_sbd *sdp) { const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops; if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) && lm->lm_unmount) lm->lm_unmount(sdp); } static int gfs2_journalid_wait(void *word) { if (signal_pending(current)) return -EINTR; schedule(); return 0; } static int wait_on_journal(struct gfs2_sbd *sdp) { if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) return 0; return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE); } void gfs2_online_uevent(struct gfs2_sbd *sdp) { struct super_block *sb = sdp->sd_vfs; char ro[20]; char spectator[20]; char *envp[] = { ro, spectator, NULL }; sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0); sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0); kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp); } /** * fill_super - Read in superblock * @sb: The VFS superblock * @data: Mount options * @silent: Don't complain if it's not a GFS2 filesystem * * Returns: errno */ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent) { struct gfs2_sbd *sdp; struct gfs2_holder mount_gh; int error; sdp = init_sbd(sb); if (!sdp) { printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); return -ENOMEM; } sdp->sd_args = *args; if (sdp->sd_args.ar_spectator) { sb->s_flags |= MS_RDONLY; set_bit(SDF_RORECOVERY, &sdp->sd_flags); } if (sdp->sd_args.ar_posix_acl) sb->s_flags |= MS_POSIXACL; if (sdp->sd_args.ar_nobarrier) set_bit(SDF_NOBARRIERS, &sdp->sd_flags); sb->s_flags |= MS_NOSEC; sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; sb->s_d_op = &gfs2_dops; sb->s_export_op = &gfs2_export_ops; sb->s_xattr = gfs2_xattr_handlers; sb->s_qcop = &gfs2_quotactl_ops; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; sb->s_time_gran = 1; sb->s_maxbytes = MAX_LFS_FILESIZE; /* Set up the buffer cache and fill in some fake block size values to allow 
us to read-in the on-disk superblock. */ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK); sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits; sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT; sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit; sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum; if (sdp->sd_args.ar_statfs_quantum) { sdp->sd_tune.gt_statfs_slow = 0; sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum; } else { sdp->sd_tune.gt_statfs_slow = 1; sdp->sd_tune.gt_statfs_quantum = 30; } error = init_names(sdp, silent); if (error) goto fail; snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name); gfs2_create_debugfs_file(sdp); error = gfs2_sys_fs_add(sdp); if (error) goto fail; error = gfs2_lm_mount(sdp, silent); if (error) goto fail_sys; error = init_locking(sdp, &mount_gh, DO); if (error) goto fail_lm; error = init_sb(sdp, silent); if (error) goto fail_locking; error = wait_on_journal(sdp); if (error) goto fail_sb; /* * If user space has failed to join the cluster or some similar * failure has occurred, then the journal id will contain a * negative (error) number. This will then be returned to the * caller (of the mount syscall). 
We do this even for spectator * mounts (which just write a jid of 0 to indicate "ok" even though * the jid is unused in the spectator case) */ if (sdp->sd_lockstruct.ls_jid < 0) { error = sdp->sd_lockstruct.ls_jid; sdp->sd_lockstruct.ls_jid = 0; goto fail_sb; } if (sdp->sd_args.ar_spectator) snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", sdp->sd_table_name); else snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", sdp->sd_table_name, sdp->sd_lockstruct.ls_jid); error = init_inodes(sdp, DO); if (error) goto fail_sb; error = init_per_node(sdp, DO); if (error) goto fail_inodes; error = gfs2_statfs_init(sdp); if (error) { fs_err(sdp, "can't initialize statfs subsystem: %d\n", error); goto fail_per_node; } error = init_threads(sdp, DO); if (error) goto fail_per_node; if (!(sb->s_flags & MS_RDONLY)) { error = gfs2_make_fs_rw(sdp); if (error) { fs_err(sdp, "can't make FS RW: %d\n", error); goto fail_threads; } } gfs2_glock_dq_uninit(&mount_gh); gfs2_online_uevent(sdp); return 0; fail_threads: init_threads(sdp, UNDO); fail_per_node: init_per_node(sdp, UNDO); fail_inodes: init_inodes(sdp, UNDO); fail_sb: if (sdp->sd_root_dir) dput(sdp->sd_root_dir); if (sdp->sd_master_dir) dput(sdp->sd_master_dir); if (sb->s_root) dput(sb->s_root); sb->s_root = NULL; fail_locking: init_locking(sdp, &mount_gh, UNDO); fail_lm: gfs2_gl_hash_clear(sdp); gfs2_lm_unmount(sdp); fail_sys: gfs2_sys_fs_del(sdp); fail: gfs2_delete_debugfs_file(sdp); free_percpu(sdp->sd_lkstats); kfree(sdp); sb->s_fs_info = NULL; return error; } static int set_gfs2_super(struct super_block *s, void *data) { s->s_bdev = data; s->s_dev = s->s_bdev->bd_dev; /* * We set the bdi here to the queue backing, file systems can * overwrite this in ->fill_super() */ s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; return 0; } static int test_gfs2_super(struct super_block *s, void *ptr) { struct block_device *bdev = ptr; return (bdev == s->s_bdev); } /** * gfs2_mount - Get the GFS2 superblock * @fs_type: The GFS2 
filesystem type * @flags: Mount flags * @dev_name: The name of the device * @data: The mount arguments * * Q. Why not use get_sb_bdev() ? * A. We need to select one of two root directories to mount, independent * of whether this is the initial, or subsequent, mount of this sb * * Returns: 0 or -ve on error */ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct block_device *bdev; struct super_block *s; fmode_t mode = FMODE_READ | FMODE_EXCL; int error; struct gfs2_args args; struct gfs2_sbd *sdp; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(bdev)) return ERR_CAST(bdev); /* * once the super is inserted into the list by sget, s_umount * will protect the lockfs code from trying to start a snapshot * while we are mounting */ mutex_lock(&bdev->bd_fsfreeze_mutex); if (bdev->bd_fsfreeze_count > 0) { mutex_unlock(&bdev->bd_fsfreeze_mutex); error = -EBUSY; goto error_bdev; } s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev); mutex_unlock(&bdev->bd_fsfreeze_mutex); error = PTR_ERR(s); if (IS_ERR(s)) goto error_bdev; if (s->s_root) blkdev_put(bdev, mode); memset(&args, 0, sizeof(args)); args.ar_quota = GFS2_QUOTA_DEFAULT; args.ar_data = GFS2_DATA_DEFAULT; args.ar_commit = 30; args.ar_statfs_quantum = 30; args.ar_quota_quantum = 60; args.ar_errors = GFS2_ERRORS_DEFAULT; error = gfs2_mount_args(&args, data); if (error) { printk(KERN_WARNING "GFS2: can't parse mount arguments\n"); goto error_super; } if (s->s_root) { error = -EBUSY; if ((flags ^ s->s_flags) & MS_RDONLY) goto error_super; } else { char b[BDEVNAME_SIZE]; s->s_flags = flags; s->s_mode = mode; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(bdev)); error = fill_super(s, &args, flags & MS_SILENT ? 
1 : 0); if (error) goto error_super; s->s_flags |= MS_ACTIVE; bdev->bd_super = s; } sdp = s->s_fs_info; if (args.ar_meta) return dget(sdp->sd_master_dir); else return dget(sdp->sd_root_dir); error_super: deactivate_locked_super(s); return ERR_PTR(error); error_bdev: blkdev_put(bdev, mode); return ERR_PTR(error); } static int set_meta_super(struct super_block *s, void *ptr) { return -EINVAL; } static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct super_block *s; struct gfs2_sbd *sdp; struct path path; int error; error = kern_path(dev_name, LOOKUP_FOLLOW, &path); if (error) { printk(KERN_WARNING "GFS2: path_lookup on %s returned error %d\n", dev_name, error); return ERR_PTR(error); } s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, path.dentry->d_inode->i_sb->s_bdev); path_put(&path); if (IS_ERR(s)) { printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); return ERR_CAST(s); } if ((flags ^ s->s_flags) & MS_RDONLY) { deactivate_locked_super(s); return ERR_PTR(-EBUSY); } sdp = s->s_fs_info; return dget(sdp->sd_master_dir); } static void gfs2_kill_sb(struct super_block *sb) { struct gfs2_sbd *sdp = sb->s_fs_info; if (sdp == NULL) { kill_block_super(sb); return; } gfs2_meta_syncfs(sdp); dput(sdp->sd_root_dir); dput(sdp->sd_master_dir); sdp->sd_root_dir = NULL; sdp->sd_master_dir = NULL; shrink_dcache_sb(sb); kill_block_super(sb); gfs2_delete_debugfs_file(sdp); free_percpu(sdp->sd_lkstats); kfree(sdp); } struct file_system_type gfs2_fs_type = { .name = "gfs2", .fs_flags = FS_REQUIRES_DEV, .mount = gfs2_mount, .kill_sb = gfs2_kill_sb, .owner = THIS_MODULE, }; struct file_system_type gfs2meta_fs_type = { .name = "gfs2meta", .fs_flags = FS_REQUIRES_DEV, .mount = gfs2_mount_meta, .owner = THIS_MODULE, };
gpl-2.0
UberPinguin/android_kernel_samsung_t769
arch/arm/mach-omap2/clock2xxx.c
4116
1545
/* * clock2xxx.c - OMAP2xxx-specific clock integration code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include "clock.h" #include "clock2xxx.h" #include "cm.h" #include "cm-regbits-24xx.h" struct clk *vclk, *sclk, *dclk; /* * Omap24xx specific clock functions */ /* * Set clocks for bypass mode for reboot to work. */ void omap2xxx_clk_prepare_for_reboot(void) { u32 rate; if (vclk == NULL || sclk == NULL) return; rate = clk_get_rate(sclk); clk_set_rate(vclk, rate); } /* * Switch the MPU rate if specified on cmdline. We cannot do this * early until cmdline is parsed. XXX This should be removed from the * clock code and handled by the OPP layer code in the near future. */ static int __init omap2xxx_clk_arch_init(void) { int ret; if (!cpu_is_omap24xx()) return 0; ret = omap2_clk_switch_mpurate_at_boot("virt_prcm_set"); if (!ret) omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck"); return ret; } arch_initcall(omap2xxx_clk_arch_init);
gpl-2.0
fus1on/3.4.xx_LG_kernel
drivers/usb/storage/jumpshot.c
4628
18478
/* Driver for Lexar "Jumpshot" Compact Flash reader * * jumpshot driver v0.1: * * First release * * Current development and maintenance by: * (c) 2000 Jimmie Mayfield (mayfield+usb@sackheads.org) * * Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver * which I used as a template for this driver. * * Some bugfixes and scatter-gather code by Gregory P. Smith * (greg-usb@electricrain.com) * * Fix for media change by Joerg Schneider (js@joergschneider.com) * * Developed with the assistance of: * * (C) 2002 Alan Stern <stern@rowland.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * This driver attempts to support the Lexar Jumpshot USB CompactFlash * reader. Like many other USB CompactFlash readers, the Jumpshot contains * a USB-to-ATA chip. * * This driver supports reading and writing. If you're truly paranoid, * however, you can force the driver into a write-protected state by setting * the WP enable bits in jumpshot_handle_mode_sense. See the comments * in that routine. 
*/ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Lexar \"Jumpshot\" Compact Flash reader"); MODULE_AUTHOR("Jimmie Mayfield <mayfield+usb@sackheads.org>"); MODULE_LICENSE("GPL"); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24) } static struct usb_device_id jumpshot_usb_ids[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev jumpshot_unusual_dev_list[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV struct jumpshot_info { unsigned long sectors; /* total sector count */ unsigned long ssize; /* sector size in bytes */ /* the following aren't used yet */ unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ }; static inline int jumpshot_bulk_read(struct us_data *us, unsigned char *data, unsigned int len) { if (len == 0) return USB_STOR_XFER_GOOD; US_DEBUGP("jumpshot_bulk_read: len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, len, NULL); } static inline int jumpshot_bulk_write(struct us_data *us, unsigned char *data, unsigned int len) { if (len 
== 0) return USB_STOR_XFER_GOOD; US_DEBUGP("jumpshot_bulk_write: len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, data, len, NULL); } static int jumpshot_get_status(struct us_data *us) { int rc; if (!us) return USB_STOR_TRANSPORT_ERROR; // send the setup rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, 0, 0xA0, 0, 7, us->iobuf, 1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (us->iobuf[0] != 0x50) { US_DEBUGP("jumpshot_get_status: 0x%2x\n", us->iobuf[0]); return USB_STOR_TRANSPORT_ERROR; } return USB_STOR_TRANSPORT_GOOD; } static int jumpshot_read_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Jumpshot // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; // Since we don't read more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. 
alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 | ((sector >> 24) & 0x0F); command[6] = 0x20; // send the setup + command result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 1, command, 7); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result result = jumpshot_bulk_read(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; US_DEBUGP("jumpshot_read_data: %d bytes\n", len); // Store the data in the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, TO_XFER_BUF); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int jumpshot_write_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result, waitcount; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Jumpshot // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. // if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; // Since we don't write more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. 
alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; // Get the data from the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, FROM_XFER_BUF); command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 | ((sector >> 24) & 0x0F); command[6] = 0x30; // send the setup + command result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 1, command, 7); if (result != USB_STOR_XFER_GOOD) goto leave; // send the data result = jumpshot_bulk_write(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result. apparently the bulk write can complete // before the jumpshot drive is finished writing. so we loop // here until we get a good return code waitcount = 0; do { result = jumpshot_get_status(us); if (result != USB_STOR_TRANSPORT_GOOD) { // I have not experimented to find the smallest value. // msleep(50); } } while ((result != USB_STOR_TRANSPORT_GOOD) && (waitcount < 10)); if (result != USB_STOR_TRANSPORT_GOOD) US_DEBUGP("jumpshot_write_data: Gah! Waitcount = 10. 
Bad write!?\n"); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return result; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int jumpshot_id_device(struct us_data *us, struct jumpshot_info *info) { unsigned char *command = us->iobuf; unsigned char *reply; int rc; if (!info) return USB_STOR_TRANSPORT_ERROR; command[0] = 0xE0; command[1] = 0xEC; reply = kmalloc(512, GFP_NOIO); if (!reply) return USB_STOR_TRANSPORT_ERROR; // send the setup rc = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 6, command, 2); if (rc != USB_STOR_XFER_GOOD) { US_DEBUGP("jumpshot_id_device: Gah! " "send_control for read_capacity failed\n"); rc = USB_STOR_TRANSPORT_ERROR; goto leave; } // read the reply rc = jumpshot_bulk_read(us, reply, 512); if (rc != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } info->sectors = ((u32)(reply[117]) << 24) | ((u32)(reply[116]) << 16) | ((u32)(reply[115]) << 8) | ((u32)(reply[114]) ); rc = USB_STOR_TRANSPORT_GOOD; leave: kfree(reply); return rc; } static int jumpshot_handle_mode_sense(struct us_data *us, struct scsi_cmnd * srb, int sense_6) { static unsigned char rw_err_page[12] = { 0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0 }; static unsigned char cache_page[12] = { 0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static unsigned char rbac_page[12] = { 0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0 }; static unsigned char timer_page[8] = { 0x1C, 0x6, 0, 0, 0, 0 }; unsigned char pc, page_code; unsigned int i = 0; struct jumpshot_info *info = (struct jumpshot_info *) (us->extra); unsigned char *ptr = us->iobuf; pc = srb->cmnd[2] >> 6; page_code = srb->cmnd[2] & 0x3F; switch (pc) { case 0x0: US_DEBUGP("jumpshot_handle_mode_sense: Current values\n"); break; case 0x1: US_DEBUGP("jumpshot_handle_mode_sense: Changeable values\n"); break; case 0x2: US_DEBUGP("jumpshot_handle_mode_sense: Default values\n"); break; case 0x3: US_DEBUGP("jumpshot_handle_mode_sense: Saves values\n"); break; } 
memset(ptr, 0, 8); if (sense_6) { ptr[2] = 0x00; // WP enable: 0x80 i = 4; } else { ptr[3] = 0x00; // WP enable: 0x80 i = 8; } switch (page_code) { case 0x0: // vendor-specific mode info->sense_key = 0x05; info->sense_asc = 0x24; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; case 0x1: memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; case 0x8: memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); break; case 0x1B: memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); break; case 0x1C: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); break; case 0x3F: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; } if (sense_6) ptr[0] = i - 1; else ((__be16 *) ptr)[0] = cpu_to_be16(i - 2); usb_stor_set_xfer_buf(ptr, i, srb); return USB_STOR_TRANSPORT_GOOD; } static void jumpshot_info_destructor(void *extra) { // this routine is a placeholder... // currently, we don't allocate any extra blocks so we're okay } // Transport for the Lexar 'Jumpshot' // static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us) { struct jumpshot_info *info; int rc; unsigned long block, blocks; unsigned char *ptr = us->iobuf; static unsigned char inquiry_response[8] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (!us->extra) { us->extra = kzalloc(sizeof(struct jumpshot_info), GFP_NOIO); if (!us->extra) { US_DEBUGP("jumpshot_transport: Gah! Can't allocate storage for jumpshot info struct!\n"); return USB_STOR_TRANSPORT_ERROR; } us->extra_destructor = jumpshot_info_destructor; } info = (struct jumpshot_info *) (us->extra); if (srb->cmnd[0] == INQUIRY) { US_DEBUGP("jumpshot_transport: INQUIRY. 
Returning bogus response.\n"); memcpy(ptr, inquiry_response, sizeof(inquiry_response)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_CAPACITY) { info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec rc = jumpshot_get_status(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; rc = jumpshot_id_device(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; US_DEBUGP("jumpshot_transport: READ_CAPACITY: %ld sectors, %ld bytes per sector\n", info->sectors, info->ssize); // build the reply // ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); ((__be32 *) ptr)[1] = cpu_to_be32(info->ssize); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SELECT_10) { US_DEBUGP("jumpshot_transport: Gah! MODE_SELECT_10.\n"); return USB_STOR_TRANSPORT_ERROR; } if (srb->cmnd[0] == READ_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); US_DEBUGP("jumpshot_transport: READ_10: read block 0x%04lx count %ld\n", block, blocks); return jumpshot_read_data(us, info, block, blocks); } if (srb->cmnd[0] == READ_12) { // I don't think we'll ever see a READ_12 but support it anyway... 
// block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); US_DEBUGP("jumpshot_transport: READ_12: read block 0x%04lx count %ld\n", block, blocks); return jumpshot_read_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); US_DEBUGP("jumpshot_transport: WRITE_10: write block 0x%04lx count %ld\n", block, blocks); return jumpshot_write_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_12) { // I don't think we'll ever see a WRITE_12 but support it anyway... // block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); US_DEBUGP("jumpshot_transport: WRITE_12: write block 0x%04lx count %ld\n", block, blocks); return jumpshot_write_data(us, info, block, blocks); } if (srb->cmnd[0] == TEST_UNIT_READY) { US_DEBUGP("jumpshot_transport: TEST_UNIT_READY.\n"); return jumpshot_get_status(us); } if (srb->cmnd[0] == REQUEST_SENSE) { US_DEBUGP("jumpshot_transport: REQUEST_SENSE.\n"); memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SENSE) { US_DEBUGP("jumpshot_transport: MODE_SENSE_6 detected\n"); return jumpshot_handle_mode_sense(us, srb, 1); } if (srb->cmnd[0] == MODE_SENSE_10) { US_DEBUGP("jumpshot_transport: MODE_SENSE_10 detected\n"); return jumpshot_handle_mode_sense(us, srb, 0); } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { // sure. whatever. 
not like we can stop the user from popping // the media out of the device (no locking doors, etc) // return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == START_STOP) { /* this is used by sd.c'check_scsidisk_media_change to detect media change */ US_DEBUGP("jumpshot_transport: START_STOP.\n"); /* the first jumpshot_id_device after a media change returns an error (determined experimentally) */ rc = jumpshot_id_device(us, info); if (rc == USB_STOR_TRANSPORT_GOOD) { info->sense_key = NO_SENSE; srb->result = SUCCESS; } else { info->sense_key = UNIT_ATTENTION; srb->result = SAM_STAT_CHECK_CONDITION; } return rc; } US_DEBUGP("jumpshot_transport: Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static int jumpshot_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - jumpshot_usb_ids) + jumpshot_unusual_dev_list); if (result) return result; us->transport_name = "Lexar Jumpshot Control/Bulk"; us->transport = jumpshot_transport; us->transport_reset = usb_stor_Bulk_reset; us->max_lun = 1; result = usb_stor_probe2(us); return result; } static struct usb_driver jumpshot_driver = { .name = "ums-jumpshot", .probe = jumpshot_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = jumpshot_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_driver(jumpshot_driver);
gpl-2.0
jsr-d9/android_kernel_msm
drivers/isdn/hardware/avm/avm_cs.c
5140
4246
/* $Id: avm_cs.c,v 1.4.6.3 2001/09/23 22:24:33 kai Exp $ * * A PCMCIA client driver for AVM B1/M1/M2 * * Copyright 1999 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/major.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <linux/skbuff.h> #include <linux/capi.h> #include <linux/b1lli.h> #include <linux/b1pcmcia.h> /*====================================================================*/ MODULE_DESCRIPTION("CAPI4Linux: PCMCIA client driver for AVM B1/M1/M2"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /*====================================================================*/ static int avmcs_config(struct pcmcia_device *link); static void avmcs_release(struct pcmcia_device *link); static void avmcs_detach(struct pcmcia_device *p_dev); static int avmcs_probe(struct pcmcia_device *p_dev) { /* General socket configuration */ p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; p_dev->config_index = 1; p_dev->config_regs = PRESENT_OPTION; return avmcs_config(p_dev); } /* avmcs_attach */ static void avmcs_detach(struct pcmcia_device *link) { avmcs_release(link); } /* avmcs_detach */ static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->end = 16; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; return pcmcia_request_io(p_dev); } static int avmcs_config(struct pcmcia_device *link) { int i = -1; char devname[128]; int cardtype; int (*addcard)(unsigned int port, unsigned irq); devname[0] = 0; if (link->prod_id[1]) strlcpy(devname, link->prod_id[1], 
sizeof(devname)); /* * find IO port */ if (pcmcia_loop_config(link, avmcs_configcheck, NULL)) return -ENODEV; do { if (!link->irq) { /* undo */ pcmcia_disable_device(link); break; } /* * configure the PCMCIA socket */ i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; } } while (0); if (devname[0]) { char *s = strrchr(devname, ' '); if (!s) s = devname; else s++; if (strcmp("M1", s) == 0) { cardtype = AVM_CARDTYPE_M1; } else if (strcmp("M2", s) == 0) { cardtype = AVM_CARDTYPE_M2; } else { cardtype = AVM_CARDTYPE_B1; } } else cardtype = AVM_CARDTYPE_B1; /* If any step failed, release any partially configured state */ if (i != 0) { avmcs_release(link); return -ENODEV; } switch (cardtype) { case AVM_CARDTYPE_M1: addcard = b1pcmcia_addcard_m1; break; case AVM_CARDTYPE_M2: addcard = b1pcmcia_addcard_m2; break; default: case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break; } if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) { dev_err(&link->dev, "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n", (unsigned int) link->resource[0]->start, link->irq); avmcs_release(link); return -ENODEV; } return 0; } /* avmcs_config */ static void avmcs_release(struct pcmcia_device *link) { b1pcmcia_delcard(link->resource[0]->start, link->irq); pcmcia_disable_device(link); } /* avmcs_release */ static const struct pcmcia_device_id avmcs_ids[] = { PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335), PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430), PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, avmcs_ids); static struct pcmcia_driver avmcs_driver = { .owner = THIS_MODULE, .name = "avm_cs", .probe = avmcs_probe, .remove = avmcs_detach, .id_table = avmcs_ids, }; static int __init avmcs_init(void) { return pcmcia_register_driver(&avmcs_driver); } static void __exit avmcs_exit(void) { 
pcmcia_unregister_driver(&avmcs_driver); } module_init(avmcs_init); module_exit(avmcs_exit);
gpl-2.0
messi2050/android_kernel_huawei_msm8610
drivers/net/ethernet/intel/igb/e1000_mbx.c
5140
11481
/******************************************************************************* Intel(R) Gigabit Ethernet Linux driver Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "e1000_mbx.h" /** * igb_read_mbx - Reads a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to read * * returns SUCCESS if it successfully read message from buffer **/ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; /* limit read to size of mailbox */ if (size > mbx->size) size = mbx->size; if (mbx->ops.read) ret_val = mbx->ops.read(hw, msg, size, mbx_id); return ret_val; } /** * igb_write_mbx - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer **/ s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, 
u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = 0; if (size > mbx->size) ret_val = -E1000_ERR_MBX; else if (mbx->ops.write) ret_val = mbx->ops.write(hw, msg, size, mbx_id); return ret_val; } /** * igb_check_for_msg - checks to see if someone sent us mail * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; if (mbx->ops.check_for_msg) ret_val = mbx->ops.check_for_msg(hw, mbx_id); return ret_val; } /** * igb_check_for_ack - checks to see if someone sent us ACK * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; if (mbx->ops.check_for_ack) ret_val = mbx->ops.check_for_ack(hw, mbx_id); return ret_val; } /** * igb_check_for_rst - checks to see if other side has reset * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; if (mbx->ops.check_for_rst) ret_val = mbx->ops.check_for_rst(hw, mbx_id); return ret_val; } /** * igb_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification **/ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!countdown || !mbx->ops.check_for_msg) goto out; while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) break; udelay(mbx->usec_delay); } /* if we failed, all future 
posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: return countdown ? 0 : -E1000_ERR_MBX; } /** * igb_poll_for_ack - Wait for message acknowledgement * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message acknowledgement **/ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!countdown || !mbx->ops.check_for_ack) goto out; while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) break; udelay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: return countdown ? 0 : -E1000_ERR_MBX; } /** * igb_read_posted_mbx - Wait for message notification and receive message * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. 
**/ static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; if (!mbx->ops.read) goto out; ret_val = igb_poll_for_msg(hw, mbx_id); if (!ret_val) ret_val = mbx->ops.read(hw, msg, size, mbx_id); out: return ret_val; } /** * igb_write_posted_mbx - Write a message to the mailbox, wait for ack * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) goto out; /* send msg */ ret_val = mbx->ops.write(hw, msg, size, mbx_id); /* if msg sent wait until we receive an ack */ if (!ret_val) ret_val = igb_poll_for_ack(hw, mbx_id); out: return ret_val; } static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) { u32 mbvficr = rd32(E1000_MBVFICR); s32 ret_val = -E1000_ERR_MBX; if (mbvficr & mask) { ret_val = 0; wr32(E1000_MBVFICR, mask); } return ret_val; } /** * igb_check_for_msg_pf - checks to see if the VF has sent mail * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { ret_val = 0; hw->mbx.stats.reqs++; } return ret_val; } /** * igb_check_for_ack_pf - checks to see if the VF has ACKed * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 igb_check_for_ack_pf(struct e1000_hw 
*hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { ret_val = 0; hw->mbx.stats.acks++; } return ret_val; } /** * igb_check_for_rst_pf - checks to see if the VF has reset * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) { u32 vflre = rd32(E1000_VFLRE); s32 ret_val = -E1000_ERR_MBX; if (vflre & (1 << vf_number)) { ret_val = 0; wr32(E1000_VFLRE, (1 << vf_number)); hw->mbx.stats.rsts++; } return ret_val; } /** * igb_obtain_mbx_lock_pf - obtain mailbox lock * @hw: pointer to the HW structure * @vf_number: the VF index * * return SUCCESS if we obtained the mailbox lock **/ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; u32 p2v_mailbox; /* Take ownership of the buffer */ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); /* reserve mailbox for vf use */ p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); if (p2v_mailbox & E1000_P2VMAILBOX_PFU) ret_val = 0; return ret_val; } /** * igb_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; /* lock the mailbox to prevent pf/vf race condition */ ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ igb_check_for_msg_pf(hw, vf_number); igb_check_for_ack_pf(hw, vf_number); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); /* Interrupt VF to tell it a message has been sent and release 
buffer*/ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); /* update stats */ hw->mbx.stats.msgs_tx++; out_no_write: return ret_val; } /** * igb_read_mbx_pf - Read a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. **/ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; /* lock the mailbox to prevent pf/vf race condition */ ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_read; /* copy the message to the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); /* Acknowledge the message and release buffer */ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; out_no_read: return ret_val; } /** * e1000_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for pf mailbox */ s32 igb_init_mbx_params_pf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; mbx->timeout = 0; mbx->usec_delay = 0; mbx->size = E1000_VFMAILBOX_SIZE; mbx->ops.read = igb_read_mbx_pf; mbx->ops.write = igb_write_mbx_pf; mbx->ops.read_posted = igb_read_posted_mbx; mbx->ops.write_posted = igb_write_posted_mbx; mbx->ops.check_for_msg = igb_check_for_msg_pf; mbx->ops.check_for_ack = igb_check_for_ack_pf; mbx->ops.check_for_rst = igb_check_for_rst_pf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; return 0; }
gpl-2.0
NoelMacwan/Kernel-C6806-KOT49H.S2.2052
drivers/mtd/maps/autcpu12-nvram.c
5396
3145
/* * NV-RAM memory access on autcpu12 * (C) 2002 Thomas Gleixner (gleixner@autronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/init.h> #include <asm/io.h> #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/autcpu12.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> static struct mtd_info *sram_mtd; struct map_info autcpu12_sram_map = { .name = "SRAM", .size = 32768, .bankwidth = 4, .phys = 0x12000000, }; static int __init init_autcpu12_sram (void) { int err, save0, save1; autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K); if (!autcpu12_sram_map.virt) { printk("Failed to ioremap autcpu12 NV-RAM space\n"); err = -EIO; goto out; } simple_map_init(&autcpu_sram_map); /* * Check for 32K/128K * read ofs 0 * read ofs 0x10000 * Write complement to ofs 0x100000 * Read and check result on ofs 0x0 * Restore contents */ save0 = map_read32(&autcpu12_sram_map,0); save1 = map_read32(&autcpu12_sram_map,0x10000); map_write32(&autcpu12_sram_map,~save0,0x10000); /* if we find this pattern on 0x0, we have 32K size * restore contents and exit */ if ( map_read32(&autcpu12_sram_map,0) != save0) { map_write32(&autcpu12_sram_map,save0,0x0); goto map; } /* We have a 
128K found, restore 0x10000 and set size * to 128K */ map_write32(&autcpu12_sram_map,save1,0x10000); autcpu12_sram_map.size = SZ_128K; map: sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map); if (!sram_mtd) { printk("NV-RAM probe failed\n"); err = -ENXIO; goto out_ioremap; } sram_mtd->owner = THIS_MODULE; sram_mtd->erasesize = 16; if (mtd_device_register(sram_mtd, NULL, 0)) { printk("NV-RAM device addition failed\n"); err = -ENOMEM; goto out_probe; } printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K); return 0; out_probe: map_destroy(sram_mtd); sram_mtd = 0; out_ioremap: iounmap((void *)autcpu12_sram_map.virt); out: return err; } static void __exit cleanup_autcpu12_maps(void) { if (sram_mtd) { mtd_device_unregister(sram_mtd); map_destroy(sram_mtd); iounmap((void *)autcpu12_sram_map.virt); } } module_init(init_autcpu12_sram); module_exit(cleanup_autcpu12_maps); MODULE_AUTHOR("Thomas Gleixner"); MODULE_DESCRIPTION("autcpu12 NV-RAM map driver"); MODULE_LICENSE("GPL");
gpl-2.0
hzc1126/dell_gallo_kernel_ics
fs/nls/nls_iso8859-2.c
12564
13181
/* * linux/fs/nls/nls_iso8859-2.c * * Charset iso8859-2 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x0104, 0x02d8, 0x0141, 0x00a4, 0x013d, 0x015a, 0x00a7, 0x00a8, 0x0160, 0x015e, 0x0164, 0x0179, 0x00ad, 0x017d, 0x017b, /* 0xb0*/ 0x00b0, 0x0105, 0x02db, 0x0142, 0x00b4, 0x013e, 0x015b, 
0x02c7, 0x00b8, 0x0161, 0x015f, 0x0165, 0x017a, 0x02dd, 0x017e, 0x017c, /* 0xc0*/ 0x0154, 0x00c1, 0x00c2, 0x0102, 0x00c4, 0x0139, 0x0106, 0x00c7, 0x010c, 0x00c9, 0x0118, 0x00cb, 0x011a, 0x00cd, 0x00ce, 0x010e, /* 0xd0*/ 0x0110, 0x0143, 0x0147, 0x00d3, 0x00d4, 0x0150, 0x00d6, 0x00d7, 0x0158, 0x016e, 0x00da, 0x0170, 0x00dc, 0x00dd, 0x0162, 0x00df, /* 0xe0*/ 0x0155, 0x00e1, 0x00e2, 0x0103, 0x00e4, 0x013a, 0x0107, 0x00e7, 0x010d, 0x00e9, 0x0119, 0x00eb, 0x011b, 0x00ed, 0x00ee, 0x010f, /* 0xf0*/ 0x0111, 0x0144, 0x0148, 0x00f3, 0x00f4, 0x0151, 0x00f6, 0x00f7, 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0xa7, 
/* 0xa0-0xa7 */ 0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ 0xb0, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */ 0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */ 0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xc5, 0xe5, 0x00, 0x00, 0xa5, 0xb5, 0x00, /* 0x38-0x3f */ 0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */ 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */ 0xd8, 0xf8, 0xa6, 0xb6, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */ 0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */ 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe, 0x00, /* 0x78-0x7f */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 
0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned 
char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-2", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_iso8859_2(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_2(void) { unregister_nls(&table); } module_init(init_nls_iso8859_2) module_exit(exit_nls_iso8859_2) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
DJNoXD/rockchip-kernel-rk2918
scripts/unifdef.c
12564
35639
/* * Copyright (c) 2002 - 2011 Tony Finch <dot@dotat.at> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * unifdef - remove ifdef'ed lines * * This code was derived from software contributed to Berkeley by Dave Yost. * It was rewritten to support ANSI C by Tony Finch. The original version * of unifdef carried the 4-clause BSD copyright licence. None of its code * remains in this version (though some of the names remain) so it now * carries a more liberal licence. 
 *
 * Wishlist:
 *      provide an option which will append the name of the
 *        appropriate symbol after #else's and #endif's
 *      provide an option which will check symbols after
 *        #else's and #endif's to see that they match their
 *        corresponding #ifdef or #ifndef
 *
 *   These require better buffer handling, which would also make
 *   it possible to handle all "dodgy" directives correctly.
 */

#include <sys/types.h>
#include <sys/stat.h>

#include <ctype.h>
#include <err.h>
#include <errno.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Version banner; version() prints the text between each pair of '$'s. */
const char copyright[] =
    "@(#) $Version: unifdef-2.5 $\n"
    "@(#) $Author: Tony Finch (dot@dotat.at) $\n"
    "@(#) $URL: http://dotat.at/prog/unifdef $\n"
;

/*
 * types of input lines:
 *
 * The LT_DODGY block mirrors the first LT_TRUEI..LT_ENDIF block; a
 * directive type is shifted into the dodgy range (retval += LT_DODGY)
 * when the directive is entangled with a multi-line comment.
 */
typedef enum {
	LT_TRUEI,		/* a true #if with ignore flag */
	LT_FALSEI,		/* a false #if with ignore flag */
	LT_IF,			/* an unknown #if */
	LT_TRUE,		/* a true #if */
	LT_FALSE,		/* a false #if */
	LT_ELIF,		/* an unknown #elif */
	LT_ELTRUE,		/* a true #elif */
	LT_ELFALSE,		/* a false #elif */
	LT_ELSE,		/* #else */
	LT_ENDIF,		/* #endif */
	LT_DODGY,		/* flag: directive is not on one line */
	LT_DODGY_LAST = LT_DODGY + LT_ENDIF,
	LT_PLAIN,		/* ordinary line */
	LT_EOF,			/* end of file */
	LT_ERROR,		/* unevaluable #if */
	LT_COUNT
} Linetype;

/* Debug names for Linetype values; indexed by Linetype. */
static char const * const linetype_name[] = {
	"TRUEI", "FALSEI", "IF", "TRUE", "FALSE",
	"ELIF", "ELTRUE", "ELFALSE", "ELSE", "ENDIF",
	"DODGY TRUEI", "DODGY FALSEI",
	"DODGY IF", "DODGY TRUE", "DODGY FALSE",
	"DODGY ELIF", "DODGY ELTRUE", "DODGY ELFALSE",
	"DODGY ELSE", "DODGY ENDIF",
	"PLAIN", "EOF", "ERROR"
};

/* state of #if processing */
typedef enum {
	IS_OUTSIDE,
	IS_FALSE_PREFIX,	/* false #if followed by false #elifs */
	IS_TRUE_PREFIX,		/* first non-false #(el)if is true */
	IS_PASS_MIDDLE,		/* first non-false #(el)if is unknown */
	IS_FALSE_MIDDLE,	/* a false #elif after a pass state */
	IS_TRUE_MIDDLE,		/* a true #elif after a pass state */
	IS_PASS_ELSE,		/* an else after a pass state */
	IS_FALSE_ELSE,		/* an else after a true state */
	IS_TRUE_ELSE,		/* an else after only false states */
	IS_FALSE_TRAILER,	/* #elifs after a true are false */
	IS_COUNT
} Ifstate;

/* Debug names for Ifstate values; indexed by Ifstate. */
static char const * const ifstate_name[] = {
	"OUTSIDE", "FALSE_PREFIX", "TRUE_PREFIX",
	"PASS_MIDDLE", "FALSE_MIDDLE", "TRUE_MIDDLE",
	"PASS_ELSE", "FALSE_ELSE", "TRUE_ELSE",
	"FALSE_TRAILER"
};

/* state of comment parser */
typedef enum {
	NO_COMMENT = false,	/* outside a comment */
	C_COMMENT,		/* in a comment like this one */
	CXX_COMMENT,		/* between // and end of line */
	STARTING_COMMENT,	/* just after slash-backslash-newline */
	FINISHING_COMMENT,	/* star-backslash-newline in a C comment */
	CHAR_LITERAL,		/* inside '' */
	STRING_LITERAL		/* inside "" */
} Comment_state;

/* Debug names for Comment_state values; indexed by Comment_state. */
static char const * const comment_name[] = {
	"NO", "C", "CXX", "STARTING", "FINISHING", "CHAR", "STRING"
};

/* state of preprocessor line parser */
typedef enum {
	LS_START,		/* only space and comments on this line */
	LS_HASH,		/* only space, comments, and a hash */
	LS_DIRTY		/* this line can't be a preprocessor line */
} Line_state;

/* Debug names for Line_state values; indexed by Line_state. */
static char const * const linestate_name[] = {
	"START", "HASH", "DIRTY"
};

/*
 * Minimum translation limits from ISO/IEC 9899:1999 5.2.4.1
 */
#define	MAXDEPTH        64			/* maximum #if nesting */
#define	MAXLINE         4096			/* maximum length of line */
#define	MAXSYMS         4096			/* maximum number of symbols */

/*
 * Sometimes when editing a keyword the replacement text is longer, so
 * we leave some space at the end of the tline buffer to accommodate this.
 */
#define	EDITSLOP        10

/*
 * For temporary filenames
 */
#define TEMPLATE        "unifdef.XXXXXX"

/*
 * Globals.
 */

static bool             compblank;		/* -B: compress blank lines */
static bool             lnblank;		/* -b: blank deleted lines */
static bool             complement;		/* -c: do the complement */
static bool             debugging;		/* -d: debugging reports */
static bool             iocccok;		/* -e: fewer IOCCC errors */
static bool             strictlogic;		/* -K: keep ambiguous #ifs */
static bool             killconsts;		/* -k: eval constant #ifs */
static bool             lnnum;			/* -n: add #line directives */
static bool             symlist;		/* -s: output symbol list */
static bool             symdepth;		/* -S: output symbol depth */
static bool             text;			/* -t: this is a text file */

static const char      *symname[MAXSYMS];	/* symbol name */
static const char      *value[MAXSYMS];		/* -Dsym=value */
static bool             ignore[MAXSYMS];	/* -iDsym or -iUsym */
static int              nsyms;			/* number of symbols */

static FILE            *input;			/* input file pointer */
static const char      *filename;		/* input file name */
static int              linenum;		/* current line number */
static FILE            *output;			/* output file pointer */
static const char      *ofilename;		/* output file name */
static bool             overwriting;		/* output overwrites input */
static char             tempname[FILENAME_MAX];	/* used when overwriting */

static char             tline[MAXLINE+EDITSLOP];/* input buffer plus space */
static char            *keyword;		/* used for editing #elif's */

static const char      *newline;		/* input file format */
static const char       newline_unix[] = "\n";
static const char       newline_crlf[] = "\r\n";

static Comment_state    incomment;		/* comment parser state */
static Line_state       linestate;		/* #if line parser state */
static Ifstate          ifstate[MAXDEPTH];	/* #if processor state */
static bool             ignoring[MAXDEPTH];	/* ignore comments state */
static int              stifline[MAXDEPTH];	/* start of current #if */
static int              depth;			/* current #if nesting */
static int              delcount;		/* count of deleted lines */
static unsigned         blankcount;		/* count of blank lines */
static unsigned         blankmax;		/* maximum recent blankcount */
/* NOTE(review): 'constexpr' became a keyword in C23; rename if this is
   ever built with a C23 compiler. Fine under C99/C11. */
static bool             constexpr;		/* constant #if expression */
static bool             zerosyms = true;	/* to format symdepth output */
static bool             firstsym;		/* ditto */

static int              exitstat;		/* program exit status */

static void             addsym(bool, bool, char *);
static void             closeout(void);
static void             debug(const char *, ...);
static void             done(void);
static void             error(const char *);
static int              findsym(const char *);
static void             flushline(bool);
static Linetype         parseline(void);
static Linetype         ifeval(const char **);
static void             ignoreoff(void);
static void             ignoreon(void);
static void             keywordedit(const char *);
static void             nest(void);
static void             process(void);
static const char      *skipargs(const char *);
static const char      *skipcomment(const char *);
static const char      *skipsym(const char *);
static void             state(Ifstate);
static int              strlcmp(const char *, const char *, size_t);
static void             unnest(void);
static void             usage(void);
static void             version(void);

/* true if c cannot be part of an identifier */
#define endsym(c) (!isalnum((unsigned char)c) && c != '_')

/*
 * The main program.
 */
int
main(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "i:D:U:I:o:bBcdeKklnsStV")) != -1)
		switch (opt) {
		case 'i': /* treat stuff controlled by these symbols as text */
			/*
			 * For strict backwards-compatibility the U or D
			 * should be immediately after the -i but it doesn't
			 * matter much if we relax that requirement.
			 */
			opt = *optarg++;
			if (opt == 'D')
				addsym(true, true, optarg);
			else if (opt == 'U')
				addsym(true, false, optarg);
			else
				usage();
			break;
		case 'D': /* define a symbol */
			addsym(false, true, optarg);
			break;
		case 'U': /* undef a symbol */
			addsym(false, false, optarg);
			break;
		case 'I': /* no-op for compatibility with cpp */
			break;
		case 'b': /* blank deleted lines instead of omitting them */
		case 'l': /* backwards compatibility */
			lnblank = true;
			break;
		case 'B': /* compress blank lines around removed section */
			compblank = true;
			break;
		case 'c': /* treat -D as -U and vice versa */
			complement = true;
			break;
		case 'd':
			debugging = true;
			break;
		case 'e': /* fewer errors from dodgy lines */
			iocccok = true;
			break;
		case 'K': /* keep ambiguous #ifs */
			strictlogic = true;
			break;
		case 'k': /* process constant #ifs */
			killconsts = true;
			break;
		case 'n': /* add #line directive after deleted lines */
			lnnum = true;
			break;
		case 'o': /* output to a file */
			ofilename = optarg;
			break;
		case 's': /* only output list of symbols that control #ifs */
			symlist = true;
			break;
		case 'S': /* list symbols with their nesting depth */
			symlist = symdepth = true;
			break;
		case 't': /* don't parse C comments */
			text = true;
			break;
		case 'V': /* print version */
			version(); /* never returns; falls to usage() only on paper */
		default:
			usage();
		}
	argc -= optind;
	argv += optind;
	if (compblank && lnblank)
		errx(2, "-B and -b are mutually exclusive");
	if (argc > 1) {
		errx(2, "can only do one file");
	} else if (argc == 1 && strcmp(*argv, "-") != 0) {
		filename = *argv;
		input = fopen(filename, "rb");
		if (input == NULL)
			err(2, "can't open %s", filename);
	} else {
		filename = "[stdin]";
		input = stdin;
	}
	if (ofilename == NULL) {
		ofilename = "[stdout]";
		output = stdout;
	} else {
		struct stat ist, ost;

		/* If input and output are the same file, write to a
		   temporary in the same directory and rename in done(). */
		if (stat(ofilename, &ost) == 0 &&
		    fstat(fileno(input), &ist) == 0)
			overwriting = (ist.st_dev == ost.st_dev
			            && ist.st_ino == ost.st_ino);
		if (overwriting) {
			const char *dirsep;
			int ofd;

			dirsep = strrchr(ofilename, '/');
			if (dirsep != NULL)
				snprintf(tempname, sizeof(tempname),
				    "%.*s/" TEMPLATE,
				    (int)(dirsep - ofilename), ofilename);
			else
				snprintf(tempname, sizeof(tempname),
				    TEMPLATE);
			ofd = mkstemp(tempname);
			if (ofd != -1)
				output = fdopen(ofd, "wb+");
			if (output == NULL)
				err(2, "can't create temporary file");
			/* carry the input file's permissions over */
			fchmod(ofd, ist.st_mode & (S_IRWXU|S_IRWXG|S_IRWXO));
		} else {
			output = fopen(ofilename, "wb");
			if (output == NULL)
				err(2, "can't open %s", ofilename);
		}
	}
	process();
	/* process() exits via done() at EOF; reaching here is a bug */
	abort(); /* bug */
}

/* Print the version strings embedded in copyright[] and exit(0). */
static void
version(void)
{
	const char *c = copyright;
	for (;;) {
		while (*++c != '$')
			if (*c == '\0')
				exit(0);
		while (*++c != '$')
			putc(*c, stderr);
		putc('\n', stderr);
	}
}

/* Print a usage summary and exit(2). */
static void
usage(void)
{
	fprintf(stderr, "usage: unifdef [-bBcdeKknsStV] [-Ipath]"
	    " [-Dsym[=val]] [-Usym] [-iDsym[=val]] [-iUsym] ... [file]\n");
	exit(2);
}

/*
 * A state transition function alters the global #if processing state
 * in a particular way. The table below is indexed by the current
 * processing state and the type of the current line.
 *
 * Nesting is handled by keeping a stack of states; some transition
 * functions increase or decrease the depth. They also maintain the
 * ignore state on a stack. In some complicated cases they have to
 * alter the preprocessor directive, as follows.
 *
 * When we have processed a group that starts off with a known-false
 * #if/#elif sequence (which has therefore been deleted) followed by a
 * #elif that we don't understand and therefore must keep, we edit the
 * latter into a #if to keep the nesting correct. We use strncpy() to
 * overwrite the 4 byte token "elif" with "if  " without a '\0' byte.
 *
 * When we find a true #elif in a group, the following block will
 * always be kept and the rest of the sequence after the next #elif or
 * #else will be discarded. We edit the #elif into a #else and the
 * following directive to #endif since this has the desired behaviour.
 *
 * "Dodgy" directives are split across multiple lines, the most common
 * example being a multi-line comment hanging off the right of the
 * directive.
 We can handle them correctly only if there is no change
 * from printing to dropping (or vice versa) caused by that directive.
 * If the directive is the first of a group we have a choice between
 * failing with an error, or passing it through unchanged instead of
 * evaluating it. The latter is not the default to avoid questions from
 * users about unifdef unexpectedly leaving behind preprocessor directives.
 */
typedef void state_fn(void);

/* report an error */
static void Eelif (void) { error("Inappropriate #elif"); }
static void Eelse (void) { error("Inappropriate #else"); }
static void Eendif(void) { error("Inappropriate #endif"); }
static void Eeof  (void) { error("Premature EOF"); }
static void Eioccc(void) { error("Obfuscated preprocessor control line"); }
/* plain line handling */
static void print (void) { flushline(true); }
static void drop  (void) { flushline(false); }
/* output lacks group's start line */
static void Strue (void) { drop();  ignoreoff(); state(IS_TRUE_PREFIX); }
static void Sfalse(void) { drop();  ignoreoff(); state(IS_FALSE_PREFIX); }
static void Selse (void) { drop();               state(IS_TRUE_ELSE); }
/* print/pass this block */
static void Pelif (void) { print(); ignoreoff(); state(IS_PASS_MIDDLE); }
static void Pelse (void) { print();              state(IS_PASS_ELSE); }
static void Pendif(void) { print(); unnest(); }
/* discard this block */
static void Dfalse(void) { drop();  ignoreoff(); state(IS_FALSE_TRAILER); }
static void Delif (void) { drop();  ignoreoff(); state(IS_FALSE_MIDDLE); }
static void Delse (void) { drop();               state(IS_FALSE_ELSE); }
static void Dendif(void) { drop();  unnest(); }
/* first line of group */
static void Fdrop (void) { nest();  Dfalse(); }
static void Fpass (void) { nest();  Pelif(); }
static void Ftrue (void) { nest();  Strue(); }
static void Ffalse(void) { nest();  Sfalse(); }
/* variable pedantry for obfuscated lines */
static void Oiffy (void) { if (!iocccok) Eioccc(); Fpass(); ignoreon(); }
static void Oif   (void) { if (!iocccok) Eioccc(); Fpass(); }
static void Oelif (void) { if (!iocccok) Eioccc(); Pelif(); }
/* ignore comments in this block */
static void Idrop (void) { Fdrop();  ignoreon(); }
static void Itrue (void) { Ftrue();  ignoreon(); }
static void Ifalse(void) { Ffalse(); ignoreon(); }
/* modify this line */
static void Mpass (void) { strncpy(keyword, "if  ", 4); Pelif(); }
static void Mtrue (void) { keywordedit("else");  state(IS_TRUE_MIDDLE); }
static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }

/*
 * The transition table proper: row = current Ifstate, column = Linetype
 * of the line just parsed (see the legend at the bottom). Each row has
 * three groups: normal directives, their DODGY counterparts, and the
 * PLAIN/EOF/ERROR columns.
 */
static state_fn * const trans_table[IS_COUNT][LT_COUNT] = {
/* IS_OUTSIDE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Eendif,
  Oiffy, Oiffy, Fpass, Oif,   Oif,   Eelif, Eelif, Eelif, Eelse, Eendif,
  print, done,  abort },
/* IS_FALSE_PREFIX */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Strue, Sfalse,Selse, Dendif,
  Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Eioccc,Eioccc,Eioccc,Eioccc,
  drop,  Eeof,  abort },
/* IS_TRUE_PREFIX */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Dfalse,Dfalse,Dfalse,Delse, Dendif,
  Oiffy, Oiffy, Fpass, Oif,   Oif,   Eioccc,Eioccc,Eioccc,Eioccc,Eioccc,
  print, Eeof,  abort },
/* IS_PASS_MIDDLE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Pelif, Mtrue, Delif, Pelse, Pendif,
  Oiffy, Oiffy, Fpass, Oif,   Oif,   Pelif, Oelif, Oelif, Pelse, Pendif,
  print, Eeof,  abort },
/* IS_FALSE_MIDDLE */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Pelif, Mtrue, Delif, Pelse, Pendif,
  Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc,
  drop,  Eeof,  abort },
/* IS_TRUE_MIDDLE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Melif, Melif, Melif, Melse, Pendif,
  Oiffy, Oiffy, Fpass, Oif,   Oif,   Eioccc,Eioccc,Eioccc,Eioccc,Pendif,
  print, Eeof,  abort },
/* IS_PASS_ELSE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Pendif,
  Oiffy, Oiffy, Fpass, Oif,   Oif,   Eelif, Eelif, Eelif, Eelse, Pendif,
  print, Eeof,  abort },
/* IS_FALSE_ELSE */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, Dendif,
  Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, Eioccc,
  drop,  Eeof,  abort },
/* IS_TRUE_ELSE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Dendif,
  Oiffy, Oiffy, Fpass, Oif,   Oif,   Eelif, Eelif, Eelif, Eelse, Eioccc,
  print, Eeof,  abort },
/* IS_FALSE_TRAILER */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Dendif,
  Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Eioccc,
  drop,  Eeof,  abort }
/*TRUEI  FALSEI IF     TRUE   FALSE  ELIF   ELTRUE ELFALSE ELSE  ENDIF
  TRUEI  FALSEI IF     TRUE   FALSE  ELIF   ELTRUE ELFALSE ELSE  ENDIF (DODGY)
  PLAIN  EOF    ERROR */
};

/*
 * State machine utility functions
 */
/* Inherit the enclosing level's ignore flag when entering a group. */
static void
ignoreoff(void)
{
	if (depth == 0)
		abort(); /* bug */
	ignoring[depth] = ignoring[depth-1];
}
/* Force comment-ignoring inside the current group (-iD/-iU symbols). */
static void
ignoreon(void)
{
	ignoring[depth] = true;
}
/* Replace the directive keyword in tline with `replacement` and print. */
static void
keywordedit(const char *replacement)
{
	snprintf(keyword, tline + sizeof(tline) - keyword,
	    "%s%s", replacement, newline);
	print();
}
/* Push a new #if level, remembering the line it started on. */
static void
nest(void)
{
	if (depth > MAXDEPTH-1)
		abort(); /* bug */
	if (depth == MAXDEPTH-1)
		error("Too many levels of nesting");
	depth += 1;
	stifline[depth] = linenum;
}
/* Pop one #if level. */
static void
unnest(void)
{
	if (depth == 0)
		abort(); /* bug */
	depth -= 1;
}
/* Set the Ifstate at the current nesting depth. */
static void
state(Ifstate is)
{
	ifstate[depth] = is;
}

/*
 * Write a line to the output or not, according to command line options.
 */
static void
flushline(bool keep)
{
	if (symlist)
		return;
	if (keep ^ complement) {
		bool blankline = tline[strspn(tline, " \t\r\n")] == '\0';
		if (blankline && compblank && blankcount != blankmax) {
			/* -B: swallow runs of blank lines around deletions */
			delcount += 1;
			blankcount += 1;
		} else {
			if (lnnum && delcount > 0)
				printf("#line %d%s", linenum, newline);
			fputs(tline, output);
			delcount = 0;
			blankmax = blankcount =
			    blankline ? blankcount + 1 : 0;
		}
	} else {
		/* dropped line: exit status 1 signals "output differs" */
		if (lnblank)
			fputs(newline, output);
		exitstat = 1;
		delcount += 1;
		blankcount = 0;
	}
	if (debugging)
		fflush(output);
}

/*
 * The driver for the state machine.
 */
static void
process(void)
{
	/* When compressing blank lines, act as if the file
	   is preceded by a large number of blank lines. */
	blankmax = blankcount = 1000;
	/* Loop forever: the EOF line type reaches done() (or Eeof)
	   through trans_table, which is how this loop terminates. */
	for (;;) {
		Linetype lineval = parseline();
		trans_table[ifstate[depth]][lineval]();
		debug("process line %d %s -> %s depth %d",
		    linenum, linetype_name[lineval],
		    ifstate_name[ifstate[depth]], depth);
	}
}

/*
 * Flush the output and handle errors.
 */
static void
closeout(void)
{
	if (symdepth && !zerosyms)
		printf("\n");
	if (fclose(output) == EOF) {
		warn("couldn't write to %s", ofilename);
		if (overwriting) {
			/* don't clobber the original with a broken temp */
			unlink(tempname);
			errx(2, "%s unchanged", filename);
		} else {
			exit(2);
		}
	}
}

/*
 * Clean up and exit.
 */
static void
done(void)
{
	if (incomment)
		error("EOF in comment");
	closeout();
	/* atomically replace the input file when overwriting in place */
	if (overwriting && rename(tempname, ofilename) == -1) {
		warn("couldn't rename temporary file");
		unlink(tempname);
		errx(2, "%s unchanged", ofilename);
	}
	exit(exitstat);
}

/*
 * Parse a line and determine its type. We keep the preprocessor line
 * parser state between calls in the global variable linestate, with
 * help from skipcomment().
*/ static Linetype parseline(void) { const char *cp; int cursym; int kwlen; Linetype retval; Comment_state wascomment; linenum++; if (fgets(tline, MAXLINE, input) == NULL) return (LT_EOF); if (newline == NULL) { if (strrchr(tline, '\n') == strrchr(tline, '\r') + 1) newline = newline_crlf; else newline = newline_unix; } retval = LT_PLAIN; wascomment = incomment; cp = skipcomment(tline); if (linestate == LS_START) { if (*cp == '#') { linestate = LS_HASH; firstsym = true; cp = skipcomment(cp + 1); } else if (*cp != '\0') linestate = LS_DIRTY; } if (!incomment && linestate == LS_HASH) { keyword = tline + (cp - tline); cp = skipsym(cp); kwlen = cp - keyword; /* no way can we deal with a continuation inside a keyword */ if (strncmp(cp, "\\\r\n", 3) == 0 || strncmp(cp, "\\\n", 2) == 0) Eioccc(); if (strlcmp("ifdef", keyword, kwlen) == 0 || strlcmp("ifndef", keyword, kwlen) == 0) { cp = skipcomment(cp); if ((cursym = findsym(cp)) < 0) retval = LT_IF; else { retval = (keyword[2] == 'n') ? LT_FALSE : LT_TRUE; if (value[cursym] == NULL) retval = (retval == LT_TRUE) ? LT_FALSE : LT_TRUE; if (ignore[cursym]) retval = (retval == LT_TRUE) ? 
LT_TRUEI : LT_FALSEI; } cp = skipsym(cp); } else if (strlcmp("if", keyword, kwlen) == 0) retval = ifeval(&cp); else if (strlcmp("elif", keyword, kwlen) == 0) retval = ifeval(&cp) - LT_IF + LT_ELIF; else if (strlcmp("else", keyword, kwlen) == 0) retval = LT_ELSE; else if (strlcmp("endif", keyword, kwlen) == 0) retval = LT_ENDIF; else { linestate = LS_DIRTY; retval = LT_PLAIN; } cp = skipcomment(cp); if (*cp != '\0') { linestate = LS_DIRTY; if (retval == LT_TRUE || retval == LT_FALSE || retval == LT_TRUEI || retval == LT_FALSEI) retval = LT_IF; if (retval == LT_ELTRUE || retval == LT_ELFALSE) retval = LT_ELIF; } if (retval != LT_PLAIN && (wascomment || incomment)) { retval += LT_DODGY; if (incomment) linestate = LS_DIRTY; } /* skipcomment normally changes the state, except if the last line of the file lacks a newline, or if there is too much whitespace in a directive */ if (linestate == LS_HASH) { size_t len = cp - tline; if (fgets(tline + len, MAXLINE - len, input) == NULL) { /* append the missing newline */ strcpy(tline + len, newline); cp += strlen(newline); linestate = LS_START; } else { linestate = LS_DIRTY; } } } if (linestate == LS_DIRTY) { while (*cp != '\0') cp = skipcomment(cp + 1); } debug("parser line %d state %s comment %s line", linenum, comment_name[incomment], linestate_name[linestate]); return (retval); } /* * These are the binary operators that are supported by the expression * evaluator. */ static Linetype op_strict(int *p, int v, Linetype at, Linetype bt) { if(at == LT_IF || bt == LT_IF) return (LT_IF); return (*p = v, v ? 
LT_TRUE : LT_FALSE); } static Linetype op_lt(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a < b, at, bt); } static Linetype op_gt(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a > b, at, bt); } static Linetype op_le(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a <= b, at, bt); } static Linetype op_ge(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a >= b, at, bt); } static Linetype op_eq(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a == b, at, bt); } static Linetype op_ne(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a != b, at, bt); } static Linetype op_or(int *p, Linetype at, int a, Linetype bt, int b) { if (!strictlogic && (at == LT_TRUE || bt == LT_TRUE)) return (*p = 1, LT_TRUE); return op_strict(p, a || b, at, bt); } static Linetype op_and(int *p, Linetype at, int a, Linetype bt, int b) { if (!strictlogic && (at == LT_FALSE || bt == LT_FALSE)) return (*p = 0, LT_FALSE); return op_strict(p, a && b, at, bt); } /* * An evaluation function takes three arguments, as follows: (1) a pointer to * an element of the precedence table which lists the operators at the current * level of precedence; (2) a pointer to an integer which will receive the * value of the expression; and (3) a pointer to a char* that points to the * expression to be evaluated and that is updated to the end of the expression * when evaluation is complete. The function returns LT_FALSE if the value of * the expression is zero, LT_TRUE if it is non-zero, LT_IF if the expression * depends on an unknown symbol, or LT_ERROR if there is a parse failure. */ struct ops; typedef Linetype eval_fn(const struct ops *, int *, const char **); static eval_fn eval_table, eval_unary; /* * The precedence table. Expressions involving binary operators are evaluated * in a table-driven way by eval_table. 
 When it evaluates a subexpression it
 * calls the inner function with its first argument pointing to the next
 * element of the table. Innermost expressions have special non-table-driven
 * handling.
 */
static const struct ops {
	eval_fn *inner;	/* evaluator for the next (tighter) level */
	struct op {
		const char *str;	/* operator token */
		Linetype (*fn)(int *, Linetype, int, Linetype, int);
	} op[5];
} eval_ops[] = {
	{ eval_table, { { "||", op_or } } },
	{ eval_table, { { "&&", op_and } } },
	{ eval_table, { { "==", op_eq },
			{ "!=", op_ne } } },
	{ eval_unary, { { "<=", op_le },
			{ ">=", op_ge },
			{ "<", op_lt },
			{ ">", op_gt } } }
};

/*
 * Function for evaluating the innermost parts of expressions,
 * viz. !expr (expr) number defined(symbol) symbol
 * We reset the constexpr flag in the last two cases.
 */
static Linetype
eval_unary(const struct ops *ops, int *valp, const char **cpp)
{
	const char *cp;
	char *ep;
	int sym;
	bool defparen;
	Linetype lt;

	cp = skipcomment(*cpp);
	if (*cp == '!') {
		/* logical negation: recurse, then flip a known value */
		debug("eval%d !", ops - eval_ops);
		cp++;
		lt = eval_unary(ops, valp, &cp);
		if (lt == LT_ERROR)
			return (LT_ERROR);
		if (lt != LT_IF) {
			*valp = !*valp;
			lt = *valp ? LT_TRUE : LT_FALSE;
		}
	} else if (*cp == '(') {
		/* parenthesized subexpression: restart at top precedence */
		cp++;
		debug("eval%d (", ops - eval_ops);
		lt = eval_table(eval_ops, valp, &cp);
		if (lt == LT_ERROR)
			return (LT_ERROR);
		cp = skipcomment(cp);
		if (*cp++ != ')')
			return (LT_ERROR);
	} else if (isdigit((unsigned char)*cp)) {
		/* integer literal (any base strtol accepts) */
		debug("eval%d number", ops - eval_ops);
		*valp = strtol(cp, &ep, 0);
		if (ep == cp)
			return (LT_ERROR);
		lt = *valp ? LT_TRUE : LT_FALSE;
		cp = skipsym(cp);
	} else if (strncmp(cp, "defined", 7) == 0 && endsym(cp[7])) {
		/* defined(SYM) or defined SYM */
		cp = skipcomment(cp+7);
		debug("eval%d defined", ops - eval_ops);
		if (*cp == '(') {
			cp = skipcomment(cp+1);
			defparen = true;
		} else {
			defparen = false;
		}
		sym = findsym(cp);
		if (sym < 0) {
			lt = LT_IF;
		} else {
			*valp = (value[sym] != NULL);
			lt = *valp ? LT_TRUE : LT_FALSE;
		}
		cp = skipsym(cp);
		cp = skipcomment(cp);
		if (defparen && *cp++ != ')')
			return (LT_ERROR);
		constexpr = false;
	} else if (!endsym(*cp)) {
		/* bare symbol, possibly with macro arguments */
		debug("eval%d symbol", ops - eval_ops);
		sym = findsym(cp);
		cp = skipsym(cp);
		if (sym < 0) {
			lt = LT_IF;
			cp = skipargs(cp);
		} else if (value[sym] == NULL) {
			/* -Usym evaluates to 0, like cpp */
			*valp = 0;
			lt = LT_FALSE;
		} else {
			*valp = strtol(value[sym], &ep, 0);
			if (*ep != '\0' || ep == value[sym])
				return (LT_ERROR);
			lt = *valp ? LT_TRUE : LT_FALSE;
			cp = skipargs(cp);
		}
		constexpr = false;
	} else {
		debug("eval%d bad expr", ops - eval_ops);
		return (LT_ERROR);
	}

	*cpp = cp;
	debug("eval%d = %d", ops - eval_ops, *valp);
	return (lt);
}

/*
 * Table-driven evaluation of binary operators.
 */
static Linetype
eval_table(const struct ops *ops, int *valp, const char **cpp)
{
	const struct op *op;
	const char *cp;
	int val;
	Linetype lt, rt;

	debug("eval%d", ops - eval_ops);
	cp = *cpp;
	/* evaluate the left operand at the next tighter precedence */
	lt = ops->inner(ops+1, valp, &cp);
	if (lt == LT_ERROR)
		return (LT_ERROR);
	for (;;) {
		cp = skipcomment(cp);
		for (op = ops->op; op->str != NULL; op++)
			if (strncmp(cp, op->str, strlen(op->str)) == 0)
				break;
		if (op->str == NULL)
			break;
		cp += strlen(op->str);
		debug("eval%d %s", ops - eval_ops, op->str);
		rt = ops->inner(ops+1, &val, &cp);
		if (rt == LT_ERROR)
			return (LT_ERROR);
		lt = op->fn(valp, lt, *valp, rt, val);
	}

	*cpp = cp;
	debug("eval%d = %d", ops - eval_ops, *valp);
	debug("eval%d lt = %s", ops - eval_ops, linetype_name[lt]);
	return (lt);
}

/*
 * Evaluate the expression on a #if or #elif line. If we can work out
 * the result we return LT_TRUE or LT_FALSE accordingly, otherwise we
 * return just a generic LT_IF.
 */
static Linetype
ifeval(const char **cpp)
{
	int ret;
	int val = 0;

	debug("eval %s", *cpp);
	/* without -k, a fully-constant #if is left alone (LT_IF) */
	constexpr = killconsts ? false : true;
	ret = eval_table(eval_ops, &val, cpp);
	debug("eval = %d", val);
	return (constexpr ? LT_IF : ret == LT_ERROR ? LT_IF : ret);
}

/*
 * Skip over comments, strings, and character literals and stop at the
 * next character position that is not whitespace.
 Between calls we keep
 * the comment state in the global variable incomment, and we also adjust
 * the global variable linestate when we see a newline.
 * XXX: doesn't cope with the buffer splitting inside a state transition.
 */
static const char *
skipcomment(const char *cp)
{
	/* -t, or inside an ignored group: treat everything as plain text */
	if (text || ignoring[depth]) {
		for (; isspace((unsigned char)*cp); cp++)
			if (*cp == '\n')
				linestate = LS_START;
		return (cp);
	}
	while (*cp != '\0')
		/* don't reset to LS_START after a line continuation */
		if (strncmp(cp, "\\\r\n", 3) == 0)
			cp += 3;
		else if (strncmp(cp, "\\\n", 2) == 0)
			cp += 2;
		else switch (incomment) {
		case NO_COMMENT:
			/* "/\<newline>" may open a comment: decide later */
			if (strncmp(cp, "/\\\r\n", 4) == 0) {
				incomment = STARTING_COMMENT;
				cp += 4;
			} else if (strncmp(cp, "/\\\n", 3) == 0) {
				incomment = STARTING_COMMENT;
				cp += 3;
			} else if (strncmp(cp, "/*", 2) == 0) {
				incomment = C_COMMENT;
				cp += 2;
			} else if (strncmp(cp, "//", 2) == 0) {
				incomment = CXX_COMMENT;
				cp += 2;
			} else if (strncmp(cp, "\'", 1) == 0) {
				incomment = CHAR_LITERAL;
				linestate = LS_DIRTY;
				cp += 1;
			} else if (strncmp(cp, "\"", 1) == 0) {
				incomment = STRING_LITERAL;
				linestate = LS_DIRTY;
				cp += 1;
			} else if (strncmp(cp, "\n", 1) == 0) {
				linestate = LS_START;
				cp += 1;
			} else if (strchr(" \r\t", *cp) != NULL) {
				cp += 1;
			} else
				return (cp);
			continue;
		case CXX_COMMENT:
			if (strncmp(cp, "\n", 1) == 0) {
				incomment = NO_COMMENT;
				linestate = LS_START;
			}
			cp += 1;
			continue;
		case CHAR_LITERAL:
		case STRING_LITERAL:
			if ((incomment == CHAR_LITERAL && cp[0] == '\'') ||
			    (incomment == STRING_LITERAL && cp[0] == '\"')) {
				incomment = NO_COMMENT;
				cp += 1;
			} else if (cp[0] == '\\') {
				/* skip the escaped character, if any */
				if (cp[1] == '\0')
					cp += 1;
				else
					cp += 2;
			} else if (strncmp(cp, "\n", 1) == 0) {
				if (incomment == CHAR_LITERAL)
					error("unterminated char literal");
				else
					error("unterminated string literal");
			} else
				cp += 1;
			continue;
		case C_COMMENT:
			/* "*\<newline>" may close the comment: decide later */
			if (strncmp(cp, "*\\\r\n", 4) == 0) {
				incomment = FINISHING_COMMENT;
				cp += 4;
			} else if (strncmp(cp, "*\\\n", 3) == 0) {
				incomment = FINISHING_COMMENT;
				cp += 3;
			} else if (strncmp(cp, "*/", 2) == 0) {
				incomment = NO_COMMENT;
				cp += 2;
			} else
				cp += 1;
			continue;
		case STARTING_COMMENT:
			if (*cp == '*') {
				incomment = C_COMMENT;
				cp += 1;
			} else if (*cp == '/') {
				incomment = CXX_COMMENT;
				cp += 1;
			} else {
				/* it was just a '/': not a comment at all */
				incomment = NO_COMMENT;
				linestate = LS_DIRTY;
			}
			continue;
		case FINISHING_COMMENT:
			if (*cp == '/') {
				incomment = NO_COMMENT;
				cp += 1;
			} else
				incomment = C_COMMENT;
			continue;
		default:
			abort(); /* bug */
		}
	return (cp);
}

/*
 * Skip macro arguments.
 */
static const char *
skipargs(const char *cp)
{
	const char *ocp = cp;
	int level = 0;
	cp = skipcomment(cp);
	if (*cp != '(')
		return (cp);
	do {
		if (*cp == '(')
			level++;
		if (*cp == ')')
			level--;
		cp = skipcomment(cp+1);
	} while (level != 0 && *cp != '\0');
	if (level == 0)
		return (cp);
	else
	/* Rewind and re-detect the syntax error later. */
		return (ocp);
}

/*
 * Skip over an identifier.
 */
static const char *
skipsym(const char *cp)
{
	while (!endsym(*cp))
		++cp;
	return (cp);
}

/*
 * Look for the symbol in the symbol table. If it is found, we return
 * the symbol table index, else we return -1.
 */
static int
findsym(const char *str)
{
	const char *cp;
	int symind;

	cp = skipsym(str);
	if (cp == str)
		return (-1);
	if (symlist) {
		/* -s/-S mode: report the symbol instead of looking it up */
		if (symdepth && firstsym)
			printf("%s%3d", zerosyms ? "" : "\n", depth);
		firstsym = zerosyms = false;
		printf("%s%.*s%s", symdepth ? " " : "",
		    (int)(cp-str), str,
		    symdepth ? "" : "\n");
		/* we don't care about the value of the symbol */
		return (0);
	}
	for (symind = 0; symind < nsyms; ++symind) {
		if (strlcmp(symname[symind], str, cp-str) == 0) {
			debug("findsym %s %s", symname[symind],
			    value[symind] ? value[symind] : "");
			return (symind);
		}
	}
	return (-1);
}

/*
 * Add a symbol to the symbol table.
*/ static void addsym(bool ignorethis, bool definethis, char *sym) { int symind; char *val; symind = findsym(sym); if (symind < 0) { if (nsyms >= MAXSYMS) errx(2, "too many symbols"); symind = nsyms++; } symname[symind] = sym; ignore[symind] = ignorethis; val = sym + (skipsym(sym) - sym); if (definethis) { if (*val == '=') { value[symind] = val+1; *val = '\0'; } else if (*val == '\0') value[symind] = "1"; else usage(); } else { if (*val != '\0') usage(); value[symind] = NULL; } debug("addsym %s=%s", symname[symind], value[symind] ? value[symind] : "undef"); } /* * Compare s with n characters of t. * The same as strncmp() except that it checks that s[n] == '\0'. */ static int strlcmp(const char *s, const char *t, size_t n) { while (n-- && *t != '\0') if (*s != *t) return ((unsigned char)*s - (unsigned char)*t); else ++s, ++t; return ((unsigned char)*s); } /* * Diagnostics. */ static void debug(const char *msg, ...) { va_list ap; if (debugging) { va_start(ap, msg); vwarnx(msg, ap); va_end(ap); } } static void error(const char *msg) { if (depth == 0) warnx("%s: %d: %s", filename, linenum, msg); else warnx("%s: %d: %s (#if line %d depth %d)", filename, linenum, msg, stifline[depth], depth); closeout(); errx(2, "output may be truncated"); }
gpl-2.0
boulzordev/android_kernel_motorola_msm8916
fs/nls/nls_cp874.c
12564
10995
/* * linux/fs/nls/nls_cp874.c * * Charset cp874 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2026, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 0x90*/ 0x0000, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 0xa0*/ 0x00a0, 0x0e01, 0x0e02, 0x0e03, 0x0e04, 0x0e05, 0x0e06, 0x0e07, 0x0e08, 0x0e09, 0x0e0a, 0x0e0b, 0x0e0c, 0x0e0d, 0x0e0e, 0x0e0f, /* 0xb0*/ 0x0e10, 0x0e11, 0x0e12, 0x0e13, 0x0e14, 0x0e15, 0x0e16, 0x0e17, 
0x0e18, 0x0e19, 0x0e1a, 0x0e1b, 0x0e1c, 0x0e1d, 0x0e1e, 0x0e1f, /* 0xc0*/ 0x0e20, 0x0e21, 0x0e22, 0x0e23, 0x0e24, 0x0e25, 0x0e26, 0x0e27, 0x0e28, 0x0e29, 0x0e2a, 0x0e2b, 0x0e2c, 0x0e2d, 0x0e2e, 0x0e2f, /* 0xd0*/ 0x0e30, 0x0e31, 0x0e32, 0x0e33, 0x0e34, 0x0e35, 0x0e36, 0x0e37, 0x0e38, 0x0e39, 0x0e3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0e3f, /* 0xe0*/ 0x0e40, 0x0e41, 0x0e42, 0x0e43, 0x0e44, 0x0e45, 0x0e46, 0x0e47, 0x0e48, 0x0e49, 0x0e4a, 0x0e4b, 0x0e4c, 0x0e4d, 0x0e4e, 0x0e4f, /* 0xf0*/ 0x0e50, 0x0e51, 0x0e52, 0x0e53, 0x0e54, 0x0e55, 0x0e56, 0x0e57, 0x0e58, 0x0e59, 0x0e5a, 0x0e5b, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 
0xa0-0xa7 */ }; static const unsigned char page0e[256] = { 0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x08-0x0f */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x00, 0x00, 0x93, 0x94, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page0e, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 
*/ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 
0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) 
out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp874", .alias = "tis-620", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp874(void) { return register_nls(&table); } static void __exit exit_nls_cp874(void) { unregister_nls(&table); } module_init(init_nls_cp874) module_exit(exit_nls_cp874) MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS_NLS(tis-620);
gpl-2.0
TeamNostalgia/amlogic-3.0.8
arch/powerpc/boot/prpmc2800.c
13332
13994
/*
 * Motorola ECC prpmc280/f101 & prpmc2800/f101e platform code.
 *
 * Author: Mark A. Greer <mgreer@mvista.com>
 *
 * 2007 (c) MontaVista, Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "page.h"
#include "string.h"
#include "stdio.h"
#include "io.h"
#include "ops.h"
#include "gunzip_util.h"
#include "mv64x60.h"

#define KB	1024U
#define MB	(KB*KB)
#define GB	(KB*MB)
#define MHz	(1000U*1000U)
#define GHz	(1000U*MHz)

#define BOARD_MODEL	"PrPMC2800"
#define BOARD_MODEL_MAX	32 /* max strlen(BOARD_MODEL) + 1 */

/* I2C addresses of the VPD eeproms */
#define EEPROM2_ADDR	0xa4
#define EEPROM3_ADDR	0xa8

BSS_STACK(16*KB);

/* Virtual base of the MV64x60 host bridge; set in platform_init() */
static u8 *bridge_base;

typedef enum {
	BOARD_MODEL_PRPMC280,
	BOARD_MODEL_PRPMC2800,
} prpmc2800_board_model;

typedef enum {
	BRIDGE_TYPE_MV64360,
	BRIDGE_TYPE_MV64362,
} prpmc2800_bridge_type;

/*
 * Per-variant board description.  An entry is matched against the VPD
 * bytes read from eeprom2 (subsys0/subsys1 against vpd[0]/vpd[1], and
 * vpd4/vpd4_mask against vpd[4]).
 */
struct prpmc2800_board_info {
	prpmc2800_board_model model;
	char variant;
	prpmc2800_bridge_type bridge_type;
	u8 subsys0;
	u8 subsys1;
	u8 vpd4;
	u8 vpd4_mask;
	u32 core_speed;
	u32 mem_size;
	u32 boot_flash;
	u32 user_flash;
};

static struct prpmc2800_board_info prpmc2800_board_info[] = {
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'a',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x00, .vpd4_mask = 0x0f,
	  .core_speed = 1*GHz, .mem_size = 512*MB,
	  .boot_flash = 1*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'b',
	  .bridge_type = BRIDGE_TYPE_MV64362,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x01, .vpd4_mask = 0x0f,
	  .core_speed = 1*GHz, .mem_size = 512*MB,
	  .boot_flash = 0, .user_flash = 0, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'c',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x02, .vpd4_mask = 0x0f,
	  .core_speed = 733*MHz, .mem_size = 512*MB,
	  .boot_flash = 1*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'd',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x03, .vpd4_mask = 0x0f,
	  .core_speed = 1*GHz, .mem_size = 1*GB,
	  .boot_flash = 1*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'e',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x04, .vpd4_mask = 0x0f,
	  .core_speed = 1*GHz, .mem_size = 512*MB,
	  .boot_flash = 1*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'f',
	  .bridge_type = BRIDGE_TYPE_MV64362,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x05, .vpd4_mask = 0x0f,
	  .core_speed = 733*MHz, .mem_size = 128*MB,
	  .boot_flash = 1*MB, .user_flash = 0, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'g',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x06, .vpd4_mask = 0x0f,
	  .core_speed = 1*GHz, .mem_size = 256*MB,
	  .boot_flash = 1*MB, .user_flash = 0, },
	{ .model = BOARD_MODEL_PRPMC280, .variant = 'h',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x07, .vpd4_mask = 0x0f,
	  .core_speed = 1*GHz, .mem_size = 1*GB,
	  .boot_flash = 1*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'a',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xb2, .subsys1 = 0x8c, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 1*GHz, .mem_size = 512*MB,
	  .boot_flash = 2*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'b',
	  .bridge_type = BRIDGE_TYPE_MV64362,
	  .subsys0 = 0xb2, .subsys1 = 0x8d, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 1*GHz, .mem_size = 512*MB,
	  .boot_flash = 0, .user_flash = 0, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'c',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xb2, .subsys1 = 0x8e, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 733*MHz, .mem_size = 512*MB,
	  .boot_flash = 2*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'd',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xb2, .subsys1 = 0x8f, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 1*GHz, .mem_size = 1*GB,
	  .boot_flash = 2*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'e',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xa2, .subsys1 = 0x8a, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 1*GHz, .mem_size = 512*MB,
	  .boot_flash = 2*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'f',
	  .bridge_type = BRIDGE_TYPE_MV64362,
	  .subsys0 = 0xa2, .subsys1 = 0x8b, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 733*MHz, .mem_size = 128*MB,
	  .boot_flash = 2*MB, .user_flash = 0, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'g',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xa2, .subsys1 = 0x8c, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 1*GHz, .mem_size = 2*GB,
	  .boot_flash = 2*MB, .user_flash = 64*MB, },
	{ .model = BOARD_MODEL_PRPMC2800, .variant = 'h',
	  .bridge_type = BRIDGE_TYPE_MV64360,
	  .subsys0 = 0xa2, .subsys1 = 0x8d, .vpd4 = 0x00, .vpd4_mask = 0x00,
	  .core_speed = 733*MHz, .mem_size = 1*GB,
	  .boot_flash = 2*MB, .user_flash = 64*MB, },
};

/*
 * Find the board-info entry matching the VPD bytes; NULL if no entry
 * matches (caller falls back to device-tree defaults).
 */
static struct prpmc2800_board_info *prpmc2800_get_board_info(u8 *vpd)
{
	struct prpmc2800_board_info *bip;
	int i;

	for (i=0,bip=prpmc2800_board_info; i<ARRAY_SIZE(prpmc2800_board_info);
			i++,bip++)
		if ((vpd[0] == bip->subsys0) && (vpd[1] == bip->subsys1)
				&& ((vpd[4] & bip->vpd4_mask) == bip->vpd4))
			return bip;

	return NULL;
}

/* Get VPD from i2c eeprom 2, then match it to a board info entry */
static struct prpmc2800_board_info *prpmc2800_get_bip(void)
{
	struct prpmc2800_board_info *bip;
	u8 vpd[5];
	int rc;

	if (mv64x60_i2c_open())
		fatal("Error: Can't open i2c device\n\r");

	/* Get VPD from i2c eeprom-2 (5 bytes starting at offset 0x1fde) */
	memset(vpd, 0, sizeof(vpd));
	rc = mv64x60_i2c_read(EEPROM2_ADDR, vpd, 0x1fde, 2, sizeof(vpd));
	if (rc < 0)
		fatal("Error: Couldn't read eeprom2\n\r");
	mv64x60_i2c_close();

	/* Get board type & related info */
	bip = prpmc2800_get_board_info(vpd);
	if (bip == NULL) {
		printf("Error: Unsupported board or corrupted VPD:\n\r");
		printf(" 0x%x 0x%x 0x%x 0x%x 0x%x\n\r",
				vpd[0], vpd[1], vpd[2], vpd[3], vpd[4]);
		printf("Using device tree defaults...\n\r");
	}

	return bip;
}

/*
 * Program the MV64x60 CPU->PCI address windows from the device tree's
 * "ranges" property, with cache-coherency-dependent PCI access bits.
 */
static void prpmc2800_bridge_setup(u32 mem_size)
{
	u32 i, v[12], enables, acc_bits;
	u32 pci_base_hi, pci_base_lo, size, buf[2];
	unsigned long cpu_base;
	int rc;
	void *devp;
	u8 *bridge_pbase, is_coherent;
	struct mv64x60_cpu2pci_win *tbl;

	bridge_pbase = mv64x60_get_bridge_pbase();
	is_coherent = mv64x60_is_coherent();

	/* Snooping/burst settings differ between coherent and
	 * non-coherent configurations */
	if (is_coherent)
		acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_WB
			| MV64x60_PCI_ACC_CNTL_SWAP_NONE
			| MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES
			| MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES;
	else
		acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_NONE
			| MV64x60_PCI_ACC_CNTL_SWAP_NONE
			| MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES
			| MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES;

	mv64x60_config_ctlr_windows(bridge_base, bridge_pbase, is_coherent);
	mv64x60_config_pci_windows(bridge_base, bridge_pbase, 0, 0, mem_size,
			acc_bits);

	/* Get the cpu -> pci i/o & mem mappings from the device tree */
	devp = find_node_by_compatible(NULL, "marvell,mv64360-pci");
	if (devp == NULL)
		fatal("Error: Missing marvell,mv64360-pci"
				" device tree node\n\r");

	rc = getprop(devp, "ranges", v, sizeof(v));
	if (rc != sizeof(v))
		fatal("Error: Can't find marvell,mv64360-pci ranges"
				" property\n\r");

	/* Get the cpu -> pci i/o & mem mappings from the device tree */
	devp = find_node_by_compatible(NULL, "marvell,mv64360");
	if (devp == NULL)
		fatal("Error: Missing marvell,mv64360 device tree node\n\r");

	enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE));
	enables |= 0x0007fe00; /* Disable all cpu->pci windows */
	out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables);

	/* "ranges" holds two 6-cell entries: one PCI I/O, one PCI MEM */
	for (i=0; i<12; i+=6) {
		switch (v[i] & 0xff000000) {
		case 0x01000000: /* PCI I/O Space */
			tbl = mv64x60_cpu2pci_io;
			break;
		case 0x02000000: /* PCI MEM Space */
			tbl = mv64x60_cpu2pci_mem;
			break;
		default:
			continue;
		}

		pci_base_hi = v[i+1];
		pci_base_lo = v[i+2];
		cpu_base = v[i+3];
		size = v[i+5];

		buf[0] = cpu_base;
		buf[1] = size;

		if (!dt_xlate_addr(devp, buf, sizeof(buf), &cpu_base))
			fatal("Error: Can't translate PCI address 0x%x\n\r",
					(u32)cpu_base);

		mv64x60_config_cpu2pci_window(bridge_base, 0, pci_base_hi,
				pci_base_lo, cpu_base, size, tbl);
	}

	enables &= ~0x00000600; /* Enable cpu->pci0 i/o, cpu->pci0 mem0 */
	out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables);
}

/*
 * Device-tree fixups: identify the board from VPD, configure the
 * bridge, then override model / clock / memory / flash properties.
 */
static void prpmc2800_fixups(void)
{
	u32 v[2], l, mem_size;
	int rc;
	void *devp;
	char model[BOARD_MODEL_MAX];
	struct prpmc2800_board_info *bip;

	bip = prpmc2800_get_bip(); /* Get board info based on VPD */

	mem_size = (bip) ? bip->mem_size : mv64x60_get_mem_size(bridge_base);
	prpmc2800_bridge_setup(mem_size); /* Do necessary bridge setup */

	/* If the VPD doesn't match what we know about, just use the
	 * defaults already in the device tree. */
	if (!bip)
		return;

	/* Know the board type so override device tree defaults */
	/* Set /model appropriately */
	devp = finddevice("/");
	if (devp == NULL)
		fatal("Error: Missing '/' device tree node\n\r");
	memset(model, 0, BOARD_MODEL_MAX);
	strncpy(model, BOARD_MODEL, BOARD_MODEL_MAX - 2);
	l = strlen(model);
	if (bip->model == BOARD_MODEL_PRPMC280)
		l--;			/* drop trailing '0': "PrPMC280" */
	model[l++] = bip->variant;
	model[l++] = '\0';
	setprop(devp, "model", model, l);

	/* Set /cpus/PowerPC,7447/clock-frequency */
	devp = find_node_by_prop_value_str(NULL, "device_type", "cpu");
	if (devp == NULL)
		fatal("Error: Missing proper cpu device tree node\n\r");
	v[0] = bip->core_speed;
	setprop(devp, "clock-frequency", &v[0], sizeof(v[0]));

	/* Set /memory/reg size */
	devp = finddevice("/memory");
	if (devp == NULL)
		fatal("Error: Missing /memory device tree node\n\r");
	v[0] = 0;
	v[1] = bip->mem_size;
	setprop(devp, "reg", v, sizeof(v));

	/* Update model, if this is a mv64362 */
	if (bip->bridge_type == BRIDGE_TYPE_MV64362) {
		devp = find_node_by_compatible(NULL, "marvell,mv64360");
		if (devp == NULL)
			fatal("Error: Missing marvell,mv64360"
					" device tree node\n\r");
		setprop(devp, "model", "mv64362", strlen("mv64362") + 1);
	}

	/* Set User FLASH size */
	devp = find_node_by_compatible(NULL, "direct-mapped");
	if (devp == NULL)
		fatal("Error: Missing User FLASH device tree node\n\r");
	rc = getprop(devp, "reg", v, sizeof(v));
	if (rc != sizeof(v))
		fatal("Error: Can't find User FLASH reg property\n\r");
	v[1] = bip->user_flash;
	setprop(devp, "reg", v, sizeof(v));
}

/* MV64x60 multi-purpose / general-purpose pin register offsets */
#define MV64x60_MPP_CNTL_0	0xf000
#define MV64x60_MPP_CNTL_2	0xf008
#define MV64x60_GPP_IO_CNTL	0xf100
#define MV64x60_GPP_LEVEL_CNTL	0xf110
#define MV64x60_GPP_VALUE_SET	0xf118

/*
 * Board reset: after a delay, drive GPP pins 2 and 19 (configured as
 * outputs via the MPP/GPP registers) to trigger the reset, then spin.
 */
static void prpmc2800_reset(void)
{
	u32 temp;

	udelay(5000000);	/* 5s pause before resetting */

	if (bridge_base != 0) {
		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0));
		temp &= 0xFFFF0FFF;
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00000004;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00000004;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2));
		temp &= 0xFFFF0FFF;
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00080000;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00080000;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);

		out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET),
				0x00080004);
	}

	for (;;);	/* wait for the hardware reset to take effect */
}

#define HEAP_SIZE	(16*MB)
static struct gunzip_state gzstate;

/*
 * Bootwrapper entry: size the kernel by peeking at its ELF header,
 * place the malloc heap past both the zImage and the unpacked kernel,
 * relocate the dtb there, and hook up platform callbacks.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	struct elf_info ei;
	char *heap_start, *dtb;
	int dt_size = _dtb_end - _dtb_start;
	void *vmlinuz_addr = _vmlinux_start;
	unsigned long vmlinuz_size = _vmlinux_end - _vmlinux_start;
	char elfheader[256];

	if (dt_size <= 0) /* No fdt */
		exit();

	/*
	 * Start heap after end of the kernel (after decompressed to
	 * address 0) or the end of the zImage, whichever is higher.
	 * That's so things allocated by simple_alloc won't overwrite
	 * any part of the zImage and the kernel won't overwrite the dtb
	 * when decompressed & relocated.
	 */
	gunzip_start(&gzstate, vmlinuz_addr, vmlinuz_size);
	gunzip_exactly(&gzstate, elfheader, sizeof(elfheader));

	if (!parse_elf32(elfheader, &ei))
		exit();

	heap_start = (char *)(ei.memsize + ei.elfoffset); /* end of kernel*/
	heap_start = max(heap_start, (char *)_end); /* end of zImage */

	if ((unsigned)simple_alloc_init(heap_start, HEAP_SIZE, 2*KB, 16)
			> (128*MB))
		exit();

	/* Relocate dtb to safe area past end of zImage & kernel */
	dtb = malloc(dt_size);
	if (!dtb)
		exit();
	memmove(dtb, _dtb_start, dt_size);
	fdt_init(dtb);

	bridge_base = mv64x60_get_bridge_base();

	platform_ops.fixups = prpmc2800_fixups;
	platform_ops.exit = prpmc2800_reset;

	if (serial_console_init() < 0)
		exit();
}

/* _zimage_start called very early--need to turn off external interrupts */
asm ("	.globl _zimage_start\n\
_zimage_start:\n\
	mfmsr	10\n\
	rlwinm	10,10,0,~(1<<15)	/* Clear MSR_EE */\n\
	sync\n\
	mtmsr	10\n\
	isync\n\
	b _zimage_start_lib\n\
");
gpl-2.0
nik124seleznev/ZC500TG
arch/powerpc/boot/treeboot-walnut.c
14100
2215
/*
 * Old U-boot compatibility for Walnut
 *
 * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
 *
 * Copyright 2007 IBM Corporation
 * Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "dcr.h"
#include "4xx.h"
#include "io.h"

BSS_STACK(4096);

/*
 * The Walnut board can swap the flash and SRAM address ranges depending
 * on an FPGA strap (BRDS1 register, bit 0).  Read the strap through the
 * FPGA's "virtual-reg" mapping and, when it is set, XOR the base address
 * ("reg" cell 1) of both the flash and sram device-tree nodes with
 * 0x80000 so the kernel sees the devices where they are actually mapped.
 */
static void walnut_flashsel_fixup(void)
{
	void *devp, *sram;
	u32 reg_flash[3] = {0x0, 0x0, 0x80000};
	u32 reg_sram[3] = {0x0, 0x0, 0x80000};
	u8 *fpga;
	u8 fpga_brds1 = 0x0;

	devp = finddevice("/plb/ebc/fpga");
	if (!devp)
		fatal("Couldn't locate FPGA node\n\r");

	/* "virtual-reg" holds a pointer-sized value: the FPGA's
	 * already-mapped address, read directly into the pointer */
	if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga))
		fatal("no virtual-reg property\n\r");

	fpga_brds1 = in_8(fpga);

	devp = finddevice("/plb/ebc/flash");
	if (!devp)
		fatal("Couldn't locate flash node\n\r");

	if (getprop(devp, "reg", reg_flash, sizeof(reg_flash))
			!= sizeof(reg_flash))
		fatal("flash reg property has unexpected size\n\r");

	sram = finddevice("/plb/ebc/sram");
	if (!sram)
		fatal("Couldn't locate sram node\n\r");

	if (getprop(sram, "reg", reg_sram, sizeof(reg_sram))
			!= sizeof(reg_sram))
		fatal("sram reg property has unexpected size\n\r");

	if (fpga_brds1 & 0x1) {
		/* Strap set: flash and SRAM windows are swapped */
		reg_flash[1] ^= 0x80000;
		reg_sram[1] ^= 0x80000;
	}

	setprop(devp, "reg", reg_flash, sizeof(reg_flash));
	setprop(sram, "reg", reg_sram, sizeof(reg_sram));
}

/* Address where the OpenBIOS firmware leaves the board MAC address */
#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b

/* Device-tree fixups run by the bootwrapper before booting the kernel */
static void walnut_fixups(void)
{
	ibm4xx_sdram_fixup_memsize();
	/* 33330000 Hz = 33.33 MHz system reference clock */
	ibm405gp_fixup_clocks(33330000, 0xa8c000);
	ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
	ibm4xx_fixup_ebc_ranges("/plb/ebc");
	walnut_flashsel_fixup();
	dt_fixup_mac_address_by_alias("ethernet0", (u8 *) WALNUT_OPENBIOS_MAC_OFF);
}

void platform_init(void)
{
	/* Heap runs from the end of the wrapper image up to 32MB
	 * (0x2000000); presumably the minimum RAM fitted — TODO confirm */
	unsigned long end_of_ram = 0x2000000;
	unsigned long avail_ram = end_of_ram - (unsigned long) _end;

	simple_alloc_init(_end, avail_ram, 32, 32);
	platform_ops.fixups = walnut_fixups;
	platform_ops.exit = ibm40x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
gpl-2.0
unofficial-opensource-apple/llvmgcc42
gcc/genemit.c
21
24478
/* Generate code from machine description to emit insns as rtl. Copyright (C) 1987, 1988, 1991, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "bconfig.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "rtl.h" #include "errors.h" #include "gensupport.h" static int max_opno; static int max_dup_opno; static int max_scratch_opno; static int insn_code_number; static int insn_index_number; /* Data structure for recording the patterns of insns that have CLOBBERs. We use this to output a function that adds these CLOBBERs to a previously-allocated PARALLEL expression. */ struct clobber_pat { struct clobber_ent *insns; rtx pattern; int first_clobber; struct clobber_pat *next; int has_hard_reg; } *clobber_list; /* Records one insn that uses the clobber list. */ struct clobber_ent { int code_number; /* Counts only insns. 
*/ struct clobber_ent *next; }; static void max_operand_1 (rtx); static int max_operand_vec (rtx, int); static void print_code (RTX_CODE); static void gen_exp (rtx, enum rtx_code, char *); static void gen_insn (rtx, int); static void gen_expand (rtx); static void gen_split (rtx); static void output_add_clobbers (void); static void output_added_clobbers_hard_reg_p (void); static void gen_rtx_scratch (rtx, enum rtx_code); static void output_peephole2_scratches (rtx); static void max_operand_1 (rtx x) { RTX_CODE code; int i; int len; const char *fmt; if (x == 0) return; code = GET_CODE (x); if (code == MATCH_OPERAND || code == MATCH_OPERATOR || code == MATCH_PARALLEL) max_opno = MAX (max_opno, XINT (x, 0)); if (code == MATCH_DUP || code == MATCH_OP_DUP || code == MATCH_PAR_DUP) max_dup_opno = MAX (max_dup_opno, XINT (x, 0)); if (code == MATCH_SCRATCH) max_scratch_opno = MAX (max_scratch_opno, XINT (x, 0)); fmt = GET_RTX_FORMAT (code); len = GET_RTX_LENGTH (code); for (i = 0; i < len; i++) { if (fmt[i] == 'e' || fmt[i] == 'u') max_operand_1 (XEXP (x, i)); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) max_operand_1 (XVECEXP (x, i, j)); } } } static int max_operand_vec (rtx insn, int arg) { int len = XVECLEN (insn, arg); int i; max_opno = -1; max_dup_opno = -1; max_scratch_opno = -1; for (i = 0; i < len; i++) max_operand_1 (XVECEXP (insn, arg, i)); return max_opno + 1; } static void print_code (RTX_CODE code) { const char *p1; for (p1 = GET_RTX_NAME (code); *p1; p1++) putchar (TOUPPER(*p1)); } static void gen_rtx_scratch (rtx x, enum rtx_code subroutine_type) { if (subroutine_type == DEFINE_PEEPHOLE2) { printf ("operand%d", XINT (x, 0)); } else { printf ("gen_rtx_SCRATCH (%smode)", GET_MODE_NAME (GET_MODE (x))); } } /* Print a C expression to construct an RTX just like X, substituting any operand references appearing within. 
*/ static void gen_exp (rtx x, enum rtx_code subroutine_type, char *used) { RTX_CODE code; int i; int len; const char *fmt; if (x == 0) { printf ("NULL_RTX"); return; } code = GET_CODE (x); switch (code) { case MATCH_OPERAND: case MATCH_DUP: if (used) { if (used[XINT (x, 0)]) { printf ("copy_rtx (operand%d)", XINT (x, 0)); return; } used[XINT (x, 0)] = 1; } printf ("operand%d", XINT (x, 0)); return; case MATCH_OP_DUP: printf ("gen_rtx_fmt_"); for (i = 0; i < XVECLEN (x, 1); i++) printf ("e"); printf (" (GET_CODE (operand%d), ", XINT (x, 0)); if (GET_MODE (x) == VOIDmode) printf ("GET_MODE (operand%d)", XINT (x, 0)); else printf ("%smode", GET_MODE_NAME (GET_MODE (x))); for (i = 0; i < XVECLEN (x, 1); i++) { printf (",\n\t\t"); gen_exp (XVECEXP (x, 1, i), subroutine_type, used); } printf (")"); return; case MATCH_OPERATOR: printf ("gen_rtx_fmt_"); for (i = 0; i < XVECLEN (x, 2); i++) printf ("e"); printf (" (GET_CODE (operand%d)", XINT (x, 0)); printf (", %smode", GET_MODE_NAME (GET_MODE (x))); for (i = 0; i < XVECLEN (x, 2); i++) { printf (",\n\t\t"); gen_exp (XVECEXP (x, 2, i), subroutine_type, used); } printf (")"); return; case MATCH_PARALLEL: case MATCH_PAR_DUP: printf ("operand%d", XINT (x, 0)); return; case MATCH_SCRATCH: gen_rtx_scratch (x, subroutine_type); return; case ADDRESS: fatal ("ADDRESS expression code used in named instruction pattern"); case PC: printf ("pc_rtx"); return; case CLOBBER: if (REG_P (XEXP (x, 0))) { printf ("gen_hard_reg_clobber (%smode, %i)", GET_MODE_NAME (GET_MODE (XEXP (x, 0))), REGNO (XEXP (x, 0))); return; } break; case CC0: printf ("cc0_rtx"); return; case CONST_INT: if (INTVAL (x) == 0) printf ("const0_rtx"); else if (INTVAL (x) == 1) printf ("const1_rtx"); else if (INTVAL (x) == -1) printf ("constm1_rtx"); else if (-MAX_SAVED_CONST_INT <= INTVAL (x) && INTVAL (x) <= MAX_SAVED_CONST_INT) printf ("const_int_rtx[MAX_SAVED_CONST_INT + (%d)]", (int) INTVAL (x)); else if (INTVAL (x) == STORE_FLAG_VALUE) printf ("const_true_rtx"); 
else { printf ("GEN_INT ("); printf (HOST_WIDE_INT_PRINT_DEC_C, INTVAL (x)); printf (")"); } return; case CONST_DOUBLE: /* These shouldn't be written in MD files. Instead, the appropriate routines in varasm.c should be called. */ gcc_unreachable (); default: break; } printf ("gen_rtx_"); print_code (code); printf (" (%smode", GET_MODE_NAME (GET_MODE (x))); fmt = GET_RTX_FORMAT (code); len = GET_RTX_LENGTH (code); for (i = 0; i < len; i++) { if (fmt[i] == '0') break; printf (",\n\t"); switch (fmt[i]) { case 'e': case 'u': gen_exp (XEXP (x, i), subroutine_type, used); break; case 'i': printf ("%u", XINT (x, i)); break; case 's': printf ("\"%s\"", XSTR (x, i)); break; case 'E': { int j; printf ("gen_rtvec (%d", XVECLEN (x, i)); for (j = 0; j < XVECLEN (x, i); j++) { printf (",\n\t\t"); gen_exp (XVECEXP (x, i, j), subroutine_type, used); } printf (")"); break; } default: gcc_unreachable (); } } printf (")"); } /* Generate the `gen_...' function for a DEFINE_INSN. */ static void gen_insn (rtx insn, int lineno) { int operands; int i; /* See if the pattern for this insn ends with a group of CLOBBERs of (hard) registers or MATCH_SCRATCHes. If so, store away the information for later. */ if (XVEC (insn, 1)) { int has_hard_reg = 0; for (i = XVECLEN (insn, 1) - 1; i > 0; i--) { if (GET_CODE (XVECEXP (insn, 1, i)) != CLOBBER) break; if (REG_P (XEXP (XVECEXP (insn, 1, i), 0))) has_hard_reg = 1; else if (GET_CODE (XEXP (XVECEXP (insn, 1, i), 0)) != MATCH_SCRATCH) break; } if (i != XVECLEN (insn, 1) - 1) { struct clobber_pat *p; struct clobber_ent *link = XNEW (struct clobber_ent); int j; link->code_number = insn_code_number; /* See if any previous CLOBBER_LIST entry is the same as this one. 
*/ for (p = clobber_list; p; p = p->next) { if (p->first_clobber != i + 1 || XVECLEN (p->pattern, 1) != XVECLEN (insn, 1)) continue; for (j = i + 1; j < XVECLEN (insn, 1); j++) { rtx old = XEXP (XVECEXP (p->pattern, 1, j), 0); rtx new = XEXP (XVECEXP (insn, 1, j), 0); /* OLD and NEW are the same if both are to be a SCRATCH of the same mode, or if both are registers of the same mode and number. */ if (! (GET_MODE (old) == GET_MODE (new) && ((GET_CODE (old) == MATCH_SCRATCH && GET_CODE (new) == MATCH_SCRATCH) || (REG_P (old) && REG_P (new) && REGNO (old) == REGNO (new))))) break; } if (j == XVECLEN (insn, 1)) break; } if (p == 0) { p = XNEW (struct clobber_pat); p->insns = 0; p->pattern = insn; p->first_clobber = i + 1; p->next = clobber_list; p->has_hard_reg = has_hard_reg; clobber_list = p; } link->next = p->insns; p->insns = link; } } /* Don't mention instructions whose names are the null string or begin with '*'. They are in the machine description just to be recognized. */ if (XSTR (insn, 0)[0] == 0 || XSTR (insn, 0)[0] == '*') return; printf ("/* %s:%d */\n", read_rtx_filename, lineno); /* Find out how many operands this function has. */ operands = max_operand_vec (insn, 1); if (max_dup_opno >= operands) fatal ("match_dup operand number has no match_operand"); /* Output the function name and argument declarations. */ printf ("rtx\ngen_%s (", XSTR (insn, 0)); if (operands) for (i = 0; i < operands; i++) if (i) printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i); else printf ("rtx operand%d ATTRIBUTE_UNUSED", i); else printf ("void"); printf (")\n"); printf ("{\n"); /* Output code to construct and return the rtl for the instruction body. 
*/ if (XVECLEN (insn, 1) == 1) { printf (" return "); gen_exp (XVECEXP (insn, 1, 0), DEFINE_INSN, NULL); printf (";\n}\n\n"); } else { printf (" return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (%d", XVECLEN (insn, 1)); for (i = 0; i < XVECLEN (insn, 1); i++) { printf (",\n\t\t"); gen_exp (XVECEXP (insn, 1, i), DEFINE_INSN, NULL); } printf ("));\n}\n\n"); } } /* Generate the `gen_...' function for a DEFINE_EXPAND. */ static void gen_expand (rtx expand) { int operands; int i; if (strlen (XSTR (expand, 0)) == 0) fatal ("define_expand lacks a name"); if (XVEC (expand, 1) == 0) fatal ("define_expand for %s lacks a pattern", XSTR (expand, 0)); /* Find out how many operands this function has. */ operands = max_operand_vec (expand, 1); /* Output the function name and argument declarations. */ printf ("rtx\ngen_%s (", XSTR (expand, 0)); if (operands) for (i = 0; i < operands; i++) if (i) printf (",\n\trtx operand%d", i); else printf ("rtx operand%d", i); else printf ("void"); printf (")\n"); printf ("{\n"); /* If we don't have any C code to write, only one insn is being written, and no MATCH_DUPs are present, we can just return the desired insn like we do for a DEFINE_INSN. This saves memory. */ if ((XSTR (expand, 3) == 0 || *XSTR (expand, 3) == '\0') && operands > max_dup_opno && XVECLEN (expand, 1) == 1) { printf (" return "); gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL); printf (";\n}\n\n"); return; } /* For each operand referred to only with MATCH_DUPs, make a local variable. */ for (i = operands; i <= max_dup_opno; i++) printf (" rtx operand%d;\n", i); for (; i <= max_scratch_opno; i++) printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i); printf (" rtx _val = 0;\n"); printf (" start_sequence ();\n"); /* The fourth operand of DEFINE_EXPAND is some code to be executed before the actual construction. This code expects to refer to `operands' just as the output-code in a DEFINE_INSN does, but here `operands' is an automatic array. 
So copy the operand values there before executing it. */ if (XSTR (expand, 3) && *XSTR (expand, 3)) { printf (" {\n"); if (operands > 0 || max_dup_opno >= 0 || max_scratch_opno >= 0) printf (" rtx operands[%d];\n", MAX (operands, MAX (max_scratch_opno, max_dup_opno) + 1)); /* Output code to copy the arguments into `operands'. */ for (i = 0; i < operands; i++) printf (" operands[%d] = operand%d;\n", i, i); /* Output the special code to be executed before the sequence is generated. */ print_rtx_ptr_loc (XSTR (expand, 3)); printf ("%s\n", XSTR (expand, 3)); /* Output code to copy the arguments back out of `operands' (unless we aren't going to use them at all). */ if (XVEC (expand, 1) != 0) { for (i = 0; i < operands; i++) printf (" operand%d = operands[%d];\n", i, i); for (; i <= max_dup_opno; i++) printf (" operand%d = operands[%d];\n", i, i); for (; i <= max_scratch_opno; i++) printf (" operand%d = operands[%d];\n", i, i); } printf (" }\n"); } /* Output code to construct the rtl for the instruction bodies. Use emit_insn to add them to the sequence being accumulated. But don't do this if the user's code has set `no_more' nonzero. 
*/ for (i = 0; i < XVECLEN (expand, 1); i++) { rtx next = XVECEXP (expand, 1, i); if ((GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC) || (GET_CODE (next) == PARALLEL && ((GET_CODE (XVECEXP (next, 0, 0)) == SET && GET_CODE (SET_DEST (XVECEXP (next, 0, 0))) == PC) || GET_CODE (XVECEXP (next, 0, 0)) == RETURN)) || GET_CODE (next) == RETURN) printf (" emit_jump_insn ("); else if ((GET_CODE (next) == SET && GET_CODE (SET_SRC (next)) == CALL) || GET_CODE (next) == CALL || (GET_CODE (next) == PARALLEL && GET_CODE (XVECEXP (next, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (next, 0, 0))) == CALL) || (GET_CODE (next) == PARALLEL && GET_CODE (XVECEXP (next, 0, 0)) == CALL)) printf (" emit_call_insn ("); else if (LABEL_P (next)) printf (" emit_label ("); else if (GET_CODE (next) == MATCH_OPERAND || GET_CODE (next) == MATCH_DUP || GET_CODE (next) == MATCH_OPERATOR || GET_CODE (next) == MATCH_OP_DUP || GET_CODE (next) == MATCH_PARALLEL || GET_CODE (next) == MATCH_PAR_DUP || GET_CODE (next) == PARALLEL) printf (" emit ("); else printf (" emit_insn ("); gen_exp (next, DEFINE_EXPAND, NULL); printf (");\n"); if (GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC && GET_CODE (SET_SRC (next)) == LABEL_REF) printf (" emit_barrier ();"); } /* Call `get_insns' to extract the list of all the insns emitted within this gen_... function. */ printf (" _val = get_insns ();\n"); printf (" end_sequence ();\n"); printf (" return _val;\n}\n\n"); } /* Like gen_expand, but generates insns resulting from splitting SPLIT. */ static void gen_split (rtx split) { int i; int operands; const char *const name = ((GET_CODE (split) == DEFINE_PEEPHOLE2) ? "peephole2" : "split"); const char *unused; char *used; if (XVEC (split, 0) == 0) fatal ("define_%s (definition %d) lacks a pattern", name, insn_index_number); else if (XVEC (split, 2) == 0) fatal ("define_%s (definition %d) lacks a replacement pattern", name, insn_index_number); /* Find out how many operands this function has. 
*/ max_operand_vec (split, 2); operands = MAX (max_opno, MAX (max_dup_opno, max_scratch_opno)) + 1; unused = (operands == 0 ? " ATTRIBUTE_UNUSED" : ""); used = XCNEWVEC (char, operands); /* Output the prototype, function name and argument declarations. */ if (GET_CODE (split) == DEFINE_PEEPHOLE2) { printf ("extern rtx gen_%s_%d (rtx, rtx *);\n", name, insn_code_number); printf ("rtx\ngen_%s_%d (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands%s)\n", name, insn_code_number, unused); } else { printf ("extern rtx gen_split_%d (rtx, rtx *);\n", insn_code_number); printf ("rtx\ngen_split_%d (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands%s)\n", insn_code_number, unused); } printf ("{\n"); /* Declare all local variables. */ for (i = 0; i < operands; i++) printf (" rtx operand%d;\n", i); printf (" rtx _val = 0;\n"); if (GET_CODE (split) == DEFINE_PEEPHOLE2) output_peephole2_scratches (split); printf (" start_sequence ();\n"); /* The fourth operand of DEFINE_SPLIT is some code to be executed before the actual construction. */ if (XSTR (split, 3)) { print_rtx_ptr_loc (XSTR (split, 3)); printf ("%s\n", XSTR (split, 3)); } /* Output code to copy the arguments back out of `operands' */ for (i = 0; i < operands; i++) printf (" operand%d = operands[%d];\n", i, i); /* Output code to construct the rtl for the instruction bodies. Use emit_insn to add them to the sequence being accumulated. But don't do this if the user's code has set `no_more' nonzero. 
*/ for (i = 0; i < XVECLEN (split, 2); i++) { rtx next = XVECEXP (split, 2, i); if ((GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC) || (GET_CODE (next) == PARALLEL && GET_CODE (XVECEXP (next, 0, 0)) == SET && GET_CODE (SET_DEST (XVECEXP (next, 0, 0))) == PC) || GET_CODE (next) == RETURN) printf (" emit_jump_insn ("); else if ((GET_CODE (next) == SET && GET_CODE (SET_SRC (next)) == CALL) || GET_CODE (next) == CALL || (GET_CODE (next) == PARALLEL && GET_CODE (XVECEXP (next, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (next, 0, 0))) == CALL) || (GET_CODE (next) == PARALLEL && GET_CODE (XVECEXP (next, 0, 0)) == CALL)) printf (" emit_call_insn ("); else if (LABEL_P (next)) printf (" emit_label ("); else if (GET_CODE (next) == MATCH_OPERAND || GET_CODE (next) == MATCH_OPERATOR || GET_CODE (next) == MATCH_PARALLEL || GET_CODE (next) == MATCH_OP_DUP || GET_CODE (next) == MATCH_DUP || GET_CODE (next) == PARALLEL) printf (" emit ("); else printf (" emit_insn ("); gen_exp (next, GET_CODE (split), used); printf (");\n"); if (GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC && GET_CODE (SET_SRC (next)) == LABEL_REF) printf (" emit_barrier ();"); } /* Call `get_insns' to make a list of all the insns emitted within this gen_... function. */ printf (" _val = get_insns ();\n"); printf (" end_sequence ();\n"); printf (" return _val;\n}\n\n"); free (used); } /* Write a function, `add_clobbers', that is given a PARALLEL of sufficient size for the insn and an INSN_CODE, and inserts the required CLOBBERs at the end of the vector. 
*/ static void output_add_clobbers (void) { struct clobber_pat *clobber; struct clobber_ent *ent; int i; printf ("\n\nvoid\nadd_clobbers (rtx pattern ATTRIBUTE_UNUSED, int insn_code_number)\n"); printf ("{\n"); printf (" switch (insn_code_number)\n"); printf (" {\n"); for (clobber = clobber_list; clobber; clobber = clobber->next) { for (ent = clobber->insns; ent; ent = ent->next) printf (" case %d:\n", ent->code_number); for (i = clobber->first_clobber; i < XVECLEN (clobber->pattern, 1); i++) { printf (" XVECEXP (pattern, 0, %d) = ", i); gen_exp (XVECEXP (clobber->pattern, 1, i), GET_CODE (clobber->pattern), NULL); printf (";\n"); } printf (" break;\n\n"); } printf (" default:\n"); printf (" gcc_unreachable ();\n"); printf (" }\n"); printf ("}\n"); } /* Write a function, `added_clobbers_hard_reg_p' that is given an insn_code number that will have clobbers added (as indicated by `recog') and returns 1 if those include a clobber of a hard reg or 0 if all of them just clobber SCRATCH. */ static void output_added_clobbers_hard_reg_p (void) { struct clobber_pat *clobber; struct clobber_ent *ent; int clobber_p, used; printf ("\n\nint\nadded_clobbers_hard_reg_p (int insn_code_number)\n"); printf ("{\n"); printf (" switch (insn_code_number)\n"); printf (" {\n"); for (clobber_p = 0; clobber_p <= 1; clobber_p++) { used = 0; for (clobber = clobber_list; clobber; clobber = clobber->next) if (clobber->has_hard_reg == clobber_p) for (ent = clobber->insns; ent; ent = ent->next) { printf (" case %d:\n", ent->code_number); used++; } if (used) printf (" return %d;\n\n", clobber_p); } printf (" default:\n"); printf (" gcc_unreachable ();\n"); printf (" }\n"); printf ("}\n"); } /* Generate code to invoke find_free_register () as needed for the scratch registers used by the peephole2 pattern in SPLIT. 
*/ static void output_peephole2_scratches (rtx split) { int i; int insn_nr = 0; printf (" HARD_REG_SET _regs_allocated;\n"); printf (" CLEAR_HARD_REG_SET (_regs_allocated);\n"); for (i = 0; i < XVECLEN (split, 0); i++) { rtx elt = XVECEXP (split, 0, i); if (GET_CODE (elt) == MATCH_SCRATCH) { int last_insn_nr = insn_nr; int cur_insn_nr = insn_nr; int j; for (j = i + 1; j < XVECLEN (split, 0); j++) if (GET_CODE (XVECEXP (split, 0, j)) == MATCH_DUP) { if (XINT (XVECEXP (split, 0, j), 0) == XINT (elt, 0)) last_insn_nr = cur_insn_nr; } else if (GET_CODE (XVECEXP (split, 0, j)) != MATCH_SCRATCH) cur_insn_nr++; printf (" if ((operands[%d] = peep2_find_free_register (%d, %d, \"%s\", %smode, &_regs_allocated)) == NULL_RTX)\n\ return NULL;\n", XINT (elt, 0), insn_nr, last_insn_nr, XSTR (elt, 1), GET_MODE_NAME (GET_MODE (elt))); } else if (GET_CODE (elt) != MATCH_DUP) insn_nr++; } } int main (int argc, char **argv) { rtx desc; progname = "genemit"; if (init_md_reader_args (argc, argv) != SUCCESS_EXIT_CODE) return (FATAL_EXIT_CODE); /* Assign sequential codes to all entries in the machine description in parallel with the tables in insn-output.c. */ insn_code_number = 0; insn_index_number = 0; printf ("/* Generated automatically by the program `genemit'\n\ from the machine description file `md'. 
*/\n\n"); printf ("#include \"config.h\"\n"); printf ("#include \"system.h\"\n"); printf ("#include \"coretypes.h\"\n"); printf ("#include \"tm.h\"\n"); printf ("#include \"rtl.h\"\n"); printf ("#include \"tm_p.h\"\n"); printf ("#include \"function.h\"\n"); printf ("#include \"expr.h\"\n"); printf ("#include \"optabs.h\"\n"); printf ("#include \"real.h\"\n"); printf ("#include \"flags.h\"\n"); printf ("#include \"output.h\"\n"); printf ("#include \"insn-config.h\"\n"); printf ("#include \"hard-reg-set.h\"\n"); printf ("#include \"recog.h\"\n"); printf ("#include \"resource.h\"\n"); printf ("#include \"reload.h\"\n"); printf ("#include \"toplev.h\"\n"); printf ("#include \"tm-constrs.h\"\n"); printf ("#include \"ggc.h\"\n\n"); printf ("#include \"basic-block.h\"\n\n"); printf ("#define FAIL return (end_sequence (), _val)\n"); printf ("#define DONE return (_val = get_insns (), end_sequence (), _val)\n\n"); /* Read the machine description. */ while (1) { int line_no; desc = read_md_rtx (&line_no, &insn_code_number); if (desc == NULL) break; switch (GET_CODE (desc)) { case DEFINE_INSN: gen_insn (desc, line_no); break; case DEFINE_EXPAND: printf ("/* %s:%d */\n", read_rtx_filename, line_no); gen_expand (desc); break; case DEFINE_SPLIT: printf ("/* %s:%d */\n", read_rtx_filename, line_no); gen_split (desc); break; case DEFINE_PEEPHOLE2: printf ("/* %s:%d */\n", read_rtx_filename, line_no); gen_split (desc); break; default: break; } ++insn_index_number; } /* Write out the routines to add CLOBBERs to a pattern and say whether they clobber a hard reg. */ output_add_clobbers (); output_added_clobbers_hard_reg_p (); fflush (stdout); return (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE); }
gpl-2.0
fengshao0907/fastsocket
kernel/arch/powerpc/platforms/pseries/eeh.c
21
38295
/* * eeh.c * Copyright IBM Corporation 2001, 2005, 2006 * Copyright Dave Engebretsen & Todd Inglett 2001 * Copyright Linas Vepstas 2005, 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> */ #undef DEBUG #include <linux/delay.h> #include <linux/init.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/rbtree.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/of.h> #include <asm/atomic.h> #include <asm/eeh.h> #include <asm/eeh_event.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> #include <asm/rtas.h> /** Overview: * EEH, or "Extended Error Handling" is a PCI bridge technology for * dealing with PCI bus errors that can't be dealt with within the * usual PCI framework, except by check-stopping the CPU. Systems * that are designed for high-availability/reliability cannot afford * to crash due to a "mere" PCI error, thus the need for EEH. * An EEH-capable bridge operates by converting a detected error * into a "slot freeze", taking the PCI adapter off-line, making * the slot behave, from the OS'es point of view, as if the slot * were "empty": all reads return 0xff's and all writes are silently * ignored. 
EEH slot isolation events can be triggered by parity * errors on the address or data busses (e.g. during posted writes), * which in turn might be caused by low voltage on the bus, dust, * vibration, humidity, radioactivity or plain-old failed hardware. * * Note, however, that one of the leading causes of EEH slot * freeze events are buggy device drivers, buggy device microcode, * or buggy device hardware. This is because any attempt by the * device to bus-master data to a memory address that is not * assigned to the device will trigger a slot freeze. (The idea * is to prevent devices-gone-wild from corrupting system memory). * Buggy hardware/drivers will have a miserable time co-existing * with EEH. * * Ideally, a PCI device driver, when suspecting that an isolation * event has occured (e.g. by reading 0xff's), will then ask EEH * whether this is the case, and then take appropriate steps to * reset the PCI slot, the PCI device, and then resume operations. * However, until that day, the checking is done here, with the * eeh_check_failure() routine embedded in the MMIO macros. If * the slot is found to be isolated, an "EEH Event" is synthesized * and sent out for processing. */ /* If a device driver keeps reading an MMIO register in an interrupt * handler after a slot isolation event, it might be broken. * This sets the threshold for how many read attempts we allow * before printing an error message. 
*/ #define EEH_MAX_FAILS 2100000 /* Time to wait for a PCI slot to report status, in milliseconds */ #define PCI_BUS_RESET_WAIT_MSEC (60*1000) /* RTAS tokens */ static int ibm_set_eeh_option; static int ibm_set_slot_reset; static int ibm_read_slot_reset_state; static int ibm_read_slot_reset_state2; static int ibm_slot_error_detail; static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; static int ibm_configure_bridge; static int ibm_configure_pe; int eeh_subsystem_enabled; EXPORT_SYMBOL(eeh_subsystem_enabled); /* Lock to avoid races due to multiple reports of an error */ static DEFINE_SPINLOCK(confirm_error_lock); /* Buffer for reporting slot-error-detail rtas calls. Its here * in BSS, and not dynamically alloced, so that it ends up in * RMO where RTAS can access it. */ static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX]; static DEFINE_SPINLOCK(slot_errbuf_lock); static int eeh_error_buf_size; /* Buffer for reporting pci register dumps. Its here in BSS, and * not dynamically alloced, so that it ends up in RMO where RTAS * can access it. 
*/ #define EEH_PCI_REGS_LOG_LEN 4096 static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN]; /* System monitoring statistics */ static unsigned long no_device; static unsigned long no_dn; static unsigned long no_cfg_addr; static unsigned long ignored_check; static unsigned long total_mmio_ffs; static unsigned long false_positives; static unsigned long slot_resets; #define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE) /* --------------------------------------------------------------- */ /* Below lies the EEH event infrastructure */ static void rtas_slot_error_detail(struct pci_dn *pdn, int severity, char *driver_log, size_t loglen) { int config_addr; unsigned long flags; int rc; /* Log the error with the rtas logger */ spin_lock_irqsave(&slot_errbuf_lock, flags); memset(slot_errbuf, 0, eeh_error_buf_size); /* Use PE configuration address, if present */ config_addr = pdn->eeh_config_addr; if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; rc = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid), virt_to_phys(driver_log), loglen, virt_to_phys(slot_errbuf), eeh_error_buf_size, severity); if (rc == 0) log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0); spin_unlock_irqrestore(&slot_errbuf_lock, flags); } /** * gather_pci_data - copy assorted PCI config space registers to buff * @pdn: device to report data for * @buf: point to buffer in which to log * @len: amount of room in buffer * * This routine captures assorted PCI configuration space data, * and puts them into a buffer for RTAS error logging. 
*/ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len) { struct pci_dev *dev = pdn->pcidev; u32 cfg; int cap, i; int n = 0; n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name); printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name); rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg); n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg); rtas_read_config(pdn, PCI_COMMAND, 4, &cfg); n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg); if (!dev) { printk(KERN_WARNING "EEH: no PCI device for this of node\n"); return n; } /* Gather bridge-specific registers */ if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { rtas_read_config(pdn, PCI_SEC_STATUS, 2, &cfg); n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg); rtas_read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg); n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg); } /* Dump out the PCI-X command and status regs */ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (cap) { rtas_read_config(pdn, cap, 4, &cfg); n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg); rtas_read_config(pdn, cap+4, 4, &cfg); n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg); } /* If PCI-E capable, dump PCI-E cap 10, and the AER */ cap = pci_find_capability(dev, PCI_CAP_ID_EXP); if (cap) { n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); printk(KERN_WARNING "EEH: PCI-E capabilities and status follow:\n"); for (i=0; i<=8; i++) { rtas_read_config(pdn, cap+4*i, 4, &cfg); n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg); } cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (cap) { n += scnprintf(buf+n, len-n, 
"pci-e AER:\n"); printk(KERN_WARNING "EEH: PCI-E AER capability register set follows:\n"); for (i=0; i<14; i++) { rtas_read_config(pdn, cap+4*i, 4, &cfg); n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg); } } } /* Gather status on devices under the bridge */ if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { struct device_node *dn; for_each_child_of_node(pdn->node, dn) { pdn = PCI_DN(dn); if (pdn) n += gather_pci_data(pdn, buf+n, len-n); } } return n; } void eeh_slot_error_detail(struct pci_dn *pdn, int severity) { size_t loglen = 0; pci_regs_buf[0] = 0; rtas_pci_enable(pdn, EEH_THAW_MMIO); rtas_configure_bridge(pdn); eeh_restore_bars(pdn); loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); } /** * read_slot_reset_state - Read the reset state of a device node's slot * @dn: device node to read * @rets: array to return results in */ static int read_slot_reset_state(struct pci_dn *pdn, int rets[]) { int token, outputs; int config_addr; if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { token = ibm_read_slot_reset_state2; outputs = 4; } else { token = ibm_read_slot_reset_state; rets[2] = 0; /* fake PE Unavailable info */ outputs = 3; } /* Use PE configuration address, if present */ config_addr = pdn->eeh_config_addr; if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; return rtas_call(token, 3, outputs, rets, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid)); } /** * eeh_wait_for_slot_status - returns error status of slot * @pdn pci device node * @max_wait_msecs maximum number to millisecs to wait * * Return negative value if a permanent error, else return * Partition Endpoint (PE) status value. * * If @max_wait_msecs is positive, then this routine will * sleep until a valid status can be obtained, or until * the max allowed wait time is exceeded, in which case * a -2 is returned. 
*/ int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs) { int rc; int rets[3]; int mwait; while (1) { rc = read_slot_reset_state(pdn, rets); if (rc) return rc; if (rets[1] == 0) return -1; /* EEH is not supported */ if (rets[0] != 5) return rets[0]; /* return actual status */ if (rets[2] == 0) return -1; /* permanently unavailable */ if (max_wait_msecs <= 0) break; mwait = rets[2]; if (mwait <= 0) { printk (KERN_WARNING "EEH: Firmware returned bad wait value=%d\n", mwait); mwait = 1000; } else if (mwait > 300*1000) { printk (KERN_WARNING "EEH: Firmware is taking too long, time=%d\n", mwait); mwait = 300*1000; } max_wait_msecs -= mwait; msleep (mwait); } printk(KERN_WARNING "EEH: Timed out waiting for slot status\n"); return -2; } /** * eeh_token_to_phys - convert EEH address token to phys address * @token i/o token, should be address in the form 0xA.... */ static inline unsigned long eeh_token_to_phys(unsigned long token) { pte_t *ptep; unsigned long pa; ptep = find_linux_pte(init_mm.pgd, token); if (!ptep) return token; pa = pte_pfn(*ptep) << PAGE_SHIFT; return pa | (token & (PAGE_SIZE-1)); } /** * Return the "partitionable endpoint" (pe) under which this device lies */ struct device_node * find_device_pe(struct device_node *dn) { while ((dn->parent) && PCI_DN(dn->parent) && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) { dn = dn->parent; } return dn; } /** Mark all devices that are children of this device as failed. * Mark the device driver too, so that it can see the failure * immediately; this is critical, since some drivers poll * status registers in interrupts ... If a driver is polling, * and the slot is frozen, then the driver can deadlock in * an interrupt context, which is bad. 
*/ static void __eeh_mark_slot(struct device_node *parent, int mode_flag) { struct device_node *dn; for_each_child_of_node(parent, dn) { if (PCI_DN(dn)) { /* Mark the pci device driver too */ struct pci_dev *dev = PCI_DN(dn)->pcidev; PCI_DN(dn)->eeh_mode |= mode_flag; if (dev && dev->driver) dev->error_state = pci_channel_io_frozen; __eeh_mark_slot(dn, mode_flag); } } } void eeh_mark_slot (struct device_node *dn, int mode_flag) { struct pci_dev *dev; dn = find_device_pe (dn); /* Back up one, since config addrs might be shared */ if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) dn = dn->parent; PCI_DN(dn)->eeh_mode |= mode_flag; /* Mark the pci device too */ dev = PCI_DN(dn)->pcidev; if (dev) dev->error_state = pci_channel_io_frozen; __eeh_mark_slot(dn, mode_flag); } static void __eeh_clear_slot(struct device_node *parent, int mode_flag) { struct device_node *dn; for_each_child_of_node(parent, dn) { if (PCI_DN(dn)) { PCI_DN(dn)->eeh_mode &= ~mode_flag; PCI_DN(dn)->eeh_check_count = 0; __eeh_clear_slot(dn, mode_flag); } } } void eeh_clear_slot (struct device_node *dn, int mode_flag) { unsigned long flags; spin_lock_irqsave(&confirm_error_lock, flags); dn = find_device_pe (dn); /* Back up one, since config addrs might be shared */ if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) dn = dn->parent; PCI_DN(dn)->eeh_mode &= ~mode_flag; PCI_DN(dn)->eeh_check_count = 0; __eeh_clear_slot(dn, mode_flag); spin_unlock_irqrestore(&confirm_error_lock, flags); } void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset) { struct device_node *dn; for_each_child_of_node(parent, dn) { if (PCI_DN(dn)) { struct pci_dev *dev = PCI_DN(dn)->pcidev; if (dev && dev->driver) *freset |= dev->needs_freset; __eeh_set_pe_freset(dn, freset); } } } void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) { struct pci_dev *dev; dn = find_device_pe(dn); /* Back up one, since config addrs might be shared */ if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) dn = 
dn->parent; dev = PCI_DN(dn)->pcidev; if (dev) *freset |= dev->needs_freset; __eeh_set_pe_freset(dn, freset); } /** * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze * @dn device node * @dev pci device, if known * * Check for an EEH failure for the given device node. Call this * routine if the result of a read was all 0xff's and you want to * find out if this is due to an EEH slot freeze. This routine * will query firmware for the EEH status. * * Returns 0 if there has not been an EEH error; otherwise returns * a non-zero value and queues up a slot isolation event notification. * * It is safe to call this routine in an interrupt context. */ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) { int ret; int rets[3]; unsigned long flags; struct pci_dn *pdn; int rc = 0; const char *location; total_mmio_ffs++; if (!eeh_subsystem_enabled) return 0; if (!dn) { no_dn++; return 0; } dn = find_device_pe(dn); pdn = PCI_DN(dn); /* Access to IO BARs might get this far and still not want checking. */ if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) || pdn->eeh_mode & EEH_MODE_NOCHECK) { ignored_check++; pr_debug("EEH: Ignored check (%x) for %s %s\n", pdn->eeh_mode, eeh_pci_name(dev), dn->full_name); return 0; } if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) { no_cfg_addr++; return 0; } /* If we already have a pending isolation event for this * slot, we know it's bad already, we don't need to check. * Do this checking under a lock; as multiple PCI devices * in one slot might report errors simultaneously, and we * only want one error recovery routine running. 
*/ spin_lock_irqsave(&confirm_error_lock, flags); rc = 1; if (pdn->eeh_mode & EEH_MODE_ISOLATED) { pdn->eeh_check_count ++; if (pdn->eeh_check_count % EEH_MAX_FAILS == 0) { location = of_get_property(dn, "ibm,loc-code", NULL); printk (KERN_ERR "EEH: %d reads ignored for recovering device at " "location=%s driver=%s pci addr=%s\n", pdn->eeh_check_count, location, eeh_driver_name(dev), eeh_pci_name(dev)); printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n", eeh_driver_name(dev)); dump_stack(); } goto dn_unlock; } /* * Now test for an EEH failure. This is VERY expensive. * Note that the eeh_config_addr may be a parent device * in the case of a device behind a bridge, or it may be * function zero of a multi-function device. * In any case they must share a common PHB. */ ret = read_slot_reset_state(pdn, rets); /* If the call to firmware failed, punt */ if (ret != 0) { printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n", ret, dn->full_name); false_positives++; pdn->eeh_false_positives ++; rc = 0; goto dn_unlock; } /* Note that config-io to empty slots may fail; * they are empty when they don't have children. */ if ((rets[0] == 5) && (rets[2] == 0) && (dn->child == NULL)) { false_positives++; pdn->eeh_false_positives ++; rc = 0; goto dn_unlock; } /* If EEH is not supported on this device, punt. */ if (rets[1] != 1) { printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n", ret, dn->full_name); false_positives++; pdn->eeh_false_positives ++; rc = 0; goto dn_unlock; } /* If not the kind of error we know about, punt. */ if (rets[0] != 1 && rets[0] != 2 && rets[0] != 4 && rets[0] != 5) { false_positives++; pdn->eeh_false_positives ++; rc = 0; goto dn_unlock; } slot_resets++; /* Avoid repeated reports of this failure, including problems * with other functions on this device, and functions under * bridges. 
*/ eeh_mark_slot (dn, EEH_MODE_ISOLATED); spin_unlock_irqrestore(&confirm_error_lock, flags); eeh_send_failure_event (dn, dev); /* Most EEH events are due to device driver bugs. Having * a stack trace will help the device-driver authors figure * out what happened. So print that out. */ dump_stack(); return 1; dn_unlock: spin_unlock_irqrestore(&confirm_error_lock, flags); return rc; } EXPORT_SYMBOL_GPL(eeh_dn_check_failure); /** * eeh_check_failure - check if all 1's data is due to EEH slot freeze * @token i/o token, should be address in the form 0xA.... * @val value, should be all 1's (XXX why do we need this arg??) * * Check for an EEH failure at the given token address. Call this * routine if the result of a read was all 0xff's and you want to * find out if this is due to an EEH slot freeze event. This routine * will query firmware for the EEH status. * * Note this routine is safe to call in an interrupt context. */ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) { unsigned long addr; struct pci_dev *dev; struct device_node *dn; /* Finding the phys addr + pci device; this is pretty quick. 
*/ addr = eeh_token_to_phys((unsigned long __force) token); dev = pci_get_device_by_addr(addr); if (!dev) { no_device++; return val; } dn = pci_device_to_OF_node(dev); eeh_dn_check_failure (dn, dev); pci_dev_put(dev); return val; } EXPORT_SYMBOL(eeh_check_failure); /* ------------------------------------------------------------- */ /* The code below deals with error recovery */ /** * rtas_pci_enable - enable MMIO or DMA transfers for this slot * @pdn pci device node */ int rtas_pci_enable(struct pci_dn *pdn, int function) { int config_addr; int rc; /* Use PE configuration address, if present */ config_addr = pdn->eeh_config_addr; if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; rc = rtas_call(ibm_set_eeh_option, 4, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid), function); if (rc) printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n", function, rc, pdn->node->full_name); rc = eeh_wait_for_slot_status (pdn, PCI_BUS_RESET_WAIT_MSEC); if ((rc == 4) && (function == EEH_THAW_MMIO)) return 0; return rc; } /** * rtas_pci_slot_reset - raises/lowers the pci #RST line * @pdn pci device node * @state: 1/0 to raise/lower the #RST * * Clear the EEH-frozen condition on a slot. This routine * asserts the PCI #RST line if the 'state' argument is '1', * and drops the #RST line if 'state is '0'. This routine is * safe to call in an interrupt context. 
* */ static void rtas_pci_slot_reset(struct pci_dn *pdn, int state) { int config_addr; int rc; BUG_ON (pdn==NULL); if (!pdn->phb) { printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n", pdn->node->full_name); return; } /* Use PE configuration address, if present */ config_addr = pdn->eeh_config_addr; if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid), state); /* Fundamental-reset not supported on this PE, try hot-reset */ if (rc == -8 && state == 3) { rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid), 1); if (rc) printk(KERN_WARNING "EEH: Unable to reset the failed slot," " #RST=%d dn=%s\n", rc, pdn->node->full_name); } } /** * pcibios_set_pcie_slot_reset - Set PCI-E reset state * @dev: pci device struct * @state: reset state to enter * * Return value: * 0 if success **/ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { struct device_node *dn = pci_device_to_OF_node(dev); struct pci_dn *pdn = PCI_DN(dn); switch (state) { case pcie_deassert_reset: rtas_pci_slot_reset(pdn, 0); break; case pcie_hot_reset: rtas_pci_slot_reset(pdn, 1); break; case pcie_warm_reset: rtas_pci_slot_reset(pdn, 3); break; default: return -EINVAL; }; return 0; } /** * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second * @pdn: pci device node to be reset. */ static void __rtas_set_slot_reset(struct pci_dn *pdn) { unsigned int freset = 0; /* Determine type of EEH reset required for * Partitionable Endpoint, a hot-reset (1) * or a fundamental reset (3). * A fundamental reset required by any device under * Partitionable Endpoint trumps hot-reset. */ eeh_set_pe_freset(pdn->node, &freset); if (freset) rtas_pci_slot_reset(pdn, 3); else rtas_pci_slot_reset(pdn, 1); /* The PCI bus requires that the reset be held high for at least * a 100 milliseconds. 
We wait a bit longer 'just in case'. */ #define PCI_BUS_RST_HOLD_TIME_MSEC 250 msleep (PCI_BUS_RST_HOLD_TIME_MSEC); /* We might get hit with another EEH freeze as soon as the * pci slot reset line is dropped. Make sure we don't miss * these, and clear the flag now. */ eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED); rtas_pci_slot_reset (pdn, 0); /* After a PCI slot has been reset, the PCI Express spec requires * a 1.5 second idle time for the bus to stabilize, before starting * up traffic. */ #define PCI_BUS_SETTLE_TIME_MSEC 1800 msleep (PCI_BUS_SETTLE_TIME_MSEC); } int rtas_set_slot_reset(struct pci_dn *pdn) { int i, rc; /* Take three shots at resetting the bus */ for (i=0; i<3; i++) { __rtas_set_slot_reset(pdn); rc = eeh_wait_for_slot_status(pdn, PCI_BUS_RESET_WAIT_MSEC); if (rc == 0) return 0; if (rc < 0) { printk(KERN_ERR "EEH: unrecoverable slot failure %s\n", pdn->node->full_name); return -1; } printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n", i+1, pdn->node->full_name, rc); } return -1; } /* ------------------------------------------------------- */ /** Save and restore of PCI BARs * * Although firmware will set up BARs during boot, it doesn't * set up device BAR's after a device reset, although it will, * if requested, set up bridge configuration. Thus, we need to * configure the PCI devices ourselves. */ /** * __restore_bars - Restore the Base Address Registers * @pdn: pci device node * * Loads the PCI configuration space base address registers, * the expansion ROM base address, the latency timer, and etc. * from the saved values in the device node. 
*/ static inline void __restore_bars (struct pci_dn *pdn) { int i; u32 cmd; if (NULL==pdn->phb) return; for (i=4; i<10; i++) { rtas_write_config(pdn, i*4, 4, pdn->config_space[i]); } /* 12 == Expansion ROM Address */ rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]); #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) #define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)]) rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1, SAVED_BYTE(PCI_CACHE_LINE_SIZE)); rtas_write_config (pdn, PCI_LATENCY_TIMER, 1, SAVED_BYTE(PCI_LATENCY_TIMER)); /* max latency, min grant, interrupt pin and line */ rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]); /* Restore PERR & SERR bits, some devices require it, don't touch the other command bits */ rtas_read_config(pdn, PCI_COMMAND, 4, &cmd); if (pdn->config_space[1] & PCI_COMMAND_PARITY) cmd |= PCI_COMMAND_PARITY; else cmd &= ~PCI_COMMAND_PARITY; if (pdn->config_space[1] & PCI_COMMAND_SERR) cmd |= PCI_COMMAND_SERR; else cmd &= ~PCI_COMMAND_SERR; rtas_write_config(pdn, PCI_COMMAND, 4, cmd); } /** * eeh_restore_bars - restore the PCI config space info * * This routine performs a recursive walk to the children * of this device as well. */ void eeh_restore_bars(struct pci_dn *pdn) { struct device_node *dn; if (!pdn) return; if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code)) __restore_bars (pdn); for_each_child_of_node(pdn->node, dn) eeh_restore_bars (PCI_DN(dn)); } /** * eeh_save_bars - save device bars * * Save the values of the device bars. Unlike the restore * routine, this routine is *not* recursive. This is because * PCI devices are added individuallly; but, for the restore, * an entire slot is reset at a time. 
*/ static void eeh_save_bars(struct pci_dn *pdn) { int i; if (!pdn ) return; for (i = 0; i < 16; i++) rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]); } void rtas_configure_bridge(struct pci_dn *pdn) { int config_addr; int rc; int token; /* Use PE configuration address, if present */ config_addr = pdn->eeh_config_addr; if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; /* Use new configure-pe function, if supported */ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) token = ibm_configure_pe; else token = ibm_configure_bridge; rc = rtas_call(token, 3, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid)); if (rc) { printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n", rc, pdn->node->full_name); } } /* ------------------------------------------------------------- */ /* The code below deals with enabling EEH for devices during the * early boot sequence. EEH must be enabled before any PCI probing * can be done. */ #define EEH_ENABLE 1 struct eeh_early_enable_info { unsigned int buid_hi; unsigned int buid_lo; }; static int get_pe_addr (int config_addr, struct eeh_early_enable_info *info) { unsigned int rets[3]; int ret; /* Use latest config-addr token on power6 */ if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { /* Make sure we have a PE in hand */ ret = rtas_call (ibm_get_config_addr_info2, 4, 2, rets, config_addr, info->buid_hi, info->buid_lo, 1); if (ret || (rets[0]==0)) return 0; ret = rtas_call (ibm_get_config_addr_info2, 4, 2, rets, config_addr, info->buid_hi, info->buid_lo, 0); if (ret) return 0; return rets[0]; } /* Use older config-addr token on power5 */ if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { ret = rtas_call (ibm_get_config_addr_info, 4, 2, rets, config_addr, info->buid_hi, info->buid_lo, 0); if (ret) return 0; return rets[0]; } return 0; } /* Enable eeh for the given device node. 
*/ static void *early_enable_eeh(struct device_node *dn, void *data) { unsigned int rets[3]; struct eeh_early_enable_info *info = data; int ret; const u32 *class_code = of_get_property(dn, "class-code", NULL); const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL); const u32 *device_id = of_get_property(dn, "device-id", NULL); const u32 *regs; int enable; struct pci_dn *pdn = PCI_DN(dn); pdn->class_code = 0; pdn->eeh_mode = 0; pdn->eeh_check_count = 0; pdn->eeh_freeze_count = 0; pdn->eeh_false_positives = 0; if (!of_device_is_available(dn)) return NULL; /* Ignore bad nodes. */ if (!class_code || !vendor_id || !device_id) return NULL; /* There is nothing to check on PCI to ISA bridges */ if (dn->type && !strcmp(dn->type, "isa")) { pdn->eeh_mode |= EEH_MODE_NOCHECK; return NULL; } pdn->class_code = *class_code; /* Ok... see if this device supports EEH. Some do, some don't, * and the only way to find out is to check each and every one. */ regs = of_get_property(dn, "reg", NULL); if (regs) { /* First register entry is addr (00BBSS00) */ /* Try to enable eeh */ ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, regs[0], info->buid_hi, info->buid_lo, EEH_ENABLE); enable = 0; if (ret == 0) { pdn->eeh_config_addr = regs[0]; /* If the newer, better, ibm,get-config-addr-info is supported, * then use that instead. */ pdn->eeh_pe_config_addr = get_pe_addr(pdn->eeh_config_addr, info); /* Some older systems (Power4) allow the * ibm,set-eeh-option call to succeed even on nodes * where EEH is not supported. Verify support * explicitly. */ ret = read_slot_reset_state(pdn, rets); if ((ret == 0) && (rets[1] == 1)) enable = 1; } if (enable) { eeh_subsystem_enabled = 1; pdn->eeh_mode |= EEH_MODE_SUPPORTED; pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n", dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr); } else { /* This device doesn't support EEH, but it may have an * EEH parent, in which case we mark it as supported. 
*/ if (dn->parent && PCI_DN(dn->parent) && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) { /* Parent supports EEH. */ pdn->eeh_mode |= EEH_MODE_SUPPORTED; pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr; return NULL; } } } else { printk(KERN_WARNING "EEH: %s: unable to get reg property.\n", dn->full_name); } eeh_save_bars(pdn); return NULL; } /* * Initialize EEH by trying to enable it for all of the adapters in the system. * As a side effect we can determine here if eeh is supported at all. * Note that we leave EEH on so failed config cycles won't cause a machine * check. If a user turns off EEH for a particular adapter they are really * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't * grant access to a slot if EEH isn't enabled, and so we always enable * EEH for all slots/all devices. * * The eeh-force-off option disables EEH checking globally, for all slots. * Even if force-off is set, the EEH hardware is still enabled, so that * newer systems can boot. 
*/ void __init eeh_init(void) { struct device_node *phb, *np; struct eeh_early_enable_info info; spin_lock_init(&confirm_error_lock); spin_lock_init(&slot_errbuf_lock); np = of_find_node_by_path("/rtas"); if (np == NULL) return; ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2"); ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state"); ibm_slot_error_detail = rtas_token("ibm,slot-error-detail"); ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); ibm_configure_pe = rtas_token("ibm,configure-pe"); if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) return; eeh_error_buf_size = rtas_token("rtas-error-log-max"); if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { eeh_error_buf_size = 1024; } if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { printk(KERN_WARNING "EEH: rtas-error-log-max is bigger than allocated " "buffer ! (%d vs %d)", eeh_error_buf_size, RTAS_ERROR_LOG_MAX); eeh_error_buf_size = RTAS_ERROR_LOG_MAX; } /* Enable EEH for all adapters. Note that eeh requires buid's */ for (phb = of_find_node_by_name(NULL, "pci"); phb; phb = of_find_node_by_name(phb, "pci")) { unsigned long buid; buid = get_phb_buid(phb); if (buid == 0 || PCI_DN(phb) == NULL) continue; info.buid_lo = BUID_LO(buid); info.buid_hi = BUID_HI(buid); traverse_pci_devices(phb, early_enable_eeh, &info); } if (eeh_subsystem_enabled) printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n"); else printk(KERN_WARNING "EEH: No capable adapters found\n"); } /** * eeh_add_device_early - enable EEH for the indicated device_node * @dn: device node for which to set up EEH * * This routine must be used to perform EEH initialization for PCI * devices that were added after system boot (e.g. hotplug, dlpar). 
* This routine must be called before any i/o is performed to the * adapter (inluding any config-space i/o). * Whether this actually enables EEH or not for this device depends * on the CEC architecture, type of the device, on earlier boot * command-line arguments & etc. */ static void eeh_add_device_early(struct device_node *dn) { struct pci_controller *phb; struct eeh_early_enable_info info; if (!dn || !PCI_DN(dn)) return; phb = PCI_DN(dn)->phb; /* USB Bus children of PCI devices will not have BUID's */ if (NULL == phb || 0 == phb->buid) return; info.buid_hi = BUID_HI(phb->buid); info.buid_lo = BUID_LO(phb->buid); early_enable_eeh(dn, &info); } void eeh_add_device_tree_early(struct device_node *dn) { struct device_node *sib; for_each_child_of_node(dn, sib) eeh_add_device_tree_early(sib); eeh_add_device_early(dn); } EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); /** * eeh_add_device_late - perform EEH initialization for the indicated pci device * @dev: pci device for which to set up EEH * * This routine must be used to complete EEH initialization for PCI * devices that were added after system boot (e.g. hotplug, dlpar). 
*/ static void eeh_add_device_late(struct pci_dev *dev) { struct device_node *dn; struct pci_dn *pdn; if (!dev || !eeh_subsystem_enabled) return; pr_debug("EEH: Adding device %s\n", pci_name(dev)); dn = pci_device_to_OF_node(dev); pdn = PCI_DN(dn); if (pdn->pcidev == dev) { pr_debug("EEH: Already referenced !\n"); return; } WARN_ON(pdn->pcidev); pci_dev_get (dev); pdn->pcidev = dev; pci_addr_cache_insert_device(dev); eeh_sysfs_add_device(dev); } void eeh_add_device_tree_late(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { eeh_add_device_late(dev); if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { struct pci_bus *subbus = dev->subordinate; if (subbus) eeh_add_device_tree_late(subbus); } } } EXPORT_SYMBOL_GPL(eeh_add_device_tree_late); /** * eeh_remove_device - undo EEH setup for the indicated pci device * @dev: pci device to be removed * * This routine should be called when a device is removed from * a running system (e.g. by hotplug or dlpar). It unregisters * the PCI device from the EEH subsystem. I/O errors affecting * this device will no longer be detected after this call; thus, * i/o errors affecting this slot may leave this device unusable. 
*/ static void eeh_remove_device(struct pci_dev *dev) { struct device_node *dn; if (!dev || !eeh_subsystem_enabled) return; /* Unregister the device with the EEH/PCI address search system */ pr_debug("EEH: Removing device %s\n", pci_name(dev)); dn = pci_device_to_OF_node(dev); if (PCI_DN(dn)->pcidev == NULL) { pr_debug("EEH: Not referenced !\n"); return; } PCI_DN(dn)->pcidev = NULL; pci_dev_put (dev); pci_addr_cache_remove_device(dev); eeh_sysfs_remove_device(dev); } void eeh_remove_bus_device(struct pci_dev *dev) { struct pci_bus *bus = dev->subordinate; struct pci_dev *child, *tmp; eeh_remove_device(dev); if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) eeh_remove_bus_device(child); } } EXPORT_SYMBOL_GPL(eeh_remove_bus_device); static int proc_eeh_show(struct seq_file *m, void *v) { if (0 == eeh_subsystem_enabled) { seq_printf(m, "EEH Subsystem is globally disabled\n"); seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs); } else { seq_printf(m, "EEH Subsystem is enabled\n"); seq_printf(m, "no device=%ld\n" "no device node=%ld\n" "no config address=%ld\n" "check not wanted=%ld\n" "eeh_total_mmio_ffs=%ld\n" "eeh_false_positives=%ld\n" "eeh_slot_resets=%ld\n", no_device, no_dn, no_cfg_addr, ignored_check, total_mmio_ffs, false_positives, slot_resets); } return 0; } static int proc_eeh_open(struct inode *inode, struct file *file) { return single_open(file, proc_eeh_show, NULL); } static const struct file_operations proc_eeh_operations = { .open = proc_eeh_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init eeh_init_proc(void) { if (machine_is(pseries)) proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations); return 0; } __initcall(eeh_init_proc);
gpl-2.0
kouril/nfs-utils
support/nfs/xlog.c
21
3978
/* * support/nfs/xlog.c * * This module handles the logging of requests. * * TODO: Merge the two "XXX_log() calls. * * Authors: Donald J. Becker, <becker@super.org> * Rick Sladkey, <jrs@world.std.com> * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Olaf Kirch, <okir@monad.swb.de> * * This software maybe be used for any purpose provided * the above copyright notice is retained. It is supplied * as is, with no warranty expressed or implied. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <unistd.h> #include <signal.h> #include <time.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <stdarg.h> #include <syslog.h> #include "nfslib.h" #undef VERBOSE_PRINTF static int log_stderr = 1; static int log_syslog = 1; static int logging = 0; /* enable/disable DEBUG logs */ static int logmask = 0; /* What will be logged */ static char log_name[256]; /* name of this program */ static int log_pid = -1; /* PID of this program */ static void xlog_toggle(int sig); static struct xlog_debugfac debugnames[] = { { "general", D_GENERAL, }, { "call", D_CALL, }, { "auth", D_AUTH, }, { "parse", D_PARSE, }, { "all", D_ALL, }, { NULL, 0, }, }; void xlog_open(char *progname) { openlog(progname, LOG_PID, LOG_DAEMON); strncpy(log_name, progname, sizeof (log_name) - 1); log_name [sizeof (log_name) - 1] = '\0'; log_pid = getpid(); signal(SIGUSR1, xlog_toggle); signal(SIGUSR2, xlog_toggle); } void xlog_stderr(int on) { log_stderr = on; } void xlog_syslog(int on) { log_syslog = on; } static void xlog_toggle(int sig) { unsigned int tmp, i; if (sig == SIGUSR1) { if ((logmask & D_ALL) && !logging) { xlog(D_GENERAL, "turned on logging"); logging = 1; return; } tmp = ~logmask; logmask |= ((logmask & D_ALL) << 1) | D_GENERAL; for (i = -1, tmp &= logmask; tmp; tmp >>= 1, i++) if (tmp & 1) xlog(D_GENERAL, "turned on logging level %d", i); } else { xlog(D_GENERAL, "turned off logging"); logging = 0; } signal(sig, xlog_toggle); } void xlog_config(int fac, int on) { if (on) 
logmask |= fac; else logmask &= ~fac; if (on) logging = 1; } void xlog_sconfig(char *kind, int on) { struct xlog_debugfac *tbl = debugnames; while (tbl->df_name != NULL && strcasecmp(tbl->df_name, kind)) tbl++; if (!tbl->df_name) { xlog (L_WARNING, "Invalid debug facility: %s\n", kind); return; } xlog_config(tbl->df_fac, on); } int xlog_enabled(int fac) { return (logging && (fac & logmask)); } /* Write something to the system logfile and/or stderr */ void xlog_backend(int kind, const char *fmt, va_list args) { va_list args2; if (!(kind & (L_ALL)) && !(logging && (kind & logmask))) return; if (log_stderr) va_copy(args2, args); if (log_syslog) { switch (kind) { case L_FATAL: vsyslog(LOG_ERR, fmt, args); break; case L_ERROR: vsyslog(LOG_ERR, fmt, args); break; case L_WARNING: vsyslog(LOG_WARNING, fmt, args); break; case L_NOTICE: vsyslog(LOG_NOTICE, fmt, args); break; default: if (!log_stderr) vsyslog(LOG_INFO, fmt, args); break; } } if (log_stderr) { #ifdef VERBOSE_PRINTF time_t now; struct tm *tm; time(&now); tm = localtime(&now); fprintf(stderr, "%s[%d] %04d-%02d-%02d %02d:%02d:%02d ", log_name, log_pid, tm->tm_year+1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); #else fprintf(stderr, "%s: ", log_name); #endif vfprintf(stderr, fmt, args2); fprintf(stderr, "\n"); va_end(args2); } if (kind == L_FATAL) exit(1); } void xlog(int kind, const char* fmt, ...) { va_list args; va_start(args, fmt); xlog_backend(kind, fmt, args); va_end(args); } void xlog_warn(const char* fmt, ...) { va_list args; va_start(args, fmt); xlog_backend(L_WARNING, fmt, args); va_end(args); } void xlog_err(const char* fmt, ...) { va_list args; va_start(args, fmt); xlog_backend(L_FATAL, fmt, args); va_end(args); }
gpl-2.0
110440/fastsocket
kernel/drivers/gpu/drm/radeon/si.c
21
140043
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Alex Deucher */ #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <drm/drmP.h> #include "radeon.h" #include "radeon_asic.h" #include <drm/radeon_drm.h> #include "sid.h" #include "atom.h" #include "si_blit_shaders.h" #define SI_PFP_UCODE_SIZE 2144 #define SI_PM4_UCODE_SIZE 2144 #define SI_CE_UCODE_SIZE 2144 #define SI_RLC_UCODE_SIZE 2048 #define SI_MC_UCODE_SIZE 7769 #define OLAND_MC_UCODE_SIZE 7863 MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); MODULE_FIRMWARE("radeon/TAHITI_me.bin"); MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); MODULE_FIRMWARE("radeon/VERDE_ce.bin"); MODULE_FIRMWARE("radeon/VERDE_mc.bin"); MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); extern bool evergreen_is_display_hung(struct radeon_device *rdev); #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) /** * si_get_xclk - get the 
xclk * * @rdev: radeon_device pointer * * Returns the reference clock used by the gfx engine * (SI). */ u32 si_get_xclk(struct radeon_device *rdev) { u32 reference_clock = rdev->clock.spll.reference_freq; u32 tmp; tmp = RREG32(CG_CLKPIN_CNTL_2); if (tmp & MUX_TCLK_TO_XCLK) return TCLK; tmp = RREG32(CG_CLKPIN_CNTL); if (tmp & XTALIN_DIVIDE) return reference_clock / 4; return reference_clock; } /* get temperature in millidegrees */ int si_get_temp(struct radeon_device *rdev) { u32 temp; int actual_temp = 0; temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> CTF_TEMP_SHIFT; if (temp & 0x200) actual_temp = 255; else actual_temp = temp & 0x1ff; actual_temp = (actual_temp * 1000); return actual_temp; } #define TAHITI_IO_MC_REGS_SIZE 36 static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000006f, 0x03044000}, {0x00000070, 0x0480c018}, {0x00000071, 0x00000040}, {0x00000072, 0x01000000}, {0x00000074, 0x000000ff}, {0x00000075, 0x00143400}, {0x00000076, 0x08ec0800}, {0x00000077, 0x040000cc}, {0x00000079, 0x00000000}, {0x0000007a, 0x21000409}, {0x0000007c, 0x00000000}, {0x0000007d, 0xe8000000}, {0x0000007e, 0x044408a8}, {0x0000007f, 0x00000003}, {0x00000080, 0x00000000}, {0x00000081, 0x01000000}, {0x00000082, 0x02000000}, {0x00000083, 0x00000000}, {0x00000084, 0xe3f3e4f4}, {0x00000085, 0x00052024}, {0x00000087, 0x00000000}, {0x00000088, 0x66036603}, {0x00000089, 0x01000000}, {0x0000008b, 0x1c0a0000}, {0x0000008c, 0xff010000}, {0x0000008e, 0xffffefff}, {0x0000008f, 0xfff3efff}, {0x00000090, 0xfff3efbf}, {0x00000094, 0x00101101}, {0x00000095, 0x00000fff}, {0x00000096, 0x00116fff}, {0x00000097, 0x60010000}, {0x00000098, 0x10010000}, {0x00000099, 0x00006000}, {0x0000009a, 0x00001000}, {0x0000009f, 0x00a77400} }; static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000006f, 0x03044000}, {0x00000070, 0x0480c018}, {0x00000071, 0x00000040}, {0x00000072, 0x01000000}, {0x00000074, 0x000000ff}, {0x00000075, 0x00143400}, {0x00000076, 
0x08ec0800}, {0x00000077, 0x040000cc}, {0x00000079, 0x00000000}, {0x0000007a, 0x21000409}, {0x0000007c, 0x00000000}, {0x0000007d, 0xe8000000}, {0x0000007e, 0x044408a8}, {0x0000007f, 0x00000003}, {0x00000080, 0x00000000}, {0x00000081, 0x01000000}, {0x00000082, 0x02000000}, {0x00000083, 0x00000000}, {0x00000084, 0xe3f3e4f4}, {0x00000085, 0x00052024}, {0x00000087, 0x00000000}, {0x00000088, 0x66036603}, {0x00000089, 0x01000000}, {0x0000008b, 0x1c0a0000}, {0x0000008c, 0xff010000}, {0x0000008e, 0xffffefff}, {0x0000008f, 0xfff3efff}, {0x00000090, 0xfff3efbf}, {0x00000094, 0x00101101}, {0x00000095, 0x00000fff}, {0x00000096, 0x00116fff}, {0x00000097, 0x60010000}, {0x00000098, 0x10010000}, {0x00000099, 0x00006000}, {0x0000009a, 0x00001000}, {0x0000009f, 0x00a47400} }; static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000006f, 0x03044000}, {0x00000070, 0x0480c018}, {0x00000071, 0x00000040}, {0x00000072, 0x01000000}, {0x00000074, 0x000000ff}, {0x00000075, 0x00143400}, {0x00000076, 0x08ec0800}, {0x00000077, 0x040000cc}, {0x00000079, 0x00000000}, {0x0000007a, 0x21000409}, {0x0000007c, 0x00000000}, {0x0000007d, 0xe8000000}, {0x0000007e, 0x044408a8}, {0x0000007f, 0x00000003}, {0x00000080, 0x00000000}, {0x00000081, 0x01000000}, {0x00000082, 0x02000000}, {0x00000083, 0x00000000}, {0x00000084, 0xe3f3e4f4}, {0x00000085, 0x00052024}, {0x00000087, 0x00000000}, {0x00000088, 0x66036603}, {0x00000089, 0x01000000}, {0x0000008b, 0x1c0a0000}, {0x0000008c, 0xff010000}, {0x0000008e, 0xffffefff}, {0x0000008f, 0xfff3efff}, {0x00000090, 0xfff3efbf}, {0x00000094, 0x00101101}, {0x00000095, 0x00000fff}, {0x00000096, 0x00116fff}, {0x00000097, 0x60010000}, {0x00000098, 0x10010000}, {0x00000099, 0x00006000}, {0x0000009a, 0x00001000}, {0x0000009f, 0x00a37400} }; static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000006f, 0x03044000}, {0x00000070, 0x0480c018}, {0x00000071, 0x00000040}, {0x00000072, 0x01000000}, {0x00000074, 0x000000ff}, {0x00000075, 0x00143400}, 
{0x00000076, 0x08ec0800}, {0x00000077, 0x040000cc}, {0x00000079, 0x00000000}, {0x0000007a, 0x21000409}, {0x0000007c, 0x00000000}, {0x0000007d, 0xe8000000}, {0x0000007e, 0x044408a8}, {0x0000007f, 0x00000003}, {0x00000080, 0x00000000}, {0x00000081, 0x01000000}, {0x00000082, 0x02000000}, {0x00000083, 0x00000000}, {0x00000084, 0xe3f3e4f4}, {0x00000085, 0x00052024}, {0x00000087, 0x00000000}, {0x00000088, 0x66036603}, {0x00000089, 0x01000000}, {0x0000008b, 0x1c0a0000}, {0x0000008c, 0xff010000}, {0x0000008e, 0xffffefff}, {0x0000008f, 0xfff3efff}, {0x00000090, 0xfff3efbf}, {0x00000094, 0x00101101}, {0x00000095, 0x00000fff}, {0x00000096, 0x00116fff}, {0x00000097, 0x60010000}, {0x00000098, 0x10010000}, {0x00000099, 0x00006000}, {0x0000009a, 0x00001000}, {0x0000009f, 0x00a17730} }; /* ucode loading */ static int si_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; u32 running, blackout = 0; u32 *io_mc_regs; int i, ucode_size, regs_size; if (!rdev->mc_fw) return -EINVAL; switch (rdev->family) { case CHIP_TAHITI: io_mc_regs = (u32 *)&tahiti_io_mc_regs; ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_PITCAIRN: io_mc_regs = (u32 *)&pitcairn_io_mc_regs; ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_VERDE: default: io_mc_regs = (u32 *)&verde_io_mc_regs; ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_OLAND: io_mc_regs = (u32 *)&oland_io_mc_regs; ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { if (running) { blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); } /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); /* load mc io regs */ for (i = 0; i < regs_size; i++) { WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); WREG32(MC_SEQ_IO_DEBUG_DATA, 
io_mc_regs[(i << 1) + 1]); } /* load the MC ucode */ fw_data = (const __be32 *)rdev->mc_fw->data; for (i = 0; i < ucode_size; i++) WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); /* put the engine back into the active state */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000004); WREG32(MC_SEQ_SUP_CNTL, 0x00000001); /* wait for training to complete */ for (i = 0; i < rdev->usec_timeout; i++) { if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0) break; udelay(1); } for (i = 0; i < rdev->usec_timeout; i++) { if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1) break; udelay(1); } if (running) WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; } static int si_init_microcode(struct radeon_device *rdev) { struct platform_device *pdev; const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; char fw_name[30]; int err; DRM_DEBUG("\n"); pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); err = IS_ERR(pdev); if (err) { printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); return -EINVAL; } switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; rlc_chip_name = "TAHITI"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; rlc_chip_name = "PITCAIRN"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; break; case CHIP_VERDE: chip_name = "VERDE"; rlc_chip_name = "VERDE"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; break; case CHIP_OLAND: chip_name = "OLAND"; rlc_chip_name = "OLAND"; pfp_req_size = 
SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = OLAND_MC_UCODE_SIZE * 4; break; default: BUG(); } DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); if (err) goto out; if (rdev->pfp_fw->size != pfp_req_size) { printk(KERN_ERR "si_cp: Bogus length %zu in firmware \"%s\"\n", rdev->pfp_fw->size, fw_name); err = -EINVAL; goto out; } snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); if (err) goto out; if (rdev->me_fw->size != me_req_size) { printk(KERN_ERR "si_cp: Bogus length %zu in firmware \"%s\"\n", rdev->me_fw->size, fw_name); err = -EINVAL; } snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); if (err) goto out; if (rdev->ce_fw->size != ce_req_size) { printk(KERN_ERR "si_cp: Bogus length %zu in firmware \"%s\"\n", rdev->ce_fw->size, fw_name); err = -EINVAL; } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); if (err) goto out; if (rdev->rlc_fw->size != rlc_req_size) { printk(KERN_ERR "si_rlc: Bogus length %zu in firmware \"%s\"\n", rdev->rlc_fw->size, fw_name); err = -EINVAL; } snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); if (err) goto out; if (rdev->mc_fw->size != mc_req_size) { printk(KERN_ERR "si_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); err = -EINVAL; } out: platform_device_unregister(pdev); if (err) { if (err != -EINVAL) printk(KERN_ERR "si_cp: Failed to load firmware \"%s\"\n", fw_name); release_firmware(rdev->pfp_fw); rdev->pfp_fw = NULL; release_firmware(rdev->me_fw); rdev->me_fw = NULL; 
release_firmware(rdev->ce_fw); rdev->ce_fw = NULL; release_firmware(rdev->rlc_fw); rdev->rlc_fw = NULL; release_firmware(rdev->mc_fw); rdev->mc_fw = NULL; } return err; } /* watermark setup */ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { u32 tmp; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between * the display controllers. The paritioning is done via one of four * preset allocations specified in bits 21:20: * 0 - half lb * 2 - whole lb, other crtc must be disabled */ /* this can get tricky if we have two large displays on a paired group * of crtcs. Ideally for multiple large displays we'd assign them to * non-linked crtcs for maximum line buffer allocation. */ if (radeon_crtc->base.enabled && mode) { if (other_mode) tmp = 0; /* 1/2 */ else tmp = 2; /* whole */ } else tmp = 0; WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, DC_LB_MEMORY_CONFIG(tmp)); if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: default: return 4096 * 2; case 2: return 8192 * 2; } } /* controller not enabled, so no lb used */ return 0; } static u32 si_get_number_of_dram_channels(struct radeon_device *rdev) { u32 tmp = RREG32(MC_SHARED_CHMAP); switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { case 0: default: return 1; case 1: return 2; case 2: return 4; case 3: return 8; case 4: return 3; case 5: return 6; case 6: return 10; case 7: return 12; case 8: return 16; } } struct dce6_wm_params { u32 dram_channels; /* number of dram channels */ u32 yclk; /* bandwidth per dram data pin in kHz */ u32 sclk; /* engine clock in kHz */ u32 disp_clk; /* display clock in kHz */ u32 src_width; /* viewport width */ u32 active_time; /* active display time in ns */ u32 blank_time; /* blank time in ns */ bool interlaced; /* mode is interlaced */ fixed20_12 vsc; /* 
vertical scale ratio */
	u32 num_heads;       /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;         /* line buffer allocated to pipe */
	u32 vtaps;           /* vertical scaler taps */
};

/*
 * dce6_dram_bandwidth - raw DRAM bandwidth available to the chip.
 * yclk is in kHz (see struct comment); dividing by 1000 and multiplying by
 * 4 bytes/channel yields MB/s, derated by a fixed 0.7 efficiency factor.
 */
static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/*
 * dce6_dram_bandwidth_for_display - portion of DRAM bandwidth the display
 * engine may consume on average (fixed 0.3 allocation factor).
 */
static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/*
 * dce6_data_return_bandwidth - display data return path bandwidth:
 * 32 bytes per sclk cycle, derated by a fixed 0.8 efficiency factor.
 */
static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/* DMIF request size in bytes (constant on DCE6). */
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
	return 32;
}

/*
 * dce6_dmif_request_bandwidth - DMIF request bandwidth: the lesser of
 * (request/2 bytes per disp_clk) and (request bytes per sclk), derated
 * by a fixed 0.8 request efficiency.
 */
static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, sclk, bandwidth;
	fixed20_12 a, b1, b2;
	u32 min_bandwidth;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
	b1.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
	b2.full = dfixed_mul(a, sclk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));

	a.full = dfixed_const(min_bandwidth);
	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/*
 * dce6_available_bandwidth - the minimum of the three bandwidth limits
 * above.  Display can use this temporarily but not in average.
 */
static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
{
	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/*
 * dce6_average_bandwidth - average bandwidth the display mode itself
 * consumes: src_width * bytes_per_pixel * vsc over one full line time.
 * DisplayMode should contain the source and destination dimensions,
 * timing, etc.
 */
static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
{
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/*
 * dce6_latency_watermark - worst-case data-fetch latency (ns) the line
 * buffer must be able to hide, extended by the line fill time when the
 * line buffer cannot be refilled within one active period.
 * Returns 0 when no heads are active (avoids divide-by-zero below).
 */
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	/* scaling/interlacing may require up to 4 source lines per output line */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = 
min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* fill in the watermark parameters for this CRTC's mode */
		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		if (rdev->family == CHIP_ARUBA)
			wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			wm.dram_channels = si_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk
		 * NOTE(review): wm still holds the high-clock values here, so
		 * watermark b is computed with the same inputs as watermark a.
		 */
		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time...
		 */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !dce6_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark = watermark (ns) * pixel clock / 16, in pixels */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

/*
 * dce6_bandwidth_update - recompute line buffer splits and watermarks
 * for every CRTC.  CRTCs are handled in linked pairs (i, i+1) because
 * paired controllers share a line buffer.
 */
void dce6_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/*
 * Core functions
 */

/*
 * si_tiling_mode_table_init - program the 32 GB_TILE_MODE registers with
 * per-ASIC tiling layouts.  The table entries are hardware-defined values;
 * do not alter them without the corresponding ASIC addressing docs.
 */
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
	const u32 num_tile_mode_states = 32;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;

	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	if ((rdev->family == CHIP_TAHITI) ||
	    (rdev->family == CHIP_PITCAIRN)) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:  /* non-AA compressed depth or any compressed stencil */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 1:  /* 2xAA/4xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 2:  /* 8xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 8:  /* 1D and 1D Array Surfaces */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 9:  /* Displayable maps. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 10:  /* Display 8bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 11:  /* Display 16bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 12:  /* Display 32bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 13:  /* Thin. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 14:  /* Thin 8 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 15:  /* Thin 16 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 16:  /* Thin 32 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 17:  /* Thin 64 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 21:  /* 8 bpp PRT. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 22:  /* 16 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 23:  /* 32 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 24:  /* 64 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 25:  /* 128 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
						 NUM_BANKS(ADDR_SURF_8_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if ((rdev->family == CHIP_VERDE) ||
		   (rdev->family == CHIP_OLAND)) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:  /* non-AA compressed depth or any compressed stencil */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 1:  /* 2xAA/4xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 2:  /* 8xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 8:  /* 1D and 1D Array Surfaces */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 9:  /* Displayable maps. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 10:  /* Display 8bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 11:  /* Display 16bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 12:  /* Display 32bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 13:  /* Thin. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 14:  /* Thin 8 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 15:  /* Thin 16 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 16:  /* Thin 32 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 17:  /* Thin 64 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 21:  /* 8 bpp PRT. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 22:  /* 16 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 23:  /* 32 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 24:  /* 64 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 25:  /* 128 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
						 NUM_BANKS(ADDR_SURF_8_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else
		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}

/*
 * si_select_se_sh - point GRBM_GFX_INDEX at a specific shader engine /
 * shader array (0xffffffff selects broadcast to all).
 */
static void si_select_se_sh(struct radeon_device 
*rdev, u32 se_num, u32 sh_num) { u32 data = INSTANCE_BROADCAST_WRITES; if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; else if (se_num == 0xffffffff) data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); else if (sh_num == 0xffffffff) data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); else data |= SH_INDEX(sh_num) | SE_INDEX(se_num); WREG32(GRBM_GFX_INDEX, data); } static u32 si_create_bitmask(u32 bit_width) { u32 i, mask = 0; for (i = 0; i < bit_width; i++) { mask <<= 1; mask |= 1; } return mask; } static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) { u32 data, mask; data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); if (data & 1) data &= INACTIVE_CUS_MASK; else data = 0; data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); data >>= INACTIVE_CUS_SHIFT; mask = si_create_bitmask(cu_per_sh); return ~data & mask; } static void si_setup_spi(struct radeon_device *rdev, u32 se_num, u32 sh_per_se, u32 cu_per_sh) { int i, j, k; u32 data, mask, active_cu; for (i = 0; i < se_num; i++) { for (j = 0; j < sh_per_se; j++) { si_select_se_sh(rdev, i, j); data = RREG32(SPI_STATIC_THREAD_MGMT_3); active_cu = si_get_cu_enabled(rdev, cu_per_sh); mask = 1; for (k = 0; k < 16; k++) { mask <<= k; if (active_cu & mask) { data &= ~mask; WREG32(SPI_STATIC_THREAD_MGMT_3, data); break; } } } } si_select_se_sh(rdev, 0xffffffff, 0xffffffff); } static u32 si_get_rb_disabled(struct radeon_device *rdev, u32 max_rb_num, u32 se_num, u32 sh_per_se) { u32 data, mask; data = RREG32(CC_RB_BACKEND_DISABLE); if (data & 1) data &= BACKEND_DISABLE_MASK; else data = 0; data |= RREG32(GC_USER_RB_BACKEND_DISABLE); data >>= BACKEND_DISABLE_SHIFT; mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); return data & mask; } static void si_setup_rb(struct radeon_device *rdev, u32 se_num, u32 sh_per_se, u32 max_rb_num) { int i, j; u32 data, mask; u32 disabled_rbs = 0; u32 enabled_rbs = 0; for (i = 0; i < se_num; i++) { for (j = 0; j < sh_per_se; j++) { 
si_select_se_sh(rdev, i, j); data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); } } si_select_se_sh(rdev, 0xffffffff, 0xffffffff); mask = 1; for (i = 0; i < max_rb_num; i++) { if (!(disabled_rbs & mask)) enabled_rbs |= mask; mask <<= 1; } for (i = 0; i < se_num; i++) { si_select_se_sh(rdev, i, 0xffffffff); data = 0; for (j = 0; j < sh_per_se; j++) { switch (enabled_rbs & 3) { case 1: data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); break; case 2: data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); break; case 3: default: data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); break; } enabled_rbs >>= 2; } WREG32(PA_SC_RASTER_CONFIG, data); } si_select_se_sh(rdev, 0xffffffff, 0xffffffff); } static void si_gpu_init(struct radeon_device *rdev) { u32 gb_addr_config = 0; u32 mc_shared_chmap, mc_arb_ramcfg; u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; int i, j; switch (rdev->family) { case CHIP_TAHITI: rdev->config.si.max_shader_engines = 2; rdev->config.si.max_tile_pipes = 12; rdev->config.si.max_cu_per_sh = 8; rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 12; rdev->config.si.max_gprs = 256; rdev->config.si.max_gs_threads = 32; rdev->config.si.max_hw_contexts = 8; rdev->config.si.sc_prim_fifo_size_frontend = 0x20; rdev->config.si.sc_prim_fifo_size_backend = 0x100; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_PITCAIRN: rdev->config.si.max_shader_engines = 2; rdev->config.si.max_tile_pipes = 8; rdev->config.si.max_cu_per_sh = 5; rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 8; rdev->config.si.max_gprs = 256; rdev->config.si.max_gs_threads = 32; rdev->config.si.max_hw_contexts = 8; 
rdev->config.si.sc_prim_fifo_size_frontend = 0x20; rdev->config.si.sc_prim_fifo_size_backend = 0x100; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_VERDE: default: rdev->config.si.max_shader_engines = 1; rdev->config.si.max_tile_pipes = 4; rdev->config.si.max_cu_per_sh = 5; rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 4; rdev->config.si.max_gprs = 256; rdev->config.si.max_gs_threads = 32; rdev->config.si.max_hw_contexts = 8; rdev->config.si.sc_prim_fifo_size_frontend = 0x20; rdev->config.si.sc_prim_fifo_size_backend = 0x40; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_OLAND: rdev->config.si.max_shader_engines = 1; rdev->config.si.max_tile_pipes = 4; rdev->config.si.max_cu_per_sh = 6; rdev->config.si.max_sh_per_se = 1; rdev->config.si.max_backends_per_se = 2; rdev->config.si.max_texture_channel_caches = 4; rdev->config.si.max_gprs = 256; rdev->config.si.max_gs_threads = 16; rdev->config.si.max_hw_contexts = 8; rdev->config.si.sc_prim_fifo_size_frontend = 0x20; rdev->config.si.sc_prim_fifo_size_backend = 0x40; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; break; } /* Initialize HDP */ for (i = 0, j = 0; i < 32; i++, j += 0x18) { WREG32((0x2c14 + j), 0x00000000); WREG32((0x2c18 + j), 0x00000000); WREG32((0x2c1c + j), 0x00000000); WREG32((0x2c20 + j), 0x00000000); WREG32((0x2c24 + j), 0x00000000); } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); evergreen_fix_pci_max_read_req_size(rdev); WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); mc_shared_chmap = RREG32(MC_SHARED_CHMAP); mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; 
rdev->config.si.mem_max_burst_length_bytes = 256; tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; if (rdev->config.si.mem_row_size_in_kb > 4) rdev->config.si.mem_row_size_in_kb = 4; /* XXX use MC settings? */ rdev->config.si.shader_engine_tile_size = 32; rdev->config.si.num_gpus = 1; rdev->config.si.multi_gpu_tile_size = 64; /* fix up row size */ gb_addr_config &= ~ROW_SIZE_MASK; switch (rdev->config.si.mem_row_size_in_kb) { case 1: default: gb_addr_config |= ROW_SIZE(0); break; case 2: gb_addr_config |= ROW_SIZE(1); break; case 4: gb_addr_config |= ROW_SIZE(2); break; } /* setup tiling info dword. gb_addr_config is not adequate since it does * not have bank info, so create a custom tiling dword. * bits 3:0 num_pipes * bits 7:4 num_banks * bits 11:8 group_size * bits 15:12 row_size */ rdev->config.si.tile_config = 0; switch (rdev->config.si.num_tile_pipes) { case 1: rdev->config.si.tile_config |= (0 << 0); break; case 2: rdev->config.si.tile_config |= (1 << 0); break; case 4: rdev->config.si.tile_config |= (2 << 0); break; case 8: default: /* XXX what about 12? 
*/ rdev->config.si.tile_config |= (3 << 0); break; } switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; break; case 1: /* eight banks */ rdev->config.si.tile_config |= 1 << 4; break; case 2: /* sixteen banks */ default: rdev->config.si.tile_config |= 2 << 4; break; } rdev->config.si.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.si.tile_config |= ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CALC, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); si_tiling_mode_table_init(rdev); si_setup_rb(rdev, rdev->config.si.max_shader_engines, rdev->config.si.max_sh_per_se, rdev->config.si.max_backends_per_se); si_setup_spi(rdev, rdev->config.si.max_shader_engines, rdev->config.si.max_sh_per_se, rdev->config.si.max_cu_per_sh); /* set HW defaults for 3D engine */ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); sx_debug_1 = RREG32(SX_DEBUG_1); WREG32(SX_DEBUG_1, sx_debug_1); WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) | SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) | SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size))); WREG32(VGT_NUM_INSTANCES, 1); WREG32(CP_PERFMON_CNTL, 0); WREG32(SQ_CONFIG, 0); WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | FORCE_EOV_MAX_REZ_CNT(255))); WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) | AUTO_INVLD_EN(ES_AND_GS_AUTO)); WREG32(VGT_GS_VERTEX_REUSE, 
16); WREG32(PA_SC_LINE_STIPPLE_STATE, 0); WREG32(CB_PERFCOUNTER0_SELECT0, 0); WREG32(CB_PERFCOUNTER0_SELECT1, 0); WREG32(CB_PERFCOUNTER1_SELECT0, 0); WREG32(CB_PERFCOUNTER1_SELECT1, 0); WREG32(CB_PERFCOUNTER2_SELECT0, 0); WREG32(CB_PERFCOUNTER2_SELECT1, 0); WREG32(CB_PERFCOUNTER3_SELECT0, 0); WREG32(CB_PERFCOUNTER3_SELECT1, 0); tmp = RREG32(HDP_MISC_CNTL); tmp |= HDP_FLUSH_INVALIDATE_CACHE; WREG32(HDP_MISC_CNTL, tmp); hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); udelay(50); } /* * GPU scratch registers helpers function. */ static void si_scratch_init(struct radeon_device *rdev) { int i; rdev->scratch.num_reg = 7; rdev->scratch.reg_base = SCRATCH_REG0; for (i = 0; i < rdev->scratch.num_reg; i++) { rdev->scratch.free[i] = true; rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); } } void si_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { struct radeon_ring *ring = &rdev->ring[fence->ring]; u64 addr = rdev->fence_drv[fence->ring].gpu_addr; /* flush read cache over gart */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | PACKET3_TC_ACTION_ENA | PACKET3_SH_KCACHE_ACTION_ENA | PACKET3_SH_ICACHE_ACTION_ENA); radeon_ring_write(ring, 0xFFFFFFFF); radeon_ring_write(ring, 0); radeon_ring_write(ring, 10); /* poll interval */ /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); radeon_ring_write(ring, addr & 0xffffffff); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); } /* * 
IB stuff
 */
/* Emit the command-stream packets that make the CP execute an indirect
 * buffer (IB) on the given ring.  Const IBs get a SWITCH_BUFFER preamble
 * and an INDIRECT_BUFFER_CONST packet; normal IBs first record the
 * post-IB read pointer (via the scratch save register or writeback
 * memory, when available) and are followed by a per-VMID GPU cache flush.
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 header;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 dwords for this SET_CONFIG_REG + 4 for the IB
			 * packet + 8 for the trailing flush = rptr after
			 * this IB completes.
			 */
			next_rptr = ring->wptr + 3 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_CONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			/* 5 dwords for WRITE_DATA + 4 + 8, as above */
			next_rptr = ring->wptr + 5 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, (1 << 8));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	/* IB size in dwords, VM id in bits 31:24 (0 = no VM) */
	radeon_ring_write(ring, ib->length_dw | (ib->vm ? (ib->vm->id << 24) : 0));

	if (!ib->is_const_ib) {
		/* flush read cache over gart for this vmid */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
				  PACKET3_TC_ACTION_ENA |
				  PACKET3_SH_KCACHE_ACTION_ENA |
				  PACKET3_SH_ICACHE_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
	}
}

/*
 * CP.
 */
/* Enable or halt the command processor (ME/PFP/CE).  On halt, scratch
 * writeback is disabled and all three CP rings are marked not ready.
 */
static void si_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	}
	udelay(50);
}

/* Upload the PFP, CE and ME microcode images into CP instruction RAM.
 * The CP must be halted first.  Returns 0 or -EINVAL when firmware is
 * missing.
 *
 * NOTE(review): only me_fw and pfp_fw are NULL-checked here, yet ce_fw
 * is dereferenced below — confirm the caller guarantees ce_fw was
 * loaded alongside the others.
 */
static int si_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	si_cp_enable(rdev, false);

	/* PFP */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* CE */
	fw_data = (const __be32 *)rdev->ce_fw->data;
	WREG32(CP_CE_UCODE_ADDR, 0);
	for (i = 0; i < SI_CE_UCODE_SIZE; i++)
		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_CE_UCODE_ADDR, 0);

	/* ME */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_ME_RAM_WADDR, 0);

	/* reset ucode load/read addresses */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_CE_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

/* Bring the CP online: send ME_INITIALIZE, program the CE partition
 * bases, then replay the golden clear-state (continues past this chunk).
 */
static int si_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* 7 dwords of ME_INITIALIZE + 4 for SET_BASE */
	r = radeon_ring_lock(rdev, ring, 7 + 4);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	/* init the CP */
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);

	/* init the CE partitions */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); radeon_ring_write(ring, 0xc000); radeon_ring_write(ring, 0xe000); radeon_ring_unlock_commit(rdev, ring); si_cp_enable(rdev, true); r = radeon_ring_lock(rdev, ring, si_default_size + 10); if (r) { DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); return r; } /* setup clear context state */ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); for (i = 0; i < si_default_size; i++) radeon_ring_write(ring, si_default_state[i]); radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); /* set clear context state */ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, 0x00000316); radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ radeon_ring_unlock_commit(rdev, ring); for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { ring = &rdev->ring[i]; r = radeon_ring_lock(rdev, ring, 2); /* clear the compute context state */ radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); radeon_ring_write(ring, 0); radeon_ring_unlock_commit(rdev, ring); } return 0; } static void si_cp_fini(struct radeon_device *rdev) { struct radeon_ring *ring; si_cp_enable(rdev, false); ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_fini(rdev, ring); radeon_scratch_free(rdev, ring->rptr_save_reg); ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; radeon_ring_fini(rdev, ring); radeon_scratch_free(rdev, ring->rptr_save_reg); ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; radeon_ring_fini(rdev, ring); radeon_scratch_free(rdev, ring->rptr_save_reg); } static int si_cp_resume(struct radeon_device *rdev) { struct radeon_ring *ring; u32 tmp; u32 rb_bufsz; int r; /* Reset 
cp; if cp is reset, then PA, SH, VGT also need to be reset */ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | SOFT_RESET_PA | SOFT_RESET_VGT | SOFT_RESET_SPI | SOFT_RESET_SX)); RREG32(GRBM_SOFT_RESET); mdelay(15); WREG32(GRBM_SOFT_RESET, 0); RREG32(GRBM_SOFT_RESET); WREG32(CP_SEM_WAIT_TIMER, 0x0); WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); /* Set the write pointer delay */ WREG32(CP_RB_WPTR_DELAY, 0); WREG32(CP_DEBUG, 0); WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); /* ring 0 - compute and gfx */ /* Set ring buffer size */ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; rb_bufsz = drm_order(ring->ring_size / 8); tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif WREG32(CP_RB0_CNTL, tmp); /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); ring->wptr = 0; WREG32(CP_RB0_WPTR, ring->wptr); /* set the wb address whether it's enabled or not */ WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); if (rdev->wb.enabled) WREG32(SCRATCH_UMSK, 0xff); else { tmp |= RB_NO_UPDATE; WREG32(SCRATCH_UMSK, 0); } mdelay(1); WREG32(CP_RB0_CNTL, tmp); WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); ring->rptr = RREG32(CP_RB0_RPTR); /* ring1 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; rb_bufsz = drm_order(ring->ring_size / 8); tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif WREG32(CP_RB1_CNTL, tmp); /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); ring->wptr = 0; WREG32(CP_RB1_WPTR, ring->wptr); /* set the wb address whether it's enabled or not */ WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); WREG32(CP_RB1_RPTR_ADDR_HI, 
upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); mdelay(1); WREG32(CP_RB1_CNTL, tmp); WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); ring->rptr = RREG32(CP_RB1_RPTR); /* ring2 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; rb_bufsz = drm_order(ring->ring_size / 8); tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif WREG32(CP_RB2_CNTL, tmp); /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); ring->wptr = 0; WREG32(CP_RB2_WPTR, ring->wptr); /* set the wb address whether it's enabled or not */ WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); mdelay(1); WREG32(CP_RB2_CNTL, tmp); WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); ring->rptr = RREG32(CP_RB2_RPTR); /* start the rings */ si_cp_start(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true; rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true; r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); if (r) { rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; return r; } r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); if (r) { rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; } r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); if (r) { rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; } return 0; } static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; /* GRBM_STATUS */ tmp = RREG32(GRBM_STATUS); if (tmp & (PA_BUSY | SC_BUSY | BCI_BUSY | SX_BUSY | TA_BUSY | VGT_BUSY | 
DB_BUSY | CB_BUSY | GDS_BUSY | SPI_BUSY | IA_BUSY | IA_BUSY_NO_DMA)) reset_mask |= RADEON_RESET_GFX; if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING | CP_BUSY | CP_COHERENCY_BUSY)) reset_mask |= RADEON_RESET_CP; if (tmp & GRBM_EE_BUSY) reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP; /* GRBM_STATUS2 */ tmp = RREG32(GRBM_STATUS2); if (tmp & (RLC_RQ_PENDING | RLC_BUSY)) reset_mask |= RADEON_RESET_RLC; /* DMA_STATUS_REG 0 */ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET); if (!(tmp & DMA_IDLE)) reset_mask |= RADEON_RESET_DMA; /* DMA_STATUS_REG 1 */ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET); if (!(tmp & DMA_IDLE)) reset_mask |= RADEON_RESET_DMA1; /* SRBM_STATUS2 */ tmp = RREG32(SRBM_STATUS2); if (tmp & DMA_BUSY) reset_mask |= RADEON_RESET_DMA; if (tmp & DMA1_BUSY) reset_mask |= RADEON_RESET_DMA1; /* SRBM_STATUS */ tmp = RREG32(SRBM_STATUS); if (tmp & IH_BUSY) reset_mask |= RADEON_RESET_IH; if (tmp & SEM_BUSY) reset_mask |= RADEON_RESET_SEM; if (tmp & GRBM_RQ_PENDING) reset_mask |= RADEON_RESET_GRBM; if (tmp & VMC_BUSY) reset_mask |= RADEON_RESET_VMC; if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY | MCC_BUSY | MCD_BUSY)) reset_mask |= RADEON_RESET_MC; if (evergreen_is_display_hung(rdev)) reset_mask |= RADEON_RESET_DISPLAY; /* VM_L2_STATUS */ tmp = RREG32(VM_L2_STATUS); if (tmp & L2_BUSY) reset_mask |= RADEON_RESET_VMC; /* Skip MC reset as it's mostly likely not hung, just busy */ if (reset_mask & RADEON_RESET_MC) { DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); reset_mask &= ~RADEON_RESET_MC; } return reset_mask; } static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) { struct evergreen_mc_save save; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; if (reset_mask == 0) return; dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); evergreen_print_gpu_status_regs(rdev); dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); dev_info(rdev->dev, " 
VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); /* Disable CP parsing/prefetching */ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); if (reset_mask & RADEON_RESET_DMA) { /* dma0 */ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); tmp &= ~DMA_RB_ENABLE; WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); } if (reset_mask & RADEON_RESET_DMA1) { /* dma1 */ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); tmp &= ~DMA_RB_ENABLE; WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); } udelay(50); evergreen_mc_stop(rdev, &save); if (evergreen_mc_wait_for_idle(rdev)) { dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) { grbm_soft_reset = SOFT_RESET_CB | SOFT_RESET_DB | SOFT_RESET_GDS | SOFT_RESET_PA | SOFT_RESET_SC | SOFT_RESET_BCI | SOFT_RESET_SPI | SOFT_RESET_SX | SOFT_RESET_TC | SOFT_RESET_TA | SOFT_RESET_VGT | SOFT_RESET_IA; } if (reset_mask & RADEON_RESET_CP) { grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT; srbm_soft_reset |= SOFT_RESET_GRBM; } if (reset_mask & RADEON_RESET_DMA) srbm_soft_reset |= SOFT_RESET_DMA; if (reset_mask & RADEON_RESET_DMA1) srbm_soft_reset |= SOFT_RESET_DMA1; if (reset_mask & RADEON_RESET_DISPLAY) srbm_soft_reset |= SOFT_RESET_DC; if (reset_mask & RADEON_RESET_RLC) grbm_soft_reset |= SOFT_RESET_RLC; if (reset_mask & RADEON_RESET_SEM) srbm_soft_reset |= SOFT_RESET_SEM; if (reset_mask & RADEON_RESET_IH) srbm_soft_reset |= SOFT_RESET_IH; if (reset_mask & RADEON_RESET_GRBM) srbm_soft_reset |= SOFT_RESET_GRBM; if (reset_mask & RADEON_RESET_VMC) srbm_soft_reset |= SOFT_RESET_VMC; if (reset_mask & RADEON_RESET_MC) srbm_soft_reset |= SOFT_RESET_MC; if (grbm_soft_reset) { tmp = RREG32(GRBM_SOFT_RESET); tmp |= grbm_soft_reset; dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); WREG32(GRBM_SOFT_RESET, tmp); tmp = RREG32(GRBM_SOFT_RESET); udelay(50); tmp &= ~grbm_soft_reset; WREG32(GRBM_SOFT_RESET, tmp); tmp = 
RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		/* pulse the SRBM reset bits: set, settle, then clear.
		 * The read-backs after each write post/flush the write.
		 */
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}

/* Full ASIC soft reset: check which engines look hung, flag the engine
 * as hung in the BIOS scratch register, reset, then re-check and clear
 * the flag if the reset cleared the hang.  Always returns 0.
 */
int si_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = si_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	si_gpu_soft_reset(rdev, reset_mask);

	reset_mask = si_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * si_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	/* the two async DMA engines have separate reset bits */
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/* MC */
/* Program the memory controller apertures (system aperture, FB location)
 * around an MC stop/resume, and disable VGA access to VRAM
 * (continues past this chunk).
 */
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB location: end in 31:16, start in 15:0, both in 16MB units */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these!
 */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* AGP is unused on SI: base 0, top < bottom disables the aperture */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/* SI MC address space is 40 bits */
/* Place VRAM at @base in the MC address space, clamping the size to the
 * PCI aperture if it would overflow the 40-bit space.
 */
static void si_vram_location(struct radeon_device *rdev,
			     struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/* Place the GTT aperture in whichever gap (below or above VRAM) in the
 * 40-bit MC space is larger, aligned to gtt_base_align, shrinking
 * gtt_size if the gap is too small.
 */
static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/* Lay out VRAM (at MC address 0) and GTT in the 40-bit MC space,
 * capping VRAM so at least 1024M remains for GTT
 * (continues past this chunk).
 */
static void si_vram_gtt_location(struct radeon_device *rdev,
				 struct radeon_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	si_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	si_gtt_location(rdev, mc);
}

/* Probe VRAM properties (channel size/count, bus width, sizes) from the
 * MC config registers and lay out the MC address space.  Always returns 0.
 */
static int si_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	/* NOOFCHAN is an encoded value, not a raw count — note the
	 * non-power-of-two entries (3, 6, 10, 12) below.
	 */
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* size in MB on si */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	si_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

/*
 * GART
 */
/* Flush the HDP cache and invalidate the VM context 0 TLB so CPU writes
 * to GART pages become visible to the GPU.
 */
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

/* Pin the GART page table in VRAM and program the TLB / VM contexts so
 * the GPU can translate GART addresses (continues past this chunk).
 */
static int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup
L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | L2_CACHE_BIGK_FRAGMENT_SIZE(0)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT0_CNTL2, 0); WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT)); WREG32(0x15D4, 0); WREG32(0x15D8, 0); WREG32(0x15DC, 0); /* empty context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); /* Assign the pt base to something valid for now; the pts used for * the VMs are determined by the application and setup and assigned * on the fly in the vm part of radeon_gart.c */ for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), rdev->gart.table_addr >> 12); else WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), rdev->gart.table_addr >> 12); } /* enable context1-15 */ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | VALID_PROTECTION_FAULT_ENABLE_DEFAULT | 
READ_PROTECTION_FAULT_ENABLE_INTERRUPT | READ_PROTECTION_FAULT_ENABLE_DEFAULT | WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); si_pcie_gart_tlb_flush(rdev); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(rdev->mc.gtt_size >> 20), (unsigned long long)rdev->gart.table_addr); rdev->gart.ready = true; return 0; } static void si_pcie_gart_disable(struct radeon_device *rdev) { /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); /* Setup TLB control */ WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, 0); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | L2_CACHE_BIGK_FRAGMENT_SIZE(0)); radeon_gart_table_vram_unpin(rdev); } static void si_pcie_gart_fini(struct radeon_device *rdev) { si_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); radeon_gart_fini(rdev); } /* vm parser */ static bool si_vm_reg_valid(u32 reg) { /* context regs are fine */ if (reg >= 0x28000) return true; /* check config regs */ switch (reg) { case GRBM_GFX_INDEX: case CP_STRMOUT_CNTL: case VGT_VTX_VECT_EJECT_REG: case VGT_CACHE_INVALIDATION: case VGT_ESGS_RING_SIZE: case VGT_GSVS_RING_SIZE: case VGT_GS_VERTEX_REUSE: case VGT_PRIMITIVE_TYPE: case VGT_INDEX_TYPE: case VGT_NUM_INDICES: case VGT_NUM_INSTANCES: case VGT_TF_RING_SIZE: case VGT_HS_OFFCHIP_PARAM: case VGT_TF_MEMORY_BASE: case PA_CL_ENHANCE: case PA_SU_LINE_STIPPLE_VALUE: case PA_SC_LINE_STIPPLE_STATE: case PA_SC_ENHANCE: case SQC_CACHES: case SPI_STATIC_THREAD_MGMT_1: case SPI_STATIC_THREAD_MGMT_2: case SPI_STATIC_THREAD_MGMT_3: case SPI_PS_MAX_WAVE_ID: case SPI_CONFIG_CNTL: case SPI_CONFIG_CNTL_1: case TA_CNTL_AUX: return true; default: DRM_ERROR("Invalid register 0x%x in CS\n", reg); 
return false;
	}
}

/*
 * si_vm_packet3_ce_check - validate a PACKET3 on the constant engine (CE)
 * for a VM (userspace-submitted) IB.  Only the opcodes whitelisted below
 * are permitted; anything else is rejected with -EINVAL.
 */
static int si_vm_packet3_ce_check(struct radeon_device *rdev,
				  u32 *ib, struct radeon_cs_packet *pkt)
{
	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_SET_CE_DE_COUNTERS:
	case PACKET3_LOAD_CONST_RAM:
	case PACKET3_WRITE_CONST_RAM:
	case PACKET3_WRITE_CONST_RAM_OFFSET:
	case PACKET3_DUMP_CONST_RAM:
	case PACKET3_INCREMENT_CE_COUNTER:
	case PACKET3_WAIT_ON_DE_COUNTER:
	case PACKET3_CE_WRITE:
		break;
	default:
		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

/*
 * si_vm_packet3_gfx_check - validate a PACKET3 on the GFX ring for a VM IB.
 * Harmless opcodes pass through; opcodes that can touch registers have the
 * target register(s) checked via si_vm_reg_valid().
 */
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
				   u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;
	u32 command, info;

	switch (pkt->opcode) {
	/* opcodes that carry no register writes - always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDIRECT_MULTI:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case
PACKET3_WAIT_ON_CE_COUNTER: case PACKET3_WAIT_ON_AVAIL_BUFFER: case PACKET3_ME_WRITE: break; case PACKET3_COPY_DATA: if ((idx_value & 0xf00) == 0) { reg = ib[idx + 3] * 4; if (!si_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_WRITE_DATA: if ((idx_value & 0xf00) == 0) { start_reg = ib[idx + 1] * 4; if (idx_value & 0x10000) { if (!si_vm_reg_valid(start_reg)) return -EINVAL; } else { for (i = 0; i < (pkt->count - 2); i++) { reg = start_reg + (4 * i); if (!si_vm_reg_valid(reg)) return -EINVAL; } } } break; case PACKET3_COND_WRITE: if (idx_value & 0x100) { reg = ib[idx + 5] * 4; if (!si_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_COPY_DW: if (idx_value & 0x2) { reg = ib[idx + 3] * 4; if (!si_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_SET_CONFIG_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); if (!si_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_CP_DMA: command = ib[idx + 4]; info = ib[idx + 1]; if (command & PACKET3_CP_DMA_CMD_SAS) { /* src address space is register */ if (((info & 0x60000000) >> 29) == 0) { start_reg = idx_value << 2; if (command & PACKET3_CP_DMA_CMD_SAIC) { reg = start_reg; if (!si_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad SRC register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!si_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad SRC register\n"); return -EINVAL; } } } } } if (command & PACKET3_CP_DMA_CMD_DAS) { /* dst address space is register */ if (((info & 0x00300000) >> 20) == 0) { start_reg = ib[idx + 2]; if (command & PACKET3_CP_DMA_CMD_DAIC) { reg = start_reg; if (!si_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad DST 
register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!si_vm_reg_valid(reg)) { DRM_ERROR("CP DMA Bad DST register\n"); return -EINVAL; } } } } } break; default: DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); return -EINVAL; } return 0; } static int si_vm_packet3_compute_check(struct radeon_device *rdev, u32 *ib, struct radeon_cs_packet *pkt) { u32 idx = pkt->idx + 1; u32 idx_value = ib[idx]; u32 start_reg, reg, i; switch (pkt->opcode) { case PACKET3_NOP: case PACKET3_SET_BASE: case PACKET3_CLEAR_STATE: case PACKET3_DISPATCH_DIRECT: case PACKET3_DISPATCH_INDIRECT: case PACKET3_ALLOC_GDS: case PACKET3_WRITE_GDS_RAM: case PACKET3_ATOMIC_GDS: case PACKET3_ATOMIC: case PACKET3_OCCLUSION_QUERY: case PACKET3_SET_PREDICATION: case PACKET3_COND_EXEC: case PACKET3_PRED_EXEC: case PACKET3_CONTEXT_CONTROL: case PACKET3_STRMOUT_BUFFER_UPDATE: case PACKET3_WAIT_REG_MEM: case PACKET3_MEM_WRITE: case PACKET3_PFP_SYNC_ME: case PACKET3_SURFACE_SYNC: case PACKET3_EVENT_WRITE: case PACKET3_EVENT_WRITE_EOP: case PACKET3_EVENT_WRITE_EOS: case PACKET3_SET_CONTEXT_REG: case PACKET3_SET_CONTEXT_REG_INDIRECT: case PACKET3_SET_SH_REG: case PACKET3_SET_SH_REG_OFFSET: case PACKET3_INCREMENT_DE_COUNTER: case PACKET3_WAIT_ON_CE_COUNTER: case PACKET3_WAIT_ON_AVAIL_BUFFER: case PACKET3_ME_WRITE: break; case PACKET3_COPY_DATA: if ((idx_value & 0xf00) == 0) { reg = ib[idx + 3] * 4; if (!si_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_WRITE_DATA: if ((idx_value & 0xf00) == 0) { start_reg = ib[idx + 1] * 4; if (idx_value & 0x10000) { if (!si_vm_reg_valid(start_reg)) return -EINVAL; } else { for (i = 0; i < (pkt->count - 2); i++) { reg = start_reg + (4 * i); if (!si_vm_reg_valid(reg)) return -EINVAL; } } } break; case PACKET3_COND_WRITE: if (idx_value & 0x100) { reg = ib[idx + 5] * 4; if (!si_vm_reg_valid(reg)) return -EINVAL; } break; case PACKET3_COPY_DW: if (idx_value & 0x2) { reg = ib[idx + 3] * 4; if 
(!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

/*
 * si_ib_parse - walk a VM indirect buffer packet by packet and validate it.
 *
 * Type-0 packets are forbidden outright; type-2 packets are skipped;
 * type-3 packets are dispatched to the CE/GFX/compute checker that matches
 * the submitting ring.  Returns 0 if the whole IB is acceptable, otherwise
 * the first checker's negative error code.
 */
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case RADEON_PACKET_TYPE2:
			/* type-2 is a one-dword filler packet */
			idx += 1;
			break;
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			if (ib->is_const_ib)
				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
			else {
				switch (ib->ring) {
				case RADEON_RING_TYPE_GFX_INDEX:
					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
					break;
				case CAYMAN_RING_TYPE_CP1_INDEX:
				case CAYMAN_RING_TYPE_CP2_INDEX:
					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
					break;
				default:
					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
					ret = -EINVAL;
					break;
				}
			}
			/* advance past header + payload */
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}

/*
 * vm
 */
/* Initialize the VM manager: 16 VM contexts, VRAM pages not offset. */
int si_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 16;
	/* base offset of vram pages */
	rdev->vm_manager.vram_base_offset = 0;

	return 0;
}

/* Nothing to tear down for the SI VM manager. */
void si_vm_fini(struct radeon_device *rdev)
{
}

/**
 * si_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (SI).
*/ void si_vm_set_page(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); uint64_t value; unsigned ndw; if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { while (count) { ndw = 2 + count * 2; if (ndw > 0x3FFE) ndw = 0x3FFE; ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw); ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(1)); ib->ptr[ib->length_dw++] = pe; ib->ptr[ib->length_dw++] = upper_32_bits(pe); for (; ndw > 2; ndw -= 2, --count, pe += 8) { if (flags & RADEON_VM_PAGE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & RADEON_VM_PAGE_VALID) { value = addr; } else { value = 0; } addr += incr; value |= r600_flags; ib->ptr[ib->length_dw++] = value; ib->ptr[ib->length_dw++] = upper_32_bits(value); } } } else { /* DMA */ if (flags & RADEON_VM_PAGE_SYSTEM) { while (count) { ndw = count * 2; if (ndw > 0xFFFFE) ndw = 0xFFFFE; /* for non-physically contiguous pages (system) */ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); ib->ptr[ib->length_dw++] = pe; ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & RADEON_VM_PAGE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & RADEON_VM_PAGE_VALID) { value = addr; } else { value = 0; } addr += incr; value |= r600_flags; ib->ptr[ib->length_dw++] = value; ib->ptr[ib->length_dw++] = upper_32_bits(value); } } } else { while (count) { ndw = count * 2; if (ndw > 0xFFFFE) ndw = 0xFFFFE; if (flags & RADEON_VM_PAGE_VALID) value = addr; else value = 0; /* for physically contiguous pages (vram) */ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); ib->ptr[ib->length_dw++] = pe; /* dst addr */ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; ib->ptr[ib->length_dw++] = 
r600_flags; /* mask */ ib->ptr[ib->length_dw++] = 0; ib->ptr[ib->length_dw++] = value; /* value */ ib->ptr[ib->length_dw++] = upper_32_bits(value); ib->ptr[ib->length_dw++] = incr; /* increment size */ ib->ptr[ib->length_dw++] = 0; pe += ndw * 4; addr += (ndw / 2) * incr; count -= ndw / 2; } } while (ib->length_dw & 0x7) ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); } } void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) { struct radeon_ring *ring = &rdev->ring[ridx]; if (vm == NULL) return; /* write new base address */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); if (vm->id < 8) { radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); } else { radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); } radeon_ring_write(ring, 0); radeon_ring_write(ring, vm->pd_gpu_addr >> 12); /* flush hdp cache */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0x1); /* bits 0-15 are the VM contexts0-15 */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm->id); /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); } void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) { struct radeon_ring *ring = &rdev->ring[ridx]; if (vm == NULL) return; radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); if (vm->id < 8) { radeon_ring_write(ring, (0xf << 16) | 
((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); } else { radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); } radeon_ring_write(ring, vm->pd_gpu_addr >> 12); /* flush hdp cache */ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); radeon_ring_write(ring, 1); /* bits 0-7 are the VM contexts0-7 */ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm->id); } /* * RLC */ void si_rlc_fini(struct radeon_device *rdev) { int r; /* save restore block */ if (rdev->rlc.save_restore_obj) { r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); if (unlikely(r != 0)) dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); radeon_bo_unpin(rdev->rlc.save_restore_obj); radeon_bo_unreserve(rdev->rlc.save_restore_obj); radeon_bo_unref(&rdev->rlc.save_restore_obj); rdev->rlc.save_restore_obj = NULL; } /* clear state block */ if (rdev->rlc.clear_state_obj) { r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); if (unlikely(r != 0)) dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); radeon_bo_unpin(rdev->rlc.clear_state_obj); radeon_bo_unreserve(rdev->rlc.clear_state_obj); radeon_bo_unref(&rdev->rlc.clear_state_obj); rdev->rlc.clear_state_obj = NULL; } } int si_rlc_init(struct radeon_device *rdev) { int r; /* save restore block */ if (rdev->rlc.save_restore_obj == NULL) { r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); return r; } } r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); if (unlikely(r != 0)) { si_rlc_fini(rdev); return r; } r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_gpu_addr); 
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
		si_rlc_fini(rdev);
		return r;
	}

	/* clear state block */
	if (rdev->rlc.clear_state_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, NULL,
				     &rdev->rlc.clear_state_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
			si_rlc_fini(rdev);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
	if (unlikely(r != 0)) {
		si_rlc_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->rlc.clear_state_gpu_addr);
	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
		si_rlc_fini(rdev);
		return r;
	}

	return 0;
}

/* Halt the RLC (run-list controller) microengine. */
static void si_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);
}

/* Start the RLC microengine. */
static void si_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

/*
 * si_rlc_resume - program the RLC registers and upload its microcode.
 *
 * Requires rdev->rlc_fw to have been fetched already (-EINVAL otherwise);
 * stops the RLC, points it at the save/restore and clear-state buffers,
 * streams SI_RLC_UCODE_SIZE big-endian dwords of firmware, then restarts it.
 */
static int si_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	si_rlc_stop(rdev);

	WREG32(RLC_RL_BASE, 0);
	WREG32(RLC_RL_SIZE, 0);
	WREG32(RLC_LB_CNTL, 0);
	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(RLC_LB_CNTR_INIT, 0);

	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	/* firmware image is stored big-endian; convert per dword */
	fw_data = (const __be32 *)rdev->rlc_fw->data;
	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
		WREG32(RLC_UCODE_ADDR, i);
		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	}
	WREG32(RLC_UCODE_ADDR, 0);

	si_rlc_start(rdev);

	return 0;
}

/* Turn on the interrupt handler: enable the IH ring and global interrupts. */
static void si_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

/* Turn off the interrupt handler and reset the IH ring pointers. */
static void si_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL); u32 ih_cntl = RREG32(IH_CNTL); ih_rb_cntl &= ~IH_RB_ENABLE; ih_cntl &= ~ENABLE_INTR; WREG32(IH_RB_CNTL, ih_rb_cntl); WREG32(IH_CNTL, ih_cntl); /* set rptr, wptr to 0 */ WREG32(IH_RB_RPTR, 0); WREG32(IH_RB_WPTR, 0); rdev->ih.enabled = false; rdev->ih.rptr = 0; } static void si_disable_interrupt_state(struct radeon_device *rdev) { u32 tmp; WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); WREG32(CP_INT_CNTL_RING1, 0); WREG32(CP_INT_CNTL_RING2, 0); tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp); tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); WREG32(GRBM_INT_CNTL, 0); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); } if (rdev->num_crtc >= 6) { WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); } if (rdev->num_crtc >= 6) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } WREG32(DACA_AUTODETECT_INT_CONTROL, 0); tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD1_INT_CONTROL, tmp); tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD2_INT_CONTROL, tmp); tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD3_INT_CONTROL, tmp); tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD4_INT_CONTROL, tmp); tmp = 
RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD5_INT_CONTROL, tmp); tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD6_INT_CONTROL, tmp); } static int si_irq_init(struct radeon_device *rdev) { int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; /* allocate ring */ ret = r600_ih_ring_alloc(rdev); if (ret) return ret; /* disable irqs */ si_disable_interrupts(rdev); /* init rlc */ ret = si_rlc_resume(rdev); if (ret) { r600_ih_ring_fini(rdev); return ret; } /* setup interrupt control */ /* set dummy read address to ring address */ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN */ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); rb_bufsz = drm_order(rdev->ih.ring_size / 4); ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | (rb_bufsz << 1)); if (rdev->wb.enabled) ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; /* set the writeback address whether it's enabled or not */ WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); WREG32(IH_RB_CNTL, ih_rb_cntl); /* set rptr, wptr to 0 */ WREG32(IH_RB_RPTR, 0); WREG32(IH_RB_WPTR, 0); /* Default settings for IH_CNTL (disabled at first) */ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0); /* RPTR_REARM only works if msi's are enabled */ if (rdev->msi_enabled) ih_cntl |= RPTR_REARM; WREG32(IH_CNTL, ih_cntl); /* force the active interrupt state to all disabled */ si_disable_interrupt_state(rdev); pci_set_master(rdev->pdev); /* enable irqs */ 
si_enable_interrupts(rdev); return ret; } int si_irq_set(struct radeon_device *rdev) { u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); return -EINVAL; } /* don't enable anything if the ih is disabled */ if (!rdev->ih.enabled) { si_disable_interrupts(rdev); /* force the active interrupt state to all disabled */ si_disable_interrupt_state(rdev); return 0; } hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("si_irq_set: sw int gfx\n"); cp_int_cntl |= TIME_STAMP_INT_ENABLE; } if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { DRM_DEBUG("si_irq_set: sw int cp1\n"); cp_int_cntl1 |= TIME_STAMP_INT_ENABLE; } if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { DRM_DEBUG("si_irq_set: sw int cp2\n"); cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; } if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { DRM_DEBUG("si_irq_set: sw int dma\n"); dma_cntl |= TRAP_ENABLE; } if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { DRM_DEBUG("si_irq_set: sw int dma1\n"); dma_cntl1 |= TRAP_ENABLE; } if (rdev->irq.crtc_vblank_int[0] || 
atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("si_irq_set: vblank 0\n"); crtc1 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[1] || atomic_read(&rdev->irq.pflip[1])) { DRM_DEBUG("si_irq_set: vblank 1\n"); crtc2 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[2] || atomic_read(&rdev->irq.pflip[2])) { DRM_DEBUG("si_irq_set: vblank 2\n"); crtc3 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[3] || atomic_read(&rdev->irq.pflip[3])) { DRM_DEBUG("si_irq_set: vblank 3\n"); crtc4 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[4] || atomic_read(&rdev->irq.pflip[4])) { DRM_DEBUG("si_irq_set: vblank 4\n"); crtc5 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[5] || atomic_read(&rdev->irq.pflip[5])) { DRM_DEBUG("si_irq_set: vblank 5\n"); crtc6 |= VBLANK_INT_MASK; } if (rdev->irq.hpd[0]) { DRM_DEBUG("si_irq_set: hpd 1\n"); hpd1 |= DC_HPDx_INT_EN; } if (rdev->irq.hpd[1]) { DRM_DEBUG("si_irq_set: hpd 2\n"); hpd2 |= DC_HPDx_INT_EN; } if (rdev->irq.hpd[2]) { DRM_DEBUG("si_irq_set: hpd 3\n"); hpd3 |= DC_HPDx_INT_EN; } if (rdev->irq.hpd[3]) { DRM_DEBUG("si_irq_set: hpd 4\n"); hpd4 |= DC_HPDx_INT_EN; } if (rdev->irq.hpd[4]) { DRM_DEBUG("si_irq_set: hpd 5\n"); hpd5 |= DC_HPDx_INT_EN; } if (rdev->irq.hpd[5]) { DRM_DEBUG("si_irq_set: hpd 6\n"); hpd6 |= DC_HPDx_INT_EN; } WREG32(CP_INT_CNTL_RING0, cp_int_cntl); WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); WREG32(CP_INT_CNTL_RING2, cp_int_cntl2); WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl); WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1); WREG32(GRBM_INT_CNTL, grbm_int_cntl); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); } if (rdev->num_crtc >= 6) { WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } WREG32(GRPH_INT_CONTROL + 
EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); } if (rdev->num_crtc >= 6) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); } WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); WREG32(DC_HPD4_INT_CONTROL, hpd4); WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); return 0; } static inline void si_irq_ack(struct radeon_device *rdev) { u32 tmp; rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); if (rdev->num_crtc >= 4) { rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); } if (rdev->num_crtc >= 6) { rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); } if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS 
+ EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); if (rdev->num_crtc >= 4) { if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); } if (rdev->num_crtc >= 6) { if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, 
GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); } if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { tmp = RREG32(DC_HPD1_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD1_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { tmp = RREG32(DC_HPD2_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD2_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { tmp = RREG32(DC_HPD3_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD3_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { tmp = RREG32(DC_HPD4_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD4_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } } static void si_irq_disable(struct radeon_device *rdev) { si_disable_interrupts(rdev); /* Wait and acknowledge irq */ mdelay(1); si_irq_ack(rdev); si_disable_interrupt_state(rdev); } static void si_irq_suspend(struct radeon_device *rdev) { si_irq_disable(rdev); si_rlc_stop(rdev); } static void si_irq_fini(struct radeon_device *rdev) { si_irq_suspend(rdev); r600_ih_ring_fini(rdev); } static inline u32 
si_get_ih_wptr(struct radeon_device *rdev) { u32 wptr, tmp; if (rdev->wb.enabled) wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); else wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 16). Hopefully * this should allow us to catchup. */ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); } return (wptr & rdev->ih.ptr_mask); } /* SI IV Ring * Each IV ring entry is 128 bits: * [7:0] - interrupt source id * [31:8] - reserved * [59:32] - interrupt source data * [63:60] - reserved * [71:64] - RINGID * [79:72] - VMID * [127:80] - reserved */ int si_irq_process(struct radeon_device *rdev) { u32 wptr; u32 rptr; u32 src_id, src_data, ring_id; u32 ring_index; bool queue_hotplug = false; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; wptr = si_get_ih_wptr(rdev); restart_ih: /* is somebody else already processing irqs? */ if (atomic_xchg(&rdev->ih.lock, 1)) return IRQ_NONE; rptr = rdev->ih.rptr; DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ rmb(); /* display interrupts */ si_irq_ack(rdev); while (rptr != wptr) { /* wptr/rptr are in bytes! 
*/ ring_index = rptr / 4; src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff; switch (src_id) { case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { if (rdev->irq.crtc_vblank_int[0]) { drm_handle_vblank(rdev->ddev, 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) radeon_crtc_handle_flip(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } break; case 1: /* D1 vline */ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; DRM_DEBUG("IH: D1 vline\n"); } break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { if (rdev->irq.crtc_vblank_int[1]) { drm_handle_vblank(rdev->ddev, 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) radeon_crtc_handle_flip(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } break; case 1: /* D2 vline */ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; DRM_DEBUG("IH: D2 vline\n"); } break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { if (rdev->irq.crtc_vblank_int[2]) { drm_handle_vblank(rdev->ddev, 2); rdev->pm.vblank_sync = true; 
wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[2])) radeon_crtc_handle_flip(rdev, 2); rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } break; case 1: /* D3 vline */ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; DRM_DEBUG("IH: D3 vline\n"); } break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { if (rdev->irq.crtc_vblank_int[3]) { drm_handle_vblank(rdev->ddev, 3); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[3])) radeon_crtc_handle_flip(rdev, 3); rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } break; case 1: /* D4 vline */ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; DRM_DEBUG("IH: D4 vline\n"); } break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { if (rdev->irq.crtc_vblank_int[4]) { drm_handle_vblank(rdev->ddev, 4); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[4])) radeon_crtc_handle_flip(rdev, 4); rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } break; case 1: /* D5 vline */ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; DRM_DEBUG("IH: D5 vline\n"); } break; default: DRM_DEBUG("Unhandled interrupt: 
%d %d\n", src_id, src_data); break; } break; case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { if (rdev->irq.crtc_vblank_int[5]) { drm_handle_vblank(rdev->ddev, 5); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[5])) radeon_crtc_handle_flip(rdev, 5); rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } break; case 1: /* D6 vline */ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; DRM_DEBUG("IH: D6 vline\n"); } break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; case 42: /* HPD hotplug */ switch (src_data) { case 0: if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD1\n"); } break; case 1: if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD2\n"); } break; case 2: if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD3\n"); } break; case 3: if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD4\n"); } break; case 4: if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD5\n"); } break; case 5: if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { rdev->irq.stat_regs.evergreen.disp_int_cont5 &= 
~DC_HPD6_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD6\n"); } break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; case 146: case 147: dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); /* reset addr and status */ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 176: /* RINGID0 CP_INT */ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); break; case 177: /* RINGID1 CP_INT */ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX); break; case 178: /* RINGID2 CP_INT */ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX); break; case 181: /* CP EOP event */ DRM_DEBUG("IH: CP EOP\n"); switch (ring_id) { case 0: radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); break; case 1: radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX); break; case 2: radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX); break; } break; case 224: /* DMA trap event */ DRM_DEBUG("IH: DMA trap\n"); radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); break; case 244: /* DMA trap event */ DRM_DEBUG("IH: DMA1 trap\n"); radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } /* wptr/rptr are in bytes! 
*/ rptr += 16; rptr &= rdev->ih.ptr_mask; } if (queue_hotplug) schedule_work(&rdev->hotplug_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ wptr = si_get_ih_wptr(rdev); if (wptr != rptr) goto restart_ih; return IRQ_HANDLED; } /** * si_copy_dma - copy pages using the DMA engine * * @rdev: radeon_device pointer * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer * @fence: radeon fence object * * Copy GPU paging using the DMA engine (SI). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. */ int si_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence) { struct radeon_semaphore *sem = NULL; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes; int i, num_loops; int r = 0; r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); return r; } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); return r; } if (radeon_fence_need_sync(*fence, ring->idx)) { radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, ring->idx); radeon_fence_note_sync(*fence, ring->idx); } else { radeon_semaphore_free(rdev, &sem, NULL); } for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; if (cur_size_in_bytes > 0xFFFFF) cur_size_in_bytes = 0xFFFFF; size_in_bytes -= cur_size_in_bytes; radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); radeon_ring_write(ring, dst_offset & 0xffffffff); radeon_ring_write(ring, src_offset & 0xffffffff); radeon_ring_write(ring, 
upper_32_bits(dst_offset) & 0xff); radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; } r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); return r; } radeon_ring_unlock_commit(rdev, ring); radeon_semaphore_free(rdev, &sem, *fence); return r; } /* * startup/shutdown callbacks */ static int si_startup(struct radeon_device *rdev) { struct radeon_ring *ring; int r; if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || !rdev->rlc_fw || !rdev->mc_fw) { r = si_init_microcode(rdev); if (r) { DRM_ERROR("Failed to load firmware!\n"); return r; } } r = si_mc_load_microcode(rdev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); return r; } r = r600_vram_scratch_init(rdev); if (r) return r; si_mc_program(rdev); r = si_pcie_gart_enable(rdev); if (r) return r; si_gpu_init(rdev); #if 0 r = evergreen_blit_init(rdev); if (r) { r600_blit_fini(rdev); rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } #endif /* allocate rlc buffers */ r = si_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; } /* allocate wb buffer */ r = radeon_wb_init(rdev); if (r) return r; r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); if (r) { dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); return r; } r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); if (r) { dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); return r; } r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX); if (r) { dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); return r; } r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); if (r) { dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); return r; } r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); if (r) { dev_err(rdev->dev, "failed initializing DMA fences 
(%d).\n", r); return r; } /* Enable IRQ */ if (!rdev->irq.installed) { r = radeon_irq_kms_init(rdev); if (r) return r; } r = si_irq_init(rdev); if (r) { DRM_ERROR("radeon: IH init failed (%d).\n", r); radeon_irq_kms_fini(rdev); return r; } si_irq_set(rdev); ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, CP_RB0_RPTR, CP_RB0_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, CP_RB1_RPTR, CP_RB1_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, CP_RB2_RPTR, CP_RB2_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, DMA_RB_RPTR + DMA0_REGISTER_OFFSET, DMA_RB_WPTR + DMA0_REGISTER_OFFSET, 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, DMA_RB_RPTR + DMA1_REGISTER_OFFSET, DMA_RB_WPTR + DMA1_REGISTER_OFFSET, 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; r = si_cp_load_microcode(rdev); if (r) return r; r = si_cp_resume(rdev); if (r) return r; r = cayman_dma_resume(rdev); if (r) return r; r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; } r = radeon_vm_manager_init(rdev); if (r) { dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); return r; } return 0; } int si_resume(struct radeon_device *rdev) { int r; /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, * posting will perform necessary task to bring back GPU into good * shape. 
*/ /* post card */ atom_asic_init(rdev->mode_info.atom_context); rdev->accel_working = true; r = si_startup(rdev); if (r) { DRM_ERROR("si startup failed on resume\n"); rdev->accel_working = false; return r; } return r; } int si_suspend(struct radeon_device *rdev) { radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); return 0; } /* Plan is to move initialization in that function and use * helper function so that radeon_device_init pretty much * do nothing more than calling asic specific function. This * should also allow to remove a bunch of callback function * like vram_info. */ int si_init(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; /* Read BIOS */ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) return -EINVAL; } /* Must be an ATOMBIOS */ if (!rdev->is_atom_bios) { dev_err(rdev->dev, "Expecting atombios for cayman GPU\n"); return -EINVAL; } r = radeon_atombios_init(rdev); if (r) return r; /* Post card if necessary */ if (!radeon_card_posted(rdev)) { if (!rdev->bios) { dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; } DRM_INFO("GPU not posted. 
posting now...\n"); atom_asic_init(rdev->mode_info.atom_context); } /* Initialize scratch registers */ si_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) return r; /* initialize memory controller */ r = si_mc_init(rdev); if (r) return r; /* Memory manager */ r = radeon_bo_init(rdev); if (r) return r; ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 64 * 1024); ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 64 * 1024); rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); r = r600_pcie_gart_init(rdev); if (r) return r; rdev->accel_working = true; r = si_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); si_cp_fini(rdev); cayman_dma_fini(rdev); si_irq_fini(rdev); si_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); radeon_irq_kms_fini(rdev); si_pcie_gart_fini(rdev); rdev->accel_working = false; } /* Don't start up if the MC ucode is missing. * The default clocks and voltages before the MC ucode * is loaded are not suffient for advanced operations. 
*/ if (!rdev->mc_fw) { DRM_ERROR("radeon: MC ucode required for NI+.\n"); return -EINVAL; } return 0; } void si_fini(struct radeon_device *rdev) { #if 0 r600_blit_fini(rdev); #endif si_cp_fini(rdev); cayman_dma_fini(rdev); si_irq_fini(rdev); si_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; } /** * si_get_gpu_clock_counter - return GPU clock counter snapshot * * @rdev: radeon_device pointer * * Fetches a GPU clock counter snapshot (SI). * Returns the 64 bit clock counter snapshot. */ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev) { uint64_t clock; mutex_lock(&rdev->gpu_clock_mutex); WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); mutex_unlock(&rdev->gpu_clock_mutex); return clock; }
gpl-2.0
mcfi/MCFI
lib/libcxx-3.5.0.src/test/experimental/string.view/string.view.find/find_first_of_pointer_size.pass.cpp
21
6434
//===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // <string> // constexpr size_type find_first_of(const charT* s, size_type pos = 0) const; #include <experimental/string_view> #include <cassert> #include "constexpr_char_traits.hpp" template <class S> void test(const S& s, const typename S::value_type* str, typename S::size_type pos, typename S::size_type x) { assert(s.find_first_of(str, pos) == x); if (x != S::npos) assert(pos <= x && x < s.size()); } template <class S> void test(const S& s, const typename S::value_type* str, typename S::size_type x) { assert(s.find_first_of(str) == x); if (x != S::npos) assert(x < s.size()); } template <class S> void test0() { test(S(""), "", 0, S::npos); test(S(""), "laenf", 0, S::npos); test(S(""), "pqlnkmbdjo", 0, S::npos); test(S(""), "qkamfogpnljdcshbreti", 0, S::npos); test(S(""), "", 1, S::npos); test(S(""), "bjaht", 1, S::npos); test(S(""), "hjlcmgpket", 1, S::npos); test(S(""), "htaobedqikfplcgjsmrn", 1, S::npos); test(S("fodgq"), "", 0, S::npos); test(S("qanej"), "dfkap", 0, 1); test(S("clbao"), "ihqrfebgad", 0, 2); test(S("mekdn"), "ngtjfcalbseiqrphmkdo", 0, 0); test(S("srdfq"), "", 1, S::npos); test(S("oemth"), "ikcrq", 1, S::npos); test(S("cdaih"), "dmajblfhsg", 1, 1); test(S("qohtk"), "oqftjhdmkgsblacenirp", 1, 1); test(S("cshmd"), "", 2, S::npos); test(S("lhcdo"), "oebqi", 2, 4); test(S("qnsoh"), "kojhpmbsfe", 2, 2); test(S("pkrof"), "acbsjqogpltdkhinfrem", 2, 2); test(S("fmtsp"), "", 4, S::npos); test(S("khbpm"), "aobjd", 4, S::npos); test(S("pbsji"), "pcbahntsje", 4, S::npos); test(S("mprdj"), "fhepcrntkoagbmldqijs", 4, 4); test(S("eqmpa"), "", 5, S::npos); test(S("omigs"), "kocgb", 5, S::npos); test(S("onmje"), "fbslrjiqkm", 5, 
S::npos); test(S("oqmrj"), "jeidpcmalhfnqbgtrsko", 5, S::npos); test(S("schfa"), "", 6, S::npos); test(S("igdsc"), "qngpd", 6, S::npos); test(S("brqgo"), "rodhqklgmb", 6, S::npos); test(S("tnrph"), "thdjgafrlbkoiqcspmne", 6, S::npos); test(S("hcjitbfapl"), "", 0, S::npos); test(S("daiprenocl"), "ashjd", 0, 0); test(S("litpcfdghe"), "mgojkldsqh", 0, 0); test(S("aidjksrolc"), "imqnaghkfrdtlopbjesc", 0, 0); test(S("qpghtfbaji"), "", 1, S::npos); test(S("gfshlcmdjr"), "nadkh", 1, 3); test(S("nkodajteqp"), "ofdrqmkebl", 1, 1); test(S("gbmetiprqd"), "bdfjqgatlksriohemnpc", 1, 1); test(S("crnklpmegd"), "", 5, S::npos); test(S("jsbtafedoc"), "prqgn", 5, S::npos); test(S("qnmodrtkeb"), "pejafmnokr", 5, 5); test(S("cpebqsfmnj"), "odnqkgijrhabfmcestlp", 5, 5); test(S("lmofqdhpki"), "", 9, S::npos); test(S("hnefkqimca"), "rtjpa", 9, 9); test(S("drtasbgmfp"), "ktsrmnqagd", 9, S::npos); test(S("lsaijeqhtr"), "rtdhgcisbnmoaqkfpjle", 9, 9); test(S("elgofjmbrq"), "", 10, S::npos); test(S("mjqdgalkpc"), "dplqa", 10, S::npos); test(S("kthqnfcerm"), "dkacjoptns", 10, S::npos); test(S("dfsjhanorc"), "hqfimtrgnbekpdcsjalo", 10, S::npos); test(S("eqsgalomhb"), "", 11, S::npos); test(S("akiteljmoh"), "lofbc", 11, S::npos); test(S("hlbdfreqjo"), "astoegbfpn", 11, S::npos); test(S("taqobhlerg"), "pdgreqomsncafklhtibj", 11, S::npos); test(S("snafbdlghrjkpqtoceim"), "", 0, S::npos); test(S("aemtbrgcklhndjisfpoq"), "lbtqd", 0, 3); test(S("pnracgfkjdiholtbqsem"), "tboimldpjh", 0, 0); test(S("dicfltehbsgrmojnpkaq"), "slcerthdaiqjfnobgkpm", 0, 0); test(S("jlnkraeodhcspfgbqitm"), "", 1, S::npos); test(S("lhosrngtmfjikbqpcade"), "aqibs", 1, 3); test(S("rbtaqjhgkneisldpmfoc"), "gtfblmqinc", 1, 1); test(S("gpifsqlrdkbonjtmheca"), "mkqpbtdalgniorhfescj", 1, 1); test(S("hdpkobnsalmcfijregtq"), "", 10, S::npos); test(S("jtlshdgqaiprkbcoenfm"), "pblas", 10, 10); test(S("fkdrbqltsgmcoiphneaj"), "arosdhcfme", 10, 10); test(S("crsplifgtqedjohnabmk"), "blkhjeogicatqfnpdmsr", 10, 10); 
test(S("niptglfbosehkamrdqcj"), "", 19, S::npos); test(S("copqdhstbingamjfkler"), "djkqc", 19, S::npos); test(S("mrtaefilpdsgocnhqbjk"), "lgokshjtpb", 19, 19); test(S("kojatdhlcmigpbfrqnes"), "bqjhtkfepimcnsgrlado", 19, 19); test(S("eaintpchlqsbdgrkjofm"), "", 20, S::npos); test(S("gjnhidfsepkrtaqbmclo"), "nocfa", 20, S::npos); test(S("spocfaktqdbiejlhngmr"), "bgtajmiedc", 20, S::npos); test(S("rphmlekgfscndtaobiqj"), "lsckfnqgdahejiopbtmr", 20, S::npos); test(S("liatsqdoegkmfcnbhrpj"), "", 21, S::npos); test(S("binjagtfldkrspcomqeh"), "gfsrt", 21, S::npos); test(S("latkmisecnorjbfhqpdg"), "pfsocbhjtm", 21, S::npos); test(S("lecfratdjkhnsmqpoigb"), "tpflmdnoicjgkberhqsa", 21, S::npos); } template <class S> void test1() { test(S(""), "", S::npos); test(S(""), "laenf", S::npos); test(S(""), "pqlnkmbdjo", S::npos); test(S(""), "qkamfogpnljdcshbreti", S::npos); test(S("nhmko"), "", S::npos); test(S("lahfb"), "irkhs", 2); test(S("gmfhd"), "kantesmpgj", 0); test(S("odaft"), "oknlrstdpiqmjbaghcfe", 0); test(S("eolhfgpjqk"), "", S::npos); test(S("nbatdlmekr"), "bnrpe", 0); test(S("jdmciepkaq"), "jtdaefblso", 0); test(S("hkbgspoflt"), "oselktgbcapndfjihrmq", 0); test(S("gprdcokbnjhlsfmtieqa"), "", S::npos); test(S("qjghlnftcaismkropdeb"), "bjaht", 1); test(S("pnalfrdtkqcmojiesbhg"), "hjlcmgpket", 0); test(S("pniotcfrhqsmgdkjbael"), "htaobedqikfplcgjsmrn", 0); } int main() { { typedef std::experimental::string_view S; test0<S>(); test1<S>(); } #if _LIBCPP_STD_VER > 11 { typedef std::experimental::basic_string_view<char, constexpr_char_traits<char>> SV; constexpr SV sv1; constexpr SV sv2 { "abcde", 5 }; static_assert (sv1.find_first_of( "", 0) == SV::npos, "" ); static_assert (sv1.find_first_of( "irkhs", 0) == SV::npos, "" ); static_assert (sv2.find_first_of( "", 0) == SV::npos, "" ); static_assert (sv2.find_first_of( "gfsrt", 0) == SV::npos, "" ); static_assert (sv2.find_first_of( "lecar", 0) == 0, "" ); } #endif }
gpl-2.0
visi0nary/android_kernel_blackview_alifeponepro
drivers/misc/mediatek/imgsensor/src/mt6735m/imx219_mipi_raw/imx219mipiraw_Sensor.c
21
78444
/***************************************************************************** * * Filename: * --------- * imx219mipi_Sensor.c * * Project: * -------- * ALPS * * Description: * ------------ * Source code of Sensor driver * * *------------------------------------------------------------------------------ * Upper this line, this part is controlled by CC/CQ. DO NOT MODIFY!! *============================================================================ ****************************************************************************/ #include <linux/videodev2.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <asm/atomic.h> //#include <asm/system.h> #include <linux/xlog.h> #include "kd_camera_hw.h" #include "kd_imgsensor.h" #include "kd_imgsensor_define.h" #include "kd_imgsensor_errcode.h" #include "imx219mipiraw_Sensor.h" #ifdef CONFIG_MTK_CAM_CAL extern int read_imx219_eeprom_mtk_fmt(void); #endif /****************************Modify following Strings for debug****************************/ #define PFX "imx219_camera_sensor" #define LOG_1 LOG_INF("imx219,MIPI 2LANE\n") #define LOG_2 LOG_INF("preview 1280*960@30fps,864Mbps/lane; video 1280*960@30fps,864Mbps/lane; capture 5M@30fps,864Mbps/lane\n") /**************************** Modify end *******************************************/ #define LOG_INF(format, args...) 
pr_debug(PFX "[%s] " format, __FUNCTION__, ##args) static DEFINE_SPINLOCK(imgsensor_drv_lock); static imgsensor_info_struct imgsensor_info = { .sensor_id = IMX219_SENSOR_ID, //record sensor id defined in Kd_imgsensor.h .checksum_value = 0x9e08861c, //checksum value for Camera Auto Test .pre = { .pclk = 137600000, //record different mode's pclk .linelength = 0xD78, //record different mode's linelength .framelength = 0x534, //record different mode's framelength .startx = 2, //record different mode's startx of grabwindow .starty = 2, //record different mode's starty of grabwindow .grabwindow_width = 1632, //record different mode's width of grabwindow .grabwindow_height = 1224, //record different mode's height of grabwindow /* following for MIPIDataLowPwr2HighSpeedSettleDelayCount by different scenario */ .mipi_data_lp2hs_settle_dc = 85,//unit , ns /* following for GetDefaultFramerateByScenario() */ .max_framerate = 300, }, .cap = { .pclk = 265600000, .linelength = 0xD78, .framelength = 0x9F0, .startx = 4, .starty = 4, .grabwindow_width = 3264, .grabwindow_height = 2448, .mipi_data_lp2hs_settle_dc = 85,//unit , ns .max_framerate = 300, }, .cap1 = { //capture for PIP 24fps relative information, capture1 mode must use same framelength, linelength with Capture mode for shutter calculate .pclk = 265600000, .linelength = 0xD78, .framelength = 0x9F0, .startx = 4, .starty = 4, .grabwindow_width = 3264, .grabwindow_height = 2448, .mipi_data_lp2hs_settle_dc = 85,//unit , ns .max_framerate = 240, //less than 13M(include 13M),cap1 max framerate is 24fps,16M max framerate is 20fps, 20M max framerate is 15fps }, .normal_video = { .pclk = 265600000, .linelength = 0xD78, .framelength = 0x9F0, .startx = 4, .starty = 4, .grabwindow_width = 3264, .grabwindow_height = 2448, .mipi_data_lp2hs_settle_dc = 85,//unit , ns .max_framerate = 300, }, .hs_video = { .pclk = 265600000, .linelength = 0xD78, .framelength = 0x9F0, .startx = 4, .starty = 4, .grabwindow_width = 3264, .grabwindow_height = 
2448, .mipi_data_lp2hs_settle_dc = 85,//unit , ns .max_framerate = 300, }, .slim_video = { .pclk = 265600000, .linelength = 0xD78, .framelength = 0x9F0, .startx = 4, .starty = 4, .grabwindow_width = 1280, .grabwindow_height = 720, .mipi_data_lp2hs_settle_dc = 85,//unit , ns .max_framerate = 300, }, .custom1 = { .pclk = 137600000, //record different mode's pclk .linelength = 0xD78, //record different mode's linelength .framelength = 0x534, //record different mode's framelength .startx = 2, //record different mode's startx of grabwindow .starty = 2, //record different mode's starty of grabwindow .grabwindow_width = 1632, //record different mode's width of grabwindow .grabwindow_height = 1224, //record different mode's height of grabwindow /* following for MIPIDataLowPwr2HighSpeedSettleDelayCount by different scenario */ .mipi_data_lp2hs_settle_dc = 85,//unit , ns /* following for GetDefaultFramerateByScenario() */ .max_framerate = 300, }, .custom2 = { .pclk = 137600000, //record different mode's pclk .linelength = 0xD78, //record different mode's linelength .framelength = 0x534, //record different mode's framelength .startx = 2, //record different mode's startx of grabwindow .starty = 2, //record different mode's starty of grabwindow .grabwindow_width = 1632, //record different mode's width of grabwindow .grabwindow_height = 1224, //record different mode's height of grabwindow /* following for MIPIDataLowPwr2HighSpeedSettleDelayCount by different scenario */ .mipi_data_lp2hs_settle_dc = 85,//unit , ns /* following for GetDefaultFramerateByScenario() */ .max_framerate = 300, }, .custom3 = { .pclk = 137600000, //record different mode's pclk .linelength = 0xD78, //record different mode's linelength .framelength = 0x534, //record different mode's framelength .startx = 2, //record different mode's startx of grabwindow .starty = 2, //record different mode's starty of grabwindow .grabwindow_width = 1632, //record different mode's width of grabwindow .grabwindow_height = 
1224, //record different mode's height of grabwindow /* following for MIPIDataLowPwr2HighSpeedSettleDelayCount by different scenario */ .mipi_data_lp2hs_settle_dc = 85,//unit , ns /* following for GetDefaultFramerateByScenario() */ .max_framerate = 300, }, .custom4 = { .pclk = 137600000, //record different mode's pclk .linelength = 0xD78, //record different mode's linelength .framelength = 0x534, //record different mode's framelength .startx = 2, //record different mode's startx of grabwindow .starty = 2, //record different mode's starty of grabwindow .grabwindow_width = 1632, //record different mode's width of grabwindow .grabwindow_height = 1224, //record different mode's height of grabwindow /* following for MIPIDataLowPwr2HighSpeedSettleDelayCount by different scenario */ .mipi_data_lp2hs_settle_dc = 85,//unit , ns /* following for GetDefaultFramerateByScenario() */ .max_framerate = 300, }, .custom5 = { .pclk = 137600000, //record different mode's pclk .linelength = 0xD78, //record different mode's linelength .framelength = 0x534, //record different mode's framelength .startx = 2, //record different mode's startx of grabwindow .starty = 2, //record different mode's starty of grabwindow .grabwindow_width = 1632, //record different mode's width of grabwindow .grabwindow_height = 1224, //record different mode's height of grabwindow /* following for MIPIDataLowPwr2HighSpeedSettleDelayCount by different scenario */ .mipi_data_lp2hs_settle_dc = 85,//unit , ns /* following for GetDefaultFramerateByScenario() */ .max_framerate = 300, }, .margin = 5, //sensor framelength & shutter margin .min_shutter = 2, //min shutter .max_frame_length = 0xffff,//max framelength by sensor register's limitation .ae_shut_delay_frame = 0, //shutter delay frame for AE cycle, 2 frame with ispGain_delay-shut_delay=2-0=2 .ae_sensor_gain_delay_frame = 1,//sensor gain delay frame for AE cycle,2 frame with ispGain_delay-sensor_gain_delay=2-0=2 .ae_ispGain_delay_frame = 2,//isp gain delay frame 
/* ...for AE cycle */
    .ihdr_support = 0,              /* 1: support; 0: not support */
    .ihdr_le_firstline = 0,         /* 1: le first; 0: se first */
    .sensor_mode_num = 10,          /* support sensor mode num */
    .cap_delay_frame = 2,           /* enter capture delay frame num */
    .pre_delay_frame = 2,           /* enter preview delay frame num */
    .video_delay_frame = 2,         /* enter video delay frame num */
    .hs_video_delay_frame = 2,      /* enter high speed video delay frame num */
    .slim_video_delay_frame = 2,    /* enter slim video delay frame num */
    .custom1_delay_frame = 2,
    .custom2_delay_frame = 2,
    .custom3_delay_frame = 2,
    .custom4_delay_frame = 2,
    .custom5_delay_frame = 2,
    .isp_driving_current = ISP_DRIVING_8MA,                  /* mclk driving current */
    .sensor_interface_type = SENSOR_INTERFACE_TYPE_MIPI,     /* sensor_interface_type */
    .mipi_sensor_type = MIPI_OPHY_NCSI2,                     /* 0: MIPI_OPHY_NCSI2; 1: MIPI_OPHY_CSI2 */
    .mipi_settle_delay_mode = 1,                             /* 0: MIPI_SETTLEDELAY_AUTO; 1: MIPI_SETTLEDELAY_MANNUAL */
    .sensor_output_dataformat = SENSOR_OUTPUT_FORMAT_RAW_R,  /* sensor output first pixel color */
    .mclk = 24,                     /* mclk value, suggest 24 or 26 for 24Mhz or 26Mhz */
    .mipi_lane_num = SENSOR_MIPI_4_LANE,
    .i2c_addr_table = {0x21, 0x20, 0xff},  /* candidate sensor i2c write ids; list must end with 0xff */
    .i2c_speed = 300,               /* i2c read/write speed */
};

/* Current sensor runtime state; fields are guarded by imgsensor_drv_lock. */
static imgsensor_struct imgsensor = {
    .mirror = IMAGE_NORMAL,             /* mirror/flip information */
    .sensor_mode = IMGSENSOR_MODE_INIT, /* current mode: INIT/Preview/Capture/Video/HS Video/Slim Video */
    .shutter = 0x3D0,                   /* current shutter (lines) */
    .gain = 0x100,                      /* current gain (register units) */
    .dummy_pixel = 0,                   /* current dummy pixels */
    .dummy_line = 0,                    /* current dummy lines */
    .current_fps = 300,                 /* 10*fps: 24fps for PIP, 30fps for Normal or ZSD */
    .autoflicker_en = KAL_FALSE,        /* KAL_TRUE enables auto-flicker avoidance */
    .test_pattern = KAL_FALSE,          /* KAL_TRUE: test-pattern (color bar) output enabled */
    .current_scenario_id = MSDK_SCENARIO_ID_CAMERA_PREVIEW,
    .ihdr_en = 0,                       /* sensor need support LE, SE with HDR feature */
    .i2c_write_id = 0x20,               /* record current sensor's i2c write id */
};

/* Sensor output window information, indexed by scenario:
 * full, x0, y0, w0, h0, scaled w/h, crop x/y, crop w/h, out x/y, out w/h */
static SENSOR_WINSIZE_INFO_STRUCT imgsensor_winsize_info[10] = {
    { 3280, 2464, 0, 0, 3280, 2464, 1640, 1232, 0, 0, 1640, 1232, 2, 2, 1632, 1224}, /* Preview */
    { 3280, 2464, 0, 0, 3280, 2464, 3280, 2464, 0, 0, 3280, 2464, 4, 4, 3264, 2448}, /* capture */
    { 3280, 2464, 0, 0, 3280, 2464, 3280, 2464, 0, 0, 3280, 2464, 4, 4, 3264, 2448}, /* video */
    { 3280, 2464, 0, 0, 3280, 2464, 3280, 2464, 0, 0, 3280, 2464, 4, 4, 3264, 2448}, /* hight speed video */
    { 3280, 2464, 0, 0, 3280, 2464, 1640,  926, 0, 0, 1640,  926, 0, 0, 1280,  720}, /* slim video */
    { 3280, 2464, 0, 0, 3280, 2464, 1640, 1232, 0, 0, 1640, 1232, 2, 2, 1632, 1224}, /* custom1 */
    { 3280, 2464, 0, 0, 3280, 2464, 1640, 1232, 0, 0, 1640, 1232, 2, 2, 1632, 1224}, /* custom2 */
    { 3280, 2464, 0, 0, 3280, 2464, 1640, 1232, 0, 0, 1640, 1232, 2, 2, 1632, 1224}, /* custom3 */
    { 3280, 2464, 0, 0, 3280, 2464, 1640, 1232, 0, 0, 1640, 1232, 2, 2, 1632, 1224}, /* custom4 */
    { 3280, 2464, 0, 0, 3280, 2464, 1640, 1232, 0, 0, 1640, 1232, 2, 2, 1632, 1224}  /* custom5 */
};

#define IMX219MIPI_MaxGainIndex (97)
/* Platform gain (base 64 = 1x) -> IMX219 analog gain register value,
 * sorted ascending by platform gain. */
kal_uint16 IMX219MIPI_sensorGainMapping[IMX219MIPI_MaxGainIndex][2] ={
    {  64,  0 }, {  68, 12 }, {  71, 23 }, {  74, 33 }, {  77, 42 }, {  81, 52 },
    {  84, 59 }, {  87, 66 }, {  90, 73 }, {  93, 79 }, {  96, 85 }, { 100, 91 },
    { 103, 96 }, { 106,101 }, { 109,105 }, { 113,110 }, { 116,114 }, { 120,118 },
    { 122,121 }, { 125,125 }, { 128,128 }, { 132,131 }, { 135,134 }, { 138,137 },
    { 141,139 }, { 144,142 }, { 148,145 }, { 151,147 }, { 153,149 }, { 157,151 },
    { 160,153 }, { 164,156 }, { 168,158 }, { 169,159 }, { 173,161 }, { 176,163 },
    { 180,165 }, { 182,166 }, { 187,168 }, { 189,169 }, { 193,171 }, { 196,172 },
    { 200,174 }, { 203,175 }, { 205,176 }, { 208,177 }, { 213,179 }, { 216,180 },
    { 219,181 }, { 222,182 }, { 225,183 }, { 228,184 }, { 232,185 }, { 235,186 }, {
238,187}, { 241,188}, { 245,189}, { 249,190}, { 253,191}, { 256,192}, { 260,193}, { 265,194}, { 269,195}, { 274,196}, { 278,197}, { 283,198}, { 288,199}, { 293,200}, { 298,201}, { 304,202}, { 310,203}, { 315,204}, { 322,205}, { 328,206}, { 335,207}, { 342,208}, { 349,209}, { 357,210}, { 365,211}, { 373,212}, { 381,213}, { 400,215}, { 420,217}, { 432,218}, { 443,219}, { 468,221}, { 482,222}, { 497,223}, { 512,224}, { 529,225}, { 546,226}, { 566,227}, { 585,228}, { 607,229}, { 631,230}, { 656,231}, { 683,232} }; static kal_uint16 read_cmos_sensor(kal_uint32 addr) { kal_uint16 get_byte=0; char pu_send_cmd[2] = {(char)(addr >> 8), (char)(addr & 0xFF) }; kdSetI2CSpeed(imgsensor_info.i2c_speed); iReadRegI2C(pu_send_cmd, 2, (u8*)&get_byte, 1, imgsensor.i2c_write_id); return get_byte; } static void write_cmos_sensor(kal_uint32 addr, kal_uint32 para) { char pu_send_cmd[3] = {(char)(addr >> 8), (char)(addr & 0xFF), (char)(para & 0xFF)}; kdSetI2CSpeed(imgsensor_info.i2c_speed); iWriteRegI2C(pu_send_cmd, 3, imgsensor.i2c_write_id); } static void set_dummy(void) { LOG_INF("dummyline = %d, dummypixels = %d \n", imgsensor.dummy_line, imgsensor.dummy_pixel); /* you can set dummy by imgsensor.dummy_line and imgsensor.dummy_pixel, or you can set dummy by imgsensor.frame_length and imgsensor.line_length */ write_cmos_sensor(0x0160, (imgsensor.frame_length >>8) & 0xFF); write_cmos_sensor(0x0161, imgsensor.frame_length & 0xFF); write_cmos_sensor(0x0162, (imgsensor.line_length >>8) & 0xFF); write_cmos_sensor(0x0163, imgsensor.line_length & 0xFF); } /* set_dummy */ static kal_uint32 return_sensor_id(void) { return ((read_cmos_sensor(0x0000) << 8) | read_cmos_sensor(0x0001)); //int sensorid; //sensorid = ((read_cmos_sensor(0x0000) << 8) | read_cmos_sensor(0x0001)); //LOG_INF("read sensor id:%x", sensorid); //return 0x0219; } static void set_max_framerate(UINT16 framerate,kal_bool min_framelength_en) { kal_uint32 frame_length = imgsensor.frame_length; //unsigned long flags; 
LOG_INF("framerate = %d, min framelength should enable = %d\n", framerate,min_framelength_en); frame_length = imgsensor.pclk / framerate * 10 / imgsensor.line_length; spin_lock(&imgsensor_drv_lock); imgsensor.frame_length = (frame_length > imgsensor.min_frame_length) ? frame_length : imgsensor.min_frame_length; imgsensor.dummy_line = imgsensor.frame_length - imgsensor.min_frame_length; //dummy_line = frame_length - imgsensor.min_frame_length; //if (dummy_line < 0) //imgsensor.dummy_line = 0; //else //imgsensor.dummy_line = dummy_line; //imgsensor.frame_length = frame_length + imgsensor.dummy_line; if (imgsensor.frame_length > imgsensor_info.max_frame_length) { imgsensor.frame_length = imgsensor_info.max_frame_length; imgsensor.dummy_line = imgsensor.frame_length - imgsensor.min_frame_length; } if (min_framelength_en) imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); } /* set_max_framerate */ #if 0 static void write_shutter(kal_uint16 shutter) { kal_uint16 realtime_fps = 0; kal_uint32 frame_length = 0; /* 0x3500, 0x3501, 0x3502 will increase VBLANK to get exposure larger than frame exposure */ /* AE doesn't update sensor gain at capture mode, thus extra exposure lines must be updated here. */ // OV Recommend Solution // if shutter bigger than frame_length, should extend frame length first spin_lock(&imgsensor_drv_lock); if (shutter > imgsensor.min_frame_length - imgsensor_info.margin) imgsensor.frame_length = shutter + imgsensor_info.margin; else imgsensor.frame_length = imgsensor.min_frame_length; if (imgsensor.frame_length > imgsensor_info.max_frame_length) imgsensor.frame_length = imgsensor_info.max_frame_length; spin_unlock(&imgsensor_drv_lock); shutter = (shutter < imgsensor_info.min_shutter) ? imgsensor_info.min_shutter : shutter; shutter = (shutter > (imgsensor_info.max_frame_length - imgsensor_info.margin)) ? 
(imgsensor_info.max_frame_length - imgsensor_info.margin) : shutter; // Framelength should be an even number shutter = (shutter >> 1) << 1; imgsensor.frame_length = (imgsensor.frame_length >> 1) << 1; if (imgsensor.autoflicker_en) { realtime_fps = imgsensor.pclk / imgsensor.line_length * 10 / imgsensor.frame_length; if(realtime_fps >= 297 && realtime_fps <= 305) set_max_framerate(296,0); else if(realtime_fps >= 147 && realtime_fps <= 150) set_max_framerate(146,0); else { // Extend frame length write_cmos_sensor(0x0160, imgsensor.frame_length >> 8); write_cmos_sensor(0x0161, imgsensor.frame_length & 0xFF); } } else { // Extend frame length write_cmos_sensor(0x0160, imgsensor.frame_length >> 8); write_cmos_sensor(0x0161, imgsensor.frame_length & 0xFF); } // Update Shutter write_cmos_sensor(0x015a, (shutter >> 8) & 0xFF); write_cmos_sensor(0x015b, (shutter ) & 0xFF); LOG_INF("Exit! shutter =%d, framelength =%d\n", shutter,imgsensor.frame_length); //LOG_INF("frame_length = %d ", frame_length); } /* write_shutter */ #endif /************************************************************************* * FUNCTION * set_shutter * * DESCRIPTION * This function set e-shutter of sensor to change exposure time. * * PARAMETERS * iShutter : exposured lines * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static void set_shutter(kal_uint16 shutter) { unsigned long flags; kal_uint16 realtime_fps = 0; spin_lock_irqsave(&imgsensor_drv_lock, flags); imgsensor.shutter = shutter; spin_unlock_irqrestore(&imgsensor_drv_lock, flags); //write_shutter(shutter); /* 0x3500, 0x3501, 0x3502 will increase VBLANK to get exposure larger than frame exposure */ /* AE doesn't update sensor gain at capture mode, thus extra exposure lines must be updated here. 
*/ // OV Recommend Solution // if shutter bigger than frame_length, should extend frame length first spin_lock(&imgsensor_drv_lock); if (shutter > imgsensor.min_frame_length - imgsensor_info.margin) imgsensor.frame_length = shutter + imgsensor_info.margin; else imgsensor.frame_length = imgsensor.min_frame_length; if (imgsensor.frame_length > imgsensor_info.max_frame_length) imgsensor.frame_length = imgsensor_info.max_frame_length; spin_unlock(&imgsensor_drv_lock); shutter = (shutter < imgsensor_info.min_shutter) ? imgsensor_info.min_shutter : shutter; shutter = (shutter > (imgsensor_info.max_frame_length - imgsensor_info.margin)) ? (imgsensor_info.max_frame_length - imgsensor_info.margin) : shutter; if (imgsensor.autoflicker_en) { realtime_fps = imgsensor.pclk / imgsensor.line_length * 10 / imgsensor.frame_length; if(realtime_fps >= 297 && realtime_fps <= 305) set_max_framerate(296,0); else if(realtime_fps >= 147 && realtime_fps <= 150) set_max_framerate(146,0); else { // Extend frame length write_cmos_sensor(0x0160, imgsensor.frame_length >> 8); write_cmos_sensor(0x0161, imgsensor.frame_length & 0xFF); } } else { // Extend frame length write_cmos_sensor(0x0160, imgsensor.frame_length >> 8); write_cmos_sensor(0x0161, imgsensor.frame_length & 0xFF); } // Update Shutter write_cmos_sensor(0x015a, (shutter >> 8) & 0xFF); write_cmos_sensor(0x015b, (shutter ) & 0xFF); LOG_INF("Exit! 
shutter =%d, framelength =%d\n", shutter,imgsensor.frame_length); } /* set_shutter */ static kal_uint16 gain2reg(const kal_uint16 gain) { kal_uint8 iI; LOG_INF("[IMX219MIPI]enter IMX219MIPIGain2Reg function\n"); for (iI = 0; iI < (IMX219MIPI_MaxGainIndex-1); iI++) { if(gain <IMX219MIPI_sensorGainMapping[iI][0]) { break; } if(gain < IMX219MIPI_sensorGainMapping[iI][0]) { return IMX219MIPI_sensorGainMapping[iI][1]; } } if(gain != IMX219MIPI_sensorGainMapping[iI][0]) { LOG_INF("Gain mapping don't correctly:%d %d \n", gain, IMX219MIPI_sensorGainMapping[iI][0]); } LOG_INF("exit IMX219MIPIGain2Reg function\n"); return IMX219MIPI_sensorGainMapping[iI-1][1]; //return NONE; } /************************************************************************* * FUNCTION * set_gain * * DESCRIPTION * This function is to set global gain to sensor. * * PARAMETERS * iGain : sensor global gain(base: 0x40) * * RETURNS * the actually gain set to sensor. * * GLOBALS AFFECTED * *************************************************************************/ UINT16 iPreGain = 0; static kal_uint16 set_gain(kal_uint16 gain) { kal_uint16 reg_gain; if (iPreGain != gain) { reg_gain = gain2reg(gain); spin_lock(&imgsensor_drv_lock); imgsensor.gain = reg_gain; spin_unlock(&imgsensor_drv_lock); write_cmos_sensor(0x0157, (kal_uint8)reg_gain); LOG_INF("gain = %d , reg_gain = 0x%x\n ", gain, reg_gain); } return gain; } /* set_gain */ static void ihdr_write_shutter_gain(kal_uint16 le, kal_uint16 se, kal_uint16 gain) { LOG_INF("le:0x%x, se:0x%x, gain:0x%x\n",le,se,gain); if (imgsensor.ihdr_en) { spin_lock(&imgsensor_drv_lock); if (le > imgsensor.min_frame_length - imgsensor_info.margin) imgsensor.frame_length = le + imgsensor_info.margin; else imgsensor.frame_length = imgsensor.min_frame_length; if (imgsensor.frame_length > imgsensor_info.max_frame_length) imgsensor.frame_length = imgsensor_info.max_frame_length; spin_unlock(&imgsensor_drv_lock); if (le < imgsensor_info.min_shutter) le = 
imgsensor_info.min_shutter; if (se < imgsensor_info.min_shutter) se = imgsensor_info.min_shutter; // Extend frame length first write_cmos_sensor(0x380e, imgsensor.frame_length >> 8); write_cmos_sensor(0x380f, imgsensor.frame_length & 0xFF); write_cmos_sensor(0x3502, (le << 4) & 0xFF); write_cmos_sensor(0x3501, (le >> 4) & 0xFF); write_cmos_sensor(0x3500, (le >> 12) & 0x0F); write_cmos_sensor(0x3508, (se << 4) & 0xFF); write_cmos_sensor(0x3507, (se >> 4) & 0xFF); write_cmos_sensor(0x3506, (se >> 12) & 0x0F); set_gain(gain); } } #if 0 static void set_mirror_flip(kal_uint8 image_mirror) { LOG_INF("image_mirror = %d\n", image_mirror); /******************************************************** * * 0x3820[2] ISP Vertical flip * 0x3820[1] Sensor Vertical flip * * 0x3821[2] ISP Horizontal mirror * 0x3821[1] Sensor Horizontal mirror * * ISP and Sensor flip or mirror register bit should be the same!! * ********************************************************/ kal_uint8 iTemp; LOG_INF("set_mirror_flip function\n"); iTemp = read_cmos_sensor(0x0172) & 0x03; //Clear the mirror and flip bits. switch (image_mirror) { case IMAGE_NORMAL: write_cmos_sensor(0x0172, 0x03); //Set normal break; case IMAGE_V_MIRROR: write_cmos_sensor(0x0172, iTemp | 0x01); //Set flip break; case IMAGE_H_MIRROR: write_cmos_sensor(0x0172, iTemp | 0x02); //Set mirror break; case IMAGE_HV_MIRROR: write_cmos_sensor(0x0172, 0x00); //Set mirror and flip break; } LOG_INF("Error image_mirror setting\n"); } #endif /************************************************************************* * FUNCTION * night_mode * * DESCRIPTION * This function night mode of sensor. 
* * PARAMETERS * bEnable: KAL_TRUE -> enable night mode, otherwise, disable night mode * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static void night_mode(kal_bool enable) { /*No Need to implement this function*/ } /* night_mode */ static void sensor_init(void) { LOG_INF("E\n"); //write_cmos_sensor(0x0100,0x01); //wake up } /* sensor_init */ static void preview_setting(void) { write_cmos_sensor(0x0100, 0x00); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x0C); write_cmos_sensor(0x300A, 0xFF); write_cmos_sensor(0x300B, 0xFF); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x09); write_cmos_sensor(0x0114, 0x03); write_cmos_sensor(0x0128, 0x00); write_cmos_sensor(0x012A, 0x18); write_cmos_sensor(0x012B, 0x00); write_cmos_sensor(0x0160, ((imgsensor_info.pre.framelength >> 8) & 0xFF)); write_cmos_sensor(0x0161, (imgsensor_info.pre.framelength & 0xFF)); write_cmos_sensor(0x0162, ((imgsensor_info.pre.linelength >> 8) & 0xFF)); write_cmos_sensor(0x0163, (imgsensor_info.pre.linelength & 0xFF)); write_cmos_sensor(0x0164, 0x00); write_cmos_sensor(0x0165, 0x00); write_cmos_sensor(0x0166, 0x0C); write_cmos_sensor(0x0167, 0xCF); write_cmos_sensor(0x0168, 0x00); write_cmos_sensor(0x0169, 0x00); write_cmos_sensor(0x016A, 0x09); write_cmos_sensor(0x016B, 0x9F); write_cmos_sensor(0x016C, 0x06); write_cmos_sensor(0x016D, 0x68); write_cmos_sensor(0x016E, 0x04); write_cmos_sensor(0x016F, 0xD0); write_cmos_sensor(0x0170, 0x01); write_cmos_sensor(0x0171, 0x01); write_cmos_sensor(0x0174, 0x01); write_cmos_sensor(0x0175, 0x01); write_cmos_sensor(0x018C, 0x0A); write_cmos_sensor(0x018D, 0x0A); write_cmos_sensor(0x0301, 0x05); write_cmos_sensor(0x0303, 0x01); write_cmos_sensor(0x0304, 0x03); write_cmos_sensor(0x0305, 0x03); write_cmos_sensor(0x0306, 0x00); write_cmos_sensor(0x0307, 0x2B); write_cmos_sensor(0x0309, 0x0A); write_cmos_sensor(0x030B, 0x01); write_cmos_sensor(0x030C, 0x00); 
write_cmos_sensor(0x030D, 0x2E); write_cmos_sensor(0x455E, 0x00); write_cmos_sensor(0x471E, 0x4B); write_cmos_sensor(0x4767, 0x0F); write_cmos_sensor(0x4750, 0x14); write_cmos_sensor(0x4540, 0x00); write_cmos_sensor(0x47B4, 0x14); write_cmos_sensor(0x4713, 0x30); write_cmos_sensor(0x478B, 0x10); write_cmos_sensor(0x478F, 0x10); write_cmos_sensor(0x4793, 0x10); write_cmos_sensor(0x4797, 0x0E); write_cmos_sensor(0x479B, 0x0E); write_cmos_sensor(0x0100, 0x01); } /* preview_setting */ static void capture_setting(kal_uint16 currefps) { LOG_INF("E! currefps:%d\n",currefps); if (currefps == 240) { //24fps for PIP //@@full_132PCLK_24.75 write_cmos_sensor(0x0100, 0x00); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x0C); write_cmos_sensor(0x300A, 0xFF); write_cmos_sensor(0x300B, 0xFF); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x09); write_cmos_sensor(0x0114, 0x03); write_cmos_sensor(0x0128, 0x00); write_cmos_sensor(0x012A, 0x18); write_cmos_sensor(0x012B, 0x00); write_cmos_sensor(0x0160, ((imgsensor_info.cap1.framelength >> 8) & 0xFF)); write_cmos_sensor(0x0161, (imgsensor_info.cap1.framelength & 0xFF)); write_cmos_sensor(0x0162, ((imgsensor_info.cap1.linelength >> 8) & 0xFF)); write_cmos_sensor(0x0163, (imgsensor_info.cap1.linelength & 0xFF)); write_cmos_sensor(0x0164, 0x00); write_cmos_sensor(0x0165, 0x00); write_cmos_sensor(0x0166, 0x0C); write_cmos_sensor(0x0167, 0xCF); write_cmos_sensor(0x0168, 0x00); write_cmos_sensor(0x0169, 0x00); write_cmos_sensor(0x016A, 0x09); write_cmos_sensor(0x016B, 0x9F); write_cmos_sensor(0x016C, 0x0C); write_cmos_sensor(0x016D, 0xD0); write_cmos_sensor(0x016E, 0x09); write_cmos_sensor(0x016F, 0xA0); write_cmos_sensor(0x0170, 0x01); write_cmos_sensor(0x0171, 0x01); write_cmos_sensor(0x0174, 0x00); write_cmos_sensor(0x0175, 0x00); write_cmos_sensor(0x018C, 0x0A); write_cmos_sensor(0x018D, 0x0A); write_cmos_sensor(0x0301, 0x05); write_cmos_sensor(0x0303, 0x01); write_cmos_sensor(0x0304, 0x03); 
write_cmos_sensor(0x0305, 0x03); write_cmos_sensor(0x0306, 0x00); write_cmos_sensor(0x0307, 0x53); //0x51 write_cmos_sensor(0x0309, 0x0A); write_cmos_sensor(0x030B, 0x01); write_cmos_sensor(0x030C, 0x00); write_cmos_sensor(0x030D, 0x56);//0x54 write_cmos_sensor(0x455E, 0x00); write_cmos_sensor(0x471E, 0x4B); write_cmos_sensor(0x4767, 0x0F); write_cmos_sensor(0x4750, 0x14); write_cmos_sensor(0x4540, 0x00); write_cmos_sensor(0x47B4, 0x14); write_cmos_sensor(0x4713, 0x30); write_cmos_sensor(0x478B, 0x10); write_cmos_sensor(0x478F, 0x10); write_cmos_sensor(0x4793, 0x10); write_cmos_sensor(0x4797, 0x0E); write_cmos_sensor(0x479B, 0x0E); write_cmos_sensor(0x0100, 0x01); } else { //30fps //30fps for Normal capture & ZSD write_cmos_sensor(0x0100, 0x00); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x0C); write_cmos_sensor(0x300A, 0xFF); write_cmos_sensor(0x300B, 0xFF); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x09); write_cmos_sensor(0x0114, 0x03); write_cmos_sensor(0x0128, 0x00); write_cmos_sensor(0x012A, 0x18); write_cmos_sensor(0x012B, 0x00); write_cmos_sensor(0x0160, ((imgsensor_info.cap.framelength >> 8) & 0xFF)); write_cmos_sensor(0x0161, (imgsensor_info.cap.framelength & 0xFF)); write_cmos_sensor(0x0162, ((imgsensor_info.cap.linelength >> 8) & 0xFF)); write_cmos_sensor(0x0163, (imgsensor_info.cap.linelength & 0xFF)); write_cmos_sensor(0x0164, 0x00); write_cmos_sensor(0x0165, 0x00); write_cmos_sensor(0x0166, 0x0C); write_cmos_sensor(0x0167, 0xCF); write_cmos_sensor(0x0168, 0x00); write_cmos_sensor(0x0169, 0x00); write_cmos_sensor(0x016A, 0x09); write_cmos_sensor(0x016B, 0x9F); write_cmos_sensor(0x016C, 0x0C); write_cmos_sensor(0x016D, 0xD0); write_cmos_sensor(0x016E, 0x09); write_cmos_sensor(0x016F, 0xA0); write_cmos_sensor(0x0170, 0x01); write_cmos_sensor(0x0171, 0x01); write_cmos_sensor(0x0174, 0x00); write_cmos_sensor(0x0175, 0x00); write_cmos_sensor(0x018C, 0x0A); write_cmos_sensor(0x018D, 0x0A); write_cmos_sensor(0x0301, 0x05); 
write_cmos_sensor(0x0303, 0x01); write_cmos_sensor(0x0304, 0x03); write_cmos_sensor(0x0305, 0x03); write_cmos_sensor(0x0306, 0x00); write_cmos_sensor(0x0307, 0x53); //0x51 write_cmos_sensor(0x0309, 0x0A); write_cmos_sensor(0x030B, 0x01); write_cmos_sensor(0x030C, 0x00); write_cmos_sensor(0x030D, 0x56);//0x54 write_cmos_sensor(0x455E, 0x00); write_cmos_sensor(0x471E, 0x4B); write_cmos_sensor(0x4767, 0x0F); write_cmos_sensor(0x4750, 0x14); write_cmos_sensor(0x4540, 0x00); write_cmos_sensor(0x47B4, 0x14); write_cmos_sensor(0x4713, 0x30); write_cmos_sensor(0x478B, 0x10); write_cmos_sensor(0x478F, 0x10); write_cmos_sensor(0x4793, 0x10); write_cmos_sensor(0x4797, 0x0E); write_cmos_sensor(0x479B, 0x0E); write_cmos_sensor(0x0100, 0x01); if (imgsensor.ihdr_en) { } else { } } } static void normal_video_setting(kal_uint16 currefps) { LOG_INF("E! currefps:%d\n",currefps); write_cmos_sensor(0x0100, 0x00); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x0C); write_cmos_sensor(0x300A, 0xFF); write_cmos_sensor(0x300B, 0xFF); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x09); write_cmos_sensor(0x0114, 0x03); write_cmos_sensor(0x0128, 0x00); write_cmos_sensor(0x012A, 0x18); write_cmos_sensor(0x012B, 0x00); write_cmos_sensor(0x0160, ((imgsensor_info.normal_video.framelength >> 8) & 0xFF)); write_cmos_sensor(0x0161, (imgsensor_info.normal_video.framelength & 0xFF)); write_cmos_sensor(0x0162, ((imgsensor_info.normal_video.linelength >> 8) & 0xFF)); write_cmos_sensor(0x0163, (imgsensor_info.normal_video.linelength & 0xFF)); write_cmos_sensor(0x0164, 0x00); write_cmos_sensor(0x0165, 0x00); write_cmos_sensor(0x0166, 0x0C); write_cmos_sensor(0x0167, 0xCF); write_cmos_sensor(0x0168, 0x00); write_cmos_sensor(0x0169, 0x00); write_cmos_sensor(0x016A, 0x09); write_cmos_sensor(0x016B, 0x9F); write_cmos_sensor(0x016C, 0x0C); write_cmos_sensor(0x016D, 0xD0); write_cmos_sensor(0x016E, 0x09); write_cmos_sensor(0x016F, 0xA0); write_cmos_sensor(0x0170, 0x01); 
write_cmos_sensor(0x0171, 0x01);
    write_cmos_sensor(0x0174, 0x00);
    write_cmos_sensor(0x0175, 0x00);
    write_cmos_sensor(0x018C, 0x0A);
    write_cmos_sensor(0x018D, 0x0A);
    write_cmos_sensor(0x0301, 0x05);
    write_cmos_sensor(0x0303, 0x01);
    write_cmos_sensor(0x0304, 0x03);
    write_cmos_sensor(0x0305, 0x03);
    write_cmos_sensor(0x0306, 0x00);
    write_cmos_sensor(0x0307, 0x53); /* 0x51 */
    write_cmos_sensor(0x0309, 0x0A);
    write_cmos_sensor(0x030B, 0x01);
    write_cmos_sensor(0x030C, 0x00);
    write_cmos_sensor(0x030D, 0x56); /* 0x54 */
    write_cmos_sensor(0x455E, 0x00);
    write_cmos_sensor(0x471E, 0x4B);
    write_cmos_sensor(0x4767, 0x0F);
    write_cmos_sensor(0x4750, 0x14);
    write_cmos_sensor(0x4540, 0x00);
    write_cmos_sensor(0x47B4, 0x14);
    write_cmos_sensor(0x4713, 0x30);
    write_cmos_sensor(0x478B, 0x10);
    write_cmos_sensor(0x478F, 0x10);
    write_cmos_sensor(0x4793, 0x10);
    write_cmos_sensor(0x4797, 0x0E);
    write_cmos_sensor(0x479B, 0x0E);
    write_cmos_sensor(0x0100, 0x01);    /* stream on */
    if (imgsensor.ihdr_en) {
        /* iHDR variant not implemented */
    } else {
    }
}

/* Load the high-speed-video register set.
 * NOTE(review): this table is identical to the full-resolution video one —
 * confirm whether a dedicated HS mode was intended. */
static void hs_video_setting(kal_uint16 currefps)
{
    LOG_INF("E! currefps:%d\n",currefps);
    write_cmos_sensor(0x0100, 0x00);    /* standby while reprogramming */
    write_cmos_sensor(0x30EB, 0x05);
    write_cmos_sensor(0x30EB, 0x0C);
    write_cmos_sensor(0x300A, 0xFF);
    write_cmos_sensor(0x300B, 0xFF);
    write_cmos_sensor(0x30EB, 0x05);
    write_cmos_sensor(0x30EB, 0x09);
    write_cmos_sensor(0x0114, 0x03);
    write_cmos_sensor(0x0128, 0x00);
    write_cmos_sensor(0x012A, 0x18);
    write_cmos_sensor(0x012B, 0x00);
    write_cmos_sensor(0x0160, ((imgsensor_info.hs_video.framelength >> 8) & 0xFF));
    write_cmos_sensor(0x0161, (imgsensor_info.hs_video.framelength & 0xFF));
    write_cmos_sensor(0x0162, ((imgsensor_info.hs_video.linelength >> 8) & 0xFF));
    write_cmos_sensor(0x0163, (imgsensor_info.hs_video.linelength & 0xFF));
    write_cmos_sensor(0x0164, 0x00);
    write_cmos_sensor(0x0165, 0x00);
    write_cmos_sensor(0x0166, 0x0C);
    write_cmos_sensor(0x0167, 0xCF);
    write_cmos_sensor(0x0168, 0x00);
    write_cmos_sensor(0x0169, 0x00);
    write_cmos_sensor(0x016A, 0x09);
    write_cmos_sensor(0x016B, 0x9F);
    write_cmos_sensor(0x016C, 0x0C);
    write_cmos_sensor(0x016D, 0xD0);
    write_cmos_sensor(0x016E, 0x09);
    write_cmos_sensor(0x016F, 0xA0);
    write_cmos_sensor(0x0170, 0x01);
    write_cmos_sensor(0x0171, 0x01);
    write_cmos_sensor(0x0174, 0x00);
    write_cmos_sensor(0x0175, 0x00);
    write_cmos_sensor(0x018C, 0x0A);
    write_cmos_sensor(0x018D, 0x0A);
    write_cmos_sensor(0x0301, 0x05);
    write_cmos_sensor(0x0303, 0x01);
    write_cmos_sensor(0x0304, 0x03);
    write_cmos_sensor(0x0305, 0x03);
    write_cmos_sensor(0x0306, 0x00);
    write_cmos_sensor(0x0307, 0x53); /* 0x51 */
    write_cmos_sensor(0x0309, 0x0A);
    write_cmos_sensor(0x030B, 0x01);
    write_cmos_sensor(0x030C, 0x00);
    write_cmos_sensor(0x030D, 0x56); /* 0x54 */
    write_cmos_sensor(0x455E, 0x00);
    write_cmos_sensor(0x471E, 0x4B);
    write_cmos_sensor(0x4767, 0x0F);
    write_cmos_sensor(0x4750, 0x14);
    write_cmos_sensor(0x4540, 0x00);
    write_cmos_sensor(0x47B4, 0x14);
    write_cmos_sensor(0x4713, 0x30);
    write_cmos_sensor(0x478B, 0x10);
    write_cmos_sensor(0x478F, 0x10);
    write_cmos_sensor(0x4793, 0x10);
    write_cmos_sensor(0x4797, 0x0E);
write_cmos_sensor(0x479B, 0x0E); write_cmos_sensor(0x0100, 0x01); if (imgsensor.ihdr_en) { } else { } } static void slim_video_setting(void) { LOG_INF("E\n"); write_cmos_sensor(0x0100, 0x00); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x0C); write_cmos_sensor(0x300A, 0xFF); write_cmos_sensor(0x300B, 0xFF); write_cmos_sensor(0x30EB, 0x05); write_cmos_sensor(0x30EB, 0x09); write_cmos_sensor(0x0114, 0x03); write_cmos_sensor(0x0128, 0x00); write_cmos_sensor(0x012A, 0x18); write_cmos_sensor(0x012B, 0x00); write_cmos_sensor(0x0160, ((imgsensor_info.slim_video.framelength >> 8) & 0xFF)); write_cmos_sensor(0x0161, (imgsensor_info.slim_video.framelength & 0xFF)); write_cmos_sensor(0x0162, ((imgsensor_info.slim_video.linelength >> 8) & 0xFF)); write_cmos_sensor(0x0163, (imgsensor_info.slim_video.linelength & 0xFF)); write_cmos_sensor(0x0164, 0x00); write_cmos_sensor(0x0165, 0x00); write_cmos_sensor(0x0166, 0x0C); write_cmos_sensor(0x0167, 0xCF); write_cmos_sensor(0x0168, 0x01); write_cmos_sensor(0x0169, 0x32); write_cmos_sensor(0x016A, 0x08); write_cmos_sensor(0x016B, 0x6D); write_cmos_sensor(0x016C, 0x06); write_cmos_sensor(0x016D, 0x68); write_cmos_sensor(0x016E, 0x03); write_cmos_sensor(0x016F, 0x9E); write_cmos_sensor(0x0170, 0x01); write_cmos_sensor(0x0171, 0x01); write_cmos_sensor(0x0174, 0x01); write_cmos_sensor(0x0175, 0x01); write_cmos_sensor(0x018C, 0x0A); write_cmos_sensor(0x018D, 0x0A); write_cmos_sensor(0x0301, 0x05); write_cmos_sensor(0x0303, 0x01); write_cmos_sensor(0x0304, 0x03); write_cmos_sensor(0x0305, 0x03); write_cmos_sensor(0x0306, 0x00); write_cmos_sensor(0x0307, 0x2B); //0x51 write_cmos_sensor(0x0309, 0x0A); write_cmos_sensor(0x030B, 0x01); write_cmos_sensor(0x030C, 0x00); write_cmos_sensor(0x030D, 0x2E);//0x54 write_cmos_sensor(0x455E, 0x00); write_cmos_sensor(0x471E, 0x4B); write_cmos_sensor(0x4767, 0x0F); write_cmos_sensor(0x4750, 0x14); write_cmos_sensor(0x4540, 0x00); write_cmos_sensor(0x47B4, 0x14); write_cmos_sensor(0x4713, 
0x30); write_cmos_sensor(0x478B, 0x10); write_cmos_sensor(0x478F, 0x10); write_cmos_sensor(0x4793, 0x10); write_cmos_sensor(0x4797, 0x0E); write_cmos_sensor(0x479B, 0x0E); write_cmos_sensor(0x0100, 0x01); //@@video_720p_30fps_800Mbps if (imgsensor.ihdr_en) { } else { } } // kal_uint8 test_pattern_flag=0; static kal_uint32 set_test_pattern_mode(kal_bool enable) { LOG_INF("enable: %d\n", enable); if(imgsensor.current_scenario_id != MSDK_SCENARIO_ID_CAMERA_CAPTURE_JPEG) { if(enable) { //1640 x 1232 // enable color bar test_pattern_flag=TRUE; write_cmos_sensor(0x0600, 0x00); write_cmos_sensor(0x0601, 0x02); write_cmos_sensor(0x0624, 0x06); //W:3280---h write_cmos_sensor(0x0625, 0x68); // l write_cmos_sensor(0x0626, 0x04); //H:2464 h write_cmos_sensor(0x0627, 0xd0); // l write_cmos_sensor(0x6128, 0x00); write_cmos_sensor(0x6129, 0x02); write_cmos_sensor(0x613C, 0x06); //W h write_cmos_sensor(0x613D, 0x68); // l write_cmos_sensor(0x613E, 0x04); //H h write_cmos_sensor(0x613F, 0xd0); // l write_cmos_sensor(0x6506, 0x00); write_cmos_sensor(0x6507, 0x00); } else { //1640 x 1232 test_pattern_flag=FALSE; write_cmos_sensor(0x0600, 0x00); write_cmos_sensor(0x0601, 0x00); write_cmos_sensor(0x0624, 0x06); //W:3280---h write_cmos_sensor(0x0625, 0x68); // l write_cmos_sensor(0x0626, 0x04); //H:2464 h write_cmos_sensor(0x0627, 0xd0); // l write_cmos_sensor(0x6128, 0x00); write_cmos_sensor(0x6129, 0x02); write_cmos_sensor(0x613C, 0x06); //W h write_cmos_sensor(0x613D, 0x68); // l write_cmos_sensor(0x613E, 0x04); //H h write_cmos_sensor(0x613F, 0xd0); // l write_cmos_sensor(0x6506, 0x00); write_cmos_sensor(0x6507, 0x00); } } else { if(enable) { //3280 x 2464 // enable color bar test_pattern_flag=TRUE; write_cmos_sensor(0x0600, 0x00); write_cmos_sensor(0x0601, 0x02); write_cmos_sensor(0x0624, 0x0C); //W:3280---h write_cmos_sensor(0x0625, 0xD0); // l write_cmos_sensor(0x0626, 0x09); //H:2464 h write_cmos_sensor(0x0627, 0xA0); // l write_cmos_sensor(0x6128, 0x00); 
write_cmos_sensor(0x6129, 0x02); write_cmos_sensor(0x613C, 0x0C); //W h write_cmos_sensor(0x613D, 0xD0); // l write_cmos_sensor(0x613E, 0x09); //H h write_cmos_sensor(0x613F, 0xA0); // l write_cmos_sensor(0x6506, 0x00); write_cmos_sensor(0x6507, 0x00); } else { test_pattern_flag=FALSE; write_cmos_sensor(0x0600, 0x00); write_cmos_sensor(0x0601, 0x02); write_cmos_sensor(0x0624, 0x0C); //W:3280---h write_cmos_sensor(0x0625, 0xD0); // l write_cmos_sensor(0x0626, 0x09); //H:2464 h write_cmos_sensor(0x0627, 0xA0); // l write_cmos_sensor(0x6128, 0x00); write_cmos_sensor(0x6129, 0x02); write_cmos_sensor(0x613C, 0x0C); //W h write_cmos_sensor(0x613D, 0xD0); // l write_cmos_sensor(0x613E, 0x09); //H h write_cmos_sensor(0x613F, 0xA0); // l write_cmos_sensor(0x6506, 0x00); write_cmos_sensor(0x6507, 0x00); } } return ERROR_NONE; spin_lock(&imgsensor_drv_lock); imgsensor.test_pattern = enable; spin_unlock(&imgsensor_drv_lock); return ERROR_NONE; } /************************************************************************* * FUNCTION * get_imgsensor_id * * DESCRIPTION * This function get the sensor ID * * PARAMETERS * *sensorID : return the sensor ID * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static kal_uint32 get_imgsensor_id(UINT32 *sensor_id) { kal_uint8 i = 0; kal_uint8 retry = 2; //sensor have two i2c address 0x6c 0x6d & 0x21 0x20, we should detect the module used i2c address while (imgsensor_info.i2c_addr_table[i] != 0xff) { spin_lock(&imgsensor_drv_lock); imgsensor.i2c_write_id = imgsensor_info.i2c_addr_table[i]; spin_unlock(&imgsensor_drv_lock); do { *sensor_id = return_sensor_id(); if (*sensor_id == imgsensor_info.sensor_id) { #ifdef CONFIG_MTK_CAM_CAL read_imx219_eeprom_mtk_fmt(); #endif LOG_INF("i2c write id: 0x%x, sensor id: 0x%x\n", imgsensor.i2c_write_id,*sensor_id); return ERROR_NONE; } LOG_INF("Read sensor id fail, i2c write id: 0x%x id: 0x%x\n", imgsensor.i2c_write_id,*sensor_id); retry--; } 
while(retry > 0); i++; retry = 2; } if (*sensor_id != imgsensor_info.sensor_id) { // if Sensor ID is not correct, Must set *sensor_id to 0xFFFFFFFF *sensor_id = 0xFFFFFFFF; return ERROR_SENSOR_CONNECT_FAIL; } return ERROR_NONE; } /************************************************************************* * FUNCTION * open * * DESCRIPTION * This function initialize the registers of CMOS sensor * * PARAMETERS * None * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static kal_uint32 open(void) { kal_uint8 i = 0; kal_uint8 retry = 2; kal_uint32 sensor_id = 0; LOG_1; LOG_2; //sensor have two i2c address 0x6c 0x6d & 0x21 0x20, we should detect the module used i2c address while (imgsensor_info.i2c_addr_table[i] != 0xff) { spin_lock(&imgsensor_drv_lock); imgsensor.i2c_write_id = imgsensor_info.i2c_addr_table[i]; spin_unlock(&imgsensor_drv_lock); do { sensor_id = return_sensor_id(); if (sensor_id == imgsensor_info.sensor_id) { LOG_INF("i2c write id: 0x%x, sensor id: 0x%x\n", imgsensor.i2c_write_id,sensor_id); break; } LOG_INF("Read sensor id fail, write id:0x%x id: 0x%x\n", imgsensor.i2c_write_id,sensor_id); retry--; } while(retry > 0); i++; if (sensor_id == imgsensor_info.sensor_id) break; retry = 2; } if (imgsensor_info.sensor_id != sensor_id) return ERROR_SENSOR_CONNECT_FAIL; /* initail sequence write in */ sensor_init(); iPreGain = 0; spin_lock(&imgsensor_drv_lock); imgsensor.autoflicker_en= KAL_FALSE; imgsensor.sensor_mode = IMGSENSOR_MODE_INIT; imgsensor.pclk = imgsensor_info.pre.pclk; imgsensor.frame_length = imgsensor_info.pre.framelength; imgsensor.line_length = imgsensor_info.pre.linelength; imgsensor.min_frame_length = imgsensor_info.pre.framelength; imgsensor.dummy_pixel = 0; imgsensor.dummy_line = 0; imgsensor.ihdr_en = 0; imgsensor.test_pattern = KAL_FALSE; imgsensor.current_fps = imgsensor_info.pre.max_framerate; spin_unlock(&imgsensor_drv_lock); return ERROR_NONE; } /* open */ 
/************************************************************************* * FUNCTION * close * * DESCRIPTION * * * PARAMETERS * None * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static kal_uint32 close(void) { LOG_INF("E\n"); /*No Need to implement this function*/ return ERROR_NONE; } /* close */ /************************************************************************* * FUNCTION * preview * * DESCRIPTION * This function start the sensor preview. * * PARAMETERS * *image_window : address pointer of pixel numbers in one period of HSYNC * *sensor_config_data : address pointer of line numbers in one period of VSYNC * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static kal_uint32 preview(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_PREVIEW; imgsensor.pclk = imgsensor_info.pre.pclk; //imgsensor.video_mode = KAL_FALSE; imgsensor.line_length = imgsensor_info.pre.linelength; imgsensor.frame_length = imgsensor_info.pre.framelength; imgsensor.min_frame_length = imgsensor_info.pre.framelength; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); preview_setting(); return ERROR_NONE; } /* preview */ /************************************************************************* * FUNCTION * capture * * DESCRIPTION * This function setup the CMOS sensor in capture MY_OUTPUT mode * * PARAMETERS * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static kal_uint32 capture(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_CAPTURE; if (imgsensor.current_fps == 
imgsensor_info.cap1.max_framerate) {//PIP capture: 24fps for less than 13M, 20fps for 16M,15fps for 20M imgsensor.pclk = imgsensor_info.cap1.pclk; imgsensor.line_length = imgsensor_info.cap1.linelength; imgsensor.frame_length = imgsensor_info.cap1.framelength; imgsensor.min_frame_length = imgsensor_info.cap1.framelength; imgsensor.autoflicker_en = KAL_FALSE; } else { if (imgsensor.current_fps != imgsensor_info.cap.max_framerate) LOG_INF("Warning: current_fps %d fps is not support, so use cap1's setting: %d fps!\n",imgsensor.current_fps,imgsensor_info.cap1.max_framerate/10); imgsensor.pclk = imgsensor_info.cap.pclk; imgsensor.line_length = imgsensor_info.cap.linelength; imgsensor.frame_length = imgsensor_info.cap.framelength; imgsensor.min_frame_length = imgsensor_info.cap.framelength; imgsensor.autoflicker_en = KAL_FALSE; } spin_unlock(&imgsensor_drv_lock); capture_setting(imgsensor.current_fps); if(test_pattern_flag) { set_test_pattern_mode(TRUE); test_pattern_flag=FALSE; } return ERROR_NONE; } /* capture() */ static kal_uint32 normal_video(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_VIDEO; imgsensor.pclk = imgsensor_info.normal_video.pclk; imgsensor.line_length = imgsensor_info.normal_video.linelength; imgsensor.frame_length = imgsensor_info.normal_video.framelength; imgsensor.min_frame_length = imgsensor_info.normal_video.framelength; //imgsensor.current_fps = 300; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); normal_video_setting(imgsensor.current_fps); return ERROR_NONE; } /* normal_video */ static kal_uint32 hs_video(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_HIGH_SPEED_VIDEO; imgsensor.pclk = imgsensor_info.hs_video.pclk; //imgsensor.video_mode = 
KAL_TRUE; imgsensor.line_length = imgsensor_info.hs_video.linelength; imgsensor.frame_length = imgsensor_info.hs_video.framelength; imgsensor.min_frame_length = imgsensor_info.hs_video.framelength; imgsensor.dummy_line = 0; imgsensor.dummy_pixel = 0; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); hs_video_setting(imgsensor.current_fps); return ERROR_NONE; } /* hs_video */ static kal_uint32 slim_video(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_SLIM_VIDEO; imgsensor.pclk = imgsensor_info.slim_video.pclk; imgsensor.line_length = imgsensor_info.slim_video.linelength; imgsensor.frame_length = imgsensor_info.slim_video.framelength; imgsensor.min_frame_length = imgsensor_info.slim_video.framelength; imgsensor.dummy_line = 0; imgsensor.dummy_pixel = 0; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); slim_video_setting(); return ERROR_NONE; } /* slim_video */ /************************************************************************* * FUNCTION * Custom1 * * DESCRIPTION * This function start the sensor Custom1. 
* * PARAMETERS * *image_window : address pointer of pixel numbers in one period of HSYNC * *sensor_config_data : address pointer of line numbers in one period of VSYNC * * RETURNS * None * * GLOBALS AFFECTED * *************************************************************************/ static kal_uint32 Custom1(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_CUSTOM1; imgsensor.pclk = imgsensor_info.custom1.pclk; //imgsensor.video_mode = KAL_FALSE; imgsensor.line_length = imgsensor_info.custom1.linelength; imgsensor.frame_length = imgsensor_info.custom1.framelength; imgsensor.min_frame_length = imgsensor_info.custom1.framelength; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); preview_setting(); return ERROR_NONE; } /* Custom1 */ static kal_uint32 Custom2(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_CUSTOM2; imgsensor.pclk = imgsensor_info.custom2.pclk; //imgsensor.video_mode = KAL_FALSE; imgsensor.line_length = imgsensor_info.custom2.linelength; imgsensor.frame_length = imgsensor_info.custom2.framelength; imgsensor.min_frame_length = imgsensor_info.custom2.framelength; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); preview_setting(); return ERROR_NONE; } /* Custom2 */ static kal_uint32 Custom3(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_CUSTOM3; imgsensor.pclk = imgsensor_info.custom3.pclk; //imgsensor.video_mode = KAL_FALSE; imgsensor.line_length = imgsensor_info.custom3.linelength; imgsensor.frame_length = imgsensor_info.custom3.framelength; imgsensor.min_frame_length = 
imgsensor_info.custom3.framelength; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); preview_setting(); return ERROR_NONE; } /* Custom3 */ static kal_uint32 Custom4(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_CUSTOM4; imgsensor.pclk = imgsensor_info.custom4.pclk; //imgsensor.video_mode = KAL_FALSE; imgsensor.line_length = imgsensor_info.custom4.linelength; imgsensor.frame_length = imgsensor_info.custom4.framelength; imgsensor.min_frame_length = imgsensor_info.custom4.framelength; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); preview_setting(); return ERROR_NONE; } /* Custom4 */ static kal_uint32 Custom5(MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("E\n"); spin_lock(&imgsensor_drv_lock); imgsensor.sensor_mode = IMGSENSOR_MODE_CUSTOM5; imgsensor.pclk = imgsensor_info.custom5.pclk; //imgsensor.video_mode = KAL_FALSE; imgsensor.line_length = imgsensor_info.custom5.linelength; imgsensor.frame_length = imgsensor_info.custom5.framelength; imgsensor.min_frame_length = imgsensor_info.custom5.framelength; imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); preview_setting(); return ERROR_NONE; } /* Custom5 */ static kal_uint32 get_resolution(MSDK_SENSOR_RESOLUTION_INFO_STRUCT *sensor_resolution) { LOG_INF("E\n"); sensor_resolution->SensorFullWidth = imgsensor_info.cap.grabwindow_width; sensor_resolution->SensorFullHeight = imgsensor_info.cap.grabwindow_height; sensor_resolution->SensorPreviewWidth = imgsensor_info.pre.grabwindow_width; sensor_resolution->SensorPreviewHeight = imgsensor_info.pre.grabwindow_height; sensor_resolution->SensorVideoWidth = imgsensor_info.normal_video.grabwindow_width; sensor_resolution->SensorVideoHeight = imgsensor_info.normal_video.grabwindow_height; 
sensor_resolution->SensorHighSpeedVideoWidth = imgsensor_info.hs_video.grabwindow_width; sensor_resolution->SensorHighSpeedVideoHeight = imgsensor_info.hs_video.grabwindow_height; sensor_resolution->SensorSlimVideoWidth = imgsensor_info.slim_video.grabwindow_width; sensor_resolution->SensorSlimVideoHeight = imgsensor_info.slim_video.grabwindow_height; sensor_resolution->SensorHighSpeedVideoWidth = imgsensor_info.hs_video.grabwindow_width; sensor_resolution->SensorHighSpeedVideoHeight = imgsensor_info.hs_video.grabwindow_height; sensor_resolution->SensorSlimVideoWidth = imgsensor_info.slim_video.grabwindow_width; sensor_resolution->SensorSlimVideoHeight = imgsensor_info.slim_video.grabwindow_height; sensor_resolution->SensorCustom1Width = imgsensor_info.custom1.grabwindow_width; sensor_resolution->SensorCustom1Height = imgsensor_info.custom1.grabwindow_height; sensor_resolution->SensorCustom2Width = imgsensor_info.custom2.grabwindow_width; sensor_resolution->SensorCustom2Height = imgsensor_info.custom2.grabwindow_height; sensor_resolution->SensorCustom3Width = imgsensor_info.custom3.grabwindow_width; sensor_resolution->SensorCustom3Height = imgsensor_info.custom3.grabwindow_height; sensor_resolution->SensorCustom4Width = imgsensor_info.custom4.grabwindow_width; sensor_resolution->SensorCustom4Height = imgsensor_info.custom4.grabwindow_height; sensor_resolution->SensorCustom5Width = imgsensor_info.custom5.grabwindow_width; sensor_resolution->SensorCustom5Height = imgsensor_info.custom5.grabwindow_height; return ERROR_NONE; } /* get_resolution */ static kal_uint32 get_info(MSDK_SCENARIO_ID_ENUM scenario_id, MSDK_SENSOR_INFO_STRUCT *sensor_info, MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("scenario_id = %d\n", scenario_id); //sensor_info->SensorVideoFrameRate = imgsensor_info.normal_video.max_framerate/10; /* not use */ //sensor_info->SensorStillCaptureFrameRate= imgsensor_info.cap.max_framerate/10; /* not use */ 
//imgsensor_info->SensorWebCamCaptureFrameRate= imgsensor_info.v.max_framerate; /* not use */ sensor_info->SensorClockPolarity = SENSOR_CLOCK_POLARITY_LOW; sensor_info->SensorClockFallingPolarity = SENSOR_CLOCK_POLARITY_LOW; /* not use */ sensor_info->SensorHsyncPolarity = SENSOR_CLOCK_POLARITY_LOW; // inverse with datasheet sensor_info->SensorVsyncPolarity = SENSOR_CLOCK_POLARITY_LOW; sensor_info->SensorInterruptDelayLines = 4; /* not use */ sensor_info->SensorResetActiveHigh = FALSE; /* not use */ sensor_info->SensorResetDelayCount = 5; /* not use */ sensor_info->SensroInterfaceType = imgsensor_info.sensor_interface_type; sensor_info->MIPIsensorType = imgsensor_info.mipi_sensor_type; sensor_info->SettleDelayMode = imgsensor_info.mipi_settle_delay_mode; sensor_info->SensorOutputDataFormat = imgsensor_info.sensor_output_dataformat; sensor_info->CaptureDelayFrame = imgsensor_info.cap_delay_frame; sensor_info->PreviewDelayFrame = imgsensor_info.pre_delay_frame; sensor_info->VideoDelayFrame = imgsensor_info.video_delay_frame; sensor_info->HighSpeedVideoDelayFrame = imgsensor_info.hs_video_delay_frame; sensor_info->SlimVideoDelayFrame = imgsensor_info.slim_video_delay_frame; sensor_info->Custom1DelayFrame = imgsensor_info.custom1_delay_frame; sensor_info->Custom2DelayFrame = imgsensor_info.custom2_delay_frame; sensor_info->Custom3DelayFrame = imgsensor_info.custom3_delay_frame; sensor_info->Custom4DelayFrame = imgsensor_info.custom4_delay_frame; sensor_info->Custom5DelayFrame = imgsensor_info.custom5_delay_frame; sensor_info->SensorMasterClockSwitch = 0; /* not use */ sensor_info->SensorDrivingCurrent = imgsensor_info.isp_driving_current; sensor_info->AEShutDelayFrame = imgsensor_info.ae_shut_delay_frame; /* The frame of setting shutter default 0 for TG int */ sensor_info->AESensorGainDelayFrame = imgsensor_info.ae_sensor_gain_delay_frame; /* The frame of setting sensor gain */ sensor_info->AEISPGainDelayFrame = imgsensor_info.ae_ispGain_delay_frame; 
sensor_info->IHDR_Support = imgsensor_info.ihdr_support; sensor_info->IHDR_LE_FirstLine = imgsensor_info.ihdr_le_firstline; sensor_info->SensorModeNum = imgsensor_info.sensor_mode_num; sensor_info->SensorMIPILaneNumber = imgsensor_info.mipi_lane_num; sensor_info->SensorClockFreq = imgsensor_info.mclk; sensor_info->SensorClockDividCount = 3; /* not use */ sensor_info->SensorClockRisingCount = 0; sensor_info->SensorClockFallingCount = 2; /* not use */ sensor_info->SensorPixelClockCount = 3; /* not use */ sensor_info->SensorDataLatchCount = 2; /* not use */ sensor_info->MIPIDataLowPwr2HighSpeedTermDelayCount = 0; sensor_info->MIPICLKLowPwr2HighSpeedTermDelayCount = 0; sensor_info->SensorWidthSampling = 0; // 0 is default 1x sensor_info->SensorHightSampling = 0; // 0 is default 1x sensor_info->SensorPacketECCOrder = 1; switch (scenario_id) { case MSDK_SCENARIO_ID_CAMERA_PREVIEW: sensor_info->SensorGrabStartX = imgsensor_info.pre.startx; sensor_info->SensorGrabStartY = imgsensor_info.pre.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.pre.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_CAMERA_CAPTURE_JPEG: sensor_info->SensorGrabStartX = imgsensor_info.cap.startx; sensor_info->SensorGrabStartY = imgsensor_info.cap.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.cap.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_VIDEO_PREVIEW: sensor_info->SensorGrabStartX = imgsensor_info.normal_video.startx; sensor_info->SensorGrabStartY = imgsensor_info.normal_video.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.normal_video.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_HIGH_SPEED_VIDEO: sensor_info->SensorGrabStartX = imgsensor_info.hs_video.startx; sensor_info->SensorGrabStartY = imgsensor_info.hs_video.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.hs_video.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_SLIM_VIDEO: 
sensor_info->SensorGrabStartX = imgsensor_info.slim_video.startx; sensor_info->SensorGrabStartY = imgsensor_info.slim_video.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.slim_video.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_CUSTOM1: sensor_info->SensorGrabStartX = imgsensor_info.custom1.startx; sensor_info->SensorGrabStartY = imgsensor_info.custom1.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.custom1.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_CUSTOM2: sensor_info->SensorGrabStartX = imgsensor_info.custom2.startx; sensor_info->SensorGrabStartY = imgsensor_info.custom2.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.custom1.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_CUSTOM3: sensor_info->SensorGrabStartX = imgsensor_info.custom3.startx; sensor_info->SensorGrabStartY = imgsensor_info.custom3.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.custom1.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_CUSTOM4: sensor_info->SensorGrabStartX = imgsensor_info.custom4.startx; sensor_info->SensorGrabStartY = imgsensor_info.custom4.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.custom1.mipi_data_lp2hs_settle_dc; break; case MSDK_SCENARIO_ID_CUSTOM5: sensor_info->SensorGrabStartX = imgsensor_info.custom5.startx; sensor_info->SensorGrabStartY = imgsensor_info.custom5.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.custom1.mipi_data_lp2hs_settle_dc; break; default: sensor_info->SensorGrabStartX = imgsensor_info.pre.startx; sensor_info->SensorGrabStartY = imgsensor_info.pre.starty; sensor_info->MIPIDataLowPwr2HighSpeedSettleDelayCount = imgsensor_info.pre.mipi_data_lp2hs_settle_dc; break; } return ERROR_NONE; } /* get_info */ static kal_uint32 control(MSDK_SCENARIO_ID_ENUM scenario_id, MSDK_SENSOR_EXPOSURE_WINDOW_STRUCT *image_window, 
MSDK_SENSOR_CONFIG_STRUCT *sensor_config_data) { LOG_INF("scenario_id = %d\n", scenario_id); spin_lock(&imgsensor_drv_lock); imgsensor.current_scenario_id = scenario_id; spin_unlock(&imgsensor_drv_lock); switch (scenario_id) { case MSDK_SCENARIO_ID_CAMERA_PREVIEW: preview(image_window, sensor_config_data); break; case MSDK_SCENARIO_ID_CAMERA_CAPTURE_JPEG: capture(image_window, sensor_config_data); break; case MSDK_SCENARIO_ID_VIDEO_PREVIEW: normal_video(image_window, sensor_config_data); break; case MSDK_SCENARIO_ID_HIGH_SPEED_VIDEO: hs_video(image_window, sensor_config_data); break; case MSDK_SCENARIO_ID_SLIM_VIDEO: slim_video(image_window, sensor_config_data); break; case MSDK_SCENARIO_ID_CUSTOM1: Custom1(image_window, sensor_config_data); // Custom1 break; case MSDK_SCENARIO_ID_CUSTOM2: Custom2(image_window, sensor_config_data); // Custom1 break; case MSDK_SCENARIO_ID_CUSTOM3: Custom3(image_window, sensor_config_data); // Custom1 break; case MSDK_SCENARIO_ID_CUSTOM4: Custom4(image_window, sensor_config_data); // Custom1 break; case MSDK_SCENARIO_ID_CUSTOM5: Custom5(image_window, sensor_config_data); // Custom1 break; default: LOG_INF("Error ScenarioId setting"); preview(image_window, sensor_config_data); return ERROR_INVALID_SCENARIO_ID; } return ERROR_NONE; } /* control() */ static kal_uint32 set_video_mode(UINT16 framerate) { LOG_INF("framerate = %d\n ", framerate); // SetVideoMode Function should fix framerate if (framerate == 0) // Dynamic frame rate return ERROR_NONE; spin_lock(&imgsensor_drv_lock); if ((framerate == 300) && (imgsensor.autoflicker_en == KAL_TRUE)) imgsensor.current_fps = 296; else if ((framerate == 150) && (imgsensor.autoflicker_en == KAL_TRUE)) imgsensor.current_fps = 146; else imgsensor.current_fps = framerate; spin_unlock(&imgsensor_drv_lock); set_max_framerate(imgsensor.current_fps,1); return ERROR_NONE; } static kal_uint32 set_auto_flicker_mode(kal_bool enable, UINT16 framerate) { LOG_INF("enable = %d, framerate = %d \n", enable, 
framerate); spin_lock(&imgsensor_drv_lock); if (enable) //enable auto flicker imgsensor.autoflicker_en = KAL_TRUE; else //Cancel Auto flick imgsensor.autoflicker_en = KAL_FALSE; spin_unlock(&imgsensor_drv_lock); return ERROR_NONE; } static kal_uint32 set_max_framerate_by_scenario(MSDK_SCENARIO_ID_ENUM scenario_id, MUINT32 framerate) { kal_uint32 frame_length; LOG_INF("scenario_id = %d, framerate = %d\n", scenario_id, framerate); switch (scenario_id) { case MSDK_SCENARIO_ID_CAMERA_PREVIEW: frame_length = imgsensor_info.pre.pclk / framerate * 10 / imgsensor_info.pre.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.pre.framelength) ? (frame_length - imgsensor_info.pre.framelength) : 0; imgsensor.frame_length = imgsensor_info.pre.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_VIDEO_PREVIEW: if(framerate == 0) return ERROR_NONE; frame_length = imgsensor_info.normal_video.pclk / framerate * 10 / imgsensor_info.normal_video.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.normal_video.framelength) ? (frame_length - imgsensor_info.normal_video.framelength) : 0; imgsensor.frame_length = imgsensor_info.normal_video.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_CAMERA_CAPTURE_JPEG: frame_length = imgsensor_info.cap.pclk / framerate * 10 / imgsensor_info.cap.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.cap.framelength) ? 
(frame_length - imgsensor_info.cap.framelength) : 0; imgsensor.frame_length = imgsensor_info.cap.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_HIGH_SPEED_VIDEO: frame_length = imgsensor_info.hs_video.pclk / framerate * 10 / imgsensor_info.hs_video.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.hs_video.framelength) ? (frame_length - imgsensor_info.hs_video.framelength) : 0; imgsensor.frame_length = imgsensor_info.hs_video.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_SLIM_VIDEO: frame_length = imgsensor_info.slim_video.pclk / framerate * 10 / imgsensor_info.slim_video.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.slim_video.framelength) ? (frame_length - imgsensor_info.slim_video.framelength): 0; imgsensor.frame_length = imgsensor_info.slim_video.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); case MSDK_SCENARIO_ID_CUSTOM1: frame_length = imgsensor_info.custom1.pclk / framerate * 10 / imgsensor_info.custom1.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.custom1.framelength) ? 
(frame_length - imgsensor_info.custom1.framelength) : 0; if (imgsensor.dummy_line < 0) imgsensor.dummy_line = 0; imgsensor.frame_length = imgsensor_info.custom1.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_CUSTOM2: frame_length = imgsensor_info.custom2.pclk / framerate * 10 / imgsensor_info.custom2.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.custom2.framelength) ? (frame_length - imgsensor_info.custom2.framelength) : 0; if (imgsensor.dummy_line < 0) imgsensor.dummy_line = 0; imgsensor.frame_length = imgsensor_info.custom2.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_CUSTOM3: frame_length = imgsensor_info.custom3.pclk / framerate * 10 / imgsensor_info.custom3.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.custom3.framelength) ? (frame_length - imgsensor_info.custom3.framelength) : 0; if (imgsensor.dummy_line < 0) imgsensor.dummy_line = 0; imgsensor.frame_length = imgsensor_info.custom3.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_CUSTOM4: frame_length = imgsensor_info.custom4.pclk / framerate * 10 / imgsensor_info.custom4.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.custom4.framelength) ? 
(frame_length - imgsensor_info.custom4.framelength) : 0; if (imgsensor.dummy_line < 0) imgsensor.dummy_line = 0; imgsensor.frame_length = imgsensor_info.custom4.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; case MSDK_SCENARIO_ID_CUSTOM5: frame_length = imgsensor_info.custom5.pclk / framerate * 10 / imgsensor_info.custom5.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.custom5.framelength) ? (frame_length - imgsensor_info.custom5.framelength) : 0; if (imgsensor.dummy_line < 0) imgsensor.dummy_line = 0; imgsensor.frame_length = imgsensor_info.custom1.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); break; default: //coding with preview scenario by default frame_length = imgsensor_info.pre.pclk / framerate * 10 / imgsensor_info.pre.linelength; spin_lock(&imgsensor_drv_lock); imgsensor.dummy_line = (frame_length > imgsensor_info.pre.framelength) ? 
(frame_length - imgsensor_info.pre.framelength) : 0; imgsensor.frame_length = imgsensor_info.pre.framelength + imgsensor.dummy_line; imgsensor.min_frame_length = imgsensor.frame_length; spin_unlock(&imgsensor_drv_lock); set_dummy(); LOG_INF("error scenario_id = %d, we use preview scenario \n", scenario_id); break; } return ERROR_NONE; } static kal_uint32 get_default_framerate_by_scenario(MSDK_SCENARIO_ID_ENUM scenario_id, MUINT32 *framerate) { LOG_INF("scenario_id = %d\n", scenario_id); switch (scenario_id) { case MSDK_SCENARIO_ID_CAMERA_PREVIEW: *framerate = imgsensor_info.pre.max_framerate; break; case MSDK_SCENARIO_ID_VIDEO_PREVIEW: *framerate = imgsensor_info.normal_video.max_framerate; break; case MSDK_SCENARIO_ID_CAMERA_CAPTURE_JPEG: *framerate = imgsensor_info.cap.max_framerate; break; case MSDK_SCENARIO_ID_HIGH_SPEED_VIDEO: *framerate = imgsensor_info.hs_video.max_framerate; break; case MSDK_SCENARIO_ID_SLIM_VIDEO: *framerate = imgsensor_info.slim_video.max_framerate; break; case MSDK_SCENARIO_ID_CUSTOM1: *framerate = imgsensor_info.custom1.max_framerate; break; case MSDK_SCENARIO_ID_CUSTOM2: *framerate = imgsensor_info.custom2.max_framerate; break; case MSDK_SCENARIO_ID_CUSTOM3: *framerate = imgsensor_info.custom3.max_framerate; break; case MSDK_SCENARIO_ID_CUSTOM4: *framerate = imgsensor_info.custom4.max_framerate; break; case MSDK_SCENARIO_ID_CUSTOM5: *framerate = imgsensor_info.custom5.max_framerate; break; default: break; } return ERROR_NONE; } static kal_uint32 feature_control(MSDK_SENSOR_FEATURE_ENUM feature_id, UINT8 *feature_para,UINT32 *feature_para_len) { UINT16 *feature_return_para_16=(UINT16 *) feature_para; UINT16 *feature_data_16=(UINT16 *) feature_para; UINT32 *feature_return_para_32=(UINT32 *) feature_para; UINT32 *feature_data_32=(UINT32 *) feature_para; unsigned long long *feature_data=(unsigned long long *) feature_para; SENSOR_WINSIZE_INFO_STRUCT *wininfo; MSDK_SENSOR_REG_INFO_STRUCT *sensor_reg_data=(MSDK_SENSOR_REG_INFO_STRUCT *) 
feature_para; LOG_INF("feature_id = %d\n", feature_id); switch (feature_id) { case SENSOR_FEATURE_GET_PERIOD: *feature_return_para_16++ = imgsensor.line_length; *feature_return_para_16 = imgsensor.frame_length; *feature_para_len=4; break; case SENSOR_FEATURE_GET_PIXEL_CLOCK_FREQ: LOG_INF("feature_Control imgsensor.pclk = %d,imgsensor.current_fps = %d\n", imgsensor.pclk,imgsensor.current_fps); *feature_return_para_32 = imgsensor.pclk; *feature_para_len=4; break; case SENSOR_FEATURE_SET_ESHUTTER: set_shutter(*feature_data); break; case SENSOR_FEATURE_SET_NIGHTMODE: night_mode((BOOL) *feature_data); break; case SENSOR_FEATURE_SET_GAIN: set_gain((UINT16) *feature_data); break; case SENSOR_FEATURE_SET_FLASHLIGHT: break; case SENSOR_FEATURE_SET_ISP_MASTER_CLOCK_FREQ: break; case SENSOR_FEATURE_SET_REGISTER: write_cmos_sensor(sensor_reg_data->RegAddr, sensor_reg_data->RegData); break; case SENSOR_FEATURE_GET_REGISTER: sensor_reg_data->RegData = read_cmos_sensor(sensor_reg_data->RegAddr); break; case SENSOR_FEATURE_GET_LENS_DRIVER_ID: // get the lens driver ID from EEPROM or just return LENS_DRIVER_ID_DO_NOT_CARE // if EEPROM does not exist in camera module. 
*feature_return_para_32=LENS_DRIVER_ID_DO_NOT_CARE; *feature_para_len=4; break; case SENSOR_FEATURE_SET_VIDEO_MODE: set_video_mode(*feature_data); break; case SENSOR_FEATURE_CHECK_SENSOR_ID: get_imgsensor_id(feature_return_para_32); break; case SENSOR_FEATURE_SET_AUTO_FLICKER_MODE: set_auto_flicker_mode((BOOL)*feature_data_16,*(feature_data_16+1)); break; case SENSOR_FEATURE_SET_MAX_FRAME_RATE_BY_SCENARIO: set_max_framerate_by_scenario((MSDK_SCENARIO_ID_ENUM)*feature_data, *(feature_data+1)); break; case SENSOR_FEATURE_GET_DEFAULT_FRAME_RATE_BY_SCENARIO: get_default_framerate_by_scenario((MSDK_SCENARIO_ID_ENUM)*(feature_data), (MUINT32 *)(uintptr_t)(*(feature_data+1))); break; case SENSOR_FEATURE_SET_TEST_PATTERN: set_test_pattern_mode((BOOL)*feature_data); break; case SENSOR_FEATURE_GET_TEST_PATTERN_CHECKSUM_VALUE: //for factory mode auto testing *feature_return_para_32 = imgsensor_info.checksum_value; *feature_para_len=4; break; case SENSOR_FEATURE_SET_FRAMERATE: LOG_INF("current fps :%d\n", (UINT32)*feature_data); spin_lock(&imgsensor_drv_lock); imgsensor.current_fps = *feature_data; spin_unlock(&imgsensor_drv_lock); break; case SENSOR_FEATURE_SET_HDR: LOG_INF("ihdr enable :%d\n", (BOOL)*feature_data); spin_lock(&imgsensor_drv_lock); imgsensor.ihdr_en = *feature_data; spin_unlock(&imgsensor_drv_lock); break; case SENSOR_FEATURE_GET_CROP_INFO: LOG_INF("SENSOR_FEATURE_GET_CROP_INFO scenarioId:%d\n", (UINT32)*feature_data); wininfo = (SENSOR_WINSIZE_INFO_STRUCT *)(uintptr_t)(*(feature_data+1)); switch (*feature_data_32) { case MSDK_SCENARIO_ID_CAMERA_CAPTURE_JPEG: memcpy((void *)wininfo,(void *)&imgsensor_winsize_info[1],sizeof(SENSOR_WINSIZE_INFO_STRUCT)); break; case MSDK_SCENARIO_ID_VIDEO_PREVIEW: memcpy((void *)wininfo,(void *)&imgsensor_winsize_info[2],sizeof(SENSOR_WINSIZE_INFO_STRUCT)); break; case MSDK_SCENARIO_ID_HIGH_SPEED_VIDEO: memcpy((void *)wininfo,(void *)&imgsensor_winsize_info[3],sizeof(SENSOR_WINSIZE_INFO_STRUCT)); break; case 
MSDK_SCENARIO_ID_SLIM_VIDEO: memcpy((void *)wininfo,(void *)&imgsensor_winsize_info[4],sizeof(SENSOR_WINSIZE_INFO_STRUCT)); break; case MSDK_SCENARIO_ID_CAMERA_PREVIEW: default: memcpy((void *)wininfo,(void *)&imgsensor_winsize_info[0],sizeof(SENSOR_WINSIZE_INFO_STRUCT)); break; } case SENSOR_FEATURE_SET_IHDR_SHUTTER_GAIN: LOG_INF("SENSOR_SET_SENSOR_IHDR LE=%d, SE=%d, Gain=%d\n",(UINT16)*feature_data,(UINT16)*(feature_data+1),(UINT16)*(feature_data+2)); ihdr_write_shutter_gain((UINT16)*feature_data,(UINT16)*(feature_data+1),(UINT16)*(feature_data+2)); break; default: break; } return ERROR_NONE; } /* feature_control() */ static SENSOR_FUNCTION_STRUCT sensor_func = { open, get_info, get_resolution, feature_control, control, close }; UINT32 IMX219_MIPI_RAW_SensorInit(PSENSOR_FUNCTION_STRUCT *pfFunc) { /* To Do : Check Sensor status here */ if (pfFunc!=NULL) *pfFunc=&sensor_func; return ERROR_NONE; } /* OV5693_MIPI_RAW_SensorInit */
gpl-2.0
vidyaravipati/net-next-rocker
drivers/net/can/usb/peak_usb/pcan_usb_core.c
277
22571
/* * CAN driver for PEAK System USB adapters * Derived from the PCAN project file driver/src/pcan_usb_core.c * * Copyright (C) 2003-2010 PEAK System-Technik GmbH * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/init.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ static struct usb_device_id peak_usb_table[] = { {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, peak_usb_table); /* List of supported PCAN-USB adapters (NULL terminated list) */ static struct peak_usb_adapter *peak_usb_adapters_list[] = { &pcan_usb, &pcan_usb_pro, NULL, }; /* * dump memory */ #define DUMP_WIDTH 16 void pcan_dump_mem(char *prompt, void *p, int l) { pr_info("%s dumping %s (%d bytes):\n", PCAN_USB_DRIVER_NAME, prompt ? 
prompt : "memory", l); print_hex_dump(KERN_INFO, PCAN_USB_DRIVER_NAME " ", DUMP_PREFIX_NONE, DUMP_WIDTH, 1, p, l, false); } /* * initialize a time_ref object with usb adapter own settings */ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, struct peak_usb_adapter *adapter) { if (time_ref) { memset(time_ref, 0, sizeof(struct peak_time_ref)); time_ref->adapter = adapter; } } static void peak_usb_add_us(struct timeval *tv, u32 delta_us) { /* number of s. to add to final time */ u32 delta_s = delta_us / 1000000; delta_us -= delta_s * 1000000; tv->tv_usec += delta_us; if (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; delta_s++; } tv->tv_sec += delta_s; } /* * sometimes, another now may be more recent than current one... */ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { time_ref->ts_dev_2 = ts_now; /* should wait at least two passes before computing */ if (time_ref->tv_host.tv_sec > 0) { u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } } /* * register device timestamp as now */ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { if (time_ref->tv_host_0.tv_sec == 0) { /* use monotonic clock to correctly compute further deltas */ time_ref->tv_host_0 = ktime_to_timeval(ktime_get()); time_ref->tv_host.tv_sec = 0; } else { /* * delta_us should not be >= 2^32 => delta_s should be < 4294 * handle 32-bits wrapping here: if count of s. 
	 * reaches 4200, reset counters and change time base
	 */
		if (time_ref->tv_host.tv_sec != 0) {
			u32 delta_s = time_ref->tv_host.tv_sec
						- time_ref->tv_host_0.tv_sec;
			if (delta_s > 4200) {
				time_ref->tv_host_0 = time_ref->tv_host;
				time_ref->ts_total = 0;
			}
		}

		time_ref->tv_host = ktime_to_timeval(ktime_get());
		time_ref->tick_count++;
	}

	time_ref->ts_dev_1 = time_ref->ts_dev_2;
	peak_usb_update_ts_now(time_ref, ts_now);
}

/*
 * compute timeval according to current ts and time_ref data
 */
void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
			struct timeval *tv)
{
	/* protect from getting timeval before setting now */
	if (time_ref->tv_host.tv_sec > 0) {
		u64 delta_us;

		/* device timestamp counter may have wrapped since ts_dev_2 */
		delta_us = ts - time_ref->ts_dev_2;
		if (ts < time_ref->ts_dev_2)
			delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;

		delta_us += time_ref->ts_total;

		/* convert device ticks to microseconds (fixed-point scale) */
		delta_us *= time_ref->adapter->us_per_ts_scale;
		delta_us >>= time_ref->adapter->us_per_ts_shift;

		*tv = time_ref->tv_host_0;
		peak_usb_add_us(tv, (u32)delta_us);
	} else {
		/* no reference yet: fall back to current host time */
		*tv = ktime_to_timeval(ktime_get());
	}
}

/*
 * callback for bulk Rx urb
 */
static void peak_usb_read_bulk_callback(struct urb *urb)
{
	struct peak_usb_device *dev = urb->context;
	struct net_device *netdev;
	int err;

	netdev = dev->netdev;

	if (!netif_device_present(netdev))
		return;

	/* check reception status */
	switch (urb->status) {
	case 0:
		/* success */
		break;

	case -EILSEQ:
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* urb unlinked or device gone: do not resubmit */
		return;

	default:
		if (net_ratelimit())
			netdev_err(netdev,
				   "Rx urb aborted (%d)\n", urb->status);
		goto resubmit_urb;
	}

	/* protect from any incoming empty msgs */
	if ((urb->actual_length > 0) && (dev->adapter->dev_decode_buf)) {
		/* handle these kinds of msgs only if _start callback called */
		if (dev->state & PCAN_USB_STATE_STARTED) {
			err = dev->adapter->dev_decode_buf(dev, urb);
			if (err)
				pcan_dump_mem("received usb message",
					      urb->transfer_buffer,
					      urb->transfer_buffer_length);
		}
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_rcvbulkpipe(dev->udev, dev->ep_msg_in),
			  urb->transfer_buffer, dev->adapter->rx_buffer_size,
			  peak_usb_read_bulk_callback, dev);

	usb_anchor_urb(urb, &dev->rx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (!err)
		return;

	usb_unanchor_urb(urb);

	if (err == -ENODEV)
		netif_device_detach(netdev);
	else
		netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
			   err);
}

/*
 * callback for bulk Tx urb
 */
static void peak_usb_write_bulk_callback(struct urb *urb)
{
	struct peak_tx_urb_context *context = urb->context;
	struct peak_usb_device *dev;
	struct net_device *netdev;

	BUG_ON(!context);

	dev = context->dev;
	netdev = dev->netdev;

	atomic_dec(&dev->active_tx_urbs);

	if (!netif_device_present(netdev))
		return;

	/* check tx status */
	switch (urb->status) {
	case 0:
		/* transmission complete */
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += context->dlc;

		/* prevent tx timeout */
		netdev->trans_start = jiffies;
		break;

	default:
		if (net_ratelimit())
			netdev_err(netdev, "Tx urb aborted (%d)\n",
				   urb->status);
		/*
		 * NOTE: default is deliberately placed BEFORE the labels
		 * below so that the expected unlink/shutdown errors skip
		 * the log message; default itself falls through to break.
		 */
	case -EPROTO:
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		break;
	}

	/* should always release echo skb and corresponding context */
	can_get_echo_skb(netdev, context->echo_index);
	context->echo_index = PCAN_USB_MAX_TX_URBS;

	/* do wakeup tx queue in case of success only */
	if (!urb->status)
		netif_wake_queue(netdev);
}

/*
 * called by netdev to send one skb on the CAN interface.
 */
static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct peak_usb_device *dev = netdev_priv(netdev);
	struct peak_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct urb *urb;
	u8 *obuf;
	int i, err;
	size_t size = dev->adapter->tx_buffer_size;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* find a free tx context (echo_index == MAX marks a free slot) */
	for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
		if (dev->tx_contexts[i].echo_index == PCAN_USB_MAX_TX_URBS) {
			context = dev->tx_contexts + i;
			break;
		}

	if (!context) {
		/* should not occur except during restart */
		return NETDEV_TX_BUSY;
	}

	urb = context->urb;
	obuf = urb->transfer_buffer;

	/* let the adapter-specific code build the usb frame */
	err = dev->adapter->dev_encode_msg(dev, skb, obuf, &size);
	if (err) {
		if (net_ratelimit())
			netdev_err(netdev, "packet dropped\n");
		dev_kfree_skb(skb);
		stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&dev->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		/* unwind everything done above, in reverse order */
		can_free_echo_skb(netdev, context->echo_index);

		usb_unanchor_urb(urb);

		/* this context is not used in fact */
		context->echo_index = PCAN_USB_MAX_TX_URBS;

		atomic_dec(&dev->active_tx_urbs);

		switch (err) {
		case -ENODEV:
			netif_device_detach(netdev);
			break;
		default:
			netdev_warn(netdev, "tx urb submitting failed err=%d\n",
				    err);
			/* fallthrough: count the drop in every non-ENODEV case */
		case -ENOENT:
			/* cable unplugged */
			stats->tx_dropped++;
		}
	} else {
		netdev->trans_start = jiffies;

		/* slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
			netif_stop_queue(netdev);
	}

	return NETDEV_TX_OK;
}

/*
 * start the CAN interface.
 * Rx and Tx urbs are allocated here. Rx urbs are submitted here.
 */
static int peak_usb_start(struct peak_usb_device *dev)
{
	struct net_device *netdev = dev->netdev;
	int err, i;

	/* phase 1: allocate and submit all Rx urbs */
	for (i = 0; i < PCAN_USB_MAX_RX_URBS; i++) {
		struct urb *urb;
		u8 *buf;

		/* create a URB, and a buffer for it, to receive usb messages */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			netdev_err(netdev, "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL);
		if (!buf) {
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_rcvbulkpipe(dev->udev, dev->ep_msg_in),
				  buf, dev->adapter->rx_buffer_size,
				  peak_usb_read_bulk_callback, dev);

		/* ask last usb_free_urb() to also kfree() transfer_buffer */
		urb->transfer_flags |= URB_FREE_BUFFER;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			if (err == -ENODEV)
				netif_device_detach(dev->netdev);

			usb_unanchor_urb(urb);
			kfree(buf);
			usb_free_urb(urb);
			break;
		}

		/* drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}

	/* did we submit any URBs? Warn if we were not able to submit all */
	if (i < PCAN_USB_MAX_RX_URBS) {
		if (i == 0) {
			netdev_err(netdev, "couldn't setup any rx URB\n");
			return err;
		}

		netdev_warn(netdev, "rx performance may be slow\n");
	}

	/* phase 2: pre-alloc tx buffers and corresponding urbs */
	for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
		struct peak_tx_urb_context *context;
		struct urb *urb;
		u8 *buf;

		/* create a URB and a buffer for it, to transmit usb messages */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			netdev_err(netdev, "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL);
		if (!buf) {
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		context = dev->tx_contexts + i;
		context->dev = dev;
		context->urb = urb;

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_sndbulkpipe(dev->udev, dev->ep_msg_out),
				  buf, dev->adapter->tx_buffer_size,
				  peak_usb_write_bulk_callback, context);

		/* ask last usb_free_urb() to also kfree() transfer_buffer */
		urb->transfer_flags |= URB_FREE_BUFFER;
	}

	/* warn if we were not able to allocate enough tx contexts */
	if (i < PCAN_USB_MAX_TX_URBS) {
		if (i == 0) {
			netdev_err(netdev, "couldn't setup any tx URB\n");
			goto err_tx;
		}

		netdev_warn(netdev, "tx performance may be slow\n");
	}

	if (dev->adapter->dev_start) {
		err = dev->adapter->dev_start(dev);
		if (err)
			goto err_adapter;
	}

	dev->state |= PCAN_USB_STATE_STARTED;

	/* can set bus on now */
	if (dev->adapter->dev_set_bus) {
		err = dev->adapter->dev_set_bus(dev, 1);
		if (err)
			goto err_adapter;
	}

	dev->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

err_adapter:
	if (err == -ENODEV)
		netif_device_detach(dev->netdev);

	netdev_warn(netdev, "couldn't submit control: %d\n", err);

	/* tx urbs were never submitted: free them (and their buffers) here */
	for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
		usb_free_urb(dev->tx_contexts[i].urb);
		dev->tx_contexts[i].urb = NULL;
	}
err_tx:
	usb_kill_anchored_urbs(&dev->rx_submitted);

	return err;
}

/*
 * called by netdev to open the corresponding CAN interface.
*/ static int peak_usb_ndo_open(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = peak_usb_start(dev); if (err) { netdev_err(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } /* * unlink in-flight Rx and Tx urbs and free their memory. */ static void peak_usb_unlink_all_urbs(struct peak_usb_device *dev) { int i; /* free all Rx (submitted) urbs */ usb_kill_anchored_urbs(&dev->rx_submitted); /* free unsubmitted Tx urbs first */ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { struct urb *urb = dev->tx_contexts[i].urb; if (!urb || dev->tx_contexts[i].echo_index != PCAN_USB_MAX_TX_URBS) { /* * this urb is already released or always submitted, * let usb core free by itself */ continue; } usb_free_urb(urb); dev->tx_contexts[i].urb = NULL; } /* then free all submitted Tx urbs */ usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); } /* * called by netdev to close the corresponding CAN interface. 
*/ static int peak_usb_ndo_stop(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); dev->state &= ~PCAN_USB_STATE_STARTED; netif_stop_queue(netdev); /* unlink all pending urbs and free used memory */ peak_usb_unlink_all_urbs(dev); if (dev->adapter->dev_stop) dev->adapter->dev_stop(dev); close_candev(netdev); dev->can.state = CAN_STATE_STOPPED; /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); if (err) return err; } return 0; } /* * handle end of waiting for the device to reset */ void peak_usb_restart_complete(struct peak_usb_device *dev) { /* finally MUST update can state */ dev->can.state = CAN_STATE_ERROR_ACTIVE; /* netdev queue can be awaken now */ netif_wake_queue(dev->netdev); } void peak_usb_async_complete(struct urb *urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } /* * device (auto-)restart mechanism runs in a timer context => * MUST handle restart with asynchronous usb transfers */ static int peak_usb_restart(struct peak_usb_device *dev) { struct urb *urb; int err; u8 *buf; /* * if device doesn't define any asynchronous restart handler, simply * wake the netdev queue up */ if (!dev->adapter->dev_restart_async) { peak_usb_restart_complete(dev); return 0; } /* first allocate a urb to handle the asynchronous steps */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { netdev_err(dev->netdev, "no memory left for urb\n"); return -ENOMEM; } /* also allocate enough space for the commands to send */ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC); if (!buf) { usb_free_urb(urb); return -ENOMEM; } /* call the device specific handler for the restart */ err = dev->adapter->dev_restart_async(dev, urb, buf); if (!err) return 0; kfree(buf); usb_free_urb(urb); return err; } /* * candev callback used to change CAN mode. * Warning: this is called from a timer context! 
*/ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (mode) { case CAN_MODE_START: err = peak_usb_restart(dev); if (err) netdev_err(netdev, "couldn't start device (err %d)\n", err); break; default: return -EOPNOTSUPP; } return err; } /* * candev callback used to set device bitrate. */ static int peak_usb_set_bittiming(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); struct can_bittiming *bt = &dev->can.bittiming; if (dev->adapter->dev_set_bittiming) { int err = dev->adapter->dev_set_bittiming(dev, bt); if (err) netdev_info(netdev, "couldn't set bitrate (err %d)\n", err); return err; } return 0; } static const struct net_device_ops peak_usb_netdev_ops = { .ndo_open = peak_usb_ndo_open, .ndo_stop = peak_usb_ndo_stop, .ndo_start_xmit = peak_usb_ndo_start_xmit, .ndo_change_mtu = can_change_mtu, }; /* * create one device which is attached to CAN controller #ctrl_idx of the * usb adapter. 
*/ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, struct usb_interface *intf, int ctrl_idx) { struct usb_device *usb_dev = interface_to_usbdev(intf); int sizeof_candev = peak_usb_adapter->sizeof_dev_private; struct peak_usb_device *dev; struct net_device *netdev; int i, err; u16 tmp16; if (sizeof_candev < sizeof(struct peak_usb_device)) sizeof_candev = sizeof(struct peak_usb_device); netdev = alloc_candev(sizeof_candev, PCAN_USB_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "%s: couldn't alloc candev\n", PCAN_USB_DRIVER_NAME); return -ENOMEM; } dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; goto lbl_set_intf_data; } dev->udev = usb_dev; dev->netdev = netdev; dev->adapter = peak_usb_adapter; dev->ctrl_idx = ctrl_idx; dev->state = PCAN_USB_STATE_CONNECTED; dev->ep_msg_in = peak_usb_adapter->ep_msg_in; dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx]; dev->can.clock = peak_usb_adapter->clock; dev->can.bittiming_const = &peak_usb_adapter->bittiming_const; dev->can.do_set_bittiming = peak_usb_set_bittiming; dev->can.do_set_mode = peak_usb_set_mode; dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY; netdev->netdev_ops = &peak_usb_netdev_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&dev->rx_submitted); init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = PCAN_USB_MAX_TX_URBS; dev->prev_siblings = usb_get_intfdata(intf); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); netdev->dev_id = ctrl_idx; err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); goto lbl_free_cmd_buf; } if (dev->prev_siblings) (dev->prev_siblings)->next_siblings = dev; /* keep hw revision into the netdevice */ tmp16 = 
		le16_to_cpu(usb_dev->descriptor.bcdDevice);
	dev->device_rev = tmp16 >> 8;

	if (dev->adapter->dev_init) {
		err = dev->adapter->dev_init(dev);
		if (err)
			goto lbl_free_cmd_buf;
	}

	/* set bus off */
	if (dev->adapter->dev_set_bus) {
		err = dev->adapter->dev_set_bus(dev, 0);
		if (err)
			goto lbl_free_cmd_buf;
	}

	/* get device number early */
	if (dev->adapter->dev_get_device_id)
		dev->adapter->dev_get_device_id(dev, &dev->device_number);

	netdev_info(netdev, "attached to %s channel %u (device %u)\n",
		    peak_usb_adapter->name, ctrl_idx, dev->device_number);

	return 0;

lbl_free_cmd_buf:
	kfree(dev->cmd_buf);

lbl_set_intf_data:
	usb_set_intfdata(intf, dev->prev_siblings);
	free_candev(netdev);

	return err;
}

/*
 * called by the usb core when the device is unplugged from the system
 */
static void peak_usb_disconnect(struct usb_interface *intf)
{
	struct peak_usb_device *dev;

	/* unregister as many netdev devices as siblings */
	for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
		struct net_device *netdev = dev->netdev;
		char name[IFNAMSIZ];

		dev->state &= ~PCAN_USB_STATE_CONNECTED;
		/* keep a copy of the name: netdev is freed before dev_info() */
		strncpy(name, netdev->name, IFNAMSIZ);

		unregister_netdev(netdev);
		free_candev(netdev);

		kfree(dev->cmd_buf);
		dev->next_siblings = NULL;
		if (dev->adapter->dev_free)
			dev->adapter->dev_free(dev);

		dev_info(&intf->dev, "%s removed\n", name);
	}

	usb_set_intfdata(intf, NULL);
}

/*
 * probe function for new PEAK-System devices
 */
static int peak_usb_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct peak_usb_adapter *peak_usb_adapter, **pp;
	int i, err = -ENOMEM;

	/* NOTE(review): usb_dev is already initialized above; this
	 * re-assignment is redundant (harmless) */
	usb_dev = interface_to_usbdev(intf);

	/* get corresponding PCAN-USB adapter */
	for (pp = peak_usb_adapters_list; *pp; pp++)
		if ((*pp)->device_id == usb_dev->descriptor.idProduct)
			break;

	peak_usb_adapter = *pp;
	if (!peak_usb_adapter) {
		/* should never come except device_id bad usage in this file */
		pr_err("%s: didn't find device id. 0x%x in devices list\n",
		       PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct);
		return -ENODEV;
	}

	/* got corresponding adapter: check if it handles current interface */
	if (peak_usb_adapter->intf_probe) {
		err = peak_usb_adapter->intf_probe(intf);
		if (err)
			return err;
	}

	for (i = 0; i < peak_usb_adapter->ctrl_count; i++) {
		err = peak_usb_create_dev(peak_usb_adapter, intf, i);
		if (err) {
			/* deregister already created devices */
			peak_usb_disconnect(intf);
			break;
		}
	}

	return err;
}

/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver peak_usb_driver = {
	.name = PCAN_USB_DRIVER_NAME,
	.disconnect = peak_usb_disconnect,
	.probe = peak_usb_probe,
	.id_table = peak_usb_table,
};

static int __init peak_usb_init(void)
{
	int err;

	/* register this driver with the USB subsystem */
	err = usb_register(&peak_usb_driver);
	if (err)
		pr_err("%s: usb_register failed (err %d)\n",
		       PCAN_USB_DRIVER_NAME, err);

	return err;
}

static int peak_usb_do_device_exit(struct device *d, void *arg)
{
	struct usb_interface *intf = to_usb_interface(d);
	struct peak_usb_device *dev;

	/* stop as many netdev devices as siblings */
	for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
		struct net_device *netdev = dev->netdev;

		if (netif_device_present(netdev))
			if (dev->adapter->dev_exit)
				dev->adapter->dev_exit(dev);
	}

	return 0;
}

static void __exit peak_usb_exit(void)
{
	int err;

	/* last chance do send any synchronous commands here */
	err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL,
				     NULL, peak_usb_do_device_exit);
	if (err)
		pr_err("%s: failed to stop all can devices (err %d)\n",
		       PCAN_USB_DRIVER_NAME, err);

	/* deregister this driver with the USB subsystem */
	usb_deregister(&peak_usb_driver);

	pr_info("%s: PCAN-USB interfaces driver unloaded\n",
		PCAN_USB_DRIVER_NAME);
}

module_init(peak_usb_init);
module_exit(peak_usb_exit);
gpl-2.0
djwong/linux-xfs-dev
drivers/staging/comedi/drivers/addi_apci_2032.c
277
8507
// SPDX-License-Identifier: GPL-2.0+ /* * addi_apci_2032.c * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. * Project manager: Eric Stolz * * ADDI-DATA GmbH * Dieselstrasse 3 * D-77833 Ottersweier * Tel: +19(0)7223/9493-0 * Fax: +49(0)7223/9493-92 * http://www.addi-data.com * info@addi-data.com */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include "../comedi_pci.h" #include "addi_watchdog.h" /* * PCI bar 1 I/O Register map */ #define APCI2032_DO_REG 0x00 #define APCI2032_INT_CTRL_REG 0x04 #define APCI2032_INT_CTRL_VCC_ENA BIT(0) #define APCI2032_INT_CTRL_CC_ENA BIT(1) #define APCI2032_INT_STATUS_REG 0x08 #define APCI2032_INT_STATUS_VCC BIT(0) #define APCI2032_INT_STATUS_CC BIT(1) #define APCI2032_STATUS_REG 0x0c #define APCI2032_STATUS_IRQ BIT(0) #define APCI2032_WDOG_REG 0x10 struct apci2032_int_private { spinlock_t spinlock; /* protects the following members */ bool active; /* an async command is running */ unsigned char enabled_isns; /* mask of enabled interrupt channels */ }; static int apci2032_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { s->state = inl(dev->iobase + APCI2032_DO_REG); if (comedi_dio_update_state(s, data)) outl(s->state, dev->iobase + APCI2032_DO_REG); data[1] = s->state; return insn->n; } static int apci2032_int_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = inl(dev->iobase + APCI2032_INT_STATUS_REG) & 3; return insn->n; } static void apci2032_int_stop(struct comedi_device *dev, struct comedi_subdevice *s) { struct apci2032_int_private *subpriv = s->private; subpriv->active = false; subpriv->enabled_isns = 0; outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG); } static int apci2032_int_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; /* Step 1 : check if triggers are trivially valid */ 
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW); err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT); err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= comedi_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* Step 4: fix up any arguments */ /* Step 5: check channel list if it exists */ return 0; } static int apci2032_int_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; struct apci2032_int_private *subpriv = s->private; unsigned char enabled_isns; unsigned int n; unsigned long flags; enabled_isns = 0; for (n = 0; n < cmd->chanlist_len; n++) enabled_isns |= 1 << CR_CHAN(cmd->chanlist[n]); spin_lock_irqsave(&subpriv->spinlock, flags); subpriv->enabled_isns = enabled_isns; subpriv->active = true; outl(enabled_isns, dev->iobase + APCI2032_INT_CTRL_REG); spin_unlock_irqrestore(&subpriv->spinlock, flags); return 0; } static int apci2032_int_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct apci2032_int_private *subpriv = s->private; unsigned long flags; spin_lock_irqsave(&subpriv->spinlock, flags); if (subpriv->active) apci2032_int_stop(dev, s); spin_unlock_irqrestore(&subpriv->spinlock, flags); return 0; } static 
irqreturn_t apci2032_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = &s->async->cmd; struct apci2032_int_private *subpriv; unsigned int val; if (!dev->attached) return IRQ_NONE; /* Check if VCC OR CC interrupt has occurred */ val = inl(dev->iobase + APCI2032_STATUS_REG) & APCI2032_STATUS_IRQ; if (!val) return IRQ_NONE; subpriv = s->private; spin_lock(&subpriv->spinlock); val = inl(dev->iobase + APCI2032_INT_STATUS_REG) & 3; /* Disable triggered interrupt sources. */ outl(~val & 3, dev->iobase + APCI2032_INT_CTRL_REG); /* * Note: We don't reenable the triggered interrupt sources because they * are level-sensitive, hardware error status interrupt sources and * they'd keep triggering interrupts repeatedly. */ if (subpriv->active && (val & subpriv->enabled_isns) != 0) { unsigned short bits = 0; int i; /* Bits in scan data correspond to indices in channel list. */ for (i = 0; i < cmd->chanlist_len; i++) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); if (val & (1 << chan)) bits |= (1 << i); } comedi_buf_write_samples(s, &bits, 1); if (cmd->stop_src == TRIG_COUNT && s->async->scans_done >= cmd->stop_arg) s->async->events |= COMEDI_CB_EOA; } spin_unlock(&subpriv->spinlock); comedi_handle_events(dev, s); return IRQ_HANDLED; } static int apci2032_reset(struct comedi_device *dev) { outl(0x0, dev->iobase + APCI2032_DO_REG); outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG); addi_watchdog_reset(dev->iobase + APCI2032_WDOG_REG); return 0; } static int apci2032_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); struct comedi_subdevice *s; int ret; ret = comedi_pci_enable(dev); if (ret) return ret; dev->iobase = pci_resource_start(pcidev, 1); apci2032_reset(dev); if (pcidev->irq > 0) { ret = request_irq(pcidev->irq, apci2032_interrupt, IRQF_SHARED, dev->board_name, dev); if (ret == 0) dev->irq = pcidev->irq; } ret = 
comedi_alloc_subdevices(dev, 3); if (ret) return ret; /* Initialize the digital output subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 32; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = apci2032_do_insn_bits; /* Initialize the watchdog subdevice */ s = &dev->subdevices[1]; ret = addi_watchdog_init(s, dev->iobase + APCI2032_WDOG_REG); if (ret) return ret; /* Initialize the interrupt subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 2; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = apci2032_int_insn_bits; if (dev->irq) { struct apci2032_int_private *subpriv; dev->read_subdev = s; subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL); if (!subpriv) return -ENOMEM; spin_lock_init(&subpriv->spinlock); s->private = subpriv; s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED; s->len_chanlist = 2; s->do_cmdtest = apci2032_int_cmdtest; s->do_cmd = apci2032_int_cmd; s->cancel = apci2032_int_cancel; } return 0; } static void apci2032_detach(struct comedi_device *dev) { if (dev->iobase) apci2032_reset(dev); comedi_pci_detach(dev); if (dev->read_subdev) kfree(dev->read_subdev->private); } static struct comedi_driver apci2032_driver = { .driver_name = "addi_apci_2032", .module = THIS_MODULE, .auto_attach = apci2032_auto_attach, .detach = apci2032_detach, }; static int apci2032_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &apci2032_driver, id->driver_data); } static const struct pci_device_id apci2032_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1004) }, { 0 } }; MODULE_DEVICE_TABLE(pci, apci2032_pci_table); static struct pci_driver apci2032_pci_driver = { .name = "addi_apci_2032", .id_table = apci2032_pci_table, .probe = apci2032_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(apci2032_driver, apci2032_pci_driver); MODULE_AUTHOR("Comedi 
http://www.comedi.org"); MODULE_DESCRIPTION("ADDI-DATA APCI-2032, 32 channel DO boards"); MODULE_LICENSE("GPL");
gpl-2.0
Mrcl1450/f2fs
arch/arm/mach-omap2/prm_common.c
277
22221
/* * OMAP2+ common Power & Reset Management (PRM) IP block functions * * Copyright (C) 2011 Texas Instruments, Inc. * Tero Kristo <t-kristo@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * * For historical purposes, the API used to configure the PRM * interrupt handler refers to it as the "PRCM interrupt." The * underlying registers are located in the PRM on OMAP3/4. * * XXX This code should eventually be moved to a PRM driver. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/clk-provider.h> #include <linux/clk/ti.h> #include "soc.h" #include "prm2xxx_3xxx.h" #include "prm2xxx.h" #include "prm3xxx.h" #include "prm33xx.h" #include "prm44xx.h" #include "prm54xx.h" #include "prm7xx.h" #include "prcm43xx.h" #include "common.h" #include "clock.h" #include "cm.h" #include "control.h" /* * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs * XXX this is technically not needed, since * omap_prcm_register_chain_handler() could allocate this based on the * actual amount of memory needed for the SoC */ #define OMAP_PRCM_MAX_NR_PENDING_REG 2 /* * prcm_irq_chips: an array of all of the "generic IRQ chips" in use * by the PRCM interrupt handler code. There will be one 'chip' per * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have * one "chip" and OMAP4 will have two.) */ static struct irq_chip_generic **prcm_irq_chips; /* * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code * is currently running on. Defined and passed by initialization code * that calls omap_prcm_register_chain_handler(). 
*/ static struct omap_prcm_irq_setup *prcm_irq_setup; /* prm_base: base virtual address of the PRM IP block */ void __iomem *prm_base; u16 prm_features; /* * prm_ll_data: function pointers to SoC-specific implementations of * common PRM functions */ static struct prm_ll_data null_prm_ll_data; static struct prm_ll_data *prm_ll_data = &null_prm_ll_data; /* Private functions */ /* * Move priority events from events to priority_events array */ static void omap_prcm_events_filter_priority(unsigned long *events, unsigned long *priority_events) { int i; for (i = 0; i < prcm_irq_setup->nr_regs; i++) { priority_events[i] = events[i] & prcm_irq_setup->priority_mask[i]; events[i] ^= priority_events[i]; } } /* * PRCM Interrupt Handler * * This is a common handler for the OMAP PRCM interrupts. Pending * interrupts are detected by a call to prcm_pending_events and * dispatched accordingly. Clearing of the wakeup events should be * done by the SoC specific individual handlers. */ static void omap_prcm_irq_handler(struct irq_desc *desc) { unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int virtirq; int nr_irq = prcm_irq_setup->nr_regs * 32; /* * If we are suspended, mask all interrupts from PRCM level, * this does not ack them, and they will be pending until we * re-enable the interrupts, at which point the * omap_prcm_irq_handler will be executed again. The * _save_and_clear_irqen() function must ensure that the PRM * write to disable all IRQs has reached the PRM before * returning, or spurious PRCM interrupts may occur during * suspend. 
*/ if (prcm_irq_setup->suspended) { prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask); prcm_irq_setup->suspend_save_flag = true; } /* * Loop until all pending irqs are handled, since * generic_handle_irq() can cause new irqs to come */ while (!prcm_irq_setup->suspended) { prcm_irq_setup->read_pending_irqs(pending); /* No bit set, then all IRQs are handled */ if (find_first_bit(pending, nr_irq) >= nr_irq) break; omap_prcm_events_filter_priority(pending, priority_pending); /* * Loop on all currently pending irqs so that new irqs * cannot starve previously pending irqs */ /* Serve priority events first */ for_each_set_bit(virtirq, priority_pending, nr_irq) generic_handle_irq(prcm_irq_setup->base_irq + virtirq); /* Serve normal events next */ for_each_set_bit(virtirq, pending, nr_irq) generic_handle_irq(prcm_irq_setup->base_irq + virtirq); } if (chip->irq_ack) chip->irq_ack(&desc->irq_data); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); chip->irq_unmask(&desc->irq_data); prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */ } /* Public functions */ /** * omap_prcm_event_to_irq - given a PRCM event name, returns the * corresponding IRQ on which the handler should be registered * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq * * Returns the Linux internal IRQ ID corresponding to @name upon success, * or -ENOENT upon failure. */ int omap_prcm_event_to_irq(const char *name) { int i; if (!prcm_irq_setup || !name) return -ENOENT; for (i = 0; i < prcm_irq_setup->nr_irqs; i++) if (!strcmp(prcm_irq_setup->irqs[i].name, name)) return prcm_irq_setup->base_irq + prcm_irq_setup->irqs[i].offset; return -ENOENT; } /** * omap_prcm_irq_cleanup - reverses memory allocated and other steps * done by omap_prcm_register_chain_handler() * * No return value. 
*/ void omap_prcm_irq_cleanup(void) { unsigned int irq; int i; if (!prcm_irq_setup) { pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n"); return; } if (prcm_irq_chips) { for (i = 0; i < prcm_irq_setup->nr_regs; i++) { if (prcm_irq_chips[i]) irq_remove_generic_chip(prcm_irq_chips[i], 0xffffffff, 0, 0); prcm_irq_chips[i] = NULL; } kfree(prcm_irq_chips); prcm_irq_chips = NULL; } kfree(prcm_irq_setup->saved_mask); prcm_irq_setup->saved_mask = NULL; kfree(prcm_irq_setup->priority_mask); prcm_irq_setup->priority_mask = NULL; if (prcm_irq_setup->xlate_irq) irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq); else irq = prcm_irq_setup->irq; irq_set_chained_handler(irq, NULL); if (prcm_irq_setup->base_irq > 0) irq_free_descs(prcm_irq_setup->base_irq, prcm_irq_setup->nr_regs * 32); prcm_irq_setup->base_irq = 0; } void omap_prcm_irq_prepare(void) { prcm_irq_setup->suspended = true; } void omap_prcm_irq_complete(void) { prcm_irq_setup->suspended = false; /* If we have not saved the masks, do not attempt to restore */ if (!prcm_irq_setup->suspend_save_flag) return; prcm_irq_setup->suspend_save_flag = false; /* * Re-enable all masked PRCM irq sources, this causes the PRCM * interrupt to fire immediately if the events were masked * previously in the chain handler */ prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask); } /** * omap_prcm_register_chain_handler - initializes the prcm chained interrupt * handler based on provided parameters * @irq_setup: hardware data about the underlying PRM/PRCM * * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up * one generic IRQ chip per PRM interrupt status/enable register pair. * Returns 0 upon success, -EINVAL if called twice or if invalid * arguments are passed, or -ENOMEM on any other error. 
*/ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup) { int nr_regs; u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG]; int offset, i; struct irq_chip_generic *gc; struct irq_chip_type *ct; unsigned int irq; if (!irq_setup) return -EINVAL; nr_regs = irq_setup->nr_regs; if (prcm_irq_setup) { pr_err("PRCM: already initialized; won't reinitialize\n"); return -EINVAL; } if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) { pr_err("PRCM: nr_regs too large\n"); return -EINVAL; } prcm_irq_setup = irq_setup; prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL); prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); if (!prcm_irq_chips || !prcm_irq_setup->saved_mask || !prcm_irq_setup->priority_mask) { pr_err("PRCM: kzalloc failed\n"); goto err; } memset(mask, 0, sizeof(mask)); for (i = 0; i < irq_setup->nr_irqs; i++) { offset = irq_setup->irqs[i].offset; mask[offset >> 5] |= 1 << (offset & 0x1f); if (irq_setup->irqs[i].priority) irq_setup->priority_mask[offset >> 5] |= 1 << (offset & 0x1f); } if (irq_setup->xlate_irq) irq = irq_setup->xlate_irq(irq_setup->irq); else irq = irq_setup->irq; irq_set_chained_handler(irq, omap_prcm_irq_handler); irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32, 0); if (irq_setup->base_irq < 0) { pr_err("PRCM: failed to allocate irq descs: %d\n", irq_setup->base_irq); goto err; } for (i = 0; i < irq_setup->nr_regs; i++) { gc = irq_alloc_generic_chip("PRCM", 1, irq_setup->base_irq + i * 32, prm_base, handle_level_irq); if (!gc) { pr_err("PRCM: failed to allocate generic chip\n"); goto err; } ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->regs.ack = irq_setup->ack + i * 4; ct->regs.mask = irq_setup->mask + i * 4; irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0); prcm_irq_chips[i] = gc; } if 
(of_have_populated_dt()) { int irq = omap_prcm_event_to_irq("io"); omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain); } return 0; err: omap_prcm_irq_cleanup(); return -ENOMEM; } /** * omap2_set_globals_prm - set the PRM base address (for early use) * @prm: PRM base virtual address * * XXX Will be replaced when the PRM/CM drivers are completed. */ void __init omap2_set_globals_prm(void __iomem *prm) { prm_base = prm; } /** * prm_read_reset_sources - return the sources of the SoC's last reset * * Return a u32 bitmask representing the reset sources that caused the * SoC to reset. The low-level per-SoC functions called by this * function remap the SoC-specific reset source bits into an * OMAP-common set of reset source bits, defined in * arch/arm/mach-omap2/prm.h. Returns the standardized reset source * u32 bitmask from the hardware upon success, or returns (1 << * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources() * function was registered. */ u32 prm_read_reset_sources(void) { u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT; if (prm_ll_data->read_reset_sources) ret = prm_ll_data->read_reset_sources(); else WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__); return ret; } /** * prm_was_any_context_lost_old - was device context lost? (old API) * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Return 1 if any bits were set in the *_CONTEXT_* register * identified by (@part, @inst, @idx), which means that some context * was lost for that module; otherwise, return 0. XXX Deprecated; * callers need to use a less-SoC-dependent way to identify hardware * IP blocks. 
*/ bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx) { bool ret = true; if (prm_ll_data->was_any_context_lost_old) ret = prm_ll_data->was_any_context_lost_old(part, inst, idx); else WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return ret; } /** * prm_clear_context_lost_flags_old - clear context loss flags (old API) * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Clear hardware context loss bits for the module identified by * (@part, @inst, @idx). No return value. XXX Deprecated; callers * need to use a less-SoC-dependent way to identify hardware IP * blocks. */ void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx) { if (prm_ll_data->clear_context_loss_flags_old) prm_ll_data->clear_context_loss_flags_old(part, inst, idx); else WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); } /** * omap_prm_assert_hardreset - assert hardreset for an IP block * @shift: register bit shift corresponding to the reset line * @part: PRM partition * @prm_mod: PRM submodule base or instance offset * @offset: register offset * * Asserts a hardware reset line for an IP block. */ int omap_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset) { if (!prm_ll_data->assert_hardreset) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return -EINVAL; } return prm_ll_data->assert_hardreset(shift, part, prm_mod, offset); } /** * omap_prm_deassert_hardreset - deassert hardreset for an IP block * @shift: register bit shift corresponding to the reset line * @st_shift: reset status bit shift corresponding to the reset line * @part: PRM partition * @prm_mod: PRM submodule base or instance offset * @offset: register offset * @st_offset: status register offset * * Deasserts a hardware reset line for an IP block. 
*/ int omap_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 prm_mod, u16 offset, u16 st_offset) { if (!prm_ll_data->deassert_hardreset) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return -EINVAL; } return prm_ll_data->deassert_hardreset(shift, st_shift, part, prm_mod, offset, st_offset); } /** * omap_prm_is_hardreset_asserted - check the hardreset status for an IP block * @shift: register bit shift corresponding to the reset line * @part: PRM partition * @prm_mod: PRM submodule base or instance offset * @offset: register offset * * Checks if a hardware reset line for an IP block is enabled or not. */ int omap_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset) { if (!prm_ll_data->is_hardreset_asserted) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return -EINVAL; } return prm_ll_data->is_hardreset_asserted(shift, part, prm_mod, offset); } /** * omap_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. * Calls SoC specific I/O chain reconfigure function if available, * otherwise does nothing. */ void omap_prm_reconfigure_io_chain(void) { if (!prcm_irq_setup || !prcm_irq_setup->reconfigure_io_chain) return; prcm_irq_setup->reconfigure_io_chain(); } /** * omap_prm_reset_system - trigger global SW reset * * Triggers SoC specific global warm reset to reboot the device. */ void omap_prm_reset_system(void) { if (!prm_ll_data->reset_system) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return; } prm_ll_data->reset_system(); while (1) cpu_relax(); } /** * omap_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt * @module: PRM module to clear wakeups from * @regs: register to clear * @wkst_mask: wkst bits to clear * * Clears any wakeup events for the module and register set defined. 
* Uses SoC specific implementation to do the actual wakeup status * clearing. */ int omap_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask) { if (!prm_ll_data->clear_mod_irqs) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return -EINVAL; } return prm_ll_data->clear_mod_irqs(module, regs, wkst_mask); } /** * omap_prm_vp_check_txdone - check voltage processor TX done status * * Checks if voltage processor transmission has been completed. * Returns non-zero if a transmission has completed, 0 otherwise. */ u32 omap_prm_vp_check_txdone(u8 vp_id) { if (!prm_ll_data->vp_check_txdone) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return 0; } return prm_ll_data->vp_check_txdone(vp_id); } /** * omap_prm_vp_clear_txdone - clears voltage processor TX done status * * Clears the status bit for completed voltage processor transmission * returned by prm_vp_check_txdone. */ void omap_prm_vp_clear_txdone(u8 vp_id) { if (!prm_ll_data->vp_clear_txdone) { WARN_ONCE(1, "prm: %s: no mapping function defined\n", __func__); return; } prm_ll_data->vp_clear_txdone(vp_id); } /** * prm_register - register per-SoC low-level data with the PRM * @pld: low-level per-SoC OMAP PRM data & function pointers to register * * Register per-SoC low-level OMAP PRM data and function pointers with * the OMAP PRM common interface. The caller must keep the data * pointed to by @pld valid until it calls prm_unregister() and * it returns successfully. Returns 0 upon success, -EINVAL if @pld * is NULL, or -EEXIST if prm_register() has already been called * without an intervening prm_unregister(). 
*/ int prm_register(struct prm_ll_data *pld) { if (!pld) return -EINVAL; if (prm_ll_data != &null_prm_ll_data) return -EEXIST; prm_ll_data = pld; return 0; } /** * prm_unregister - unregister per-SoC low-level data & function pointers * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister * * Unregister per-SoC low-level OMAP PRM data and function pointers * that were previously registered with prm_register(). The * caller may not destroy any of the data pointed to by @pld until * this function returns successfully. Returns 0 upon success, or * -EINVAL if @pld is NULL or if @pld does not match the struct * prm_ll_data * previously registered by prm_register(). */ int prm_unregister(struct prm_ll_data *pld) { if (!pld || prm_ll_data != pld) return -EINVAL; prm_ll_data = &null_prm_ll_data; return 0; } #ifdef CONFIG_ARCH_OMAP2 static struct omap_prcm_init_data omap2_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap2xxx_prm_init, }; #endif #ifdef CONFIG_ARCH_OMAP3 static struct omap_prcm_init_data omap3_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap3xxx_prm_init, /* * IVA2 offset is a negative value, must offset the prm_base * address by this to get it to positive */ .offset = -OMAP3430_IVA2_MOD, }; #endif #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX) static struct omap_prcm_init_data am3_prm_data __initdata = { .index = TI_CLKM_PRM, .init = am33xx_prm_init, }; #endif #ifdef CONFIG_ARCH_OMAP4 static struct omap_prcm_init_data omap4_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = OMAP4430_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE | PRM_IRQ_DEFAULT, }; #endif #ifdef CONFIG_SOC_OMAP5 static struct omap_prcm_init_data omap5_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = OMAP54XX_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE, }; #endif #ifdef CONFIG_SOC_DRA7XX static struct 
omap_prcm_init_data dra7_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = DRA7XX_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP, }; #endif #ifdef CONFIG_SOC_AM43XX static struct omap_prcm_init_data am4_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = AM43XX_PRM_DEVICE_INST, .flags = PRM_HAS_IO_WAKEUP, }; #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) static struct omap_prcm_init_data scrm_data __initdata = { .index = TI_CLKM_SCRM, }; #endif static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = { #ifdef CONFIG_SOC_AM33XX { .compatible = "ti,am3-prcm", .data = &am3_prm_data }, #endif #ifdef CONFIG_SOC_AM43XX { .compatible = "ti,am4-prcm", .data = &am4_prm_data }, #endif #ifdef CONFIG_SOC_TI81XX { .compatible = "ti,dm814-prcm", .data = &am3_prm_data }, { .compatible = "ti,dm816-prcm", .data = &am3_prm_data }, #endif #ifdef CONFIG_ARCH_OMAP2 { .compatible = "ti,omap2-prcm", .data = &omap2_prm_data }, #endif #ifdef CONFIG_ARCH_OMAP3 { .compatible = "ti,omap3-prm", .data = &omap3_prm_data }, #endif #ifdef CONFIG_ARCH_OMAP4 { .compatible = "ti,omap4-prm", .data = &omap4_prm_data }, { .compatible = "ti,omap4-scrm", .data = &scrm_data }, #endif #ifdef CONFIG_SOC_OMAP5 { .compatible = "ti,omap5-prm", .data = &omap5_prm_data }, { .compatible = "ti,omap5-scrm", .data = &scrm_data }, #endif #ifdef CONFIG_SOC_DRA7XX { .compatible = "ti,dra7-prm", .data = &dra7_prm_data }, #endif { } }; /** * omap2_prm_base_init - initialize iomappings for the PRM driver * * Detects and initializes the iomappings for the PRM driver, based * on the DT data. Returns 0 in success, negative error value * otherwise. 
*/ int __init omap2_prm_base_init(void) { struct device_node *np; const struct of_device_id *match; struct omap_prcm_init_data *data; void __iomem *mem; for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) { data = (struct omap_prcm_init_data *)match->data; mem = of_iomap(np, 0); if (!mem) return -ENOMEM; if (data->index == TI_CLKM_PRM) prm_base = mem + data->offset; data->mem = mem; data->np = np; if (data->init) data->init(data); } return 0; } int __init omap2_prcm_base_init(void) { int ret; ret = omap2_prm_base_init(); if (ret) return ret; return omap2_cm_base_init(); } /** * omap_prcm_init - low level init for the PRCM drivers * * Initializes the low level clock infrastructure for PRCM drivers. * Returns 0 in success, negative error value in failure. */ int __init omap_prcm_init(void) { struct device_node *np; const struct of_device_id *match; const struct omap_prcm_init_data *data; int ret; for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) { data = match->data; ret = omap2_clk_provider_init(np, data->index, NULL, data->mem); if (ret) return ret; } omap_cm_init(); return 0; } static int __init prm_late_init(void) { if (prm_ll_data->late_init) return prm_ll_data->late_init(); return 0; } subsys_initcall(prm_late_init);
gpl-2.0
vickylinuxer/at91sam9g35-kernel
drivers/i2c/busses/i2c-imx.c
789
18164
/* * Copyright (C) 2002 Motorola GSG-China * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. * * Author: * Darius Augulis, Teltonika Inc. * * Desc.: * Implementation of I2C Adapter/Algorithm Driver * for I2C Bus integrated in Freescale i.MX/MXC processors * * Derived from Motorola GSG China I2C example driver * * Copyright (C) 2005 Torsten Koschorrek <koschorrek at synertronixx.de * Copyright (C) 2005 Matthias Blaschke <blaschke at synertronixx.de * Copyright (C) 2007 RightHand Technologies, Inc. 
* Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt> * */ /** Includes ******************************************************************* *******************************************************************************/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/slab.h> #include <mach/irqs.h> #include <mach/hardware.h> #include <mach/i2c.h> /** Defines ******************************************************************** *******************************************************************************/ /* This will be the driver name the kernel reports */ #define DRIVER_NAME "imx-i2c" /* Default value */ #define IMX_I2C_BIT_RATE 100000 /* 100kHz */ /* IMX I2C registers */ #define IMX_I2C_IADR 0x00 /* i2c slave address */ #define IMX_I2C_IFDR 0x04 /* i2c frequency divider */ #define IMX_I2C_I2CR 0x08 /* i2c control */ #define IMX_I2C_I2SR 0x0C /* i2c status */ #define IMX_I2C_I2DR 0x10 /* i2c transfer data */ /* Bits of IMX I2C registers */ #define I2SR_RXAK 0x01 #define I2SR_IIF 0x02 #define I2SR_SRW 0x04 #define I2SR_IAL 0x10 #define I2SR_IBB 0x20 #define I2SR_IAAS 0x40 #define I2SR_ICF 0x80 #define I2CR_RSTA 0x04 #define I2CR_TXAK 0x08 #define I2CR_MTX 0x10 #define I2CR_MSTA 0x20 #define I2CR_IIEN 0x40 #define I2CR_IEN 0x80 /** Variables ****************************************************************** *******************************************************************************/ /* * sorted list of clock divider, register value pairs * taken from table 26-5, p.26-9, Freescale i.MX * Integrated Portable System Processor Reference Manual * Document Number: MC9328MXLRM, Rev. 
5.1, 06/2007 * * Duplicated divider values removed from list */ static u16 __initdata i2c_clk_div[50][2] = { { 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 }, { 30, 0x00 }, { 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 }, { 42, 0x03 }, { 44, 0x27 }, { 48, 0x28 }, { 52, 0x05 }, { 56, 0x29 }, { 60, 0x06 }, { 64, 0x2A }, { 72, 0x2B }, { 80, 0x2C }, { 88, 0x09 }, { 96, 0x2D }, { 104, 0x0A }, { 112, 0x2E }, { 128, 0x2F }, { 144, 0x0C }, { 160, 0x30 }, { 192, 0x31 }, { 224, 0x32 }, { 240, 0x0F }, { 256, 0x33 }, { 288, 0x10 }, { 320, 0x34 }, { 384, 0x35 }, { 448, 0x36 }, { 480, 0x13 }, { 512, 0x37 }, { 576, 0x14 }, { 640, 0x38 }, { 768, 0x39 }, { 896, 0x3A }, { 960, 0x17 }, { 1024, 0x3B }, { 1152, 0x18 }, { 1280, 0x3C }, { 1536, 0x3D }, { 1792, 0x3E }, { 1920, 0x1B }, { 2048, 0x3F }, { 2304, 0x1C }, { 2560, 0x1D }, { 3072, 0x1E }, { 3840, 0x1F } }; struct imx_i2c_struct { struct i2c_adapter adapter; struct resource *res; struct clk *clk; void __iomem *base; int irq; wait_queue_head_t queue; unsigned long i2csr; unsigned int disable_delay; int stopped; unsigned int ifdr; /* IMX_I2C_IFDR */ }; /** Functions for IMX I2C adapter driver *************************************** *******************************************************************************/ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) { unsigned long orig_jiffies = jiffies; unsigned int temp; dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); while (1) { temp = readb(i2c_imx->base + IMX_I2C_I2SR); if (for_busy && (temp & I2SR_IBB)) break; if (!for_busy && !(temp & I2SR_IBB)) break; if (signal_pending(current)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C Interrupted\n", __func__); return -EINTR; } if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C bus is busy\n", __func__); return -ETIMEDOUT; } schedule(); } return 0; } static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) { int result; result = 
wait_event_interruptible_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); if (unlikely(result < 0)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__); return result; } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); i2c_imx->i2csr = 0; return 0; } static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx) { if (readb(i2c_imx->base + IMX_I2C_I2SR) & I2SR_RXAK) { dev_dbg(&i2c_imx->adapter.dev, "<%s> No ACK\n", __func__); return -EIO; /* No ACK */ } dev_dbg(&i2c_imx->adapter.dev, "<%s> ACK received\n", __func__); return 0; } static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) { unsigned int temp = 0; int result; dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); clk_enable(i2c_imx->clk); writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR); /* Enable I2C controller */ writeb(0, i2c_imx->base + IMX_I2C_I2SR); writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR); /* Wait controller to be stable */ udelay(50); /* Start I2C transaction */ temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_MSTA; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); result = i2c_imx_bus_busy(i2c_imx, 1); if (result) return result; i2c_imx->stopped = 0; temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); return result; } static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) { unsigned int temp = 0; if (!i2c_imx->stopped) { /* Stop I2C transaction */ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp &= ~(I2CR_MSTA | I2CR_MTX); writeb(temp, i2c_imx->base + IMX_I2C_I2CR); } if (cpu_is_mx1()) { /* * This delay caused by an i.MXL hardware bug. * If no (or too short) delay, no "STOP" bit will be generated. 
*/ udelay(i2c_imx->disable_delay); } if (!i2c_imx->stopped) { i2c_imx_bus_busy(i2c_imx, 0); i2c_imx->stopped = 1; } /* Disable I2C controller */ writeb(0, i2c_imx->base + IMX_I2C_I2CR); clk_disable(i2c_imx->clk); } static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, unsigned int rate) { unsigned int i2c_clk_rate; unsigned int div; int i; /* Divider value calculation */ i2c_clk_rate = clk_get_rate(i2c_imx->clk); div = (i2c_clk_rate + rate - 1) / rate; if (div < i2c_clk_div[0][0]) i = 0; else if (div > i2c_clk_div[ARRAY_SIZE(i2c_clk_div) - 1][0]) i = ARRAY_SIZE(i2c_clk_div) - 1; else for (i = 0; i2c_clk_div[i][0] < div; i++); /* Store divider value */ i2c_imx->ifdr = i2c_clk_div[i][1]; /* * There dummy delay is calculated. * It should be about one I2C clock period long. * This delay is used in I2C bus disable function * to fix chip hardware bug. */ i2c_imx->disable_delay = (500000U * i2c_clk_div[i][0] + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2); /* dev_dbg() can't be used, because adapter is not yet registered */ #ifdef CONFIG_I2C_DEBUG_BUS printk(KERN_DEBUG "I2C: <%s> I2C_CLK=%d, REQ DIV=%d\n", __func__, i2c_clk_rate, div); printk(KERN_DEBUG "I2C: <%s> IFDR[IC]=0x%x, REAL DIV=%d\n", __func__, i2c_clk_div[i][1], i2c_clk_div[i][0]); #endif } static irqreturn_t i2c_imx_isr(int irq, void *dev_id) { struct imx_i2c_struct *i2c_imx = dev_id; unsigned int temp; temp = readb(i2c_imx->base + IMX_I2C_I2SR); if (temp & I2SR_IIF) { /* save status register */ i2c_imx->i2csr = temp; temp &= ~I2SR_IIF; writeb(temp, i2c_imx->base + IMX_I2C_I2SR); wake_up_interruptible(&i2c_imx->queue); return IRQ_HANDLED; } return IRQ_NONE; } static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) { int i, result; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", __func__, msgs->addr << 1); /* write slave address */ writeb(msgs->addr << 1, i2c_imx->base + IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx); if (result) return result; 
result = i2c_imx_acked(i2c_imx); if (result) return result; dev_dbg(&i2c_imx->adapter.dev, "<%s> write data\n", __func__); /* write data */ for (i = 0; i < msgs->len; i++) { dev_dbg(&i2c_imx->adapter.dev, "<%s> write byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); writeb(msgs->buf[i], i2c_imx->base + IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; } return 0; } static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) { int i, result; unsigned int temp; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", __func__, (msgs->addr << 1) | 0x01); /* write slave address */ writeb((msgs->addr << 1) | 0x01, i2c_imx->base + IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); /* setup bus to read data */ temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp &= ~I2CR_MTX; if (msgs->len - 1) temp &= ~I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); readb(i2c_imx->base + IMX_I2C_I2DR); /* dummy read */ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); /* read data */ for (i = 0; i < msgs->len; i++) { result = i2c_imx_trx_complete(i2c_imx); if (result) return result; if (i == (msgs->len - 1)) { /* It must generate STOP before read I2DR to prevent controller from generating another clock cycle */ dev_dbg(&i2c_imx->adapter.dev, "<%s> clear MSTA\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp &= ~(I2CR_MSTA | I2CR_MTX); writeb(temp, i2c_imx->base + IMX_I2C_I2CR); i2c_imx_bus_busy(i2c_imx, 0); i2c_imx->stopped = 1; } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> set TXAK\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); } msgs->buf[i] = readb(i2c_imx->base + IMX_I2C_I2DR); 
dev_dbg(&i2c_imx->adapter.dev, "<%s> read byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); } return 0; } static int i2c_imx_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { unsigned int i, temp; int result; struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); /* Start I2C transfer */ result = i2c_imx_start(i2c_imx); if (result) goto fail0; /* read/write data */ for (i = 0; i < num; i++) { if (i) { dev_dbg(&i2c_imx->adapter.dev, "<%s> repeated start\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_RSTA; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); result = i2c_imx_bus_busy(i2c_imx, 1); if (result) goto fail0; } dev_dbg(&i2c_imx->adapter.dev, "<%s> transfer message: %d\n", __func__, i); /* write/read data */ #ifdef CONFIG_I2C_DEBUG_BUS temp = readb(i2c_imx->base + IMX_I2C_I2CR); dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, " "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__, (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0), (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0), (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0)); temp = readb(i2c_imx->base + IMX_I2C_I2SR); dev_dbg(&i2c_imx->adapter.dev, "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, " "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__, (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0), (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0), (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0), (temp & I2SR_RXAK ? 1 : 0)); #endif if (msgs[i].flags & I2C_M_RD) result = i2c_imx_read(i2c_imx, &msgs[i]); else result = i2c_imx_write(i2c_imx, &msgs[i]); if (result) goto fail0; } fail0: /* Stop I2C transfer */ i2c_imx_stop(i2c_imx); dev_dbg(&i2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__, (result < 0) ? "error" : "success msg", (result < 0) ? result : num); return (result < 0) ? 
result : num; } static u32 i2c_imx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm i2c_imx_algo = { .master_xfer = i2c_imx_xfer, .functionality = i2c_imx_func, }; static int __init i2c_imx_probe(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx; struct resource *res; struct imxi2c_platform_data *pdata; void __iomem *base; resource_size_t res_size; int irq; int ret; dev_dbg(&pdev->dev, "<%s>\n", __func__); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "can't get device resources\n"); return -ENOENT; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "can't get irq number\n"); return -ENOENT; } pdata = pdev->dev.platform_data; if (pdata && pdata->init) { ret = pdata->init(&pdev->dev); if (ret) return ret; } res_size = resource_size(res); if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { ret = -EBUSY; goto fail0; } base = ioremap(res->start, res_size); if (!base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -EIO; goto fail1; } i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); if (!i2c_imx) { dev_err(&pdev->dev, "can't allocate interface\n"); ret = -ENOMEM; goto fail2; } /* Setup i2c_imx driver structure */ strcpy(i2c_imx->adapter.name, pdev->name); i2c_imx->adapter.owner = THIS_MODULE; i2c_imx->adapter.algo = &i2c_imx_algo; i2c_imx->adapter.dev.parent = &pdev->dev; i2c_imx->adapter.nr = pdev->id; i2c_imx->irq = irq; i2c_imx->base = base; i2c_imx->res = res; /* Get I2C clock */ i2c_imx->clk = clk_get(&pdev->dev, "i2c_clk"); if (IS_ERR(i2c_imx->clk)) { ret = PTR_ERR(i2c_imx->clk); dev_err(&pdev->dev, "can't get I2C clock\n"); goto fail3; } /* Request IRQ */ ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx); if (ret) { dev_err(&pdev->dev, "can't claim irq %d\n", i2c_imx->irq); goto fail4; } /* Init queue */ init_waitqueue_head(&i2c_imx->queue); /* Set up adapter data */ 
i2c_set_adapdata(&i2c_imx->adapter, i2c_imx); /* Set up clock divider */ if (pdata && pdata->bitrate) i2c_imx_set_clk(i2c_imx, pdata->bitrate); else i2c_imx_set_clk(i2c_imx, IMX_I2C_BIT_RATE); /* Set up chip registers to defaults */ writeb(0, i2c_imx->base + IMX_I2C_I2CR); writeb(0, i2c_imx->base + IMX_I2C_I2SR); /* Add I2C adapter */ ret = i2c_add_numbered_adapter(&i2c_imx->adapter); if (ret < 0) { dev_err(&pdev->dev, "registration failed\n"); goto fail5; } /* Set up platform driver data */ platform_set_drvdata(pdev, i2c_imx); dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", i2c_imx->irq); dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n", i2c_imx->res->start, i2c_imx->res->end); dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x \n", res_size, i2c_imx->res->start); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", i2c_imx->adapter.name); dev_dbg(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ fail5: free_irq(i2c_imx->irq, i2c_imx); fail4: clk_put(i2c_imx->clk); fail3: kfree(i2c_imx); fail2: iounmap(base); fail1: release_mem_region(res->start, resource_size(res)); fail0: if (pdata && pdata->exit) pdata->exit(&pdev->dev); return ret; /* Return error number */ } static int __exit i2c_imx_remove(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); struct imxi2c_platform_data *pdata = pdev->dev.platform_data; /* remove adapter */ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); i2c_del_adapter(&i2c_imx->adapter); platform_set_drvdata(pdev, NULL); /* free interrupt */ free_irq(i2c_imx->irq, i2c_imx); /* setup chip registers to defaults */ writeb(0, i2c_imx->base + IMX_I2C_IADR); writeb(0, i2c_imx->base + IMX_I2C_IFDR); writeb(0, i2c_imx->base + IMX_I2C_I2CR); writeb(0, i2c_imx->base + IMX_I2C_I2SR); /* Shut down hardware */ if (pdata && pdata->exit) pdata->exit(&pdev->dev); clk_put(i2c_imx->clk); iounmap(i2c_imx->base); 
release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); kfree(i2c_imx); return 0; } static struct platform_driver i2c_imx_driver = { .remove = __exit_p(i2c_imx_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, } }; static int __init i2c_adap_imx_init(void) { return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe); } subsys_initcall(i2c_adap_imx_init); static void __exit i2c_adap_imx_exit(void) { platform_driver_unregister(&i2c_imx_driver); } module_exit(i2c_adap_imx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Darius Augulis"); MODULE_DESCRIPTION("I2C adapter driver for IMX I2C bus"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
jvaughan/san-francisco-kernel
net/netfilter/nf_conntrack_h323_asn1.c
1813
20164
/**************************************************************************** * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 * conntrack/NAT module. * * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> * * This source code is licensed under General Public License version 2. * * See ip_conntrack_helper_h323_asn1.h for details. * ****************************************************************************/ #ifdef __KERNEL__ #include <linux/kernel.h> #else #include <stdio.h> #endif #include <linux/netfilter/nf_conntrack_h323_asn1.h> /* Trace Flag */ #ifndef H323_TRACE #define H323_TRACE 0 #endif #if H323_TRACE #define TAB_SIZE 4 #define IFTHEN(cond, act) if(cond){act;} #ifdef __KERNEL__ #define PRINT printk #else #define PRINT printf #endif #define FNAME(name) name, #else #define IFTHEN(cond, act) #define PRINT(fmt, args...) #define FNAME(name) #endif /* ASN.1 Types */ #define NUL 0 #define BOOL 1 #define OID 2 #define INT 3 #define ENUM 4 #define BITSTR 5 #define NUMSTR 6 #define NUMDGT 6 #define TBCDSTR 6 #define OCTSTR 7 #define PRTSTR 7 #define IA5STR 7 #define GENSTR 7 #define BMPSTR 8 #define SEQ 9 #define SET 9 #define SEQOF 10 #define SETOF 10 #define CHOICE 11 /* Constraint Types */ #define FIXD 0 /* #define BITS 1-8 */ #define BYTE 9 #define WORD 10 #define CONS 11 #define SEMI 12 #define UNCO 13 /* ASN.1 Type Attributes */ #define SKIP 0 #define STOP 1 #define DECODE 2 #define EXT 4 #define OPEN 8 #define OPT 16 /* ASN.1 Field Structure */ typedef struct field_t { #if H323_TRACE char *name; #endif unsigned char type; unsigned char sz; unsigned char lb; unsigned char ub; unsigned short attr; unsigned short offset; const struct field_t *fields; } field_t; /* Bit Stream */ typedef struct { unsigned char *buf; unsigned char *beg; unsigned char *end; unsigned char *cur; unsigned int bit; } bitstr_t; /* Tool Functions */ #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;} #define INC_BITS(bs,b) 
if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} #define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND) static unsigned int get_len(bitstr_t *bs); static unsigned int get_bit(bitstr_t *bs); static unsigned int get_bits(bitstr_t *bs, unsigned int b); static unsigned int get_bitmap(bitstr_t *bs, unsigned int b); static unsigned int get_uint(bitstr_t *bs, int b); /* Decoder Functions */ static int decode_nul(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_bool(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_oid(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_int(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_enum(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_bitstr(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_numstr(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_octstr(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_bmpstr(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_seq(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_seqof(bitstr_t *bs, const struct field_t *f, char *base, int level); static int decode_choice(bitstr_t *bs, const struct field_t *f, char *base, int level); /* Decoder Functions Vector */ typedef int (*decoder_t)(bitstr_t *, const struct field_t *, char *, int); static const decoder_t Decoders[] = { decode_nul, decode_bool, decode_oid, decode_int, decode_enum, decode_bitstr, decode_numstr, decode_octstr, decode_bmpstr, decode_seq, decode_seqof, decode_choice, }; /**************************************************************************** * H.323 Types 
****************************************************************************/ #include "nf_conntrack_h323_types.c" /**************************************************************************** * Functions ****************************************************************************/ /* Assume bs is aligned && v < 16384 */ static unsigned int get_len(bitstr_t *bs) { unsigned int v; v = *bs->cur++; if (v & 0x80) { v &= 0x3f; v <<= 8; v += *bs->cur++; } return v; } /****************************************************************************/ static unsigned int get_bit(bitstr_t *bs) { unsigned int b = (*bs->cur) & (0x80 >> bs->bit); INC_BIT(bs); return b; } /****************************************************************************/ /* Assume b <= 8 */ static unsigned int get_bits(bitstr_t *bs, unsigned int b) { unsigned int v, l; v = (*bs->cur) & (0xffU >> bs->bit); l = b + bs->bit; if (l < 8) { v >>= 8 - l; bs->bit = l; } else if (l == 8) { bs->cur++; bs->bit = 0; } else { /* l > 8 */ v <<= 8; v += *(++bs->cur); v >>= 16 - l; bs->bit = l - 8; } return v; } /****************************************************************************/ /* Assume b <= 32 */ static unsigned int get_bitmap(bitstr_t *bs, unsigned int b) { unsigned int v, l, shift, bytes; if (!b) return 0; l = bs->bit + b; if (l < 8) { v = (unsigned int)(*bs->cur) << (bs->bit + 24); bs->bit = l; } else if (l == 8) { v = (unsigned int)(*bs->cur++) << (bs->bit + 24); bs->bit = 0; } else { for (bytes = l >> 3, shift = 24, v = 0; bytes; bytes--, shift -= 8) v |= (unsigned int)(*bs->cur++) << shift; if (l < 32) { v |= (unsigned int)(*bs->cur) << shift; v <<= bs->bit; } else if (l > 32) { v <<= bs->bit; v |= (*bs->cur) >> (8 - bs->bit); } bs->bit = l & 0x7; } v &= 0xffffffff << (32 - b); return v; } /**************************************************************************** * Assume bs is aligned and sizeof(unsigned int) == 4 ****************************************************************************/ 
static unsigned int get_uint(bitstr_t *bs, int b) { unsigned int v = 0; switch (b) { case 4: v |= *bs->cur++; v <<= 8; case 3: v |= *bs->cur++; v <<= 8; case 2: v |= *bs->cur++; v <<= 8; case 1: v |= *bs->cur++; break; } return v; } /****************************************************************************/ static int decode_nul(bitstr_t *bs, const struct field_t *f, char *base, int level) { PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_bool(bitstr_t *bs, const struct field_t *f, char *base, int level) { PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); INC_BIT(bs); CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_oid(bitstr_t *bs, const struct field_t *f, char *base, int level) { int len; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); BYTE_ALIGN(bs); CHECK_BOUND(bs, 1); len = *bs->cur++; bs->cur += len; CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_int(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int len; PRINT("%*.s%s", level * TAB_SIZE, " ", f->name); switch (f->sz) { case BYTE: /* Range == 256 */ BYTE_ALIGN(bs); bs->cur++; break; case WORD: /* 257 <= Range <= 64K */ BYTE_ALIGN(bs); bs->cur += 2; break; case CONS: /* 64K < Range < 4G */ len = get_bits(bs, 2) + 1; BYTE_ALIGN(bs); if (base && (f->attr & DECODE)) { /* timeToLive */ unsigned int v = get_uint(bs, len) + f->lb; PRINT(" = %u", v); *((unsigned int *)(base + f->offset)) = v; } bs->cur += len; break; case UNCO: BYTE_ALIGN(bs); CHECK_BOUND(bs, 2); len = get_len(bs); bs->cur += len; break; default: /* 2 <= Range <= 255 */ INC_BITS(bs, f->sz); break; } PRINT("\n"); CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } 
/****************************************************************************/ static int decode_enum(bitstr_t *bs, const struct field_t *f, char *base, int level) { PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); if ((f->attr & EXT) && get_bit(bs)) { INC_BITS(bs, 7); } else { INC_BITS(bs, f->sz); } CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_bitstr(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int len; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); BYTE_ALIGN(bs); switch (f->sz) { case FIXD: /* fixed length > 16 */ len = f->lb; break; case WORD: /* 2-byte length */ CHECK_BOUND(bs, 2); len = (*bs->cur++) << 8; len += (*bs->cur++) + f->lb; break; case SEMI: CHECK_BOUND(bs, 2); len = get_len(bs); break; default: len = 0; break; } bs->cur += len >> 3; bs->bit = len & 7; CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_numstr(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int len; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); /* 2 <= Range <= 255 */ len = get_bits(bs, f->sz) + f->lb; BYTE_ALIGN(bs); INC_BITS(bs, (len << 2)); CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_octstr(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int len; PRINT("%*.s%s", level * TAB_SIZE, " ", f->name); switch (f->sz) { case FIXD: /* Range == 1 */ if (f->lb > 2) { BYTE_ALIGN(bs); if (base && (f->attr & DECODE)) { /* The IP Address */ IFTHEN(f->lb == 4, PRINT(" = %d.%d.%d.%d:%d", bs->cur[0], bs->cur[1], bs->cur[2], bs->cur[3], bs->cur[4] * 256 + bs->cur[5])); *((unsigned int *)(base + f->offset)) = bs->cur - bs->buf; } } len = f->lb; break; case BYTE: /* Range == 256 */ BYTE_ALIGN(bs); CHECK_BOUND(bs, 1); len = 
(*bs->cur++) + f->lb; break; case SEMI: BYTE_ALIGN(bs); CHECK_BOUND(bs, 2); len = get_len(bs) + f->lb; break; default: /* 2 <= Range <= 255 */ len = get_bits(bs, f->sz) + f->lb; BYTE_ALIGN(bs); break; } bs->cur += len; PRINT("\n"); CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_bmpstr(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int len; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); switch (f->sz) { case BYTE: /* Range == 256 */ BYTE_ALIGN(bs); CHECK_BOUND(bs, 1); len = (*bs->cur++) + f->lb; break; default: /* 2 <= Range <= 255 */ len = get_bits(bs, f->sz) + f->lb; BYTE_ALIGN(bs); break; } bs->cur += len << 1; CHECK_BOUND(bs, 0); return H323_ERROR_NONE; } /****************************************************************************/ static int decode_seq(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int ext, bmp, i, opt, len = 0, bmp2, bmp2_len; int err; const struct field_t *son; unsigned char *beg = NULL; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); /* Decode? */ base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; /* Extensible? */ ext = (f->attr & EXT) ? 
get_bit(bs) : 0; /* Get fields bitmap */ bmp = get_bitmap(bs, f->sz); if (base) *(unsigned int *)base = bmp; /* Decode the root components */ for (i = opt = 0, son = f->fields; i < f->lb; i++, son++) { if (son->attr & STOP) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); return H323_ERROR_STOP; } if (son->attr & OPT) { /* Optional component */ if (!((0x80000000U >> (opt++)) & bmp)) /* Not exist */ continue; } /* Decode */ if (son->attr & OPEN) { /* Open field */ CHECK_BOUND(bs, 2); len = get_len(bs); CHECK_BOUND(bs, len); if (!base || !(son->attr & DECODE)) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); bs->cur += len; continue; } beg = bs->cur; /* Decode */ if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < H323_ERROR_NONE) return err; bs->cur = beg + len; bs->bit = 0; } else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < H323_ERROR_NONE) return err; } /* No extension? */ if (!ext) return H323_ERROR_NONE; /* Get the extension bitmap */ bmp2_len = get_bits(bs, 7) + 1; CHECK_BOUND(bs, (bmp2_len + 7) >> 3); bmp2 = get_bitmap(bs, bmp2_len); bmp |= bmp2 >> f->sz; if (base) *(unsigned int *)base = bmp; BYTE_ALIGN(bs); /* Decode the extension components */ for (opt = 0; opt < bmp2_len; opt++, i++, son++) { /* Check Range */ if (i >= f->ub) { /* Newer Version? 
*/ CHECK_BOUND(bs, 2); len = get_len(bs); CHECK_BOUND(bs, len); bs->cur += len; continue; } if (son->attr & STOP) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); return H323_ERROR_STOP; } if (!((0x80000000 >> opt) & bmp2)) /* Not present */ continue; CHECK_BOUND(bs, 2); len = get_len(bs); CHECK_BOUND(bs, len); if (!base || !(son->attr & DECODE)) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); bs->cur += len; continue; } beg = bs->cur; if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < H323_ERROR_NONE) return err; bs->cur = beg + len; bs->bit = 0; } return H323_ERROR_NONE; } /****************************************************************************/ static int decode_seqof(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int count, effective_count = 0, i, len = 0; int err; const struct field_t *son; unsigned char *beg = NULL; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); /* Decode? */ base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; /* Decode item count */ switch (f->sz) { case BYTE: BYTE_ALIGN(bs); CHECK_BOUND(bs, 1); count = *bs->cur++; break; case WORD: BYTE_ALIGN(bs); CHECK_BOUND(bs, 2); count = *bs->cur++; count <<= 8; count = *bs->cur++; break; case SEMI: BYTE_ALIGN(bs); CHECK_BOUND(bs, 2); count = get_len(bs); break; default: count = get_bits(bs, f->sz); break; } count += f->lb; /* Write Count */ if (base) { effective_count = count > f->ub ? f->ub : count; *(unsigned int *)base = effective_count; base += sizeof(unsigned int); } /* Decode nested field */ son = f->fields; if (base) base -= son->offset; for (i = 0; i < count; i++) { if (son->attr & OPEN) { BYTE_ALIGN(bs); len = get_len(bs); CHECK_BOUND(bs, len); if (!base || !(son->attr & DECODE)) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); bs->cur += len; continue; } beg = bs->cur; if ((err = (Decoders[son->type]) (bs, son, i < effective_count ? 
base : NULL, level + 1)) < H323_ERROR_NONE) return err; bs->cur = beg + len; bs->bit = 0; } else if ((err = (Decoders[son->type]) (bs, son, i < effective_count ? base : NULL, level + 1)) < H323_ERROR_NONE) return err; if (base) base += son->offset; } return H323_ERROR_NONE; } /****************************************************************************/ static int decode_choice(bitstr_t *bs, const struct field_t *f, char *base, int level) { unsigned int type, ext, len = 0; int err; const struct field_t *son; unsigned char *beg = NULL; PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); /* Decode? */ base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; /* Decode the choice index number */ if ((f->attr & EXT) && get_bit(bs)) { ext = 1; type = get_bits(bs, 7) + f->lb; } else { ext = 0; type = get_bits(bs, f->sz); if (type >= f->lb) return H323_ERROR_RANGE; } /* Write Type */ if (base) *(unsigned int *)base = type; /* Check Range */ if (type >= f->ub) { /* Newer version? */ BYTE_ALIGN(bs); len = get_len(bs); CHECK_BOUND(bs, len); bs->cur += len; return H323_ERROR_NONE; } /* Transfer to son level */ son = &f->fields[type]; if (son->attr & STOP) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); return H323_ERROR_STOP; } if (ext || (son->attr & OPEN)) { BYTE_ALIGN(bs); len = get_len(bs); CHECK_BOUND(bs, len); if (!base || !(son->attr & DECODE)) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); bs->cur += len; return H323_ERROR_NONE; } beg = bs->cur; if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < H323_ERROR_NONE) return err; bs->cur = beg + len; bs->bit = 0; } else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) < H323_ERROR_NONE) return err; return H323_ERROR_NONE; } /****************************************************************************/ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras) { static const struct field_t ras_message = { FNAME("RasMessage") CHOICE, 5, 24, 32, DECODE | EXT, 0, 
_RasMessage }; bitstr_t bs; bs.buf = bs.beg = bs.cur = buf; bs.end = buf + sz; bs.bit = 0; return decode_choice(&bs, &ras_message, (char *) ras, 0); } /****************************************************************************/ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg, size_t sz, H323_UserInformation *uuie) { static const struct field_t h323_userinformation = { FNAME("H323-UserInformation") SEQ, 1, 2, 2, DECODE | EXT, 0, _H323_UserInformation }; bitstr_t bs; bs.buf = buf; bs.beg = bs.cur = beg; bs.end = beg + sz; bs.bit = 0; return decode_seq(&bs, &h323_userinformation, (char *) uuie, 0); } /****************************************************************************/ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz, MultimediaSystemControlMessage * mscm) { static const struct field_t multimediasystemcontrolmessage = { FNAME("MultimediaSystemControlMessage") CHOICE, 2, 4, 4, DECODE | EXT, 0, _MultimediaSystemControlMessage }; bitstr_t bs; bs.buf = bs.beg = bs.cur = buf; bs.end = buf + sz; bs.bit = 0; return decode_choice(&bs, &multimediasystemcontrolmessage, (char *) mscm, 0); } /****************************************************************************/ int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931) { unsigned char *p = buf; int len; if (!p || sz < 1) return H323_ERROR_BOUND; /* Protocol Discriminator */ if (*p != 0x08) { PRINT("Unknown Protocol Discriminator\n"); return H323_ERROR_RANGE; } p++; sz--; /* CallReferenceValue */ if (sz < 1) return H323_ERROR_BOUND; len = *p++; sz--; if (sz < len) return H323_ERROR_BOUND; p += len; sz -= len; /* Message Type */ if (sz < 1) return H323_ERROR_BOUND; q931->MessageType = *p++; PRINT("MessageType = %02X\n", q931->MessageType); if (*p & 0x80) { p++; sz--; } /* Decode Information Elements */ while (sz > 0) { if (*p == 0x7e) { /* UserUserIE */ if (sz < 3) break; p++; len = *p++ << 8; len |= *p++; sz -= 3; if (sz < len) break; p++; len--; return 
DecodeH323_UserInformation(buf, p, len, &q931->UUIE); } p++; sz--; if (sz < 1) break; len = *p++; if (sz < len) break; p += len; sz -= len; } PRINT("Q.931 UUIE not found\n"); return H323_ERROR_BOUND; }
gpl-2.0
issi5862/ishida_jbd2_linux-2.0
drivers/usb/serial/usb_debug.c
2069
1869
/* * USB Debug cable driver * * Copyright (C) 2006 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define USB_DEBUG_MAX_PACKET_SIZE 8 #define USB_DEBUG_BRK_SIZE 8 static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = { 0x00, 0xff, 0x01, 0xfe, 0x00, 0xfe, 0x01, 0xff, }; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0525, 0x127a) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); /* This HW really does not support a serial break, so one will be * emulated when ever the break state is set to true. */ static void usb_debug_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; if (!break_state) return; usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE); } static void usb_debug_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; if (urb->actual_length == USB_DEBUG_BRK_SIZE && memcmp(urb->transfer_buffer, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE) == 0) { usb_serial_handle_break(port); return; } usb_serial_generic_process_read_urb(urb); } static struct usb_serial_driver debug_device = { .driver = { .owner = THIS_MODULE, .name = "debug", }, .id_table = id_table, .num_ports = 1, .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, .break_ctl = usb_debug_break_ctl, .process_read_urb = usb_debug_process_read_urb, }; static struct usb_serial_driver * const serial_drivers[] = { &debug_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_LICENSE("GPL");
gpl-2.0
Radium-Devices/Radium_shamu
arch/mips/sibyte/sb1250/setup.c
2069
6051
/* * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/string.h> #include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/io.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_scd.h> unsigned int sb1_pass; unsigned int soc_pass; unsigned int soc_type; EXPORT_SYMBOL(soc_type); unsigned int periph_rev; unsigned int zbbus_mhz; EXPORT_SYMBOL(zbbus_mhz); static char *soc_str; static char *pass_str; static unsigned int war_pass; /* XXXKW don't overload PASS defines? */ static int __init setup_bcm1250(void) { int ret = 0; switch (soc_pass) { case K_SYS_REVISION_BCM1250_PASS1: periph_rev = 1; pass_str = "Pass 1"; break; case K_SYS_REVISION_BCM1250_A10: periph_rev = 2; pass_str = "A8/A10"; /* XXXKW different war_pass? 
*/ war_pass = K_SYS_REVISION_BCM1250_PASS2; break; case K_SYS_REVISION_BCM1250_PASS2_2: periph_rev = 2; pass_str = "B1"; break; case K_SYS_REVISION_BCM1250_B2: periph_rev = 2; pass_str = "B2"; war_pass = K_SYS_REVISION_BCM1250_PASS2_2; break; case K_SYS_REVISION_BCM1250_PASS3: periph_rev = 3; pass_str = "C0"; break; case K_SYS_REVISION_BCM1250_C1: periph_rev = 3; pass_str = "C1"; break; default: if (soc_pass < K_SYS_REVISION_BCM1250_PASS2_2) { periph_rev = 2; pass_str = "A0-A6"; war_pass = K_SYS_REVISION_BCM1250_PASS2; } else { printk("Unknown BCM1250 rev %x\n", soc_pass); ret = 1; } break; } return ret; } int sb1250_m3_workaround_needed(void) { switch (soc_type) { case K_SYS_SOC_TYPE_BCM1250: case K_SYS_SOC_TYPE_BCM1250_ALT: case K_SYS_SOC_TYPE_BCM1250_ALT2: case K_SYS_SOC_TYPE_BCM1125: case K_SYS_SOC_TYPE_BCM1125H: return soc_pass < K_SYS_REVISION_BCM1250_C0; default: return 0; } } static int __init setup_bcm112x(void) { int ret = 0; switch (soc_pass) { case 0: /* Early build didn't have revid set */ periph_rev = 3; pass_str = "A1"; war_pass = K_SYS_REVISION_BCM112x_A1; break; case K_SYS_REVISION_BCM112x_A1: periph_rev = 3; pass_str = "A1"; break; case K_SYS_REVISION_BCM112x_A2: periph_rev = 3; pass_str = "A2"; break; case K_SYS_REVISION_BCM112x_A3: periph_rev = 3; pass_str = "A3"; break; case K_SYS_REVISION_BCM112x_A4: periph_rev = 3; pass_str = "A4"; break; case K_SYS_REVISION_BCM112x_B0: periph_rev = 3; pass_str = "B0"; break; default: printk("Unknown %s rev %x\n", soc_str, soc_pass); ret = 1; } return ret; } /* Setup code likely to be common to all SiByte platforms */ static int __init sys_rev_decode(void) { int ret = 0; war_pass = soc_pass; switch (soc_type) { case K_SYS_SOC_TYPE_BCM1250: case K_SYS_SOC_TYPE_BCM1250_ALT: case K_SYS_SOC_TYPE_BCM1250_ALT2: soc_str = "BCM1250"; ret = setup_bcm1250(); break; case K_SYS_SOC_TYPE_BCM1120: soc_str = "BCM1120"; ret = setup_bcm112x(); break; case K_SYS_SOC_TYPE_BCM1125: soc_str = "BCM1125"; ret = setup_bcm112x(); 
break; case K_SYS_SOC_TYPE_BCM1125H: soc_str = "BCM1125H"; ret = setup_bcm112x(); break; default: printk("Unknown SOC type %x\n", soc_type); ret = 1; break; } return ret; } void __init sb1250_setup(void) { uint64_t sys_rev; int plldiv; int bad_config = 0; sb1_pass = read_c0_prid() & 0xff; sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION)); soc_type = SYS_SOC_TYPE(sys_rev); soc_pass = G_SYS_REVISION(sys_rev); if (sys_rev_decode()) { printk("Restart after failure to identify SiByte chip\n"); machine_restart(NULL); } plldiv = G_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25); printk("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n", soc_str, pass_str, zbbus_mhz * 2, sb1_pass); printk("Board type: %s\n", get_system_type()); switch (war_pass) { case K_SYS_REVISION_BCM1250_PASS1: #ifndef CONFIG_SB1_PASS_1_WORKAROUNDS printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, " "and the kernel doesn't have the proper " "workarounds compiled in. @@@@\n"); bad_config = 1; #endif break; case K_SYS_REVISION_BCM1250_PASS2: /* Pass 2 - easiest as default for now - so many numbers */ #if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \ !defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) printk("@@@@ This is a BCM1250 A3-A10 board, and the " "kernel doesn't have the proper workarounds " "compiled in. @@@@\n"); bad_config = 1; #endif #ifdef CONFIG_CPU_HAS_PREFETCH printk("@@@@ Prefetches may be enabled in this kernel, " "but are buggy on this board. @@@@\n"); bad_config = 1; #endif break; case K_SYS_REVISION_BCM1250_PASS2_2: #ifndef CONFIG_SB1_PASS_2_WORKAROUNDS printk("@@@@ This is a BCM1250 B1/B2. board, and the " "kernel doesn't have the proper workarounds " "compiled in. @@@@\n"); bad_config = 1; #endif #if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \ !defined(CONFIG_CPU_HAS_PREFETCH) printk("@@@@ This is a BCM1250 B1/B2, but the kernel is " "conservatively configured for an 'A' stepping. 
" "@@@@\n"); #endif break; default: break; } if (bad_config) { printk("Invalid configuration for this chip.\n"); machine_restart(NULL); } }
gpl-2.0
sachinthomaspj/android_kernel_htc_pico
net/ipv6/exthdrs_core.c
3093
3239
/* * IPv6 library code, needed by static components when full IPv6 support is * not configured or static. */ #include <net/ipv6.h> /* * find out if nexthdr is a well-known extension header or a protocol */ int ipv6_ext_hdr(u8 nexthdr) { /* * find out if nexthdr is an extension header or a protocol */ return (nexthdr == NEXTHDR_HOP) || (nexthdr == NEXTHDR_ROUTING) || (nexthdr == NEXTHDR_FRAGMENT) || (nexthdr == NEXTHDR_AUTH) || (nexthdr == NEXTHDR_NONE) || (nexthdr == NEXTHDR_DEST); } /* * Skip any extension headers. This is used by the ICMP module. * * Note that strictly speaking this conflicts with RFC 2460 4.0: * ...The contents and semantics of each extension header determine whether * or not to proceed to the next header. Therefore, extension headers must * be processed strictly in the order they appear in the packet; a * receiver must not, for example, scan through a packet looking for a * particular kind of extension header and process that header prior to * processing all preceding ones. * * We do exactly this. This is a protocol bug. We can't decide after a * seeing an unknown discard-with-error flavour TLV option if it's a * ICMP error message or not (errors should never be send in reply to * ICMP error messages). * * But I see no other way to do this. This might need to be reexamined * when Linux implements ESP (and maybe AUTH) headers. * --AK * * This function parses (probably truncated) exthdr set "hdr". * "nexthdrp" initially points to some place, * where type of the first header can be found. * * It skips all well-known exthdrs, and returns pointer to the start * of unparsable area i.e. the first header with unknown type. * If it is not NULL *nexthdr is updated by type/protocol of this header. * * NOTES: - if packet terminated with NEXTHDR_NONE it returns NULL. * - it may return pointer pointing beyond end of packet, * if the last recognized header is truncated in the middle. 
* - if packet is truncated, so that all parsed headers are skipped, * it returns NULL. * - First fragment header is skipped, not-first ones * are considered as unparsable. * - ESP is unparsable for now and considered like * normal payload protocol. * - Note also special handling of AUTH header. Thanks to IPsec wizards. * * --ANK (980726) */ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp) { u8 nexthdr = *nexthdrp; while (ipv6_ext_hdr(nexthdr)) { struct ipv6_opt_hdr _hdr, *hp; int hdrlen; if (nexthdr == NEXTHDR_NONE) return -1; hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); if (hp == NULL) return -1; if (nexthdr == NEXTHDR_FRAGMENT) { __be16 _frag_off, *fp; fp = skb_header_pointer(skb, start+offsetof(struct frag_hdr, frag_off), sizeof(_frag_off), &_frag_off); if (fp == NULL) return -1; if (ntohs(*fp) & ~0x7) break; hdrlen = 8; } else if (nexthdr == NEXTHDR_AUTH) hdrlen = (hp->hdrlen+2)<<2; else hdrlen = ipv6_optlen(hp); nexthdr = hp->nexthdr; start += hdrlen; } *nexthdrp = nexthdr; return start; } EXPORT_SYMBOL(ipv6_ext_hdr); EXPORT_SYMBOL(ipv6_skip_exthdr);
gpl-2.0
UnicronNL/vyos-kernel-utilite
drivers/scsi/aic94xx/aic94xx_dev.c
4629
11507
/* * Aic94xx SAS/SATA DDB management * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * $Id: //depot/aic94xx/aic94xx_dev.c#21 $ */ #include "aic94xx.h" #include "aic94xx_hwi.h" #include "aic94xx_reg.h" #include "aic94xx_sas.h" #define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \ (_ha)->hw_prof.max_ddbs) #define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) #define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) static int asd_get_ddb(struct asd_ha_struct *asd_ha) { int ddb, i; ddb = FIND_FREE_DDB(asd_ha); if (ddb >= asd_ha->hw_prof.max_ddbs) { ddb = -ENOMEM; goto out; } SET_DDB(ddb, asd_ha); for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) asd_ddbsite_write_dword(asd_ha, ddb, i, 0); out: return ddb; } #define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag) #define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr) #define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head) #define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type) #define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, 
conn_mask) #define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags) #define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2) #define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail) #define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail) #define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb) #define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn) #define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts) #define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr) #define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask) #define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags) #define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status) #define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) #define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) static void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) { if (!ddb || ddb >= 0xFFFF) return; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); CLEAR_DDB(ddb, asd_ha); } static void asd_set_ddb_type(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; if (dev->dev_type == SAS_SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); else asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR); } static int asd_init_sata_tag_ddb(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb, i; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2) asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); 
asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, SISTER_DDB, ddb); return 0; } void asd_set_dmamode(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; struct ata_device *ata_dev = sas_to_ata_dev(dev); int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, (1ULL<<qdepth)-1); asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth); } if (qdepth > 0) if (asd_init_sata_tag_ddb(dev) != 0) { unsigned long flags; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); ata_dev->flags |= ATA_DFLAG_NCQ_OFF; spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); } } static int asd_init_sata(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || dev->dev_type == SAS_SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); } asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF); return 0; } static int asd_init_target_ddb(struct domain_device *dev) { int ddb, i; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; u8 flags = 0; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; dev->lldd_dev = (void *) (unsigned long) ddb; asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE); asd_ddbsite_write_byte(asd_ha, ddb, 1, 0); asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF); for (i = 0; i < SAS_ADDR_SIZE; i++) asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i, dev->sas_addr[i]); asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF); asd_set_ddb_type(dev); asd_ddbsite_write_byte(asd_ha, ddb, 
CONN_MASK, dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; if ((dev->dev_type == SAS_SATA_DEV) || (dev->tproto & SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && rps_resp->function == SMP_REPORT_PHY_SATA && rps_resp->result == SMP_RESP_FUNC_ACC) { if (rps_resp->rps.affil_valid) flags |= STP_AFFIL_POL; if (rps_resp->rps.affil_supp) flags |= SUPPORTS_AFFIL; } } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, dev->pathways); asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } if (dev->dev_type == SAS_SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); flags = 0; if (dev->tproto & SAS_PROTOCOL_STP) flags |= STP_CL_POL_NO_TX; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags); asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); return i; } } if (dev->dev_type == SAS_END_DEVICE) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, min(rdev->I_T_nexus_loss_timeout, (u16)ITNL_TIMEOUT_CONST)); else asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, (u16)ITNL_TIMEOUT_CONST); } return 0; } static int asd_init_sata_pm_table_ddb(struct domain_device *dev) { struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb, i; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; for (i = 0; i < 32; i += 2) asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); 
asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, SISTER_DDB, ddb); return 0; } #define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags) #define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb) /** * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port * dev: pointer to domain device * * For SATA Port Multiplier Ports we need to allocate one SATA Port * Multiplier Port DDB and depending on whether the target on it * supports SATA II NCQ, one SATA Tag DDB. */ static int asd_init_sata_pm_port_ddb(struct domain_device *dev) { int ddb, i, parent_ddb, pmtable_ddb; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; u8 flags; ddb = asd_get_ddb(asd_ha); if (ddb < 0) return ddb; asd_set_ddb_type(dev); flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET; asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); asd_init_sata(dev); parent_ddb = (int) (unsigned long) dev->parent->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb); pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB); asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb); if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) { i = asd_init_sata_tag_ddb(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); return i; } } return 0; } static int asd_init_initiator_ddb(struct domain_device *dev) { return -ENODEV; } /** * asd_init_sata_pm_ddb -- SATA Port Multiplier * dev: pointer to domain device * * For STP and direct-attached SATA Port Multipliers we need * one target port DDB entry and one SATA PM table DDB entry. 
*/ static int asd_init_sata_pm_ddb(struct domain_device *dev) { int res = 0; res = asd_init_target_ddb(dev); if (res) goto out; res = asd_init_sata_pm_table_ddb(dev); if (res) asd_free_ddb(dev->port->ha->lldd_ha, (int) (unsigned long) dev->lldd_dev); out: return res; } int asd_dev_found(struct domain_device *dev) { unsigned long flags; int res = 0; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { case SAS_SATA_PM: res = asd_init_sata_pm_ddb(dev); break; case SAS_SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: if (dev->tproto) res = asd_init_target_ddb(dev); else res = asd_init_initiator_ddb(dev); } spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); return res; } void asd_dev_gone(struct domain_device *dev) { int ddb, sister_ddb; unsigned long flags; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); ddb = (int) (unsigned long) dev->lldd_dev; sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); if (sister_ddb != 0xFFFF) asd_free_ddb(asd_ha, sister_ddb); asd_free_ddb(asd_ha, ddb); dev->lldd_dev = NULL; spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); }
gpl-2.0
sexmachine/caf
net/decnet/netfilter/dn_rtmsg.c
4885
3777
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Routing Message Grabulator * * (C) 2000 ChyGwyn Limited - http://www.chygwyn.com/ * This code may be copied under the GPL v.2 or at your option * any later version. * * Author: Steven Whitehouse <steve@chygwyn.com> * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/spinlock.h> #include <linux/netlink.h> #include <linux/netfilter_decnet.h> #include <net/sock.h> #include <net/flow.h> #include <net/dn.h> #include <net/dn_route.h> static struct sock *dnrmg = NULL; static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp) { struct sk_buff *skb = NULL; size_t size; sk_buff_data_t old_tail; struct nlmsghdr *nlh; unsigned char *ptr; struct nf_dn_rtmsg *rtm; size = NLMSG_SPACE(rt_skb->len); size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)); skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, 0, size - sizeof(*nlh)); rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh); rtm->nfdn_ifindex = rt_skb->dev->ifindex; ptr = NFDN_RTMSG(rtm); skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len); nlh->nlmsg_len = skb->tail - old_tail; return skb; nlmsg_failure: if (skb) kfree_skb(skb); *errp = -ENOMEM; if (net_ratelimit()) printk(KERN_ERR "dn_rtmsg: error creating netlink message\n"); return NULL; } static void dnrmg_send_peer(struct sk_buff *skb) { struct sk_buff *skb2; int status = 0; int group = 0; unsigned char flags = *skb->data; switch (flags & DN_RT_CNTL_MSK) { case DN_RT_PKT_L1RT: group = DNRNG_NLGRP_L1; break; case DN_RT_PKT_L2RT: group = DNRNG_NLGRP_L2; break; default: return; } skb2 = dnrmg_build_message(skb, &status); if (skb2 == NULL) return; NETLINK_CB(skb2).dst_group = 
group; netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC); } static unsigned int dnrmg_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { dnrmg_send_peer(skb); return NF_ACCEPT; } #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) return; if (!capable(CAP_NET_ADMIN)) RCV_SKB_FAIL(-EPERM); /* Eventually we might send routing messages too */ RCV_SKB_FAIL(-EINVAL); } static struct nf_hook_ops dnrmg_ops __read_mostly = { .hook = dnrmg_hook, .pf = PF_DECnet, .hooknum = NF_DN_ROUTE, .priority = NF_DN_PRI_DNRTMSG, }; static int __init dn_rtmsg_init(void) { int rv = 0; dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, DNRNG_NLGRP_MAX, dnrmg_receive_user_skb, NULL, THIS_MODULE); if (dnrmg == NULL) { printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); return -ENOMEM; } rv = nf_register_hook(&dnrmg_ops); if (rv) { netlink_kernel_release(dnrmg); } return rv; } static void __exit dn_rtmsg_fini(void) { nf_unregister_hook(&dnrmg_ops); netlink_kernel_release(dnrmg); } MODULE_DESCRIPTION("DECnet Routing Message Grabulator"); MODULE_AUTHOR("Steven Whitehouse <steve@chygwyn.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG); module_init(dn_rtmsg_init); module_exit(dn_rtmsg_fini);
gpl-2.0
Surge1223/kernel_samsung-jfltevzw-VRUFNC5
drivers/gpu/drm/mga/mga_warp.c
5653
4832
/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com * * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: * Gareth Hughes <gareth@valinux.com> */ #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/platform_device.h> #include <linux/module.h> #include "drmP.h" #include "drm.h" #include "mga_drm.h" #include "mga_drv.h" #define FIRMWARE_G200 "matrox/g200_warp.fw" #define FIRMWARE_G400 "matrox/g400_warp.fw" MODULE_FIRMWARE(FIRMWARE_G200); MODULE_FIRMWARE(FIRMWARE_G400); #define MGA_WARP_CODE_ALIGN 256 /* in bytes */ #define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN) int mga_warp_install_microcode(drm_mga_private_t *dev_priv) { unsigned char *vcbase = dev_priv->warp->handle; unsigned long pcbase = dev_priv->warp->offset; const char *firmware_name; struct platform_device *pdev; const struct firmware *fw = NULL; const struct ihex_binrec *rec; unsigned int size; int n_pipes, where; int rc = 0; switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: firmware_name = FIRMWARE_G400; n_pipes = MGA_MAX_G400_PIPES; break; case MGA_CARD_TYPE_G200: firmware_name = FIRMWARE_G200; n_pipes = MGA_MAX_G200_PIPES; break; default: return -EINVAL; } pdev = platform_device_register_simple("mga_warp", 0, NULL, 0); if (IS_ERR(pdev)) { DRM_ERROR("mga: Failed to register microcode\n"); return PTR_ERR(pdev); } rc = request_ihex_firmware(&fw, firmware_name, &pdev->dev); platform_device_unregister(pdev); if (rc) { DRM_ERROR("mga: Failed to load microcode \"%s\"\n", firmware_name); return rc; } size = 0; where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { size += WARP_UCODE_SIZE(be16_to_cpu(rec->len)); where++; } if (where != n_pipes) { DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name); rc = -EINVAL; goto out; } size = PAGE_ALIGN(size); DRM_DEBUG("MGA ucode size = %d bytes\n", size); if (size > dev_priv->warp->size) { DRM_ERROR("microcode too large! 
(%u > %lu)\n", size, dev_priv->warp->size); rc = -ENOMEM; goto out; } memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { unsigned int src_size, dst_size; DRM_DEBUG(" pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase); dev_priv->warp_pipe_phys[where] = pcbase; src_size = be16_to_cpu(rec->len); dst_size = WARP_UCODE_SIZE(src_size); memcpy(vcbase, rec->data, src_size); pcbase += dst_size; vcbase += dst_size; where++; } out: release_firmware(fw); return rc; } #define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) int mga_warp_init(drm_mga_private_t *dev_priv) { u32 wmisc; /* FIXME: Get rid of these damned magic numbers... */ switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x00000E00); MGA_WRITE(MGA_WVRTXSZ, 0x00001807); MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); break; case MGA_CARD_TYPE_G200: MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x1606); MGA_WRITE(MGA_WVRTXSZ, 7); break; default: return -EINVAL; } MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE)); wmisc = MGA_READ(MGA_WMISC); if (wmisc != WMISC_EXPECTED) { DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", wmisc, WMISC_EXPECTED); return -EINVAL; } return 0; }
gpl-2.0
Toygoon/ToyKernel-Team.TCP_SGS2
lib/raid6/mmx.c
8469
3900
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright 2002 H. Peter Anvin - All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, Inc., 53 Temple Place Ste 330, * Boston MA 02111-1307, USA; either version 2 of the License, or * (at your option) any later version; incorporated herein by reference. * * ----------------------------------------------------------------------- */ /* * raid6/mmx.c * * MMX implementation of RAID-6 syndrome functions */ #if defined(__i386__) && !defined(__arch_um__) #include <linux/raid/pq.h> #include "x86.h" /* Shared with raid6/sse1.c */ const struct raid6_mmx_constants { u64 x1d; } raid6_mmx_constants = { 0x1d1d1d1d1d1d1d1dULL, }; static int raid6_have_mmx(void) { /* Not really "boot_cpu" but "all_cpus" */ return boot_cpu_has(X86_FEATURE_MMX); } /* * Plain MMX implementation */ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs) { u8 **dptr = (u8 **)ptrs; u8 *p, *q; int d, z, z0; z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ q = dptr[z0+2]; /* RS syndrome */ kernel_fpu_begin(); asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); asm volatile("pxor %mm5,%mm5"); /* Zero temp */ for ( d = 0 ; d < bytes ; d += 8 ) { asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ asm volatile("movq %mm2,%mm4"); /* Q[0] */ for ( z = z0-1 ; z >= 0 ; z-- ) { asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); asm volatile("pcmpgtb %mm4,%mm5"); asm volatile("paddb %mm4,%mm4"); asm volatile("pand %mm0,%mm5"); asm volatile("pxor %mm5,%mm4"); asm volatile("pxor %mm5,%mm5"); asm volatile("pxor %mm6,%mm2"); asm volatile("pxor %mm6,%mm4"); } asm volatile("movq %%mm2,%0" : "=m" (p[d])); asm volatile("pxor %mm2,%mm2"); asm volatile("movq %%mm4,%0" : "=m" (q[d])); asm volatile("pxor %mm4,%mm4"); } kernel_fpu_end(); } 
const struct raid6_calls raid6_mmxx1 = { raid6_mmx1_gen_syndrome, raid6_have_mmx, "mmxx1", 0 }; /* * Unrolled-by-2 MMX implementation */ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs) { u8 **dptr = (u8 **)ptrs; u8 *p, *q; int d, z, z0; z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ q = dptr[z0+2]; /* RS syndrome */ kernel_fpu_begin(); asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); asm volatile("pxor %mm5,%mm5"); /* Zero temp */ asm volatile("pxor %mm7,%mm7"); /* Zero temp */ for ( d = 0 ; d < bytes ; d += 16 ) { asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); asm volatile("movq %mm2,%mm4"); /* Q[0] */ asm volatile("movq %mm3,%mm6"); /* Q[1] */ for ( z = z0-1 ; z >= 0 ; z-- ) { asm volatile("pcmpgtb %mm4,%mm5"); asm volatile("pcmpgtb %mm6,%mm7"); asm volatile("paddb %mm4,%mm4"); asm volatile("paddb %mm6,%mm6"); asm volatile("pand %mm0,%mm5"); asm volatile("pand %mm0,%mm7"); asm volatile("pxor %mm5,%mm4"); asm volatile("pxor %mm7,%mm6"); asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); asm volatile("pxor %mm5,%mm2"); asm volatile("pxor %mm7,%mm3"); asm volatile("pxor %mm5,%mm4"); asm volatile("pxor %mm7,%mm6"); asm volatile("pxor %mm5,%mm5"); asm volatile("pxor %mm7,%mm7"); } asm volatile("movq %%mm2,%0" : "=m" (p[d])); asm volatile("movq %%mm3,%0" : "=m" (p[d+8])); asm volatile("movq %%mm4,%0" : "=m" (q[d])); asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); } kernel_fpu_end(); } const struct raid6_calls raid6_mmxx2 = { raid6_mmx2_gen_syndrome, raid6_have_mmx, "mmxx2", 0 }; #endif
gpl-2.0
StarKissed/starkissed-kernel-roth
fs/nls/nls_cp1251.c
12565
12751
/* * linux/fs/nls/nls_cp1251.c * * Charset cp1251 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0402, 0x0403, 0x201a, 0x0453, 0x201e, 0x2026, 0x2020, 0x2021, 0x20ac, 0x2030, 0x0409, 0x2039, 0x040a, 0x040c, 0x040b, 0x040f, /* 0x90*/ 0x0452, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x2122, 0x0459, 0x203a, 0x045a, 0x045c, 0x045b, 0x045f, /* 0xa0*/ 0x00a0, 0x040e, 0x045e, 0x0408, 0x00a4, 0x0490, 0x00a6, 0x00a7, 0x0401, 0x00a9, 0x0404, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x0407, /* 0xb0*/ 0x00b0, 0x00b1, 0x0406, 0x0456, 0x0491, 0x00b5, 0x00b6, 
0x00b7, 0x0451, 0x2116, 0x0454, 0x00bb, 0x0458, 0x0405, 0x0455, 0x0457, /* 0xc0*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0xd0*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xe0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xf0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, 
/* 0xa0-0xa7 */ 0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page04[256] = { 0x00, 0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, /* 0x00-0x07 */ 0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x00, 0xa1, 0x8f, /* 0x08-0x0f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x10-0x17 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x18-0x1f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x20-0x27 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x28-0x2f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0x48-0x4f */ 0x00, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, /* 0x50-0x57 */ 0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x00, 0xa2, 0x9f, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0xa5, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const 
unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */ 0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 
0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x90, 0x83, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa2, 0xa2, 0xbc, 0xa4, 0xb4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb3, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 
0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x81, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x80, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb2, 0xa5, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xa8, 0xb9, 0xaa, 0xbb, 0xa3, 0xbd, 0xbd, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp1251", .uni2char 
= uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp1251(void) { return register_nls(&table); } static void __exit exit_nls_cp1251(void) { unregister_nls(&table); } module_init(init_nls_cp1251) module_exit(exit_nls_cp1251) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
shuiqingliu/android_kernel_lenovo_stuttgart
lib/is_single_threaded.c
13589
1363
/* Function to determine if a thread group is single threaded or not * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from security/selinux/hooks.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/sched.h> /* * Returns true if the task does not share ->mm with another thread/process. */ bool current_is_single_threaded(void) { struct task_struct *task = current; struct mm_struct *mm = task->mm; struct task_struct *p, *t; bool ret; if (atomic_read(&task->signal->live) != 1) return false; if (atomic_read(&mm->mm_users) == 1) return true; ret = false; rcu_read_lock(); for_each_process(p) { if (unlikely(p->flags & PF_KTHREAD)) continue; if (unlikely(p == task->group_leader)) continue; t = p; do { if (unlikely(t->mm == mm)) goto found; if (likely(t->mm)) break; /* * t->mm == NULL. Make sure next_thread/next_task * will see other CLONE_VM tasks which might be * forked before exiting. */ smp_rmb(); } while_each_thread(p, t); } ret = true; found: rcu_read_unlock(); return ret; }
gpl-2.0
Ryuinferno/Blazing_Kernel_t1
mm/memory-failure.c
22
40357
/* * Copyright (C) 2008, 2009 Intel Corporation * Authors: Andi Kleen, Fengguang Wu * * This software may be redistributed and/or modified under the terms of * the GNU General Public License ("GPL") version 2 only as published by the * Free Software Foundation. * * High level machine check handler. Handles pages reported by the * hardware as being corrupted usually due to a multi-bit ECC memory or cache * failure. * * In addition there is a "soft offline" entry point that allows stop using * not-yet-corrupted-by-suspicious pages without killing anything. * * Handles page cache pages in various states. The tricky part * here is that we can access any page asynchronously in respect to * other VM users, because memory failures could happen anytime and * anywhere. This could violate some of their assumptions. This is why * this code has to be extremely careful. Generally it tries to use * normal locking rules, as in get the standard locks, even if that means * the error handling takes potentially a long time. * * There are several operations here with exponential complexity because * of unsuitable VM data structures. For example the operation to map back * from RMAP chains to processes has to walk the complete process list and * has non linear complexity with the number. But since memory corruptions * are rare we hope to get away with this. This avoids impacting the core * VM. 
*/ /* * Notebook: * - hugetlb needs more code * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages * - pass bad pages to kdump next kernel */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/page-flags.h> #include <linux/kernel-page-flags.h> #include <linux/sched.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/backing-dev.h> #include <linux/migrate.h> #include <linux/page-isolation.h> #include <linux/suspend.h> #include <linux/slab.h> #include <linux/swapops.h> #include <linux/hugetlb.h> #include <linux/memory_hotplug.h> #include <linux/mm_inline.h> #include "internal.h" int sysctl_memory_failure_early_kill __read_mostly = 0; int sysctl_memory_failure_recovery __read_mostly = 1; atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) u32 hwpoison_filter_enable = 0; u32 hwpoison_filter_dev_major = ~0U; u32 hwpoison_filter_dev_minor = ~0U; u64 hwpoison_filter_flags_mask; u64 hwpoison_filter_flags_value; EXPORT_SYMBOL_GPL(hwpoison_filter_enable); EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); static int hwpoison_filter_dev(struct page *p) { struct address_space *mapping; dev_t dev; if (hwpoison_filter_dev_major == ~0U && hwpoison_filter_dev_minor == ~0U) return 0; /* * page_mapping() does not accept slab pages. 
*/ if (PageSlab(p)) return -EINVAL; mapping = page_mapping(p); if (mapping == NULL || mapping->host == NULL) return -EINVAL; dev = mapping->host->i_sb->s_dev; if (hwpoison_filter_dev_major != ~0U && hwpoison_filter_dev_major != MAJOR(dev)) return -EINVAL; if (hwpoison_filter_dev_minor != ~0U && hwpoison_filter_dev_minor != MINOR(dev)) return -EINVAL; return 0; } static int hwpoison_filter_flags(struct page *p) { if (!hwpoison_filter_flags_mask) return 0; if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == hwpoison_filter_flags_value) return 0; else return -EINVAL; } /* * This allows stress tests to limit test scope to a collection of tasks * by putting them under some memcg. This prevents killing unrelated/important * processes such as /sbin/init. Note that the target task may share clean * pages with init (eg. libc text), which is harmless. If the target task * share _dirty_ pages with another task B, the test scheme must make sure B * is also included in the memcg. At last, due to race conditions this filter * can only guarantee that the page either belongs to the memcg tasks, or is * a freed page. 
*/ #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP u64 hwpoison_filter_memcg; EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); static int hwpoison_filter_task(struct page *p) { struct mem_cgroup *mem; struct cgroup_subsys_state *css; unsigned long ino; if (!hwpoison_filter_memcg) return 0; mem = try_get_mem_cgroup_from_page(p); if (!mem) return -EINVAL; css = mem_cgroup_css(mem); /* root_mem_cgroup has NULL dentries */ if (!css->cgroup->dentry) return -EINVAL; ino = css->cgroup->dentry->d_inode->i_ino; css_put(css); if (ino != hwpoison_filter_memcg) return -EINVAL; return 0; } #else static int hwpoison_filter_task(struct page *p) { return 0; } #endif int hwpoison_filter(struct page *p) { if (!hwpoison_filter_enable) return 0; if (hwpoison_filter_dev(p)) return -EINVAL; if (hwpoison_filter_flags(p)) return -EINVAL; if (hwpoison_filter_task(p)) return -EINVAL; return 0; } #else int hwpoison_filter(struct page *p) { return 0; } #endif EXPORT_SYMBOL_GPL(hwpoison_filter); /* * Send all the processes who have the page mapped an ``action optional'' * signal. */ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, unsigned long pfn, struct page *page) { struct siginfo si; int ret; printk(KERN_ERR "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n", pfn, t->comm, t->pid); si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_MCEERR_AO; si.si_addr = (void *)addr; #ifdef __ARCH_SI_TRAPNO si.si_trapno = trapno; #endif si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT; /* * Don't use force here, it's convenient if the signal * can be temporarily blocked. * This could cause a loop when the user sets SIGBUS * to SIG_IGN, but hopefully no one will do that? */ ret = send_sig_info(SIGBUS, &si, t); /* synchronous? 
*/ if (ret < 0) printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", t->comm, t->pid, ret); return ret; } /* * When a unknown page type is encountered drain as many buffers as possible * in the hope to turn the page into a LRU or free page, which we can handle. */ void shake_page(struct page *p, int access) { if (!PageSlab(p)) { lru_add_drain_all(); if (PageLRU(p)) return; drain_all_pages(); if (PageLRU(p) || is_free_buddy_page(p)) return; } /* * Only call shrink_slab here (which would also shrink other caches) if * access is not potentially fatal. */ if (access) { int nr; do { struct shrink_control shrink = { .gfp_mask = GFP_KERNEL, }; nr = shrink_slab(&shrink, 1000, 1000); if (page_count(p) == 1) break; } while (nr > 10); } } EXPORT_SYMBOL_GPL(shake_page); /* * Kill all processes that have a poisoned page mapped and then isolate * the page. * * General strategy: * Find all processes having the page mapped and kill them. * But we keep a page reference around so that the page is not * actually freed yet. * Then stash the page away * * There's no convenient way to get back to mapped processes * from the VMAs. So do a brute-force search over all * running processes. * * Remember that machine checks are not common (or rather * if they are common you have other problems), so this shouldn't * be a performance issue. * * Also there are some races possible while we get from the * error detection to actually handle it. */ struct to_kill { struct list_head nd; struct task_struct *tsk; unsigned long addr; char addr_valid; }; /* * Failure handling: if we can't find or can't kill a process there's * not much we can do. We just print a message and ignore otherwise. */ /* * Schedule a process for later kill. * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. * TBD would GFP_NOIO be enough? 
*/ static void add_to_kill(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, struct to_kill **tkc) { struct to_kill *tk; if (*tkc) { tk = *tkc; *tkc = NULL; } else { tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); if (!tk) { printk(KERN_ERR "MCE: Out of memory while machine check handling\n"); return; } } tk->addr = page_address_in_vma(p, vma); tk->addr_valid = 1; /* * In theory we don't have to kill when the page was * munmaped. But it could be also a mremap. Since that's * likely very rare kill anyways just out of paranoia, but use * a SIGKILL because the error is not contained anymore. */ if (tk->addr == -EFAULT) { pr_info("MCE: Unable to find user space address %lx in %s\n", page_to_pfn(p), tsk->comm); tk->addr_valid = 0; } get_task_struct(tsk); tk->tsk = tsk; list_add_tail(&tk->nd, to_kill); } /* * Kill the processes that have been collected earlier. * * Only do anything when DOIT is set, otherwise just free the list * (this is used for clean pages which do not need killing) * Also when FAIL is set do a force kill because something went * wrong earlier. */ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, int fail, struct page *page, unsigned long pfn) { struct to_kill *tk, *next; list_for_each_entry_safe (tk, next, to_kill, nd) { if (doit) { /* * In case something went wrong with munmapping * make sure the process doesn't catch the * signal and then access the memory. Just kill it. */ if (fail || tk->addr_valid == 0) { printk(KERN_ERR "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", pfn, tk->tsk->comm, tk->tsk->pid); force_sig(SIGKILL, tk->tsk); } /* * In theory the process could have mapped * something else on the address in-between. We could * check for that, but we need to tell the * process anyways. 
*/ else if (kill_proc_ao(tk->tsk, tk->addr, trapno, pfn, page) < 0) printk(KERN_ERR "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", pfn, tk->tsk->comm, tk->tsk->pid); } put_task_struct(tk->tsk); kfree(tk); } } static int task_early_kill(struct task_struct *tsk) { if (!tsk->mm) return 0; if (tsk->flags & PF_MCE_PROCESS) return !!(tsk->flags & PF_MCE_EARLY); return sysctl_memory_failure_early_kill; } /* * Collect processes when the error hit an anonymous page. */ static void collect_procs_anon(struct page *page, struct list_head *to_kill, struct to_kill **tkc) { struct vm_area_struct *vma; struct task_struct *tsk; struct anon_vma *av; av = page_lock_anon_vma(page); if (av == NULL) /* Not actually mapped anymore */ return; read_lock(&tasklist_lock); for_each_process (tsk) { struct anon_vma_chain *vmac; if (!task_early_kill(tsk)) continue; list_for_each_entry(vmac, &av->head, same_anon_vma) { vma = vmac->vma; if (!page_mapped_in_vma(page, vma)) continue; if (vma->vm_mm == tsk->mm) add_to_kill(tsk, page, vma, to_kill, tkc); } } read_unlock(&tasklist_lock); page_unlock_anon_vma(av); } /* * Collect processes when the error hit a file mapped page. */ static void collect_procs_file(struct page *page, struct list_head *to_kill, struct to_kill **tkc) { struct vm_area_struct *vma; struct task_struct *tsk; struct prio_tree_iter iter; struct address_space *mapping = page->mapping; mutex_lock(&mapping->i_mmap_mutex); read_lock(&tasklist_lock); for_each_process(tsk) { pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); if (!task_early_kill(tsk)) continue; vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { /* * Send early kill signal to tasks where a vma covers * the page but the corrupted page is not necessarily * mapped it in its pte. * Assume applications who requested early kill want * to be informed of all such data corruptions. 
*/ if (vma->vm_mm == tsk->mm) add_to_kill(tsk, page, vma, to_kill, tkc); } } read_unlock(&tasklist_lock); mutex_unlock(&mapping->i_mmap_mutex); } /* * Collect the processes who have the corrupted page mapped to kill. * This is done in two steps for locking reasons. * First preallocate one tokill structure outside the spin locks, * so that we can kill at least one process reasonably reliable. */ static void collect_procs(struct page *page, struct list_head *tokill) { struct to_kill *tk; if (!page->mapping) return; tk = kmalloc(sizeof(struct to_kill), GFP_NOIO); if (!tk) return; if (PageAnon(page)) collect_procs_anon(page, tokill, &tk); else collect_procs_file(page, tokill, &tk); kfree(tk); } /* * Error handlers for various types of pages. */ enum outcome { IGNORED, /* Error: cannot be handled */ FAILED, /* Error: handling failed */ DELAYED, /* Will be handled later */ RECOVERED, /* Successfully recovered */ }; static const char *action_name[] = { [IGNORED] = "Ignored", [FAILED] = "Failed", [DELAYED] = "Delayed", [RECOVERED] = "Recovered", }; /* * XXX: It is possible that a page is isolated from LRU cache, * and then kept in swap cache or failed to remove from page cache. * The page count will stop it from being freed by unpoison. * Stress tests should be aware of this memory leak problem. */ static int delete_from_lru_cache(struct page *p) { if (!isolate_lru_page(p)) { /* * Clear sensible page flags, so that the buddy system won't * complain when the page is unpoison-and-freed. */ ClearPageActive(p); ClearPageUnevictable(p); /* * drop the page count elevated by isolate_lru_page() */ page_cache_release(p); return 0; } return -EIO; } /* * Error hit kernel page. * Do nothing, try to be lucky and not touch this instead. For a few cases we * could be more sophisticated. */ static int me_kernel(struct page *p, unsigned long pfn) { return IGNORED; } /* * Page in unknown state. Do nothing. 
*/ static int me_unknown(struct page *p, unsigned long pfn) { printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); return FAILED; } /* * Clean (or cleaned) page cache page. */ static int me_pagecache_clean(struct page *p, unsigned long pfn) { int err; int ret = FAILED; struct address_space *mapping; delete_from_lru_cache(p); /* * For anonymous pages we're done the only reference left * should be the one m_f() holds. */ if (PageAnon(p)) return RECOVERED; /* * Now truncate the page in the page cache. This is really * more like a "temporary hole punch" * Don't do this for block devices when someone else * has a reference, because it could be file system metadata * and that's not safe to truncate. */ mapping = page_mapping(p); if (!mapping) { /* * Page has been teared down in the meanwhile */ return FAILED; } /* * Truncation is a bit tricky. Enable it per file system for now. * * Open: to take i_mutex or not for this? Right now we don't. */ if (mapping->a_ops->error_remove_page) { err = mapping->a_ops->error_remove_page(mapping, p); if (err != 0) { printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", pfn, err); } else if (page_has_private(p) && !try_to_release_page(p, GFP_NOIO)) { pr_info("MCE %#lx: failed to release buffers\n", pfn); } else { ret = RECOVERED; } } else { /* * If the file system doesn't support it just invalidate * This fails on dirty or anything with private pages */ if (invalidate_inode_page(p)) ret = RECOVERED; else printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", pfn); } return ret; } /* * Dirty cache page page * Issues: when the error hit a hole page the error is not properly * propagated. */ static int me_pagecache_dirty(struct page *p, unsigned long pfn) { struct address_space *mapping = page_mapping(p); SetPageError(p); /* TBD: print more information about the file. */ if (mapping) { /* * IO error will be reported by write(), fsync(), etc. * who check the mapping. 
* This way the application knows that something went * wrong with its dirty file data. * * There's one open issue: * * The EIO will be only reported on the next IO * operation and then cleared through the IO map. * Normally Linux has two mechanisms to pass IO error * first through the AS_EIO flag in the address space * and then through the PageError flag in the page. * Since we drop pages on memory failure handling the * only mechanism open to use is through AS_AIO. * * This has the disadvantage that it gets cleared on * the first operation that returns an error, while * the PageError bit is more sticky and only cleared * when the page is reread or dropped. If an * application assumes it will always get error on * fsync, but does other operations on the fd before * and the page is dropped between then the error * will not be properly reported. * * This can already happen even without hwpoisoned * pages: first on metadata IO errors (which only * report through AS_EIO) or when the page is dropped * at the wrong time. * * So right now we assume that the application DTRT on * the first EIO, but we're not worse than other parts * of the kernel. */ mapping_set_error(mapping, EIO); } return me_pagecache_clean(p, pfn); } /* * Clean and dirty swap cache. * * Dirty swap cache page is tricky to handle. The page could live both in page * cache and swap cache(ie. page is freshly swapped in). So it could be * referenced concurrently by 2 types of PTEs: * normal PTEs and swap PTEs. We try to handle them consistently by calling * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs, * and then * - clear dirty bit to prevent IO * - remove from LRU * - but keep in the swap cache, so that when we return to it on * a later page fault, we know the application is accessing * corrupted data and shall be killed (we installed simple * interception code in do_swap_page to catch it). * * Clean swap cache pages can be directly isolated. 
A later page fault will * bring in the known good data from disk. */ static int me_swapcache_dirty(struct page *p, unsigned long pfn) { ClearPageDirty(p); /* Trigger EIO in shmem: */ ClearPageUptodate(p); if (!delete_from_lru_cache(p)) return DELAYED; else return FAILED; } static int me_swapcache_clean(struct page *p, unsigned long pfn) { delete_from_swap_cache(p); if (!delete_from_lru_cache(p)) return RECOVERED; else return FAILED; } /* * Huge pages. Needs work. * Issues: * - Error on hugepage is contained in hugepage unit (not in raw page unit.) * To narrow down kill region to one page, we need to break up pmd. */ static int me_huge_page(struct page *p, unsigned long pfn) { int res = 0; struct page *hpage = compound_head(p); /* * We can safely recover from error on free or reserved (i.e. * not in-use) hugepage by dequeuing it from freelist. * To check whether a hugepage is in-use or not, we can't use * page->lru because it can be used in other hugepage operations, * such as __unmap_hugepage_range() and gather_surplus_pages(). * So instead we use page_mapping() and PageAnon(). * We assume that this function is called with page lock held, * so there is no race between isolation and mapping/unmapping. */ if (!(page_mapping(hpage) || PageAnon(hpage))) { res = dequeue_hwpoisoned_huge_page(hpage); if (!res) return RECOVERED; } return DELAYED; } /* * Various page states we can handle. * * A page state is defined by its current page->flags bits. * The table matches them in order and calls the right handler. * * This is quite tricky because we can access page at any time * in its live cycle, so all accesses have to be extremely careful. * * This is not complete. More states could be added. * For any missing state don't attempt recovery. 
*/ #define dirty (1UL << PG_dirty) #define sc (1UL << PG_swapcache) #define unevict (1UL << PG_unevictable) #define mlock (1UL << PG_mlocked) #define writeback (1UL << PG_writeback) #define lru (1UL << PG_lru) #define swapbacked (1UL << PG_swapbacked) #define head (1UL << PG_head) #define tail (1UL << PG_tail) #define compound (1UL << PG_compound) #define slab (1UL << PG_slab) #define reserved (1UL << PG_reserved) static struct page_state { unsigned long mask; unsigned long res; char *msg; int (*action)(struct page *p, unsigned long pfn); } error_states[] = { { reserved, reserved, "reserved kernel", me_kernel }, /* * free pages are specially detected outside this table: * PG_buddy pages only make a small fraction of all free pages. */ /* * Could in theory check if slab page is free or if we can drop * currently unused objects without touching them. But just * treat it as standard kernel for now. */ { slab, slab, "kernel slab", me_kernel }, #ifdef CONFIG_PAGEFLAGS_EXTENDED { head, head, "huge", me_huge_page }, { tail, tail, "huge", me_huge_page }, #else { compound, compound, "huge", me_huge_page }, #endif { sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty }, { sc|dirty, sc, "swapcache", me_swapcache_clean }, { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, { unevict, unevict, "unevictable LRU", me_pagecache_clean}, { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, { mlock, mlock, "mlocked LRU", me_pagecache_clean }, { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, { lru|dirty, lru, "clean LRU", me_pagecache_clean }, /* * Catchall entry: must be at end. 
*/ { 0, 0, "unknown page state", me_unknown }, }; #undef dirty #undef sc #undef unevict #undef mlock #undef writeback #undef lru #undef swapbacked #undef head #undef tail #undef compound #undef slab #undef reserved static void action_result(unsigned long pfn, char *msg, int result) { struct page *page = pfn_to_page(pfn); printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n", pfn, PageDirty(page) ? "dirty " : "", msg, action_name[result]); } static int page_action(struct page_state *ps, struct page *p, unsigned long pfn) { int result; int count; result = ps->action(p, pfn); action_result(pfn, ps->msg, result); count = page_count(p) - 1; if (ps->action == me_swapcache_dirty && result == DELAYED) count--; if (count != 0) { printk(KERN_ERR "MCE %#lx: %s page still referenced by %d users\n", pfn, ps->msg, count); result = FAILED; } /* Could do more checks here if page looks ok */ /* * Could adjust zone counters here to correct for the missing page. */ return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY; } /* * Do all that is necessary to remove user space mappings. Unmap * the pages and send SIGBUS to the processes if the data was dirty. */ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, int trapno) { enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; struct address_space *mapping; LIST_HEAD(tokill); int ret; int kill = 1; struct page *hpage = compound_head(p); struct page *ppage; if (PageReserved(p) || PageSlab(p)) return SWAP_SUCCESS; /* * This check implies we don't kill processes if their pages * are in the swap cache early. Those are always late kills. */ if (!page_mapped(hpage)) return SWAP_SUCCESS; if (PageKsm(p)) return SWAP_FAIL; if (PageSwapCache(p)) { printk(KERN_ERR "MCE %#lx: keeping poisoned page in swap cache\n", pfn); ttu |= TTU_IGNORE_HWPOISON; } /* * Propagate the dirty bit from PTEs to struct page first, because we * need this to decide if we should kill or just drop the page. 
* XXX: the dirty test could be racy: set_page_dirty() may not always * be called inside page lock (it's recommended but not enforced). */ mapping = page_mapping(hpage); if (!PageDirty(hpage) && mapping && mapping_cap_writeback_dirty(mapping)) { if (page_mkclean(hpage)) { SetPageDirty(hpage); } else { kill = 0; ttu |= TTU_IGNORE_HWPOISON; printk(KERN_INFO "MCE %#lx: corrupted page was clean: dropped without side effects\n", pfn); } } /* * ppage: poisoned page * if p is regular page(4k page) * ppage == real poisoned page; * else p is hugetlb or THP, ppage == head page. */ ppage = hpage; if (PageTransHuge(hpage)) { /* * Verify that this isn't a hugetlbfs head page, the check for * PageAnon is just for avoid tripping a split_huge_page * internal debug check, as split_huge_page refuses to deal with * anything that isn't an anon page. PageAnon can't go away fro * under us because we hold a refcount on the hpage, without a * refcount on the hpage. split_huge_page can't be safely called * in the first place, having a refcount on the tail isn't * enough * to be safe. */ if (!PageHuge(hpage) && PageAnon(hpage)) { if (unlikely(split_huge_page(hpage))) { /* * FIXME: if splitting THP is failed, it is * better to stop the following operation rather * than causing panic by unmapping. System might * survive if the page is freed later. */ printk(KERN_INFO "MCE %#lx: failed to split THP\n", pfn); BUG_ON(!PageHWPoison(p)); return SWAP_FAIL; } /* THP is split, so ppage should be the real poisoned page. */ ppage = p; } } /* * First collect all the processes that have the page * mapped in dirty form. This has to be done before try_to_unmap, * because ttu takes the rmap data structures down. * * Error handling: We ignore errors here because * there's nothing that can be done. 
*/ if (kill) collect_procs(ppage, &tokill); if (hpage != ppage) lock_page(ppage); ret = try_to_unmap(ppage, ttu); if (ret != SWAP_SUCCESS) printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", pfn, page_mapcount(ppage)); if (hpage != ppage) unlock_page(ppage); /* * Now that the dirty bit has been propagated to the * struct page and all unmaps done we can decide if * killing is needed or not. Only kill when the page * was dirty, otherwise the tokill list is merely * freed. When there was a problem unmapping earlier * use a more force-full uncatchable kill to prevent * any accesses to the poisoned memory. */ kill_procs_ao(&tokill, !!PageDirty(ppage), trapno, ret != SWAP_SUCCESS, p, pfn); return ret; } static void set_page_hwpoison_huge_page(struct page *hpage) { int i; int nr_pages = 1 << compound_trans_order(hpage); for (i = 0; i < nr_pages; i++) SetPageHWPoison(hpage + i); } static void clear_page_hwpoison_huge_page(struct page *hpage) { int i; int nr_pages = 1 << compound_trans_order(hpage); for (i = 0; i < nr_pages; i++) ClearPageHWPoison(hpage + i); } int __memory_failure(unsigned long pfn, int trapno, int flags) { struct page_state *ps; struct page *p; struct page *hpage; int res; unsigned int nr_pages; if (!sysctl_memory_failure_recovery) panic("Memory failure from trap %d on page %lx", trapno, pfn); if (!pfn_valid(pfn)) { printk(KERN_ERR "MCE %#lx: memory outside kernel control\n", pfn); return -ENXIO; } p = pfn_to_page(pfn); hpage = compound_head(p); if (TestSetPageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn); return 0; } nr_pages = 1 << compound_trans_order(hpage); atomic_long_add(nr_pages, &mce_bad_pages); /* * We need/can do nothing about count=0 pages. * 1) it's a free page, and therefore in safe hand: * prep_new_page() will be the gate keeper. * 2) it's a free hugepage, which is also safe: * an affected hugepage will be dequeued from hugepage freelist, * so there's no concern about reusing it ever after. 
* 3) it's part of a non-compound high order page. * Implies some kernel user: cannot stop them from * R/W the page; let's pray that the page has been * used and will be freed some time later. * In fact it's dangerous to directly bump up page count from 0, * that may make page_freeze_refs()/page_unfreeze_refs() mismatch. */ if (!(flags & MF_COUNT_INCREASED) && !get_page_unless_zero(hpage)) { if (is_free_buddy_page(p)) { action_result(pfn, "free buddy", DELAYED); return 0; } else if (PageHuge(hpage)) { /* * Check "just unpoisoned", "filter hit", and * "race with other subpage." */ lock_page(hpage); if (!PageHWPoison(hpage) || (hwpoison_filter(p) && TestClearPageHWPoison(p)) || (p != hpage && TestSetPageHWPoison(hpage))) { atomic_long_sub(nr_pages, &mce_bad_pages); return 0; } set_page_hwpoison_huge_page(hpage); res = dequeue_hwpoisoned_huge_page(hpage); action_result(pfn, "free huge", res ? IGNORED : DELAYED); unlock_page(hpage); return res; } else { action_result(pfn, "high order kernel", IGNORED); return -EBUSY; } } /* * We ignore non-LRU pages for good reasons. * - PG_locked is only well defined for LRU pages and a few others * - to avoid races with __set_page_locked() * - to avoid races with __SetPageSlab*() (and more non-atomic ops) * The check (unnecessarily) ignores LRU pages being isolated and * walked by the page reclaim code, however that's not a big loss. */ if (!PageHuge(p) && !PageTransCompound(p)) { if (!PageLRU(p)) shake_page(p, 0); if (!PageLRU(p)) { /* * shake_page could have turned it free. */ if (is_free_buddy_page(p)) { action_result(pfn, "free buddy, 2nd try", DELAYED); return 0; } action_result(pfn, "non LRU", IGNORED); put_page(p); return -EBUSY; } } /* * Lock the page and wait for writeback to finish. * It's very difficult to mess with pages currently under IO * and in many cases impossible, so we just avoid it here. 
*/ lock_page(hpage); /* * unpoison always clear PG_hwpoison inside page lock */ if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); res = 0; goto out; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) atomic_long_sub(nr_pages, &mce_bad_pages); unlock_page(hpage); put_page(hpage); return 0; } /* * For error on the tail page, we should set PG_hwpoison * on the head page to show that the hugepage is hwpoisoned */ if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) { action_result(pfn, "hugepage already hardware poisoned", IGNORED); unlock_page(hpage); put_page(hpage); return 0; } /* * Set PG_hwpoison on all pages in an error hugepage, * because containment is done in hugepage unit for now. * Since we have done TestSetPageHWPoison() for the head page with * page lock held, we can safely set PG_hwpoison bits on tail pages. */ if (PageHuge(p)) set_page_hwpoison_huge_page(hpage); wait_on_page_writeback(p); /* * Now take care of user space mappings. * Abort on fail: __delete_from_page_cache() assumes unmapped page. */ if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) { printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); res = -EBUSY; goto out; } /* * Torn down by someone else? */ if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { action_result(pfn, "already truncated LRU", IGNORED); res = -EBUSY; goto out; } res = -EBUSY; for (ps = error_states;; ps++) { if ((p->flags & ps->mask) == ps->res) { res = page_action(ps, p, pfn); break; } } out: unlock_page(hpage); return res; } EXPORT_SYMBOL_GPL(__memory_failure); /** * memory_failure - Handle memory failure of a page. * @pfn: Page Number of the corrupted page * @trapno: Trap number reported in the signal to user space. * * This function is called by the low level machine check code * of an architecture when it detects hardware memory corruption * of a page. It tries its best to recover, which includes * dropping pages, killing processes etc. 
* * The function is primarily of use for corruptions that * happen outside the current execution context (e.g. when * detected by a background scrubber) * * Must run in process context (e.g. a work queue) with interrupts * enabled and no spinlocks hold. */ void memory_failure(unsigned long pfn, int trapno) { __memory_failure(pfn, trapno, 0); } /** * unpoison_memory - Unpoison a previously poisoned page * @pfn: Page number of the to be unpoisoned page * * Software-unpoison a page that has been poisoned by * memory_failure() earlier. * * This is only done on the software-level, so it only works * for linux injected failures, not real hardware failures * * Returns 0 for success, otherwise -errno. */ int unpoison_memory(unsigned long pfn) { struct page *page; struct page *p; int freeit = 0; unsigned int nr_pages; if (!pfn_valid(pfn)) return -ENXIO; p = pfn_to_page(pfn); page = compound_head(p); if (!PageHWPoison(p)) { pr_info("MCE: Page was already unpoisoned %#lx\n", pfn); return 0; } nr_pages = 1 << compound_trans_order(page); if (!get_page_unless_zero(page)) { /* * Since HWPoisoned hugepage should have non-zero refcount, * race between memory failure and unpoison seems to happen. * In such case unpoison fails and memory failure runs * to the end. */ if (PageHuge(page)) { pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn); return 0; } if (TestClearPageHWPoison(p)) atomic_long_sub(nr_pages, &mce_bad_pages); pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); return 0; } lock_page(page); /* * This test is racy because PG_hwpoison is set outside of page lock. * That's acceptable because that won't trigger kernel panic. Instead, * the PG_hwpoison page will be caught and isolated on the entrance to * the free buddy page pool. 
*/ if (TestClearPageHWPoison(page)) { pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); atomic_long_sub(nr_pages, &mce_bad_pages); freeit = 1; if (PageHuge(page)) clear_page_hwpoison_huge_page(page); } unlock_page(page); put_page(page); if (freeit) put_page(page); return 0; } EXPORT_SYMBOL(unpoison_memory); static struct page *new_page(struct page *p, unsigned long private, int **x) { int nid = page_to_nid(p); if (PageHuge(p)) return alloc_huge_page_node(page_hstate(compound_head(p)), nid); else return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); } /* * Safely get reference count of an arbitrary page. * Returns 0 for a free page, -EIO for a zero refcount page * that is not free, and 1 for any other page type. * For 1 the page is returned with increased page count, otherwise not. */ static int get_any_page(struct page *p, unsigned long pfn, int flags) { int ret; if (flags & MF_COUNT_INCREASED) return 1; /* * The lock_memory_hotplug prevents a race with memory hotplug. * This is a big hammer, a better would be nicer. */ lock_memory_hotplug(); /* * Isolate the page, so that it doesn't get reallocated if it * was free. */ set_migratetype_isolate(p); /* * When the target page is a free hugepage, just remove it * from free hugepage list. 
*/ if (!get_page_unless_zero(compound_head(p))) { if (PageHuge(p)) { pr_info("get_any_page: %#lx free huge page\n", pfn); ret = dequeue_hwpoisoned_huge_page(compound_head(p)); } else if (is_free_buddy_page(p)) { pr_info("get_any_page: %#lx free buddy page\n", pfn); /* Set hwpoison bit while page is still isolated */ SetPageHWPoison(p); ret = 0; } else { pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n", pfn, p->flags); ret = -EIO; } } else { /* Not a free page */ ret = 1; } unset_migratetype_isolate(p, MIGRATE_MOVABLE); unlock_memory_hotplug(); return ret; } static int soft_offline_huge_page(struct page *page, int flags) { int ret; unsigned long pfn = page_to_pfn(page); struct page *hpage = compound_head(page); LIST_HEAD(pagelist); ret = get_any_page(page, pfn, flags); if (ret < 0) return ret; if (ret == 0) goto done; if (PageHWPoison(hpage)) { put_page(hpage); pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn); return -EBUSY; } /* Keep page count to indicate a given hugepage is isolated. */ list_add(&hpage->lru, &pagelist); ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false, MIGRATE_SYNC); if (ret) { struct page *page1, *page2; list_for_each_entry_safe(page1, page2, &pagelist, lru) put_page(page1); pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", pfn, ret, page->flags); if (ret > 0) ret = -EIO; return ret; } done: if (!PageHWPoison(hpage)) atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages); set_page_hwpoison_huge_page(hpage); dequeue_hwpoisoned_huge_page(hpage); /* keep elevated page count for bad page */ return ret; } /** * soft_offline_page - Soft offline a page. * @page: page to offline * @flags: flags. Same as memory_failure(). * * Returns 0 on success, otherwise negated errno. * * Soft offline a page, by migration or invalidation, * without killing anything. 
This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_trans_head(page);

	/* Hugetlb pages have their own offline path. */
	if (PageHuge(page))
		return soft_offline_huge_page(page, flags);
	/* Transparent huge pages must be split before a base page offline. */
	if (PageTransHuge(hpage)) {
		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
			pr_info("soft offline: %#lx: failed to split THP\n",
				pfn);
			return -EBUSY;
		}
	}

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	/* ret == 0: page was free and already hwpoisoned; just account it. */
	if (ret == 0)
		goto done;

	/*
	 * Page cache page we can handle?
	 */
	if (!PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = get_any_page(page, pfn, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			goto done;
	}
	if (!PageLRU(page)) {
		/* Only LRU (page cache / anon) pages can be migrated here. */
		pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
			pfn, page->flags);
		return -EIO;
	}

	lock_page(page);
	wait_on_page_writeback(page);

	/*
	 * Synchronized using the page lock with memory_failure()
	 */
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);
	/*
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	if (ret == 1) {
		/* Invalidation succeeded: drop our reference and account. */
		put_page(page);
		ret = 0;
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		goto done;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	/*
	 * Drop page reference which is came from get_any_page()
	 * successful isolate_lru_page() already took another one.
	 */
	put_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
							false, MIGRATE_SYNC);
		if (ret) {
			/* Failed pages go back to the LRU. */
			putback_lru_pages(&pagelist);
			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}
	if (ret)
		return ret;
done:
	/* Mark the source page bad so it is never handed out again. */
	atomic_long_add(1, &mce_bad_pages);
	SetPageHWPoison(page);
	/* keep elevated page count for bad page */
	return ret;
}
gpl-2.0