repo_name
string
path
string
copies
string
size
string
content
string
license
string
akiradeveloper/linux
drivers/isdn/pcbit/edss1.c
9295
9083
/* * DSS.1 Finite State Machine * base: ITU-T Rec Q.931 * * Copyright (C) 1996 Universidade de Lisboa * * Written by Pedro Roque Marques (roque@di.fc.ul.pt) * * This software may be used and distributed according to the terms of * the GNU General Public License, incorporated herein by reference. */ /* * TODO: complete the FSM * move state/event descriptions to a user space logger */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <asm/io.h> #include <linux/isdnif.h> #include "pcbit.h" #include "edss1.h" #include "layer2.h" #include "callbacks.h" const char * const isdn_state_table[] = { "Closed", "Call initiated", "Overlap sending", "Outgoing call proceeding", "NOT DEFINED", "Call delivered", "Call present", "Call received", "Connect request", "Incoming call proceeding", "Active", "Disconnect request", "Disconnect indication", "NOT DEFINED", "NOT DEFINED", "Suspend request", "NOT DEFINED", "Resume request", "NOT DEFINED", "Release Request", "NOT DEFINED", "NOT DEFINED", "NOT DEFINED", "NOT DEFINED", "NOT DEFINED", "Overlap receiving", "Select protocol on B-Channel", "Activate B-channel protocol" }; #ifdef DEBUG_ERRS static struct CauseValue { byte nr; char *descr; } cvlist[] = { {0x01, "Unallocated (unassigned) number"}, {0x02, "No route to specified transit network"}, {0x03, "No route to destination"}, {0x04, "Send special information tone"}, {0x05, "Misdialled trunk prefix"}, {0x06, "Channel unacceptable"}, {0x07, "Channel awarded and being delivered in an established channel"}, {0x08, "Preemption"}, {0x09, "Preemption - circuit reserved for reuse"}, {0x10, "Normal call clearing"}, {0x11, "User busy"}, {0x12, "No user responding"}, {0x13, "No answer from user (user alerted)"}, {0x14, "Subscriber absent"}, {0x15, "Call rejected"}, {0x16, "Number changed"}, {0x1a, "non-selected user clearing"}, {0x1b, "Destination out of order"}, {0x1c, "Invalid number 
format (address incomplete)"}, {0x1d, "Facility rejected"}, {0x1e, "Response to Status enquiry"}, {0x1f, "Normal, unspecified"}, {0x22, "No circuit/channel available"}, {0x26, "Network out of order"}, {0x27, "Permanent frame mode connection out-of-service"}, {0x28, "Permanent frame mode connection operational"}, {0x29, "Temporary failure"}, {0x2a, "Switching equipment congestion"}, {0x2b, "Access information discarded"}, {0x2c, "Requested circuit/channel not available"}, {0x2e, "Precedence call blocked"}, {0x2f, "Resource unavailable, unspecified"}, {0x31, "Quality of service unavailable"}, {0x32, "Requested facility not subscribed"}, {0x35, "Outgoing calls barred within CUG"}, {0x37, "Incoming calls barred within CUG"}, {0x39, "Bearer capability not authorized"}, {0x3a, "Bearer capability not presently available"}, {0x3e, "Inconsistency in designated outgoing access information and subscriber class"}, {0x3f, "Service or option not available, unspecified"}, {0x41, "Bearer capability not implemented"}, {0x42, "Channel type not implemented"}, {0x43, "Requested facility not implemented"}, {0x44, "Only restricted digital information bearer capability is available"}, {0x4f, "Service or option not implemented"}, {0x51, "Invalid call reference value"}, {0x52, "Identified channel does not exist"}, {0x53, "A suspended call exists, but this call identity does not"}, {0x54, "Call identity in use"}, {0x55, "No call suspended"}, {0x56, "Call having the requested call identity has been cleared"}, {0x57, "User not member of CUG"}, {0x58, "Incompatible destination"}, {0x5a, "Non-existent CUG"}, {0x5b, "Invalid transit network selection"}, {0x5f, "Invalid message, unspecified"}, {0x60, "Mandatory information element is missing"}, {0x61, "Message type non-existent or not implemented"}, {0x62, "Message not compatible with call state or message type non-existent or not implemented"}, {0x63, "Information element/parameter non-existent or not implemented"}, {0x64, "Invalid information 
element contents"}, {0x65, "Message not compatible with call state"}, {0x66, "Recovery on timer expiry"}, {0x67, "Parameter non-existent or not implemented - passed on"}, {0x6e, "Message with unrecognized parameter discarded"}, {0x6f, "Protocol error, unspecified"}, {0x7f, "Interworking, unspecified"} }; #endif static struct isdn_event_desc { unsigned short ev; char *desc; } isdn_event_table[] = { {EV_USR_SETUP_REQ, "CC->L3: Setup Request"}, {EV_USR_SETUP_RESP, "CC->L3: Setup Response"}, {EV_USR_PROCED_REQ, "CC->L3: Proceeding Request"}, {EV_USR_RELEASE_REQ, "CC->L3: Release Request"}, {EV_NET_SETUP, "NET->TE: setup "}, {EV_NET_CALL_PROC, "NET->TE: call proceeding"}, {EV_NET_SETUP_ACK, "NET->TE: setup acknowledge (more info needed)"}, {EV_NET_CONN, "NET->TE: connect"}, {EV_NET_CONN_ACK, "NET->TE: connect acknowledge"}, {EV_NET_DISC, "NET->TE: disconnect indication"}, {EV_NET_RELEASE, "NET->TE: release"}, {EV_NET_RELEASE_COMP, "NET->TE: release complete"}, {EV_NET_SELP_RESP, "Board: Select B-channel protocol ack"}, {EV_NET_ACTV_RESP, "Board: Activate B-channel protocol ack"}, {EV_TIMER, "Timeout"}, {0, "NULL"} }; char *strisdnevent(ushort ev) { struct isdn_event_desc *entry; for (entry = isdn_event_table; entry->ev; entry++) if (entry->ev == ev) break; return entry->desc; } /* * Euro ISDN finite state machine */ static struct fsm_timer_entry fsm_timers[] = { {ST_CALL_PROC, 10}, {ST_DISC_REQ, 2}, {ST_ACTIVE_SELP, 5}, {ST_ACTIVE_ACTV, 5}, {ST_INCM_PROC, 10}, {ST_CONN_REQ, 2}, {0xff, 0} }; static struct fsm_entry fsm_table[] = { /* Connect Phase */ /* Outgoing */ {ST_NULL, ST_CALL_INIT, EV_USR_SETUP_REQ, cb_out_1}, {ST_CALL_INIT, ST_OVER_SEND, EV_NET_SETUP_ACK, cb_notdone}, {ST_CALL_INIT, ST_CALL_PROC, EV_NET_CALL_PROC, NULL}, {ST_CALL_INIT, ST_NULL, EV_NET_DISC, cb_out_2}, {ST_CALL_PROC, ST_ACTIVE_SELP, EV_NET_CONN, cb_out_2}, {ST_CALL_PROC, ST_NULL, EV_NET_DISC, cb_disc_1}, {ST_CALL_PROC, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, /* Incoming */ {ST_NULL, 
ST_CALL_PRES, EV_NET_SETUP, NULL}, {ST_CALL_PRES, ST_INCM_PROC, EV_USR_PROCED_REQ, cb_in_1}, {ST_CALL_PRES, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_INCM_PROC, ST_CONN_REQ, EV_USR_SETUP_RESP, cb_in_2}, {ST_INCM_PROC, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_CONN_REQ, ST_ACTIVE_SELP, EV_NET_CONN_ACK, cb_in_3}, /* Active */ {ST_ACTIVE, ST_NULL, EV_NET_DISC, cb_disc_1}, {ST_ACTIVE, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_ACTIVE, ST_NULL, EV_NET_RELEASE, cb_disc_3}, /* Disconnect */ {ST_DISC_REQ, ST_NULL, EV_NET_DISC, cb_disc_1}, {ST_DISC_REQ, ST_NULL, EV_NET_RELEASE, cb_disc_3}, /* protocol selection */ {ST_ACTIVE_SELP, ST_ACTIVE_ACTV, EV_NET_SELP_RESP, cb_selp_1}, {ST_ACTIVE_SELP, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, {ST_ACTIVE_ACTV, ST_ACTIVE, EV_NET_ACTV_RESP, cb_open}, {ST_ACTIVE_ACTV, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2}, /* Timers */ {ST_CALL_PROC, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_DISC_REQ, ST_NULL, EV_TIMER, cb_disc_3}, {ST_ACTIVE_SELP, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_ACTIVE_ACTV, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_INCM_PROC, ST_DISC_REQ, EV_TIMER, cb_disc_2}, {ST_CONN_REQ, ST_CONN_REQ, EV_TIMER, cb_in_2}, {0xff, 0, 0, NULL} }; static void pcbit_fsm_timer(unsigned long data) { struct pcbit_dev *dev; struct pcbit_chan *chan; chan = (struct pcbit_chan *) data; del_timer(&chan->fsm_timer); chan->fsm_timer.function = NULL; dev = chan2dev(chan); if (dev == NULL) { printk(KERN_WARNING "pcbit: timer for unknown device\n"); return; } pcbit_fsm_event(dev, chan, EV_TIMER, NULL); } void pcbit_fsm_event(struct pcbit_dev *dev, struct pcbit_chan *chan, unsigned short event, struct callb_data *data) { struct fsm_entry *action; struct fsm_timer_entry *tentry; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); for (action = fsm_table; action->init != 0xff; action++) if (action->init == chan->fsm_state && action->event == event) break; if (action->init == 0xff) { spin_unlock_irqrestore(&dev->lock, flags); 
printk(KERN_DEBUG "fsm error: event %x on state %x\n", event, chan->fsm_state); return; } if (chan->fsm_timer.function) { del_timer(&chan->fsm_timer); chan->fsm_timer.function = NULL; } chan->fsm_state = action->final; pcbit_state_change(dev, chan, action->init, event, action->final); for (tentry = fsm_timers; tentry->init != 0xff; tentry++) if (tentry->init == chan->fsm_state) break; if (tentry->init != 0xff) { init_timer(&chan->fsm_timer); chan->fsm_timer.function = &pcbit_fsm_timer; chan->fsm_timer.data = (ulong) chan; chan->fsm_timer.expires = jiffies + tentry->timeout * HZ; add_timer(&chan->fsm_timer); } spin_unlock_irqrestore(&dev->lock, flags); if (action->callb) action->callb(dev, chan, data); }
gpl-2.0
chiehwen/AGNI-pureSTOCK-I9300
arch/arm/mach-shmobile/console.c
11855
1043
/* * SH-Mobile Console * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <mach/common.h> #include <asm/mach/map.h> void __init shmobile_setup_console(void) { parse_early_param(); /* Let earlyprintk output early console messages */ early_platform_driver_probe("earlyprintk", 1, 1); }
gpl-2.0
itgb/linux-4.1.10
net/netfilter/xt_sctp.c
12879
5088
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/sctp/sctp.h> #include <linux/sctp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_sctp.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kiran Kumar Immidi"); MODULE_DESCRIPTION("Xtables: SCTP protocol packet match"); MODULE_ALIAS("ipt_sctp"); MODULE_ALIAS("ip6t_sctp"); #define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ || (!!((invflag) & (option)) ^ (cond))) static bool match_flags(const struct xt_sctp_flag_info *flag_info, const int flag_count, u_int8_t chunktype, u_int8_t chunkflags) { int i; for (i = 0; i < flag_count; i++) if (flag_info[i].chunktype == chunktype) return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag; return true; } static inline bool match_packet(const struct sk_buff *skb, unsigned int offset, const struct xt_sctp_info *info, bool *hotdrop) { u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)]; const sctp_chunkhdr_t *sch; sctp_chunkhdr_t _sch; int chunk_match_type = info->chunk_match_type; const struct xt_sctp_flag_info *flag_info = info->flag_info; int flag_count = info->flag_count; #ifdef DEBUG int i = 0; #endif if (chunk_match_type == SCTP_CHUNK_MATCH_ALL) SCTP_CHUNKMAP_COPY(chunkmapcopy, info->chunkmap); do { sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch); if (sch == NULL || sch->length == 0) { pr_debug("Dropping invalid SCTP packet.\n"); *hotdrop = true; return false; } #ifdef DEBUG pr_debug("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d" "\tflags: %x\n", ++i, offset, sch->type, htons(sch->length), sch->flags); #endif offset += WORD_ROUND(ntohs(sch->length)); pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset); if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) { switch (chunk_match_type) { case SCTP_CHUNK_MATCH_ANY: if 
(match_flags(flag_info, flag_count, sch->type, sch->flags)) { return true; } break; case SCTP_CHUNK_MATCH_ALL: if (match_flags(flag_info, flag_count, sch->type, sch->flags)) SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type); break; case SCTP_CHUNK_MATCH_ONLY: if (!match_flags(flag_info, flag_count, sch->type, sch->flags)) return false; break; } } else { switch (chunk_match_type) { case SCTP_CHUNK_MATCH_ONLY: return false; } } } while (offset < skb->len); switch (chunk_match_type) { case SCTP_CHUNK_MATCH_ALL: return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy); case SCTP_CHUNK_MATCH_ANY: return false; case SCTP_CHUNK_MATCH_ONLY: return true; } /* This will never be reached, but required to stop compiler whine */ return false; } static bool sctp_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_sctp_info *info = par->matchinfo; const sctp_sctphdr_t *sh; sctp_sctphdr_t _sh; if (par->fragoff != 0) { pr_debug("Dropping non-first fragment.. FIXME\n"); return false; } sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh); if (sh == NULL) { pr_debug("Dropping evil TCP offset=0 tinygram.\n"); par->hotdrop = true; return false; } pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); return SCCHECK(ntohs(sh->source) >= info->spts[0] && ntohs(sh->source) <= info->spts[1], XT_SCTP_SRC_PORTS, info->flags, info->invflags) && SCCHECK(ntohs(sh->dest) >= info->dpts[0] && ntohs(sh->dest) <= info->dpts[1], XT_SCTP_DEST_PORTS, info->flags, info->invflags) && SCCHECK(match_packet(skb, par->thoff + sizeof(sctp_sctphdr_t), info, &par->hotdrop), XT_SCTP_CHUNK_TYPES, info->flags, info->invflags); } static int sctp_mt_check(const struct xt_mtchk_param *par) { const struct xt_sctp_info *info = par->matchinfo; if (info->flags & ~XT_SCTP_VALID_FLAGS) return -EINVAL; if (info->invflags & ~XT_SCTP_VALID_FLAGS) return -EINVAL; if (info->invflags & ~info->flags) return -EINVAL; if (!(info->flags & XT_SCTP_CHUNK_TYPES)) return 0; if (info->chunk_match_type & 
(SCTP_CHUNK_MATCH_ALL | SCTP_CHUNK_MATCH_ANY | SCTP_CHUNK_MATCH_ONLY)) return 0; return -EINVAL; } static struct xt_match sctp_mt_reg[] __read_mostly = { { .name = "sctp", .family = NFPROTO_IPV4, .checkentry = sctp_mt_check, .match = sctp_mt, .matchsize = sizeof(struct xt_sctp_info), .proto = IPPROTO_SCTP, .me = THIS_MODULE }, { .name = "sctp", .family = NFPROTO_IPV6, .checkentry = sctp_mt_check, .match = sctp_mt, .matchsize = sizeof(struct xt_sctp_info), .proto = IPPROTO_SCTP, .me = THIS_MODULE }, }; static int __init sctp_mt_init(void) { return xt_register_matches(sctp_mt_reg, ARRAY_SIZE(sctp_mt_reg)); } static void __exit sctp_mt_exit(void) { xt_unregister_matches(sctp_mt_reg, ARRAY_SIZE(sctp_mt_reg)); } module_init(sctp_mt_init); module_exit(sctp_mt_exit);
gpl-2.0
InsomniaROM/kernel_lge_mako
net/rds/iw_stats.c
14159
2865
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/percpu.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include "rds.h" #include "iw.h" DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats); static const char *const rds_iw_stat_names[] = { "iw_connect_raced", "iw_listen_closed_stale", "iw_tx_cq_call", "iw_tx_cq_event", "iw_tx_ring_full", "iw_tx_throttle", "iw_tx_sg_mapping_failure", "iw_tx_stalled", "iw_tx_credit_updates", "iw_rx_cq_call", "iw_rx_cq_event", "iw_rx_ring_empty", "iw_rx_refill_from_cq", "iw_rx_refill_from_thread", "iw_rx_alloc_limit", "iw_rx_credit_updates", "iw_ack_sent", "iw_ack_send_failure", "iw_ack_send_delayed", "iw_ack_send_piggybacked", "iw_ack_received", "iw_rdma_mr_alloc", "iw_rdma_mr_free", "iw_rdma_mr_used", "iw_rdma_mr_pool_flush", "iw_rdma_mr_pool_wait", "iw_rdma_mr_pool_depleted", }; unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail) { struct rds_iw_statistics stats = {0, }; uint64_t *src; uint64_t *sum; size_t i; int cpu; if (avail < ARRAY_SIZE(rds_iw_stat_names)) goto out; for_each_online_cpu(cpu) { src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); sum = (uint64_t *)&stats; for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++) *(sum++) += *(src++); } rds_stats_info_copy(iter, (uint64_t *)&stats, rds_iw_stat_names, ARRAY_SIZE(rds_iw_stat_names)); out: return ARRAY_SIZE(rds_iw_stat_names); }
gpl-2.0
casinobrawl27/android_133-108_dt2w
net/rds/iw_stats.c
14159
2865
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/percpu.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include "rds.h" #include "iw.h" DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats); static const char *const rds_iw_stat_names[] = { "iw_connect_raced", "iw_listen_closed_stale", "iw_tx_cq_call", "iw_tx_cq_event", "iw_tx_ring_full", "iw_tx_throttle", "iw_tx_sg_mapping_failure", "iw_tx_stalled", "iw_tx_credit_updates", "iw_rx_cq_call", "iw_rx_cq_event", "iw_rx_ring_empty", "iw_rx_refill_from_cq", "iw_rx_refill_from_thread", "iw_rx_alloc_limit", "iw_rx_credit_updates", "iw_ack_sent", "iw_ack_send_failure", "iw_ack_send_delayed", "iw_ack_send_piggybacked", "iw_ack_received", "iw_rdma_mr_alloc", "iw_rdma_mr_free", "iw_rdma_mr_used", "iw_rdma_mr_pool_flush", "iw_rdma_mr_pool_wait", "iw_rdma_mr_pool_depleted", }; unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail) { struct rds_iw_statistics stats = {0, }; uint64_t *src; uint64_t *sum; size_t i; int cpu; if (avail < ARRAY_SIZE(rds_iw_stat_names)) goto out; for_each_online_cpu(cpu) { src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); sum = (uint64_t *)&stats; for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++) *(sum++) += *(src++); } rds_stats_info_copy(iter, (uint64_t *)&stats, rds_iw_stat_names, ARRAY_SIZE(rds_iw_stat_names)); out: return ARRAY_SIZE(rds_iw_stat_names); }
gpl-2.0
aldanopolis/android_kernel_motorola_msm8226
drivers/staging/prima/CORE/DXE/src/wlan_qct_dxe.c
80
210711
/* * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /**========================================================================= @file wlan_qct_dxe.c @brief This file contains the external API exposed by the wlan data transfer abstraction layer module. ========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. $Header:$ $DateTime: $ $Author: $ when who what, where, why -------- --- ---------------------------------------------------------- 08/03/10 schang Created module. 
===========================================================================*/ /*=========================================================================== INCLUDE FILES FOR MODULE ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ #include "wlan_qct_dxe.h" #include "wlan_qct_dxe_i.h" #include "wlan_qct_pal_device.h" #ifdef FEATURE_R33D #include "wlan_qct_pal_bus.h" #endif /* FEATURE_R33D */ /*---------------------------------------------------------------------------- * Local Definitions * -------------------------------------------------------------------------*/ //#define WLANDXE_DEBUG_CH_INFO_DUMP /* Temporary configuration defines * Have to find out permanent solution */ #define T_WLANDXE_MAX_DESCRIPTOR_COUNT 40 #define T_WLANDXE_MAX_FRAME_SIZE 2000 #define T_WLANDXE_TX_INT_ENABLE_FCOUNT 1 #define T_WLANDXE_MEMDUMP_BYTE_PER_LINE 16 #define T_WLANDXE_MAX_RX_PACKET_WAIT 6000 #define T_WLANDXE_SSR_TIMEOUT 5000 #define T_WLANDXE_PERIODIC_HEALTH_M_TIME 2500 #define T_WLANDXE_MAX_HW_ACCESS_WAIT 2000 #define WLANDXE_MAX_REAPED_RX_FRAMES 512 #define WLANPAL_RX_INTERRUPT_PRO_MASK 0x20 #define WLANDXE_RX_INTERRUPT_PRO_UNMASK 0x5F /* 1msec busy wait in case CSR is not valid */ #define WLANDXE_CSR_NEXT_READ_WAIT 1000 /* CSR max retry count */ #define WLANDXE_CSR_MAX_READ_COUNT 30 /* This is temporary fot the compile * WDI will release official version * This must be removed */ #define WDI_GET_PAL_CTX() NULL /*------------------------------------------------------------------------- * Local Varables *-------------------------------------------------------------------------*/ /* This is temp, someone have to allocate for me, and must be part of global context */ static WLANDXE_CtrlBlkType *tempDxeCtrlBlk; static char *channelType[WDTS_CHANNEL_MAX] = { "TX_LOW_PRI", 
"TX_HIGH_PRI", "RX_LOW_PRI", #ifndef WLANDXE_TEST_CHANNEL_ENABLE "RX_HIGH_PRI", #else "H2H_TEST_TX", "H2H_TEST_RX" #endif /* WLANDXE_TEST_CHANNEL_ENABLE */ }; static wpt_packet *rx_reaped_buf[WLANDXE_MAX_REAPED_RX_FRAMES]; /*------------------------------------------------------------------------- * External Function Proto Type *-------------------------------------------------------------------------*/ /*------------------------------------------------------------------------- * Local Function Proto Type *-------------------------------------------------------------------------*/ static wpt_status dxeRXFrameSingleBufferAlloc ( WLANDXE_CtrlBlkType *dxeCtxt, WLANDXE_ChannelCBType *channelEntry, WLANDXE_DescCtrlBlkType *currentCtrlBlock ); static wpt_status dxeNotifySmsm ( wpt_boolean kickDxe, wpt_boolean ringEmpty ); static void dxeStartSSRTimer ( WLANDXE_CtrlBlkType *dxeCtxt ); /*------------------------------------------------------------------------- * Local Function *-------------------------------------------------------------------------*/ /*========================================================================== @ Function Name dxeChannelMonitor @ Description @ Parameters WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeChannelMonitor ( char *monitorDescription, WLANDXE_ChannelCBType *channelEntry, wpt_log_data_stall_channel_type *channelLog ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; if((NULL == monitorDescription) || (NULL == channelEntry)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "INVALID Input ARG"); return eWLAN_PAL_STATUS_E_INVAL; } if(channelEntry->channelType > WDTS_CHANNEL_RX_HIGH_PRI) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "INVALID Channel type"); return eWLAN_PAL_STATUS_E_INVAL; } wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%11s : HCBO %d, 
HCBDP 0x%x, HCBDC 0x%x,", channelType[channelEntry->channelType], channelEntry->headCtrlBlk->ctrlBlkOrder, channelEntry->headCtrlBlk->linkedDescPhyAddr, channelEntry->headCtrlBlk->linkedDesc->descCtrl.ctrl); wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%11s : TCBO %d, TCBDP 0x%x, TCBDC 0x%x", channelType[channelEntry->channelType], channelEntry->tailCtrlBlk->ctrlBlkOrder, channelEntry->tailCtrlBlk->linkedDescPhyAddr, channelEntry->tailCtrlBlk->linkedDesc->descCtrl.ctrl); wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%11s : FDC %d, RDC %d, TFC %d", channelType[channelEntry->channelType], channelEntry->numFreeDesc, channelEntry->numRsvdDesc, channelEntry->numTotalFrame); if(channelLog) { channelLog->numDesc = channelEntry->numDesc; channelLog->numFreeDesc = channelEntry->numFreeDesc; channelLog->numRsvdDesc = channelEntry->numRsvdDesc; channelLog->headDescOrder = channelEntry->headCtrlBlk->ctrlBlkOrder; channelLog->tailDescOrder = channelEntry->tailCtrlBlk->ctrlBlkOrder; } return status; } #ifdef WLANDXE_DEBUG_MEMORY_DUMP /*========================================================================== @ Function Name dxeMemoryDump @ Description @ Parameters WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeMemoryDump ( wpt_uint8 *dumpPointer, wpt_uint32 dumpSize, char *dumpTarget ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 numBytes = 0; wpt_uint32 idx; if((NULL == dumpPointer) || (NULL == dumpTarget)) { return status; } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Location 0x%x, Size %d", dumpTarget, dumpPointer, dumpSize); numBytes = dumpSize % T_WLANDXE_MEMDUMP_BYTE_PER_LINE; for(idx = 0; idx < dumpSize; idx++) { 
HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "0x%2x ", dumpPointer[idx]); if(0 == ((idx + 1) % T_WLANDXE_MEMDUMP_BYTE_PER_LINE)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "\n"); } } if(0 != numBytes) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "\n"); } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"); return status; } #endif /* WLANDXE_DEBUG_MEMORY_DUMP */ /*========================================================================== @ Function Name dxeDescriptorDump @ Description @ Parameters WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ wpt_status dxeDescriptorDump ( WLANDXE_ChannelCBType *channelEntry, WLANDXE_DescType *targetDesc, wpt_uint32 fragmentOrder ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "Descriptor Dump for channel %s, %d / %d fragment", channelType[channelEntry->channelType], fragmentOrder + 1, channelEntry->numFragmentCurrentChain); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "CTRL WORD 0x%x, TransferSize %d", WLANDXE_U32_SWAP_ENDIAN(targetDesc->descCtrl.ctrl), WLANDXE_U32_SWAP_ENDIAN(targetDesc->xfrSize)); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "SRC ADD 0x%x, DST ADD 0x%x, NEXT DESC 0x%x", WLANDXE_U32_SWAP_ENDIAN(targetDesc->dxedesc.dxe_short_desc.srcMemAddrL), WLANDXE_U32_SWAP_ENDIAN(targetDesc->dxedesc.dxe_short_desc.dstMemAddrL), WLANDXE_U32_SWAP_ENDIAN(targetDesc->dxedesc.dxe_short_desc.phyNextL)); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"); return status; } 
/*========================================================================== @ Function Name dxeChannelRegisterDump @ Description @ Parameters WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ wpt_status dxeChannelRegisterDump ( WLANDXE_ChannelCBType *channelEntry, char *dumpTarget, wpt_log_data_stall_channel_type *channelLog ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 chStatusReg, chControlReg, chDescReg, chLDescReg; /* Whatever RIVA power condition try to wakeup RIVA through SMSM * This will not simply wakeup RIVA * Just incase TX not wanted stuck, Trigger TX again */ dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE); dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE); wpalSleep(10); if(channelEntry->channelType > WDTS_CHANNEL_RX_HIGH_PRI) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "INVALID Channel type"); return eWLAN_PAL_STATUS_E_INVAL; } wpalReadRegister(channelEntry->channelRegister.chDXEDesclRegAddr, &chDescReg); wpalReadRegister(channelEntry->channelRegister.chDXELstDesclRegAddr, &chLDescReg); wpalReadRegister(channelEntry->channelRegister.chDXECtrlRegAddr, &chControlReg); wpalReadRegister(channelEntry->channelRegister.chDXEStatusRegAddr, &chStatusReg); wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x", channelType[channelEntry->channelType], chControlReg, chStatusReg, chDescReg, chLDescReg); if(channelLog) { channelLog->ctrlRegVal = chControlReg; channelLog->statRegVal = chStatusReg; } return status; } /*========================================================================== @ Function Name dxeChannelAllDescDump @ Description Dump all DXE descriptors within assigned channe; @ Parameters WLANDXE_ChannelCBType *channelEntry @ Return NONE ===========================================================================*/ void dxeChannelAllDescDump ( 
WLANDXE_ChannelCBType *channelEntry,
   WDTS_ChannelType       channel,
   wpt_log_data_stall_channel_type *channelLog
)
{
   wpt_uint32               channelLoop;
   WLANDXE_DescCtrlBlkType *targetCtrlBlk;
   wpt_uint32               previousCtrlValue = 0;
   wpt_uint32               previousCtrlValid = 0;
   wpt_uint32               currentCtrlValid = 0;
   wpt_uint32               valDescCount = 0;
   wpt_uint32               invalDescCount = 0;

   targetCtrlBlk = channelEntry->headCtrlBlk;
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
            "%11s : %d descriptor chains, head desc ctrl 0x%x",
            channelType[channelEntry->channelType],
            channelEntry->numDesc,
            targetCtrlBlk->linkedDesc->descCtrl.ctrl);
   previousCtrlValue = targetCtrlBlk->linkedDesc->descCtrl.ctrl;

   if((WDTS_CHANNEL_RX_LOW_PRI == channel) ||
      (WDTS_CHANNEL_RX_HIGH_PRI == channel))
   {
      /* RX channels: print a descriptor only when its control word differs
       * from the previous one, and count VALID vs non-VALID descriptors */
      for(channelLoop = 0; channelLoop < channelEntry->numDesc; channelLoop++)
      {
         if(previousCtrlValue != targetCtrlBlk->linkedDesc->descCtrl.ctrl)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                     "%5d : 0x%x",
                     targetCtrlBlk->ctrlBlkOrder,
                     targetCtrlBlk->linkedDesc->descCtrl.ctrl);
         }
         if(targetCtrlBlk->linkedDesc->descCtrl.ctrl & WLANDXE_DESC_CTRL_VALID)
         {
            valDescCount++;
         }
         else
         {
            invalDescCount++;
         }
         previousCtrlValue = targetCtrlBlk->linkedDesc->descCtrl.ctrl;
         targetCtrlBlk = (WLANDXE_DescCtrlBlkType *)targetCtrlBlk->nextCtrlBlk;
      }
   }
   else
   {
      /* TX channels: only track transitions of the VALID bit as we walk
       * the descriptor ring */
      /* Head Descriptor is valid or not */
      previousCtrlValid = targetCtrlBlk->linkedDesc->descCtrl.ctrl & WLANDXE_DESC_CTRL_VALID;
      targetCtrlBlk = (WLANDXE_DescCtrlBlkType *)targetCtrlBlk->nextCtrlBlk;

      for(channelLoop = 0; channelLoop < channelEntry->numDesc; channelLoop++)
      {
         currentCtrlValid = targetCtrlBlk->linkedDesc->descCtrl.ctrl & WLANDXE_DESC_CTRL_VALID;
         if(currentCtrlValid)
         {
            valDescCount++;
         }
         else
         {
            invalDescCount++;
         }
         if(currentCtrlValid != previousCtrlValid)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                     "%5d : 0x%x",
                     targetCtrlBlk->ctrlBlkOrder,
                     targetCtrlBlk->linkedDesc->descCtrl.ctrl);
         }
         previousCtrlValid = currentCtrlValid;
         targetCtrlBlk = (WLANDXE_DescCtrlBlkType *)targetCtrlBlk->nextCtrlBlk;
      }
   }

   if(channelLog)
   {
      channelLog->numValDesc   = valDescCount;
      channelLog->numInvalDesc = invalDescCount;
   }
   return;
}

/*==========================================================================
  @  Function Name
      dxeErrChannelDebug

  @  Description
      Dump channel information for which Error interrupt has occurred

  @  Parameters
      WLANDXE_ChannelCBType  *channelCb

  @  Return
      NONE
===========================================================================*/
void dxeErrChannelDebug
(
   WLANDXE_ChannelCBType *channelCb,
   wpt_uint32             chStatusReg
)
{
   wpt_log_data_stall_channel_type channelLog;
   wpt_uint32 chLDescReg, channelLoop;
   WLANDXE_DescCtrlBlkType *targetCtrlBlk;

   /* Snapshot channel state into the kernel log and the stall log */
   dxeChannelMonitor("INT_ERR", channelCb, &channelLog);
   dxeDescriptorDump(channelCb, channelCb->headCtrlBlk->linkedDesc, 0);
   dxeChannelRegisterDump(channelCb, "INT_ERR", &channelLog);
   dxeChannelAllDescDump(channelCb, channelCb->channelType, &channelLog);
   wpalMemoryCopy(channelLog.channelName, "INT_ERR", WPT_TRPT_CHANNEL_NAME);
   wpalPacketStallUpdateInfo(NULL, NULL, &channelLog, channelCb->channelType);
#ifdef FEATURE_WLAN_DIAG_SUPPORT
   wpalPacketStallDumpLog();
#endif /* FEATURE_WLAN_DIAG_SUPPORT */

   /* Decode the error code field of the channel status register */
   switch ((chStatusReg & WLANDXE_CH_STAT_ERR_CODE_MASK) >>
           WLANDXE_CH_STAT_ERR_CODE_OFFSET)
   {
      case WLANDXE_ERROR_PRG_INV_B2H_SRC_QID:
      case WLANDXE_ERROR_PRG_INV_B2H_DST_QID:
      case WLANDXE_ERROR_PRG_INV_B2H_SRC_IDX:
      case WLANDXE_ERROR_PRG_INV_H2B_SRC_QID:
      case WLANDXE_ERROR_PRG_INV_H2B_DST_QID:
      case WLANDXE_ERROR_PRG_INV_H2B_DST_IDX:
      {
         /* Programming errors: wake RIVA, then locate (by physical address)
          * the descriptor the engine last worked on so it can be dumped */
         dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);
         dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE);
         wpalSleep(10);

         if(channelCb->channelType > WDTS_CHANNEL_RX_HIGH_PRI)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                     "%s: Invalid Channel", __func__);
            break;
         }

         wpalReadRegister(channelCb->channelRegister.chDXELstDesclRegAddr, &chLDescReg);

         targetCtrlBlk = channelCb->headCtrlBlk;
         for(channelLoop = 0; channelLoop < channelCb->numDesc; channelLoop++)
         {
            if
(targetCtrlBlk->linkedDescPhyAddr == chLDescReg) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%11s :CHx_DESCL: desc ctrl 0x%x, src 0x%x, dst 0x%x, next 0x%x", channelType[channelCb->channelType], targetCtrlBlk->linkedDesc->descCtrl.ctrl, targetCtrlBlk->linkedDesc->dxedesc.dxe_short_desc.srcMemAddrL, targetCtrlBlk->linkedDesc->dxedesc.dxe_short_desc.dstMemAddrL, targetCtrlBlk->linkedDesc->dxedesc.dxe_short_desc.phyNextL); targetCtrlBlk = (WLANDXE_DescCtrlBlkType *)targetCtrlBlk->nextCtrlBlk; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%11s :Next Desc: desc ctrl 0x%x, src 0x%x, dst 0x%x, next 0x%x", channelType[channelCb->channelType], targetCtrlBlk->linkedDesc->descCtrl.ctrl, targetCtrlBlk->linkedDesc->dxedesc.dxe_short_desc.srcMemAddrL, targetCtrlBlk->linkedDesc->dxedesc.dxe_short_desc.dstMemAddrL, targetCtrlBlk->linkedDesc->dxedesc.dxe_short_desc.phyNextL); break; } targetCtrlBlk = (WLANDXE_DescCtrlBlkType *)targetCtrlBlk->nextCtrlBlk; } break; } default: { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%s: No Debug Inormation", __func__); break; } } wpalFwDumpReq(17, 0, 0, 0, 0, 0); } /*========================================================================== @ Function Name dxeTxThreadChannelDebugHandler @ Description Dump TX channel information @ Parameters Wwpt_msg *msgPtr @ Return NONE ===========================================================================*/ void dxeTxThreadChannelDebugHandler ( wpt_msg *msgPtr ) { wpt_uint8 channelLoop; wpt_log_data_stall_channel_type channelLog; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Whatever RIVA power condition try to wakeup RIVA through SMSM * This will not simply wakeup RIVA * Just incase TX not wanted stuck, Trigger TX again */ for(channelLoop = 0; channelLoop < WDTS_CHANNEL_RX_LOW_PRI; channelLoop++) { dxeChannelMonitor("******** Get Descriptor Snapshot ", &tempDxeCtrlBlk->dxeChannel[channelLoop], &channelLog); 
dxeChannelRegisterDump(&tempDxeCtrlBlk->dxeChannel[channelLoop],
                             "Abnormal successive empty interrupt",
                             &channelLog);
      dxeChannelAllDescDump(&tempDxeCtrlBlk->dxeChannel[channelLoop],
                            channelLoop,
                            &channelLog);
      wpalMemoryCopy(channelLog.channelName,
                     channelType[channelLoop],
                     WPT_TRPT_CHANNEL_NAME);
      wpalPacketStallUpdateInfo(NULL, NULL, &channelLog, channelLoop);
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
            "================== DXE Dump End ======================");
   /* This handler is the last consumer of the serialized message */
   wpalMemoryFree(msgPtr);

#ifdef FEATURE_WLAN_DIAG_SUPPORT
   wpalPacketStallDumpLog();
#endif /* FEATURE_WLAN_DIAG_SUPPORT */

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return;
}

/*==========================================================================
  @  Function Name
      dxeRxThreadChannelDebugHandler

  @  Description
      Dump RX channel information; runs in RX thread context, then forwards
      the same message to the TX thread for the TX-side dump

  @  Parameters
      wpt_msg  *msgPtr

  @  Return
      NONE
===========================================================================*/
void dxeRxThreadChannelDebugHandler
(
   wpt_msg *msgPtr
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint8 channelLoop;
   wpt_log_data_stall_channel_type channelLog;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Whatever RIVA power condition try to wakeup RIVA through SMSM
    * This will not simply wakeup RIVA
    * Just incase TX not wanted stuck, Trigger TX again */
   for(channelLoop = WDTS_CHANNEL_RX_LOW_PRI; channelLoop < WDTS_CHANNEL_MAX; channelLoop++)
   {
      dxeChannelMonitor("******** Get Descriptor Snapshot ",
                        &tempDxeCtrlBlk->dxeChannel[channelLoop],
                        &channelLog);
      dxeChannelRegisterDump(&tempDxeCtrlBlk->dxeChannel[channelLoop],
                             "Abnormal successive empty interrupt",
                             &channelLog);
      dxeChannelAllDescDump(&tempDxeCtrlBlk->dxeChannel[channelLoop],
                            channelLoop,
                            &channelLog);
      wpalMemoryCopy(channelLog.channelName,
                     channelType[channelLoop],
                     WPT_TRPT_CHANNEL_NAME);
      wpalPacketStallUpdateInfo(NULL, NULL, &channelLog, channelLoop);
   }

   /* Now serialise the
message through Tx thread also to make sure
    * no register access when RIVA is in powersave */
   /*Use the same message pointer just change the call back function */
   msgPtr->callback = dxeTxThreadChannelDebugHandler;
   status = wpalPostTxMsg(WDI_GET_PAL_CTX(), msgPtr);
   if ( eWLAN_PAL_STATUS_SUCCESS != status )
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "Tx thread state dump req serialize fail status=%d", status);
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return;
}

/*==========================================================================
  @  Function Name
      dxeRXHealthMonitor

  @  Description
      Monitoring RX channel healthy status
      If detect any problem, try to recover

  @  Parameters
      healthMonitorMsg
             MSG pointer; pContext carries the low-resource TX channel
             control block

  @  Return
      NONE
===========================================================================*/
void dxeRXHealthMonitor
(
   wpt_msg *healthMonitorMsg
)
{
   WLANDXE_ChannelCBType *channelCtrlBlk;
   WLANDXE_ChannelCBType *testCHCtrlBlk;
   wpt_uint32 regValue;
   wpt_uint32 chStatusReg, chControlReg, chDescReg, chLDescReg;
   wpt_uint32 hwWakeLoop, chLoop;

   if(NULL == healthMonitorMsg)
   {
      return;
   }

   /* Make wake up HW */
   dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);
   dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE);
   dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);

   /* Poll until BMU reports free BD/PDU, i.e. RIVA is awake */
   for(hwWakeLoop = 0; hwWakeLoop < T_WLANDXE_MAX_HW_ACCESS_WAIT; hwWakeLoop++)
   {
      wpalReadRegister(WLANDXE_BMU_AVAILABLE_BD_PDU, &regValue);
      if(0 != regValue)
      {
         break;
      }
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
            "Scheduled RX, num free BD/PDU %d, loop Count %d",
            regValue, hwWakeLoop);

   for(chLoop = WDTS_CHANNEL_RX_LOW_PRI; chLoop < WDTS_CHANNEL_MAX; chLoop++)
   {
      testCHCtrlBlk = &tempDxeCtrlBlk->dxeChannel[chLoop];
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXECtrlRegAddr, &chControlReg);
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXEStatusRegAddr, &chStatusReg);
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXEDesclRegAddr, &chDescReg);
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXELstDesclRegAddr, &chLDescReg);

      wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
                "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x, HCBO %d, HCBDP 0x%x, HCBDC 0x%x, TCBO %d,TCBDP 0x%x, TCBDC 0x%x",
                channelType[chLoop],
                chControlReg, chStatusReg, chDescReg, chLDescReg,
                testCHCtrlBlk->headCtrlBlk->ctrlBlkOrder,
                testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr,
                testCHCtrlBlk->headCtrlBlk->linkedDesc->descCtrl.ctrl,
                testCHCtrlBlk->tailCtrlBlk->ctrlBlkOrder,
                testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr,
                testCHCtrlBlk->tailCtrlBlk->linkedDesc->descCtrl.ctrl);

      /* Channel enabled but HW last-descriptor register disagrees with the
       * SW head descriptor: resync HW register to the SW view */
      if((chControlReg & WLANDXE_DESC_CTRL_VALID) &&
         (chLDescReg != testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr))
      {
         wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                   "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x, "
                   "HCBO %d, HCBDP 0x%x, HCBDC 0x%x, TCBO %d,TCBDP 0x%x, TCBDC 0x%x",
                   channelType[chLoop],
                   chControlReg, chStatusReg, chDescReg, chLDescReg,
                   testCHCtrlBlk->headCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->headCtrlBlk->linkedDesc->descCtrl.ctrl,
                   testCHCtrlBlk->tailCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->tailCtrlBlk->linkedDesc->descCtrl.ctrl);
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "%11s : RX CH EN Descriptor Async, resync it",
                  channelType[chLoop]);
         wpalWriteRegister(testCHCtrlBlk->channelRegister.chDXELstDesclRegAddr,
                           testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr);
      }
      /* Channel disabled and HW current-descriptor register out of sync */
      else if(!(chControlReg & WLANDXE_DESC_CTRL_VALID) &&
              (chDescReg != testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr))
      {
         wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                   "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x, "
                   "HCBO %d, HCBDP 0x%x, HCBDC 0x%x, TCBO %d,TCBDP 0x%x, TCBDC 0x%x",
                   channelType[chLoop],
                   chControlReg, chStatusReg, chDescReg, chLDescReg,
                   testCHCtrlBlk->headCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->headCtrlBlk->linkedDesc->descCtrl.ctrl,
                   testCHCtrlBlk->tailCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->tailCtrlBlk->linkedDesc->descCtrl.ctrl);
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "%11s : RX CH DIS Descriptor Async, resync it",
                  channelType[chLoop]);
         wpalWriteRegister(testCHCtrlBlk->channelRegister.chDXEDesclRegAddr,
                           testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr);
      }
   }

   channelCtrlBlk = (WLANDXE_ChannelCBType *)healthMonitorMsg->pContext;
   if(channelCtrlBlk->hitLowResource)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "%11s : Still Low Resource, kick DXE TX and restart timer",
               channelType[channelCtrlBlk->channelType]);
      /* Still Low Resource, Kick DXE again and start timer again */
      wpalTimerStart(&channelCtrlBlk->healthMonitorTimer,
                     T_WLANDXE_PERIODIC_HEALTH_M_TIME);
   }
   else
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "%11s : Out from Low resource condition, do nothing",
               channelType[channelCtrlBlk->channelType]);
      /* Recovered from low resource condition
       * Not need to do anything */
   }
   return;
}

/*==========================================================================
  @  Function Name
      dxeTXHealthMonitor

  @  Description
      Monitoring TX channel healthy status
      If detect any problem, try to recover

  @  Parameters
      healthMonitorMsg
             MSG pointer.
will have low resource TX channel context
  @  Return
      NONE
===========================================================================*/
void dxeTXHealthMonitor
(
   wpt_msg *healthMonitorMsg
)
{
   WLANDXE_ChannelCBType *channelCtrlBlk;
   WLANDXE_ChannelCBType *testCHCtrlBlk;
   wpt_uint32 regValue;
   wpt_uint32 chStatusReg, chControlReg, chDescReg, chLDescReg;
   wpt_uint32 hwWakeLoop, chLoop;
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;

   if(NULL == healthMonitorMsg)
   {
      return;
   }

   /* First of all kick TX channel
    * This will fix if there is any problem with SMSM state */
   dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);
   dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE);
   dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);

   /* Wait till RIVA up */
   for(hwWakeLoop = 0; hwWakeLoop < T_WLANDXE_MAX_HW_ACCESS_WAIT; hwWakeLoop++)
   {
      wpalReadRegister(WLANDXE_BMU_AVAILABLE_BD_PDU, &regValue);
      if(0 != regValue)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "num free BD/PDU %d, loop Count %d",
                  regValue, hwWakeLoop);
         break;
      }
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
            "Scheduled TX, num free BD/PDU %d, loop Count %d",
            regValue, hwWakeLoop);

   for(chLoop = 0; chLoop < WDTS_CHANNEL_RX_LOW_PRI; chLoop++)
   {
      testCHCtrlBlk = &tempDxeCtrlBlk->dxeChannel[chLoop];
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXECtrlRegAddr, &chControlReg);
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXEStatusRegAddr, &chStatusReg);
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXEDesclRegAddr, &chDescReg);
      wpalReadRegister(testCHCtrlBlk->channelRegister.chDXELstDesclRegAddr, &chLDescReg);

      wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
                "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x, HCBO %d, HCBDP 0x%x, HCBDC 0x%x, TCBO %d,TCBDP 0x%x, TCBDC 0x%x",
                channelType[chLoop],
                chControlReg, chStatusReg, chDescReg, chLDescReg,
                testCHCtrlBlk->headCtrlBlk->ctrlBlkOrder,
                testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr,
                testCHCtrlBlk->headCtrlBlk->linkedDesc->descCtrl.ctrl,
                testCHCtrlBlk->tailCtrlBlk->ctrlBlkOrder,
                testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr,
                testCHCtrlBlk->tailCtrlBlk->linkedDesc->descCtrl.ctrl);

      /* TX direction compares against the TAIL control block (last frame
       * queued by host), unlike RX which compares against the head */
      if((chControlReg & WLANDXE_DESC_CTRL_VALID) &&
         (chLDescReg != testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr))
      {
         wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                   "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x, "
                   "HCBO %d, HCBDP 0x%x, HCBDC 0x%x, TCBO %d,TCBDP 0x%x, TCBDC 0x%x",
                   channelType[chLoop],
                   chControlReg, chStatusReg, chDescReg, chLDescReg,
                   testCHCtrlBlk->headCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->headCtrlBlk->linkedDesc->descCtrl.ctrl,
                   testCHCtrlBlk->tailCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->tailCtrlBlk->linkedDesc->descCtrl.ctrl);
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "%11s : TX CH EN Descriptor Async, resync it",
                  channelType[chLoop]);
         wpalWriteRegister(testCHCtrlBlk->channelRegister.chDXELstDesclRegAddr,
                           testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr);
      }
      else if(!(chControlReg & WLANDXE_DESC_CTRL_VALID) &&
              (chDescReg != testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr))
      {
         wpalTrace(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                   "%11s : CCR 0x%x, CSR 0x%x, CDR 0x%x, CLDR 0x%x, "
                   "HCBO %d, HCBDP 0x%x, HCBDC 0x%x, TCBO %d,TCBDP 0x%x, TCBDC 0x%x",
                   channelType[chLoop],
                   chControlReg, chStatusReg, chDescReg, chLDescReg,
                   testCHCtrlBlk->headCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->headCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->headCtrlBlk->linkedDesc->descCtrl.ctrl,
                   testCHCtrlBlk->tailCtrlBlk->ctrlBlkOrder,
                   testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr,
                   testCHCtrlBlk->tailCtrlBlk->linkedDesc->descCtrl.ctrl);
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "%11s : TX CH DIS Descriptor Async, resync it",
                  channelType[chLoop]);
         wpalWriteRegister(testCHCtrlBlk->channelRegister.chDXEDesclRegAddr,
                           testCHCtrlBlk->tailCtrlBlk->linkedDescPhyAddr);
      }
   }

   /* TX channel test done, test RX channels */
channelCtrlBlk = (WLANDXE_ChannelCBType *)healthMonitorMsg->pContext;
   /* Reuse the same message to run the RX-side check in the RX thread */
   channelCtrlBlk->healthMonitorMsg->callback = dxeRXHealthMonitor;
   status = wpalPostRxMsg(WDI_GET_PAL_CTX(),
                          channelCtrlBlk->healthMonitorMsg);
   if (eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "TX Low resource Kick DXE MSG Serialize fail status=%d",
               status);
   }
   return;
}

/*==========================================================================
  @  Function Name
      dxeHealthMonitorTimeout

  @  Description
      Health Monitor timer started when TX channel low resource condition
      And if recovered from low resource condition, timer would not fired
      Timer fired means during certain time, TX CH could not be recovered

  @  Parameters
      channelCtxt
             Low resource condition happen Channel context

  @  Return
      NONE
===========================================================================*/
void dxeHealthMonitorTimeout
(
   void *channelCtxt
)
{
   WLANDXE_ChannelCBType *channelCtrlBlk;
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;

   if(NULL == channelCtxt)
   {
      return;
   }

   /* Timeout Fired, DXE TX should kick on TX thread
    * Serialize to TX Thread */
   channelCtrlBlk = (WLANDXE_ChannelCBType *)channelCtxt;
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
            "%11s : Health Monitor timer expired",
            channelType[channelCtrlBlk->channelType]);

   channelCtrlBlk->healthMonitorMsg->callback = dxeTXHealthMonitor;
   status = wpalPostTxMsg(WDI_GET_PAL_CTX(),
                          channelCtrlBlk->healthMonitorMsg);
   if (eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "TX Low resource Kick DXE MSG Serialize fail status=%d",
               status);
   }
   return;
}

/*==========================================================================
  @  Function Name
      dxeCtrlBlkAlloc

  @  Description
      Allocate DXE Control block
      DXE control block will used by Host DXE driver only, internal structure
      Will make ring linked list

  @  Parameters
      WLANDXE_CtrlBlkType     *dxeCtrlBlk,
                               DXE host driver main control block
WLANDXE_ChannelCBType *channelEntry
                              Channel specific control block
  @  Return
      wpt_status
===========================================================================*/
static wpt_status dxeCtrlBlkAlloc
(
   WLANDXE_CtrlBlkType   *dxeCtrlBlk,
   WLANDXE_ChannelCBType *channelEntry
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;
   unsigned int idx, fIdx;
   WLANDXE_DescCtrlBlkType *currentCtrlBlk = NULL;
   WLANDXE_DescCtrlBlkType *freeCtrlBlk = NULL;
   WLANDXE_DescCtrlBlkType *prevCtrlBlk = NULL;
   WLANDXE_DescCtrlBlkType *nextCtrlBlk = NULL;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Sanity check */
   if((NULL == dxeCtrlBlk) || (NULL == channelEntry))
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeCtrlBlkAlloc Channel Entry is not valid");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   /* Allocate pre asigned number of control blocks */
   for(idx = 0; idx < channelEntry->numDesc; idx++)
   {
      currentCtrlBlk = (WLANDXE_DescCtrlBlkType *)wpalMemoryAllocate(sizeof(WLANDXE_DescCtrlBlkType));
      if(NULL == currentCtrlBlk)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeCtrlBlkOpen MemAlloc Fail for channel %d",
                  channelEntry->channelType);
         /* Unwind: free the idx blocks allocated so far, walking the
          * singly linked chain from the head.
          * NOTE(review): headCtrlBlk/tailCtrlBlk are not reset to NULL
          * here and keep pointing at freed memory - confirm callers treat
          * E_FAULT as fatal and never touch them again. */
         freeCtrlBlk = channelEntry->headCtrlBlk;
         for(fIdx = 0; fIdx < idx; fIdx++)
         {
            if(NULL == freeCtrlBlk)
            {
               break;
            }
            nextCtrlBlk = freeCtrlBlk->nextCtrlBlk;
            wpalMemoryFree((void *)freeCtrlBlk);
            freeCtrlBlk = nextCtrlBlk;
         }
         return eWLAN_PAL_STATUS_E_FAULT;
      }
      memset((wpt_uint8 *)currentCtrlBlk, 0, sizeof(WLANDXE_DescCtrlBlkType));

      /* Initialize common elements first */
      currentCtrlBlk->xfrFrame          = NULL;
      currentCtrlBlk->linkedDesc        = NULL;
      currentCtrlBlk->linkedDescPhyAddr = 0;
      currentCtrlBlk->ctrlBlkOrder      = idx;

      /* This is the first control block allocated
       * Next Control block is not allocated yet
       * head and tail must be first control block */
      if(0 == idx)
      {
         currentCtrlBlk->nextCtrlBlk = NULL;
         channelEntry->headCtrlBlk = currentCtrlBlk;
         channelEntry->tailCtrlBlk = currentCtrlBlk;
      }
      /* This is not first, not last control block
       * previous control block may has next linked block */
      else if((0 < idx) && (idx < (channelEntry->numDesc - 1)))
      {
         prevCtrlBlk->nextCtrlBlk = currentCtrlBlk;
      }
      /* This is last control blocl
       * next control block for the last control block is head, first control block
       * then whole linked list made RING */
      else if((channelEntry->numDesc - 1) == idx)
      {
         prevCtrlBlk->nextCtrlBlk = currentCtrlBlk;
         currentCtrlBlk->nextCtrlBlk = channelEntry->headCtrlBlk;
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeCtrlBlkOpen Invalid Ctrl Blk location %d",
                  channelEntry->channelType);
         wpalMemoryFree(currentCtrlBlk);
         return eWLAN_PAL_STATUS_E_FAULT;
      }
      prevCtrlBlk = currentCtrlBlk;
      channelEntry->numFreeDesc++;
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
  @  Function Name
      dxeDescAllocAndLink

  @  Description
      Allocate DXE descriptor
      DXE descriptor will be shared by DXE host driver and RIVA DXE engine
      Will make RING linked list
      Will be linked with Descriptor control block one by one

  @  Parameters
      WLANDXE_CtrlBlkType     *dxeCtrlBlk,
                               DXE host driver main control block
      WLANDXE_ChannelCBType   *channelEntry
                               Channel specific control block

  @  Return
      wpt_status
===========================================================================*/
static wpt_status dxeDescAllocAndLink
(
   WLANDXE_CtrlBlkType   *dxeCtrlBlk,
   WLANDXE_ChannelCBType *channelEntry
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_DescType *currentDesc = NULL;
   WLANDXE_DescType *prevDesc    = NULL;
   WLANDXE_DescCtrlBlkType *currentCtrlBlk = NULL;
   unsigned int idx;
   void *physAddressAlloc = NULL;
   wpt_uint32 physAddress;
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
   WLANDXE_ChannelCBType *testTXChannelCB = &dxeCtrlBlk->dxeChannel[WDTS_CHANNEL_H2H_TEST_TX];
   WLANDXE_DescCtrlBlkType *currDescCtrlBlk = testTXChannelCB->headCtrlBlk;
#endif /* WLANDXE_TEST_CHANNEL_ENABLE*/
HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Sanity Check */
   if((NULL == dxeCtrlBlk) || (NULL == channelEntry))
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeDescLinkAlloc Channel Entry is not valid");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   currentCtrlBlk = channelEntry->headCtrlBlk;

#if !(defined(FEATURE_R33D) || defined(WLANDXE_TEST_CHANNEL_ENABLE))
   /* allocate all DXE descriptors for this channel in one chunk */
   channelEntry->descriptorAllocation = (WLANDXE_DescType *)
      wpalDmaMemoryAllocate(sizeof(WLANDXE_DescType)*channelEntry->numDesc,
                            &physAddressAlloc);
   physAddress = (wpt_uint32) (uintptr_t)(physAddressAlloc);
   if(NULL == channelEntry->descriptorAllocation)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeDescLinkAlloc Descriptor Alloc Fail");
      return eWLAN_PAL_STATUS_E_RESOURCES;
   }
   currentDesc = channelEntry->descriptorAllocation;
#endif

   /* Allocate pre asigned number of descriptor */
   for(idx = 0; idx < channelEntry->numDesc; idx++)
   {
#ifndef FEATURE_R33D
#ifndef WLANDXE_TEST_CHANNEL_ENABLE
      // descriptors were allocated in a chunk -- use the current one
      memset((wpt_uint8 *)currentDesc, 0, sizeof(WLANDXE_DescType));
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
               "Allocated Descriptor VA %p, PA %p", currentDesc, physAddressAlloc);
#else
      if(WDTS_CHANNEL_H2H_TEST_RX != channelEntry->channelType)
      {
         // allocate a descriptor
         currentDesc = (WLANDXE_DescType *)wpalDmaMemoryAllocate(sizeof(WLANDXE_DescType),
                                                                 &physAddressAlloc);
         memset((wpt_uint8 *)currentDesc, 0, sizeof(WLANDXE_DescType));
         physAddress = (wpt_uint32) (uintptr_t)(physAddressAlloc);
      }
      else
      {
         /* H2H test RX reuses the descriptors of the H2H test TX channel */
         currentDesc = currDescCtrlBlk->linkedDesc;
         physAddress = currDescCtrlBlk->linkedDescPhyAddr;
         currDescCtrlBlk = (WLANDXE_DescCtrlBlkType *)currDescCtrlBlk->nextCtrlBlk;
      }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */
#else
#ifndef WLANDXE_TEST_CHANNEL_ENABLE
      currentDesc = (WLANDXE_DescType *)wpalAcpuDdrDxeDescMemoryAllocate(&physAddressAlloc);
      memset((wpt_uint8 *)currentDesc, 0, sizeof(WLANDXE_DescType));
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
               "Allocated Descriptor VA %p, PA %p", currentDesc, physAddressAlloc);
      physAddress = (wpt_uint32) (uintptr_t)(physAddressAlloc);
#else
      if(WDTS_CHANNEL_H2H_TEST_RX != channelEntry->channelType)
      {
         currentDesc = (WLANDXE_DescType *)wpalAcpuDdrDxeDescMemoryAllocate(&physAddressAlloc);
         memset((wpt_uint8 *)currentDesc, 0, sizeof(WLANDXE_DescType));
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
                  "Allocated Descriptor VA %p, PA %p", currentDesc, physAddressAlloc);
         physAddress = (wpt_uint32) (uintptr_t)(physAddressAlloc);
      }
      else
      {
         currentDesc = currDescCtrlBlk->linkedDesc;
         physAddress = currDescCtrlBlk->linkedDescPhyAddr;
         currDescCtrlBlk = (WLANDXE_DescCtrlBlkType *)currDescCtrlBlk->nextCtrlBlk;
      }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */
#endif /* FEATURE_R33D */

      if(NULL == currentDesc)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeDescLinkAlloc MemAlloc Fail for channel %d",
                  channelEntry->channelType);
         return eWLAN_PAL_STATUS_E_FAULT;
      }

      /* Link this descriptor to its control block (VA and PA) */
      currentCtrlBlk->linkedDesc        = currentDesc;
      currentCtrlBlk->linkedDescPhyAddr = physAddress;

      /* First descriptor, next none
       * descriptor bottom location is first descriptor address */
      if(0 == idx)
      {
         currentDesc->dxedesc.dxe_short_desc.phyNextL = 0;
         channelEntry->DescBottomLoc = currentDesc;
         channelEntry->descBottomLocPhyAddr = physAddress;
      }
      /* Not first, not last descriptor
       * may make link for previous descriptor with current descriptor
       * ENDIAN SWAP needed ????? */
      else if((0 < idx) && (idx < (channelEntry->numDesc - 1)))
      {
         prevDesc->dxedesc.dxe_short_desc.phyNextL = WLANDXE_U32_SWAP_ENDIAN(physAddress);
      }
      /* Last descriptor
       * make a ring by asign next pointer as first descriptor
       * ENDIAN SWAP NEEDED ???
       */
      else if((channelEntry->numDesc - 1) == idx)
      {
         prevDesc->dxedesc.dxe_short_desc.phyNextL = WLANDXE_U32_SWAP_ENDIAN(physAddress);
         currentDesc->dxedesc.dxe_short_desc.phyNextL =
            WLANDXE_U32_SWAP_ENDIAN(channelEntry->headCtrlBlk->linkedDescPhyAddr);
      }

      /* If Current Channel is RX channel PAL Packet and OS packet buffer should be
       * Pre allocated and physical address must be assigned into
       * Corresponding DXE Descriptor */
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
      if((WDTS_CHANNEL_RX_LOW_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_RX_HIGH_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_H2H_TEST_RX == channelEntry->channelType))
#else
      if((WDTS_CHANNEL_RX_LOW_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_RX_HIGH_PRI == channelEntry->channelType))
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */
      {
         status = dxeRXFrameSingleBufferAlloc(dxeCtrlBlk,
                                              channelEntry,
                                              currentCtrlBlk);
         if( !WLAN_PAL_IS_STATUS_SUCCESS(status) )
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "dxeDescLinkAlloc RX Buffer Alloc Fail for channel %d",
                     channelEntry->channelType);
            return status;
         }
         /* RX descriptors come pre-armed with a buffer, so they are not
          * "free" from the host's point of view */
         --channelEntry->numFreeDesc;
      }

      /* Program direction-specific control word and work-queue reference */
      if((WDTS_CHANNEL_TX_LOW_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType))
      {
         currentDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_write;
         currentDesc->dxedesc.dxe_short_desc.dstMemAddrL =
            channelEntry->extraConfig.refWQ_swapped;
      }
      else if((WDTS_CHANNEL_RX_LOW_PRI == channelEntry->channelType) ||
              (WDTS_CHANNEL_RX_HIGH_PRI == channelEntry->channelType))
      {
         currentDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_read;
         currentDesc->dxedesc.dxe_short_desc.srcMemAddrL =
            channelEntry->extraConfig.refWQ_swapped;
      }
      else
      {
         /* Just in case.
          * H2H Test RX channel, do nothing
          * By Definition this must not happen */
      }

      currentCtrlBlk = currentCtrlBlk->nextCtrlBlk;
      prevDesc = currentDesc;

#ifndef FEATURE_R33D
#ifndef WLANDXE_TEST_CHANNEL_ENABLE
      // advance to the next pre-allocated descriptor in the chunk
      currentDesc++;
      physAddress = (physAddress + sizeof(WLANDXE_DescType));
#endif
#endif
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
  @  Function Name
      dxeSetInterruptPath

  @  Description
      Build the interrupt-routing bitmap (TX channels in bits 0..15, RX
      channels shifted by 16) and program it into the CCU interrupt select
      register.

  @  Parameters
      WLANDXE_CtrlBlkType     *dxeCtrlBlk
                               DXE host driver main control block

  @  Return
      wpt_status
===========================================================================*/
static wpt_status dxeSetInterruptPath
(
   WLANDXE_CtrlBlkType    *dxeCtrlBlk
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint32 interruptPath = 0;
   wpt_uint32 idx;
   WLANDXE_ChannelCBType *channelEntry = NULL;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   for(idx = 0; idx < WDTS_CHANNEL_MAX; idx++)
   {
      channelEntry = &dxeCtrlBlk->dxeChannel[idx];
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
      if((WDTS_CHANNEL_TX_LOW_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_H2H_TEST_TX == channelEntry->channelType))
#else
      if((WDTS_CHANNEL_TX_LOW_PRI == channelEntry->channelType) ||
         (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType))
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */
      {
         interruptPath |= (1 << channelEntry->assignedDMAChannel);
      }
      else if((WDTS_CHANNEL_RX_LOW_PRI == channelEntry->channelType) ||
              (WDTS_CHANNEL_RX_HIGH_PRI == channelEntry->channelType))
      {
         /* RX channels occupy the upper half of the select register */
         interruptPath |= (1 << (channelEntry->assignedDMAChannel + 16));
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "H2H TEST RX????
%d", channelEntry->channelType);
      }
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
            "Interrupt Path Must be 0x%x", interruptPath);
   dxeCtrlBlk->interruptPath = interruptPath;
   wpalWriteRegister(WLANDXE_CCU_DXE_INT_SELECT, interruptPath);

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
  @  Function Name
      dxeEngineCoreStart

  @  Description
      Trigger to start RIVA DXE Hardware

  @  Parameters
      WLANDXE_CtrlBlkType     *dxeCtrlBlk,
                               DXE host driver main control block

  @  Return
      wpt_status
===========================================================================*/
static wpt_status dxeEngineCoreStart
(
   WLANDXE_CtrlBlkType    *dxeCtrlBlk
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint32 registerData = 0;
   wpt_uint8  readRetry;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

#ifdef WCN_PRONTO
   /* Pronto: pulse the CCU soft-reset bit (set then clear) */
   /* Read default */
   wpalReadRegister(WLANDXE_CCU_SOFT_RESET, &registerData);
   registerData |= WLANDXE_DMA_CCU_DXE_RESET_MASK;
   /* Make reset */
   wpalWriteRegister(WLANDXE_CCU_SOFT_RESET, registerData);
   /* Clear reset */
   registerData &= ~WLANDXE_DMA_CCU_DXE_RESET_MASK;
   wpalWriteRegister(WLANDXE_CCU_SOFT_RESET, registerData);
#else
   /* START This core init is not needed for the integrated system */
   /* Reset First */
   registerData = WLANDXE_DMA_CSR_RESET_MASK;
   wpalWriteRegister(WALNDEX_DMA_CSR_ADDRESS, registerData);
#endif /* WCN_PRONTO */

   /* Write the enable value and read it back until the EN bit sticks;
    * some platforms need several attempts after reset */
   for(readRetry = 0; readRetry < WLANDXE_CSR_MAX_READ_COUNT; readRetry++)
   {
      wpalWriteRegister(WALNDEX_DMA_CSR_ADDRESS, WLANDXE_CSR_DEFAULT_ENABLE);
      wpalReadRegister(WALNDEX_DMA_CSR_ADDRESS, &registerData);
      if(!(registerData & WLANDXE_DMA_CSR_EN_MASK))
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "%s CSR 0x%x, count %d",
                  __func__, registerData, readRetry);
         /* CSR is not valid value, re-try to write */
         wpalBusyWait(WLANDXE_CSR_NEXT_READ_WAIT);
      }
      else
      {
         break;
      }
   }
if(WLANDXE_CSR_MAX_READ_COUNT == readRetry) { /* MAX wait, still cannot write correct value * Panic device */ wpalDevicePanic(); } /* Is This needed? * Not sure, revisit with integrated system */ /* END This core init is not needed for the integrated system */ dxeSetInterruptPath(dxeCtrlBlk); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name dxeChannelInitProgram @ Description Program RIVA DXE engine register with initial value What must be programmed - Source Address (SADRL, chDXESadrlRegAddr) - Destination address (DADRL, chDXEDadrlRegAddr) - Next Descriptor address (DESCL, chDXEDesclRegAddr) - current descriptor address (LST_DESCL, chDXELstDesclRegAddr) Not need to program now - Channel Control register (CH_CTRL, chDXECtrlRegAddr) TX : Have to program to trigger send out frame RX : programmed by DXE engine @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeChannelInitProgram ( WLANDXE_CtrlBlkType *dxeCtrlBlk, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 idx; WLANDXE_DescType *currentDesc = NULL; WLANDXE_DescCtrlBlkType *currentCtrlBlk = NULL; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity Check */ if((NULL == dxeCtrlBlk) || (NULL == channelEntry)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Channel Entry is not valid"); return eWLAN_PAL_STATUS_E_INVAL; } /* Program Source address and destination adderss */ if(!channelEntry->channelConfig.useShortDescFmt) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Long Descriptor not support 
yet"); return eWLAN_PAL_STATUS_E_FAILURE; } /* Common register area */ /* Next linked list Descriptor pointer */ status = wpalWriteRegister(channelEntry->channelRegister.chDXEDesclRegAddr, channelEntry->headCtrlBlk->linkedDescPhyAddr); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Write DESC Address register fail"); return status; } if((WDTS_CHANNEL_TX_LOW_PRI == channelEntry->channelType) || (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType)) { /* Program default registers */ /* TX DMA channel, DMA destination address is work Q */ status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrlRegAddr, channelEntry->channelConfig.refWQ); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Write TX DAddress register fail"); return status; } } else if((WDTS_CHANNEL_RX_LOW_PRI == channelEntry->channelType) || (WDTS_CHANNEL_RX_HIGH_PRI == channelEntry->channelType)) { /* Initialize Descriptor control Word First */ currentCtrlBlk = channelEntry->headCtrlBlk; for(idx = 0; idx < channelEntry->channelConfig.nDescs; idx++) { currentDesc = currentCtrlBlk->linkedDesc; currentCtrlBlk = currentCtrlBlk->nextCtrlBlk; } /* RX DMA channel, DMA source address is work Q */ status = wpalWriteRegister(channelEntry->channelRegister.chDXESadrlRegAddr, channelEntry->channelConfig.refWQ); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Write RX SAddress WQ register fail"); return status; } /* RX DMA channel, Program pre allocated destination Address */ status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrlRegAddr, WLANDXE_U32_SWAP_ENDIAN(channelEntry->DescBottomLoc->dxedesc.dxe_short_desc.phyNextL)); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Write RX DAddress 
register fail"); return status; } /* RX Channels, default Control registers MUST BE ENABLED */ wpalWriteRegister(channelEntry->channelRegister.chDXECtrlRegAddr, channelEntry->extraConfig.chan_mask); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Write RX Control register fail"); return status; } } else { /* H2H test channel, not use work Q */ /* Program pre allocated destination Address */ status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrlRegAddr, WLANDXE_U32_SWAP_ENDIAN(channelEntry->DescBottomLoc->dxedesc.dxe_short_desc.phyNextL)); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelInitProgram Write RX DAddress register fail"); return status; } } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name dxeChannelStart @ Description Start Specific Channel @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeChannelStart ( WLANDXE_CtrlBlkType *dxeCtrlBlk, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 regValue = 0; wpt_uint32 intMaskVal = 0; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); channelEntry->extraConfig.chEnabled = eWLAN_PAL_TRUE; channelEntry->extraConfig.chConfigured = eWLAN_PAL_TRUE; /* Enable individual channel * not to break current channel setup, first read register */ status = wpalReadRegister(WALNDEX_DMA_CH_EN_ADDRESS, &regValue); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStart Read 
Channel Enable register fail"); return status; } /* Enable Channel specific Interrupt */ status = wpalReadRegister(WLANDXE_INT_MASK_REG_ADDRESS, &intMaskVal); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStart Read INT_MASK register fail"); return status; } intMaskVal |= channelEntry->extraConfig.intMask; status = wpalWriteRegister(WLANDXE_INT_MASK_REG_ADDRESS, intMaskVal); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStart Write INT_MASK register fail"); return status; } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name dxeChannelStop @ Description Stop Specific Channel @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeChannelStop ( WLANDXE_CtrlBlkType *dxeCtrlBlk, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 intMaskVal = 0; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity */ if((NULL == dxeCtrlBlk) || (NULL == channelEntry)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStop Invalid arg input"); return eWLAN_PAL_STATUS_E_INVAL; } if ( (channelEntry->extraConfig.chEnabled != eWLAN_PAL_TRUE) || (channelEntry->extraConfig.chConfigured != eWLAN_PAL_TRUE)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStop channels are not enabled "); return status; } /* Maskout interrupt */ status = wpalReadRegister(WLANDXE_INT_MASK_REG_ADDRESS, &intMaskVal); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, 
eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStop Read INT_MASK register fail"); return status; } intMaskVal ^= channelEntry->extraConfig.intMask; status = wpalWriteRegister(WLANDXE_INT_MASK_REG_ADDRESS, intMaskVal); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStop Write INT_MASK register fail"); return status; } channelEntry->extraConfig.chEnabled = eWLAN_PAL_FALSE; /* Stop Channel ??? */ HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name dxeChannelClose @ Description Close Specific Channel Free pre allocated RX frame buffer if RX channel Free DXE descriptor for each channel Free Descriptor control block for each channel @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeChannelClose ( WLANDXE_CtrlBlkType *dxeCtrlBlk, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 idx; WLANDXE_DescCtrlBlkType *currentCtrlBlk = NULL; WLANDXE_DescCtrlBlkType *nextCtrlBlk = NULL; WLANDXE_DescType *currentDescriptor = NULL; WLANDXE_DescType *nextDescriptor = NULL; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity */ if((NULL == dxeCtrlBlk) || (NULL == channelEntry)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelStop Invalid arg input"); return eWLAN_PAL_STATUS_E_INVAL; } currentCtrlBlk = channelEntry->headCtrlBlk; if(NULL != currentCtrlBlk) { currentDescriptor = currentCtrlBlk->linkedDesc; for(idx = 0; idx < channelEntry->numDesc; idx++) { if (idx + 1 != channelEntry->numDesc) { nextCtrlBlk = currentCtrlBlk->nextCtrlBlk; nextDescriptor = 
nextCtrlBlk->linkedDesc; } else { nextCtrlBlk = NULL; nextDescriptor = NULL; } if((WDTS_CHANNEL_RX_LOW_PRI == channelEntry->channelType) || (WDTS_CHANNEL_RX_HIGH_PRI == channelEntry->channelType)) { if (NULL != currentCtrlBlk->xfrFrame) { wpalUnlockPacket(currentCtrlBlk->xfrFrame); wpalPacketFree(currentCtrlBlk->xfrFrame); } } /* * It is the responsibility of DXE to walk through the * descriptor chain and unlock any pending packets (if * locked). */ if((WDTS_CHANNEL_TX_LOW_PRI == channelEntry->channelType) || (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType)) { if((NULL != currentCtrlBlk->xfrFrame) && (eWLAN_PAL_STATUS_SUCCESS == wpalIsPacketLocked(currentCtrlBlk->xfrFrame))) { wpalUnlockPacket(currentCtrlBlk->xfrFrame); wpalPacketFree(currentCtrlBlk->xfrFrame); } } #if (defined(FEATURE_R33D) || defined(WLANDXE_TEST_CHANNEL_ENABLE)) // descriptors allocated individually so free them individually wpalDmaMemoryFree(currentDescriptor); #endif wpalMemoryFree(currentCtrlBlk); currentCtrlBlk = nextCtrlBlk; currentDescriptor = nextDescriptor; if(NULL == currentCtrlBlk) { /* Already reach last of the control block * Not need to process anymore, break */ break; } } } #if !(defined(FEATURE_R33D) || defined(WLANDXE_TEST_CHANNEL_ENABLE)) // descriptors were allocated as a single chunk so free the chunk if(NULL != channelEntry->descriptorAllocation) { wpalDmaMemoryFree(channelEntry->descriptorAllocation); } #endif HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name dxeChannelCleanInt @ Description Clean up interrupt from RIVA HW After Host finish to handle interrupt, interrupt signal must be cleaned up Otherwise next interrupt will not be generated @ Parameters WLANDXE_ChannelCBType *channelEntry Channel specific control block wpt_uint32 *chStat Channel Status register value @ Return wpt_status 
===========================================================================*/ static wpt_status dxeChannelCleanInt ( WLANDXE_ChannelCBType *channelEntry, wpt_uint32 *chStat ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Read Channel Status Register to know why INT Happen */ status = wpalReadRegister(channelEntry->channelRegister.chDXEStatusRegAddr, chStat); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelCleanInt Read CH STAT register fail"); return eWLAN_PAL_STATUS_E_FAULT; } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Channel INT Clean, Status 0x%x", channelType[channelEntry->channelType], *chStat); /* Clean up all the INT within this channel */ status = wpalWriteRegister(WLANDXE_INT_CLR_ADDRESS, (1 << channelEntry->assignedDMAChannel)); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelCleanInt Write CH Clean register fail"); return eWLAN_PAL_STATUS_E_FAULT; } /* Clean up Error INT Bit */ if(WLANDXE_CH_STAT_INT_ERR_MASK & *chStat) { status = wpalWriteRegister(WLANDXE_INT_ERR_CLR_ADDRESS, (1 << channelEntry->assignedDMAChannel)); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelCleanInt Read CH STAT register fail"); return eWLAN_PAL_STATUS_E_FAULT; } } /* Clean up DONE INT Bit */ if(WLANDXE_CH_STAT_INT_DONE_MASK & *chStat) { status = wpalWriteRegister(WLANDXE_INT_DONE_CLR_ADDRESS, (1 << channelEntry->assignedDMAChannel)); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelCleanInt Read CH STAT register fail"); return eWLAN_PAL_STATUS_E_FAULT; } } /* Clean up ED INT Bit */ if(WLANDXE_CH_STAT_INT_ED_MASK & *chStat) { status = wpalWriteRegister(WLANDXE_INT_ED_CLR_ADDRESS, (1 << 
channelEntry->assignedDMAChannel)); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeChannelCleanInt Read CH STAT register fail"); return eWLAN_PAL_STATUS_E_FAULT; } } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } #ifdef WLAN_DXE_LOW_RESOURCE_TIMER /*========================================================================== @ Function Name dxeRXResourceAvailableTimerExpHandler @ Description During pre-set timeperiod, if free available RX buffer is not allocated Trigger Driver re-loading to recover RX dead end @ Parameters v_VOID_t *usrData DXE context @ Return NONE ===========================================================================*/ void dxeRXResourceAvailableTimerExpHandler ( void *usrData ) { WLANDXE_CtrlBlkType *dxeCtxt = NULL; wpt_uint32 numRxFreePackets; wpt_uint32 numAllocFailures; dxeCtxt = (WLANDXE_CtrlBlkType *)usrData; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "RX Low resource, Durign wait time period %d, RX resource not allocated", T_WLANDXE_MAX_RX_PACKET_WAIT); //This API wil also try to replenish packets wpalGetNumRxFreePacket(&numRxFreePackets); wpalGetNumRxPacketAllocFailures(&numAllocFailures); HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "Free Packets: %u, Alloc Failures: %u", numRxFreePackets, numAllocFailures); if (numRxFreePackets > 0) { /* If no. of free packets is greater than 0, it means * that some packets were replenished and can be used * by DXE to receive frames. So try to restart the * resourceAvailable timer here, it will be stopped * by the DXE's low resource callback if atleast one * free packet reaches DXE. */ if (NULL != dxeCtxt) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "%s: Replenish successful. 
Restart the Rx Low resource timer", __func__); wpalTimerStart(&dxeCtxt->rxResourceAvailableTimer, T_WLANDXE_MAX_RX_PACKET_WAIT); return; } } if (NULL != dxeCtxt) dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE; wpalWlanReload(); if (NULL != usrData) dxeStartSSRTimer((WLANDXE_CtrlBlkType *)usrData); return; } #endif /*========================================================================== @ Function Name dxeStartSSRTimer @ Description Start the dxeSSRTimer after issuing the FIQ to restart the WCN chip, this makes sure that if the chip does not respond to the FIQ within the timeout period the dxeSSRTimer expiration handler will take the appropriate action. @ Parameters NONE @ Return NONE ===========================================================================*/ static void dxeStartSSRTimer ( WLANDXE_CtrlBlkType *dxeCtxt ) { if(VOS_TIMER_STATE_RUNNING != wpalTimerGetCurStatus(&dxeCtxt->dxeSSRTimer)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN, "%s: Starting SSR Timer",__func__); wpalTimerStart(&dxeCtxt->dxeSSRTimer, T_WLANDXE_SSR_TIMEOUT); } } /*========================================================================== @ Function Name dxeSSRTimerExpHandler @ Description Issue an explicit subsystem restart of the wcnss subsystem if the WCN chip does not respond to the FIQ within the timeout period @ Parameters v_VOID_t *usrData @ Return NONE ===========================================================================*/ void dxeSSRTimerExpHandler ( void *usrData ) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "DXE not shutdown %d ms after FIQ!! Issue SSR", T_WLANDXE_SSR_TIMEOUT); wpalRivaSubystemRestart(); return; } /*========================================================================== @ Function Name dxeRXPacketAvailableCB @ Description If RX frame handler encounts RX buffer pool empty condition, DXE RX handle loop will be blocked till get available RX buffer pool. 
When new RX buffer pool available, Packet available CB function will be called. @ Parameters wpt_packet *freePacket Newly allocated RX buffer v_VOID_t *usrData DXE context @ Return NONE ===========================================================================*/ void dxeRXPacketAvailableCB ( wpt_packet *freePacket, v_VOID_t *usrData ) { WLANDXE_CtrlBlkType *dxeCtxt = NULL; wpt_status status; /* Simple Sanity */ if((NULL == freePacket) || (NULL == usrData)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "Get Free RX Buffer fail, Critical Error"); HDXE_ASSERT(0); return; } dxeCtxt = (WLANDXE_CtrlBlkType *)usrData; if(WLANDXE_CTXT_COOKIE != dxeCtxt->dxeCookie) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "DXE Context data corrupted, Critical Error"); HDXE_ASSERT(0); return; } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO, "DXE RX packet available, post MSG to RX Thread"); dxeCtxt->freeRXPacket = freePacket; /* Serialize RX Packet Available message upon RX thread */ if (NULL == dxeCtxt->rxPktAvailMsg) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "DXE NULL pkt"); HDXE_ASSERT(0); return; } status = wpalPostRxMsg(WDI_GET_PAL_CTX(), dxeCtxt->rxPktAvailMsg); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "dxeRXPacketAvailableCB serialize fail"); } return; } /*========================================================================== @ Function Name dxeRXFrameSingleBufferAlloc @ Description Allocate Platform packet buffer to prepare RX frame RX frame memory space must be pre allocted and must be asigned to descriptor then whenever DMA engine want to tranfer frame from BMU, buffer must be ready @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block WLANDXE_DescCtrlBlkType currentCtrlBlock current control block which have to be asigned frame buffer @ Return 
wpt_status ===========================================================================*/ static wpt_status dxeRXFrameSingleBufferAlloc ( WLANDXE_CtrlBlkType *dxeCtxt, WLANDXE_ChannelCBType *channelEntry, WLANDXE_DescCtrlBlkType *currentCtrlBlock ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_packet *currentPalPacketBuffer = NULL; WLANDXE_DescType *currentDesc = NULL; #ifdef FEATURE_R33D wpt_uint32 virtualAddressPCIe; wpt_uint32 physicalAddressPCIe; #else wpt_iterator iterator; wpt_uint32 allocatedSize = 0; void *physAddress = NULL; #endif /* FEATURE_R33D */ currentDesc = currentCtrlBlock->linkedDesc; if(currentDesc->descCtrl.valid) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "This Descriptor is valid, Do not refill"); return eWLAN_PAL_STATUS_E_EXISTS; } /* First check if a packet pointer has already been provided by a previously invoked Rx packet available callback. If so use that packet. */ if(dxeCtxt->rxPalPacketUnavailable && (NULL != dxeCtxt->freeRXPacket)) { currentPalPacketBuffer = dxeCtxt->freeRXPacket; dxeCtxt->rxPalPacketUnavailable = eWLAN_PAL_FALSE; dxeCtxt->freeRXPacket = NULL; if (channelEntry->doneIntDisabled) { wpalWriteRegister(channelEntry->channelRegister.chDXECtrlRegAddr, channelEntry->extraConfig.chan_mask); channelEntry->doneIntDisabled = 0; } } else if(!dxeCtxt->rxPalPacketUnavailable) { /* Allocate platform Packet buffer and OS Frame Buffer at here */ currentPalPacketBuffer = wpalPacketAlloc(eWLAN_PAL_PKT_TYPE_RX_RAW, WLANDXE_DEFAULT_RX_OS_BUFFER_SIZE, dxeRXPacketAvailableCB, (void *)dxeCtxt); if(NULL == currentPalPacketBuffer) { dxeCtxt->rxPalPacketUnavailable = eWLAN_PAL_TRUE; #ifdef WLAN_DXE_LOW_RESOURCE_TIMER /* Out of RX free buffer, * Start timer to recover from RX dead end */ if(VOS_TIMER_STATE_RUNNING != wpalTimerGetCurStatus(&dxeCtxt->rxResourceAvailableTimer)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN, "RX Low resource, wait available resource"); 
wpalTimerStart(&dxeCtxt->rxResourceAvailableTimer, T_WLANDXE_MAX_RX_PACKET_WAIT); } #endif } } if(NULL == currentPalPacketBuffer) { return eWLAN_PAL_STATUS_E_RESOURCES; } currentCtrlBlock->xfrFrame = currentPalPacketBuffer; currentPalPacketBuffer->pktType = eWLAN_PAL_PKT_TYPE_RX_RAW; currentPalPacketBuffer->pBD = NULL; currentPalPacketBuffer->pBDPhys = NULL; currentPalPacketBuffer->BDLength = 0; #ifdef FEATURE_R33D status = wpalAllocateShadowRxFrame(currentPalPacketBuffer, &physicalAddressPCIe, &virtualAddressPCIe); if((0 == physicalAddressPCIe) || (0 = virtualAddressPCIe)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED, "RX NULL Shadow Memory"); HDXE_ASSERT(0); return eWLAN_PAL_STATUS_E_FAULT; } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED, "RX Shadow Memory Va 0x%x, Pa 0x%x", virtualAddressPCIe, physicalAddressPCIe); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameBufferAlloc Shadow Mem Alloc fail"); return status; } currentCtrlBlock->shadowBufferVa = virtualAddressPCIe; currentPalPacketBuffer->pBDPhys = (void *)physicalAddressPCIe; memset((wpt_uint8 *)currentCtrlBlock->shadowBufferVa, 0, WLANDXE_DEFAULT_RX_OS_BUFFER_SIZE); #else status = wpalLockPacketForTransfer(currentPalPacketBuffer); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameBufferAlloc unable to lock packet"); return status; } /* Init iterator to get physical os buffer address */ status = wpalIteratorInit(&iterator, currentPalPacketBuffer); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameBufferAlloc iterator init fail"); return status; } status = wpalIteratorNext(&iterator, currentPalPacketBuffer, &physAddress, &allocatedSize); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameBufferAlloc iterator Get Next 
pointer fail"); return status; } currentPalPacketBuffer->pBDPhys = physAddress; #endif /* FEATURE_R33D */ /* DXE descriptor must have SWAPPED addres in it's structure * !!! SWAPPED !!! */ currentDesc->dxedesc.dxe_short_desc.dstMemAddrL = WLANDXE_U32_SWAP_ENDIAN((wpt_uint32)(uintptr_t)currentPalPacketBuffer->pBDPhys); return status; } /*========================================================================== @ Function Name dxeRXFrameRefillRing @ Description Allocate Platform packet buffers to try to fill up the DXE Rx ring @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return wpt_status ===========================================================================*/ static wpt_status dxeRXFrameRefillRing ( WLANDXE_CtrlBlkType *dxeCtxt, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; WLANDXE_DescCtrlBlkType *currentCtrlBlk = channelEntry->tailCtrlBlk; WLANDXE_DescType *currentDesc = NULL; while(channelEntry->numFreeDesc > 0) { /* Current Control block is free * and associated frame buffer is not linked with control block anymore * allocate new frame buffer for current control block */ status = dxeRXFrameSingleBufferAlloc(dxeCtxt, channelEntry, currentCtrlBlk); if((eWLAN_PAL_STATUS_SUCCESS != status) && (eWLAN_PAL_STATUS_E_EXISTS != status)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN, "dxeRXFrameRefillRing, out of RX buffer pool, break here"); break; } if(eWLAN_PAL_STATUS_E_EXISTS == status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameRefillRing, Descriptor Non-Empry"); } currentDesc = currentCtrlBlk->linkedDesc; currentDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_read; /* Issue a dummy read from the DXE descriptor DDR location to ensure that any posted writes are reflected in memory before DXE looks at the descriptor. 
*/ if(channelEntry->extraConfig.cw_ctrl_read != currentDesc->descCtrl.ctrl) { //HDXE_ASSERT(0); } /* Kick off the DXE ring, if not in any power save mode */ if(WLANDXE_POWER_STATE_FULL == dxeCtxt->hostPowerState) { wpalWriteRegister(WALNDEX_DMA_ENCH_ADDRESS, 1 << channelEntry->assignedDMAChannel); } currentCtrlBlk = currentCtrlBlk->nextCtrlBlk; if(eWLAN_PAL_STATUS_E_EXISTS != status) { --channelEntry->numFreeDesc; } } channelEntry->tailCtrlBlk = currentCtrlBlk; return status; } /*========================================================================== @ Function Name dxeRXFrameRouteUpperLayer @ Description Test DXE descriptors and if any RX frame pending within RING, Route to upper layer @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return < 0 Any error happen 0 No frame pulled from RX RING int number of RX frames pulled from RX ring ===========================================================================*/ static wpt_int32 dxeRXFrameRouteUpperLayer ( WLANDXE_CtrlBlkType *dxeCtxt, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; WLANDXE_DescCtrlBlkType *currentCtrlBlk = NULL; WLANDXE_DescType *currentDesc = NULL; wpt_uint32 descCtrl, frameCount = 0, i; wpt_int32 ret_val = -1; currentCtrlBlk = channelEntry->headCtrlBlk; currentDesc = currentCtrlBlk->linkedDesc; /* Descriptoe should be SWAPPED ???? 
*/ descCtrl = currentDesc->descCtrl.ctrl; /* Get frames while VALID bit is not set (DMA complete) and a data * associated with it */ while(!(WLANDXE_U32_SWAP_ENDIAN(descCtrl) & WLANDXE_DESC_CTRL_VALID) && (eWLAN_PAL_STATUS_SUCCESS == wpalIsPacketLocked(currentCtrlBlk->xfrFrame)) && (currentCtrlBlk->xfrFrame->pInternalData != NULL) && (frameCount < WLANDXE_MAX_REAPED_RX_FRAMES) ) { channelEntry->numTotalFrame++; channelEntry->numFreeDesc++; #ifdef FEATURE_R33D /* Transfer Size should be */ currentDesc->xfrSize = WLANDXE_U32_SWAP_ENDIAN(WLANDXE_DEFAULT_RX_OS_BUFFER_SIZE); status = wpalPrepareRxFrame(&currentCtrlBlk->xfrFrame, (wpt_uint32)currentCtrlBlk->xfrFrame->pBDPhys, currentCtrlBlk->shadowBufferVa, WLANDXE_DEFAULT_RX_OS_BUFFER_SIZE); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameReady Prepare RX Frame fail"); return ret_val; } status = wpalFreeRxFrame(currentCtrlBlk->shadowBufferVa); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameReady Free Shadow RX Frame fail"); return ret_val; } #else /* FEATURE_R33D */ status = wpalUnlockPacket(currentCtrlBlk->xfrFrame); if (eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXFrameReady unable to unlock packet"); return ret_val; } #endif /* FEATURE_R33D */ /* This Descriptor is valid, so linked Control block is also valid * Linked Control block has pre allocated packet buffer * So, just let upper layer knows preallocated frame pointer will be OK */ /* Reap Rx frames */ rx_reaped_buf[frameCount] = currentCtrlBlk->xfrFrame; frameCount++; currentCtrlBlk->xfrFrame = NULL; /* Now try to refill the ring with empty Rx buffers to keep DXE busy */ dxeRXFrameRefillRing(dxeCtxt, channelEntry); /* Test next contorl block * if valid, this control block also has new RX frame must be handled */ currentCtrlBlk = (WLANDXE_DescCtrlBlkType 
*)currentCtrlBlk->nextCtrlBlk;   /* (continuation of dxeRXFrameRouteUpperLayer: advance to next ctrl blk) */
      currentDesc = currentCtrlBlk->linkedDesc;
      descCtrl    = currentDesc->descCtrl.ctrl;
   }

   /* Update head control block
    * current control block's valid bit was 0
    * next trial first control block must be current control block */
   channelEntry->headCtrlBlk = currentCtrlBlk;

   /* Deliver all the reaped RX frames to upper layers */
   i = 0;
   while(i < frameCount)
   {
      dxeCtxt->rxReadyCB(dxeCtxt->clientCtxt,
                         rx_reaped_buf[i],
                         channelEntry->channelType);
      i++;
   }

   /* Caller uses the count of routed frames to detect empty interrupts */
   return frameCount;
}

/*==========================================================================
  @  Function Name
      dxeRXFrameReady

  @  Description
      Pop frame from descriptor and route frame to upper transport layer
      Assign new platform packet buffer into used descriptor
      Actual frame pop and resource realloc

  @  Parameters
      WLANDXE_CtrlBlkType     *dxeCtxt
                               DXE host driver main control block
      WLANDXE_ChannelCBType   *channelEntry
                               Channel specific control block
      wpt_uint32               chStat
                               Channel status register snapshot taken by the caller

  @  Return
      wpt_status
===========================================================================*/
static wpt_status dxeRXFrameReady
(
   WLANDXE_CtrlBlkType     *dxeCtxt,
   WLANDXE_ChannelCBType   *channelEntry,
   wpt_uint32               chStat
)
{
   wpt_status               status           = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_DescCtrlBlkType *currentCtrlBlk   = NULL;
   WLANDXE_DescType        *currentDesc      = NULL;
   wpt_uint32               descCtrl;
   wpt_int32                frameCount       = 0;
   wpt_uint32               descLoop;
   wpt_uint32               invalidatedFound = 0;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Sanity Check */
   if((NULL == dxeCtxt) || (NULL == channelEntry))
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXFrameReady Channel Entry is not valid");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   /* Normal path: walk completed descriptors and hand frames to the client */
   frameCount = dxeRXFrameRouteUpperLayer(dxeCtxt, channelEntry);
   if(0 > frameCount)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXFrameReady RX frame route fail");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   /* Empty interrupt (no frame routed) while host is awake: this is the
    * abnormal-recovery path below.  In IMPS/DOWN the caller polls, so an
    * empty result is expected and skipped here. */
   if((0 == frameCount) &&
      ((WLANDXE_POWER_STATE_BMPS == dxeCtxt->hostPowerState) ||
       (WLANDXE_POWER_STATE_FULL == dxeCtxt->hostPowerState)))
   {
      /* None of the frame handled and CH is not enabled
       * RX CH wrap around happen and No RX free frame
       * RX side should wait till new free frame available in the pool
       * Do not try reload driver at here*/
      if(!(chStat & WLANDXE_CH_CTRL_EN_MASK))
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXFrameReady %s RING Wrapped, RX Free Low 0x%x",
                  channelType[channelEntry->channelType], chStat);
         /* This is not empty interrupt case
          * If handle this as empty interrupt, false SSR might be issued
          * Frame count '1' is dummy frame count to avoid SSR */
         channelEntry->numFragmentCurrentChain = 1;
         return eWLAN_PAL_STATUS_SUCCESS;
      }

      currentCtrlBlk = channelEntry->headCtrlBlk;
      currentDesc    = currentCtrlBlk->linkedDesc;
      descCtrl       = currentDesc->descCtrl.ctrl;

      if(WLANDXE_POWER_STATE_BMPS != dxeCtxt->hostPowerState)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
                  "RX ISR called but no frame handled PWS %d, channel %s",
                  (int)dxeCtxt->hostPowerState,
                  channelType[channelEntry->channelType]);
      }

      /* Current interupt empty and previous interrupt also empty
       * detected successive empty interrupt
       * or first interrupt empty, this should not happen */
      if(0 == channelEntry->numFragmentCurrentChain)
      {
         /* Dump channel/descriptor state for postmortem before recovery */
         dxeChannelMonitor("RX Ready", channelEntry, NULL);
         dxeDescriptorDump(channelEntry, channelEntry->headCtrlBlk->linkedDesc, 0);
         dxeChannelRegisterDump(channelEntry, "RX successive empty interrupt", NULL);
         dxeChannelAllDescDump(channelEntry, channelEntry->channelType, NULL);

         /* Abnormal interrupt detected, try to find not validated descriptor:
          * scan the whole ring looking for a descriptor whose VALID bit the
          * HW has cleared, to re-sync the SW head with the HW position. */
         for(descLoop = 0; descLoop < channelEntry->numDesc; descLoop++)
         {
            if(!(WLANDXE_U32_SWAP_ENDIAN(descCtrl) & WLANDXE_DESC_CTRL_VALID))
            {
               HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                        "Found Invalidated Descriptor %d", (int)descLoop);
               if(eWLAN_PAL_STATUS_SUCCESS ==
                  wpalIsPacketLocked(currentCtrlBlk->xfrFrame))
               {
                  HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                           "Packet locked, Resync Host and HW");
                  channelEntry->headCtrlBlk = currentCtrlBlk;
                  invalidatedFound = 1;
                  break;
               }
               else
               {
                  HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                           "Packet Not Locked, cannot transfer frame");
               }
            }
            currentCtrlBlk = (WLANDXE_DescCtrlBlkType *)currentCtrlBlk->nextCtrlBlk;
            currentDesc    = currentCtrlBlk->linkedDesc;
            descCtrl       = currentDesc->descCtrl.ctrl;
         }

         /* Invalidated descriptor found, and that is not head descriptor
          * This means HW/SW descriptor miss match happen, and we may recover
          * with just resync.  Try re-sync here */
         if((invalidatedFound) && (0 != descLoop))
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "Found New Sync location with HW, handle frames from there");
            frameCount = dxeRXFrameRouteUpperLayer(dxeCtxt, channelEntry);
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "re-sync routed %d frames to upper layer", (int)frameCount);
            channelEntry->numFragmentCurrentChain = frameCount;
         }
         /* Successive Empty interrupt
          * But this case, first descriptor also invalidated, then it means
          * head descriptor is linked with already handled RX frame, then
          * could not unlock RX frame.
          * This is just Out of RX buffer pool, not need to anything here */
         else if((invalidatedFound) && (0 == descLoop))
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "Out of RX Low resource, and INT came in, do nothing till get RX resource");
         }
         /* Critical error, reload driver (SSR) — no recoverable descriptor found */
         else
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "Could not found invalidated descriptor");
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "RX successive empty interrupt, Could not find invalidated DESC reload driver");
            dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
            wpalWlanReload();
            dxeStartSSRTimer(dxeCtxt);
         }
      }
   }

   /* Remember how many frames this pass handled; 0 here is what flags a
    * "successive empty interrupt" on the next invocation */
   channelEntry->numFragmentCurrentChain = frameCount;
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}
/*==========================================================================
  @  Function Name
      dxeNotifySmsm

  @  Description:
      Notify SMSM to start DXE engine and/or condition of Tx ring buffer

  @  Parameters
      wpt_boolean kickDxe   toggle the SMSM TX-enable bit to kick DXE
      wpt_boolean ringEmpty current emptiness of the Tx rings, reflected
                            into the SMSM rings-empty bit

  @  Return
      wpt_status
===========================================================================*/
static wpt_status dxeNotifySmsm
(
   wpt_boolean kickDxe,
   wpt_boolean ringEmpty
)
{
   wpt_uint32 clrSt = 0;
   wpt_uint32 setSt = 0;

   if(kickDxe)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
               "Kick off DXE");

      /* The TX_ENABLE bit is edge-triggered on the remote side, so it is
       * toggled (set on one kick, cleared on the next) rather than held */
      if(tempDxeCtrlBlk->lastKickOffDxe == 0)
      {
         setSt |= WPAL_SMSM_WLAN_TX_ENABLE;
         tempDxeCtrlBlk->lastKickOffDxe = 1;
      }
      else if(tempDxeCtrlBlk->lastKickOffDxe == 1)
      {
         clrSt |= WPAL_SMSM_WLAN_TX_ENABLE;
         tempDxeCtrlBlk->lastKickOffDxe = 0;
      }
      else
      {
         /* lastKickOffDxe must only ever be 0 or 1 */
         HDXE_ASSERT(0);
      }
   }
   else
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
               "no need to kick off DXE");
   }

   tempDxeCtrlBlk->txRingsEmpty = ringEmpty;
   if(ringEmpty)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
               "SMSM Tx Ring Empty");
      clrSt |= WPAL_SMSM_WLAN_TX_RINGS_EMPTY;
   }
   else
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
               "SMSM Tx Ring Not Empty");
      setSt |= WPAL_SMSM_WLAN_TX_RINGS_EMPTY;
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_HIGH,
            "C%x S%x", clrSt, setSt);

   /* Single SMSM transaction carrying both clear and set masks */
   wpalNotifySmsm(clrSt, setSt);

   return eWLAN_PAL_STATUS_SUCCESS;
}

/*==========================================================================
  @  Function Name
      dxePsComplete

  @  Description:
      Utility function to check the resv desc to deside if we can get into
      Power Save mode now

  @  Parameters
      WLANDXE_CtrlBlkType *dxeCtxt    DXE host driver main control block
      wpt_boolean          intr_based true when called from interrupt context

  @  Return
      None
===========================================================================*/
static void dxePsComplete(WLANDXE_CtrlBlkType *dxeCtxt, wpt_boolean intr_based)
{
   /* Nothing to do while the host is fully awake */
   if( dxeCtxt->hostPowerState == WLANDXE_POWER_STATE_FULL )
   {
      return;
   }

   //if both HIGH & LOW Tx channels don't have anything on resv desc,all Tx pkts
   //must have been consumed by RIVA, OK to get into BMPS
   if((0 == dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].numRsvdDesc) &&
      (0 == dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI].numRsvdDesc))
   {
      tempDxeCtrlBlk->ringNotEmpty = eWLAN_PAL_FALSE;
      //if host is in BMPS & no pkt to Tx, RIVA can go to power save
      if(WLANDXE_POWER_STATE_BMPS == dxeCtxt->hostPowerState)
      {
         dxeCtxt->rivaPowerState = WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN;
         dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);
      }
   }
   else //still more pkts to be served by RIVA
   {
      tempDxeCtrlBlk->ringNotEmpty = eWLAN_PAL_TRUE;
      switch(dxeCtxt->rivaPowerState)
      {
         case WLANDXE_RIVA_POWER_STATE_ACTIVE:
            //NOP
            break;
         case WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN:
            /* Only wake RIVA from interrupt context; non-interrupt callers
             * leave the state unknown */
            if(intr_based)
            {
               dxeCtxt->rivaPowerState = WLANDXE_RIVA_POWER_STATE_ACTIVE;
               dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE);
            }
            break;
         default:
            //assert
            break;
      }
   }
}

/*==========================================================================
  @  Function Name
      dxeRXEventHandler

  @  Description
      Handle serailized RX frame ready event
      First disable interrupt then pick up frame from pre allocated buffer
      Since frame handle is doen, clear interrupt bit to ready next interrupt
      Finally re enable interrupt

  @  Parameters
      wpt_msg   *rxReadyMsg
                 RX frame ready MSG pointer include DXE control context

  @  Return
      NONE
===========================================================================*/
void dxeRXEventHandler
(
   wpt_msg *rxReadyMsg
)
{
   wpt_msg               *msgContent = (wpt_msg *)rxReadyMsg;
   WLANDXE_CtrlBlkType   *dxeCtxt    = NULL;
   wpt_status             status     = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint32             intSrc     = 0;
   WLANDXE_ChannelCBType *channelCb  = NULL;
   wpt_uint32             chHighStat = 0;
   wpt_uint32             chLowStat  = 0;
   wpt_uint32             regValue, chanMask;

   dxeCtxt = (WLANDXE_CtrlBlkType *)(msgContent->pContext);

   /* While an SSR (driver reload) is in flight, ignore further RX events */
   if(eWLAN_PAL_TRUE == dxeCtxt->driverReloadInProcessing)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "RX Ready WLAN Driver re-loading in progress");
      return;
   }

   /* Now try to refill the ring with empty Rx buffers to keep DXE busy */
   dxeRXFrameRefillRing(dxeCtxt, &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI]);
   dxeRXFrameRefillRing(dxeCtxt, &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI]);

   /* NOTE(review): dxeCtxt was already assigned from msgContent->pContext
    * above; this second assignment appears redundant — confirm and clean up */
   dxeCtxt = (WLANDXE_CtrlBlkType *)(msgContent->pContext);

   if((!dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].extraConfig.chEnabled) ||
      (!dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].extraConfig.chEnabled))
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "DXE already stopped in RX event handler. Just return");
      return;
   }

   /* In IMPS/DOWN, registers must not be touched: poll the rings directly */
   if((WLANDXE_POWER_STATE_IMPS == dxeCtxt->hostPowerState) ||
      (WLANDXE_POWER_STATE_DOWN == dxeCtxt->hostPowerState))
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "%s Riva is in %d, Just Pull frames without any register touch ",
               __func__, dxeCtxt->hostPowerState);

      /* Not to touch any register, just pull frame directly from chain ring
       * First high priority */
      channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI];
      status = dxeRXFrameReady(dxeCtxt,
                               channelCb,
                               chHighStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXEventHandler Pull from RX high channel fail");
      }
      /* In case FW could not power collapse in IMPS mode
       * Next power restore might have empty interrupt
       * If IMPS mode has empty interrupt since RX thread race,
       * Invalid re-load driver might happen
       * To prevent invalid re-load driver,
       * IMPS event handler set dummpy frame count */
      channelCb->numFragmentCurrentChain = 1;

      /* Second low priority */
      channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI];
      status = dxeRXFrameReady(dxeCtxt,
                               channelCb,
                               chLowStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXEventHandler Pull from RX low channel fail");
      }
      /* LOW Priority CH same above */
      channelCb->numFragmentCurrentChain = 1;

      /* Interrupt will not enabled at here, it will be enabled at PS mode change */
      tempDxeCtrlBlk->rxIntDisabledByIMPS = eWLAN_PAL_TRUE;
      return;
   }

   /* Disable device interrupt */
   /* Read whole interrupt mask register and exclusive only this channel int */
   status = wpalReadRegister(WLANDXE_INT_SRC_RAW_ADDRESS,
                             &intSrc);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXEventHandler Read INT_SRC register fail");
      return;
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
            "RX Event Handler INT Source 0x%x", intSrc);

#ifndef WLANDXE_TEST_CHANNEL_ENABLE
   /* Test High Priority Channel interrupt is enabled or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI];
   if(intSrc & (1 << channelCb->assignedDMAChannel))
   {
      /* Ack/clear the channel interrupt and capture its status word */
      status = dxeChannelCleanInt(channelCb, &chHighStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXEventHandler INT Clean up fail");
         return;
      }

      if(WLANDXE_CH_STAT_INT_ERR_MASK & chHighStat)
      {
         /* Error Happen during transaction, Handle it */
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : 0x%x Error Reported, Reload Driver",
                  channelType[channelCb->channelType], chHighStat);
         dxeErrChannelDebug(channelCb, chHighStat);
         dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
         wpalWlanReload();
         dxeStartSSRTimer(dxeCtxt);
      }
      else if((WLANDXE_CH_STAT_INT_DONE_MASK & chHighStat) ||
              (WLANDXE_CH_STAT_INT_ED_MASK & chHighStat))
      {
         /* Handle RX Ready for high priority channel */
         status = dxeRXFrameReady(dxeCtxt,
                                  channelCb,
                                  chHighStat);
      }
      else if(WLANDXE_CH_STAT_MASKED_MASK & chHighStat)
      {
         status = dxeRXFrameReady(dxeCtxt,
                                  channelCb,
                                  chHighStat);
      }
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
               "RX HIGH CH EVNT STAT 0x%x, %d frames handled",
               chHighStat, channelCb->numFragmentCurrentChain);

      /* Update the Rx DONE histogram (rolling bit history of DONE ints) */
      channelCb->rxDoneHistogram = (channelCb->rxDoneHistogram << 1);
      if(WLANDXE_CH_STAT_INT_DONE_MASK & chHighStat)
      {
         channelCb->rxDoneHistogram |= 1;
      }
      else
      {
         channelCb->rxDoneHistogram &= ~1;
      }
   }
#else
   /* Test H2H Test interrupt is enabled or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_H2H_TEST_RX];
   if(intSrc & (1 << channelCb->assignedDMAChannel))
   {
      /* NOTE(review): 'chStat' is not declared in this function (only
       * chHighStat/chLowStat are); this WLANDXE_TEST_CHANNEL_ENABLE path
       * looks like it would not compile — confirm before enabling it */
      status = dxeChannelCleanInt(channelCb, &chStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXEventHandler INT Clean up fail");
         return;
      }

      if(WLANDXE_CH_STAT_INT_ERR_MASK & chStat)
      {
         /* Error Happen during transaction, Handle it */
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : 0x%x Error Reported, Reload Driver",
                  channelType[channelCb->channelType], chStat);
         dxeErrChannelDebug(channelCb, chStat);
         dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
         wpalWlanReload();
         dxeStartSSRTimer(dxeCtxt);
      }
      else if(WLANDXE_CH_STAT_INT_ED_MASK & chStat)
      {
         /* Handle RX Ready for high priority channel */
         status = dxeRXFrameReady(dxeCtxt,
                                  channelCb,
                                  chStat);
      }

      /* Update the Rx DONE histogram */
      channelCb->rxDoneHistogram = (channelCb->rxDoneHistogram << 1);
      if(WLANDXE_CH_STAT_INT_DONE_MASK & chStat)
      {
         channelCb->rxDoneHistogram |= 1;
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "DXE Channel Number %d, Rx DONE Histogram 0x%016llx",
                  channelCb->assignedDMAChannel,
                  channelCb->rxDoneHistogram);
      }
      else
      {
         channelCb->rxDoneHistogram &= ~1;
      }
   }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */

   /* Test Low Priority Channel interrupt is enabled or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI];
   if(intSrc & (1 << channelCb->assignedDMAChannel))
   {
      status = dxeChannelCleanInt(channelCb, &chLowStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXEventHandler INT Clean up fail");
         return;
      }

      if(WLANDXE_CH_STAT_INT_ERR_MASK & chLowStat)
      {
         /* Error Happen during transaction, Handle it */
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : 0x%x Error Reported, Reload Driver",
                  channelType[channelCb->channelType], chLowStat);
         dxeErrChannelDebug(channelCb, chLowStat);
         dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
         wpalWlanReload();
         dxeStartSSRTimer(dxeCtxt);
      }
      else if((WLANDXE_CH_STAT_INT_ED_MASK & chLowStat) ||
              (WLANDXE_CH_STAT_INT_DONE_MASK & chLowStat))
      {
         /* Handle RX Ready for low priority channel */
         status = dxeRXFrameReady(dxeCtxt,
                                  channelCb,
                                  chLowStat);
      }

      /* Update the Rx DONE histogram */
      channelCb->rxDoneHistogram = (channelCb->rxDoneHistogram << 1);
      if(WLANDXE_CH_STAT_INT_DONE_MASK & chLowStat)
      {
         channelCb->rxDoneHistogram |= 1;
      }
      else
      {
         channelCb->rxDoneHistogram &= ~1;
      }
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
               "RX LOW CH EVNT STAT 0x%x, %d frames handled",
               chLowStat, channelCb->numFragmentCurrentChain);
   }

   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXEventHandler Handle Frame Ready Fail");
      return;
   }

   /* Prepare Control Register EN Channel */
   if(!(dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].extraConfig.chan_mask &
        WLANDXE_CH_CTRL_EN_MASK))
   {
      HDXE_ASSERT(0);
   }

   /* When the Rx packet pool is exhausted, mask the DONE interrupt for the
    * channel so it does not storm until resources are refilled */
   if (dxeCtxt->rxPalPacketUnavailable &&
       (WLANDXE_CH_STAT_INT_DONE_MASK & chHighStat))
   {
      chanMask = dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].extraConfig.chan_mask &
                 (~WLANDXE_CH_CTRL_INE_DONE_MASK);
      dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].doneIntDisabled = 1;
   }
   else
   {
      chanMask = dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].extraConfig.chan_mask;
      dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].doneIntDisabled = 0;
   }
   wpalWriteRegister(dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI].channelRegister.chDXECtrlRegAddr,
                     chanMask);

   /* Prepare Control Register EN Channel */
   if(!(dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].extraConfig.chan_mask &
        WLANDXE_CH_CTRL_EN_MASK))
   {
      HDXE_ASSERT(0);
   }

   if (dxeCtxt->rxPalPacketUnavailable &&
       (WLANDXE_CH_STAT_INT_DONE_MASK & chLowStat))
   {
      chanMask = dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].extraConfig.chan_mask &
                 (~WLANDXE_CH_CTRL_INE_DONE_MASK);
      dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].doneIntDisabled = 1;
   }
   else
   {
      chanMask =
dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].extraConfig.chan_mask; dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].doneIntDisabled = 0; } wpalWriteRegister(dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI].channelRegister.chDXECtrlRegAddr, chanMask); /* Clear Interrupt handle processing bit * RIVA may power down */ wpalReadRegister(WLANDXE_INT_MASK_REG_ADDRESS, &regValue); regValue &= WLANDXE_RX_INTERRUPT_PRO_UNMASK; wpalWriteRegister(WLANDXE_INT_MASK_REG_ADDRESS, regValue); /* Enable system level ISR */ /* Enable RX ready Interrupt at here */ status = wpalEnableInterrupt(DXE_INTERRUPT_RX_READY); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeRXEventHandler Enable RX Ready interrupt fail"); return; } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return; } /*========================================================================== @ Function Name dxeRXPacketAvailableEventHandler @ Description Handle serialized RX Packet Available event when the corresponding callback is invoked by WPAL. Try to fill up any completed DXE descriptors with available Rx packet buffer pointers. 
  @  Parameters
      wpt_msg   *rxPktAvailMsg
                 RX frame ready MSG pointer include DXE control context

  @  Return
      NONE
===========================================================================*/
void dxeRXPacketAvailableEventHandler
(
   wpt_msg *rxPktAvailMsg
)
{
   WLANDXE_CtrlBlkType   *dxeCtxt   = NULL;
   wpt_status             status    = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_ChannelCBType *channelCb = NULL;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Sanity Check */
   if(NULL == rxPktAvailMsg)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXPacketAvailableEventHandler Context is not valid");
      return;
   }

   dxeCtxt = (WLANDXE_CtrlBlkType *)(rxPktAvailMsg->pContext);

#ifdef WLAN_DXE_LOW_RESOURCE_TIMER
   /* Available resource allocated
    * Stop timer not needed */
   if(VOS_TIMER_STATE_RUNNING ==
      wpalTimerGetCurStatus(&dxeCtxt->rxResourceAvailableTimer))
   {
      wpalTimerStop(&dxeCtxt->rxResourceAvailableTimer);
   }
#endif

   /* do/while(0) gives a single break-out point if either refill fails */
   do
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "dxeRXPacketAvailableEventHandler, start refilling ring");

      channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_HIGH_PRI];
      status = dxeRXFrameRefillRing(dxeCtxt,channelCb);

      // Wait for another callback to indicate when Rx resources are available
      // again.
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         break;
      }

      channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_RX_LOW_PRI];
      status = dxeRXFrameRefillRing(dxeCtxt,channelCb);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         break;
      }
   } while(0);

   if((WLANDXE_POWER_STATE_IMPS == dxeCtxt->hostPowerState) ||
      (WLANDXE_POWER_STATE_DOWN == dxeCtxt->hostPowerState))
   {
      /* Interrupt will not enabled at here, it will be enabled at PS mode change */
      tempDxeCtrlBlk->rxIntDisabledByIMPS = eWLAN_PAL_TRUE;
   }
}

/*==========================================================================
  @  Function Name
      dxeRXISR

  @  Description
      RX frame ready interrupt service routine
      interrupt entry function, this function called based on ISR context
      Must be serialized

  @  Parameters
      void   *hostCtxt
              DXE host driver control context,
              pre registerd during interrupt registration

  @  Return
      NONE
===========================================================================*/
static void dxeRXISR
(
   void *hostCtxt
)
{
   WLANDXE_CtrlBlkType *dxeCtxt = (WLANDXE_CtrlBlkType *)hostCtxt;
   wpt_status           status  = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint32           regValue;

#ifdef FEATURE_R33D
   /* On R33D, verify this interrupt really belongs to DXE before acting */
   status = wpalReadRegister(WLANDXE_INT_SRC_RAW_ADDRESS,
                             &regValue);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXCompISR Read INT_SRC_RAW fail");
      return;
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
            "INT_SRC_RAW 0x%x", regValue);
   if(0 == regValue)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "This is not DXE Interrupt, Reject it 0x%x", regValue);
      return;
   }
#endif /* FEATURE_R33D */

   /* Set Interrupt processing bit
    * During this bit set, WLAN HW may not power collapse */
   wpalReadRegister(WLANDXE_INT_MASK_REG_ADDRESS, &regValue);
   regValue |= WLANPAL_RX_INTERRUPT_PRO_MASK;
   wpalWriteRegister(WLANDXE_INT_MASK_REG_ADDRESS, regValue);

   /* Disable interrupt at here
    * Disable RX Ready system level Interrupt at here
    * Otherwise infinite loop might happen */
   status = wpalDisableInterrupt(DXE_INTERRUPT_RX_READY);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXFrameReadyISR Disable RX ready interrupt fail");
      return;
   }

   /* Serialize RX Ready interrupt upon RX thread */
   if(NULL == dxeCtxt->rxIsrMsg)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeRXFrameReadyISR NULL message");
      HDXE_ASSERT(0);
      return;
   }

   status = wpalPostRxMsg(WDI_GET_PAL_CTX(),
                          dxeCtxt->rxIsrMsg);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
               "dxeRXFrameReadyISR interrupt serialize fail");
   }

   return;
}

/*==========================================================================
  @  Function Name
      dxeTXPushFrame

  @  Description
      Push TX frame into DXE descriptor and DXE register
      Send notification to DXE register that TX frame is ready to transfer

  @  Parameters
      WLANDXE_ChannelCBType   *channelEntry
                               Channel specific control block
      wpt_packet              *palPacket
                               Packet pointer ready to transfer

  @  Return
      PAL_STATUS_T
===========================================================================*/
static wpt_status dxeTXPushFrame
(
   WLANDXE_ChannelCBType   *channelEntry,
   wpt_packet              *palPacket
)
{
   wpt_status               status                = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_DescCtrlBlkType *currentCtrlBlk        = NULL;
   WLANDXE_DescType        *currentDesc           = NULL;
   WLANDXE_DescType        *firstDesc             = NULL;
   WLANDXE_DescType        *LastDesc              = NULL;
   void                    *sourcePhysicalAddress = NULL;
   wpt_uint32               xferSize              = 0;
#ifdef FEATURE_R33D
   tx_frm_pcie_vector_t     frameVector;
   wpt_uint32               Va;
   wpt_uint32               fragCount             = 0;
#else
   wpt_iterator             iterator;
#endif /* FEATURE_R33D */
   wpt_uint32               isEmpty               = 0;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   tempDxeCtrlBlk->smsmToggled = eWLAN_PAL_FALSE;
   /* Record whether both TX rings were empty before this push; used below
    * to decide whether SMSM must be kicked in BMPS */
   if((0 == tempDxeCtrlBlk->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].numRsvdDesc) &&
      (0 == tempDxeCtrlBlk->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI].numRsvdDesc))
   {
      isEmpty = 1;
   }

   channelEntry->numFragmentCurrentChain = 0;
   currentCtrlBlk = channelEntry->headCtrlBlk;

   /* Initialize interator, TX is fragmented */
#ifdef FEATURE_R33D
   memset(&frameVector, 0, sizeof(tx_frm_pcie_vector_t));
   status = wpalPrepareTxFrame(palPacket,
                               &frameVector,
                               &Va);
#else
   status = wpalLockPacketForTransfer(palPacket);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame unable to lock packet");
      return status;
   }
   status = wpalIteratorInit(&iterator, palPacket);
#endif /* FEATURE_R33D */
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      /* NOTE(review): on this and the later mid-loop error returns the packet
       * stays locked (wpalLockPacketForTransfer above) — looks like a
       * resource leak on the error path; confirm against wpalUnlockPacket
       * usage elsewhere in the driver */
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame iterator init fail");
      return status;
   }

   /* !!!! Revisit break condition !!!!!!! */
   while(1)
   {
      /* Get current descriptor pointer from current control block */
      currentDesc = currentCtrlBlk->linkedDesc;
      if(NULL == firstDesc)
      {
         firstDesc = currentCtrlBlk->linkedDesc;
      }
      /* All control block will have same palPacket Pointer
       * to make logic simpler */
      currentCtrlBlk->xfrFrame = palPacket;

      /* Get next fragment physical address and fragment size
       * if this is the first trial, will get first physical address
       * if no more fragment, Descriptor src address will be set as NULL, OK??? */
#ifdef FEATURE_R33D
      if(fragCount == frameVector.num_frg)
      {
         break;
      }
      /* NOTE(review): frg[0].va is stored for every fragment while pa/size
       * use frg[fragCount] — confirm whether shadowBufferVa should also be
       * indexed by fragCount */
      currentCtrlBlk->shadowBufferVa = frameVector.frg[0].va;
      sourcePhysicalAddress = (void *)frameVector.frg[fragCount].pa;
      xferSize              = frameVector.frg[fragCount].size;
      fragCount++;
      if(0 == xferSize)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame invalid transfer size");
         HDXE_ASSERT(0);
         return eWLAN_PAL_STATUS_E_FAILURE;
      }
      if(NULL == sourcePhysicalAddress)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame invalid sourcePhysicalAddress");
         HDXE_ASSERT(0);
         return eWLAN_PAL_STATUS_E_FAILURE;
      }
#else
      status = wpalIteratorNext(&iterator,
                                palPacket,
                                &sourcePhysicalAddress,
                                &xferSize);
      if((NULL == sourcePhysicalAddress) ||
         (0    == xferSize))
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
                  "dxeTXPushFrame end of current frame");
         break;
      }
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame Get next frame fail");
         return status;
      }
#endif /* FEATURE_R33D */

      /* This is the LAST descriptor valid for this transaction */
      LastDesc = currentCtrlBlk->linkedDesc;

      /* Program DXE descriptor (addresses/sizes are kept HW-endian via swap) */
      currentDesc->dxedesc.dxe_short_desc.srcMemAddrL =
         WLANDXE_U32_SWAP_ENDIAN((wpt_uint32)(uintptr_t)sourcePhysicalAddress);

      /* Just normal data transfer from aCPU Flat Memory to BMU Q */
      if((WDTS_CHANNEL_TX_LOW_PRI  == channelEntry->channelType) ||
         (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType))
      {
         currentDesc->dxedesc.dxe_short_desc.dstMemAddrL =
            WLANDXE_U32_SWAP_ENDIAN(channelEntry->channelConfig.refWQ);
      }
      else
      {
         /* Test specific H2H transfer, destination address already set
          * Do Nothing */
      }
      currentDesc->xfrSize = WLANDXE_U32_SWAP_ENDIAN(xferSize);

      /* Program channel control register */
      /* First frame not set VAL bit, why ??? */
      if(0 == channelEntry->numFragmentCurrentChain)
      {
         currentDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_write;
      }
      else
      {
         currentDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_write_valid;
      }

      /* Update statistics */
      channelEntry->numFragmentCurrentChain++;
      channelEntry->numFreeDesc--;
      channelEntry->numRsvdDesc++;

      /* Get next control block */
      currentCtrlBlk = currentCtrlBlk->nextCtrlBlk;
   }
   channelEntry->numTotalFrame++;
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "NUM TX FRAG %d, Total Frame %d",
            channelEntry->numFragmentCurrentChain, channelEntry->numTotalFrame);

   /* Program Channel control register
    * Set as end of packet
    * Enable interrupt also for first code lock down
    * performace optimization, this will be revisited */
   if(NULL == LastDesc)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame NULL Last Descriptor, broken chain");
      return eWLAN_PAL_STATUS_E_FAULT;
   }
   LastDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_write_eop_int;

   /* Now First one also Valid ????
    * this procedure will prevent over handle descriptor from previous
    * TX trigger */
   firstDesc->descCtrl.ctrl = channelEntry->extraConfig.cw_ctrl_write_valid;

   /* If in BMPS mode no need to notify the DXE Engine, notify SMSM instead */
   if(WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN == tempDxeCtrlBlk->rivaPowerState)
   {
      /* Update channel head as next avaliable linked slot */
      channelEntry->headCtrlBlk = currentCtrlBlk;
      if(isEmpty)
      {
         tempDxeCtrlBlk->ringNotEmpty = eWLAN_PAL_TRUE;
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
                  "SMSM_ret LO=%d HI=%d",
                  tempDxeCtrlBlk->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].numRsvdDesc,
                  tempDxeCtrlBlk->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI].numRsvdDesc );
         dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE);
         tempDxeCtrlBlk->smsmToggled = eWLAN_PAL_TRUE;
      }
      return status;
   }

   /* If DXE use external descriptor, registers are not needed to be programmed
    * Just after finish to program descriptor, tirigger to send */
   if(channelEntry->extraConfig.chan_mask & WLANDXE_CH_CTRL_EDEN_MASK)
   {
      /* Issue a dummy read from the DXE descriptor DDR location to
         ensure that any previously posted write to the descriptor
         completes. */
      if(channelEntry->extraConfig.cw_ctrl_write_valid != firstDesc->descCtrl.ctrl)
      {
         //HDXE_ASSERT(0);
      }

      /* Everything is ready
       * Trigger to start DMA */
      status = wpalWriteRegister(channelEntry->channelRegister.chDXECtrlRegAddr,
                                 channelEntry->extraConfig.chan_mask);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame Write Channel Ctrl Register fail");
         return status;
      }

      /* Update channel head as next avaliable linked slot */
      channelEntry->headCtrlBlk = currentCtrlBlk;

      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
               "%s Exit", __func__);
      return status;
   }

   /* If DXE not use external descriptor, program each registers */
   /* Circular buffer handle not need to program DESC register???
    * GEN5 code not programed RING buffer case
    * REVISIT THIS !!!!!! */
   if((WDTS_CHANNEL_TX_LOW_PRI  == channelEntry->channelType) ||
      (WDTS_CHANNEL_TX_HIGH_PRI == channelEntry->channelType))
   {
      /* Destination address, assigned Work Q */
      status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrlRegAddr,
                                 channelEntry->channelConfig.refWQ);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame Program dest address register fail");
         return status;
      }

      /* If descriptor format is SHORT */
      if(channelEntry->channelConfig.useShortDescFmt)
      {
         status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrhRegAddr,
                                    0);
         if(eWLAN_PAL_STATUS_SUCCESS != status)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "dxeTXPushFrame Program dest address register fail");
            return status;
         }
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame LONG Descriptor Format!!!");
      }
   }
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
   else if(WDTS_CHANNEL_H2H_TEST_TX == channelEntry->channelType)
   {
      /* Destination address, Physical memory address */
      status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrlRegAddr,
                                 WLANDXE_U32_SWAP_ENDIAN(firstDesc->dxedesc.dxe_short_desc.dstMemAddrL));
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame Program dest address register fail");
         return status;
      }

      /* If descriptor format is SHORT */
      if(channelEntry->channelConfig.useShortDescFmt)
      {
         status = wpalWriteRegister(channelEntry->channelRegister.chDXEDadrhRegAddr,
                                    0);
         if(eWLAN_PAL_STATUS_SUCCESS != status)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "dxeTXPushFrame Program dest address register fail");
            return status;
         }
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame LONG Descriptor Format!!!");
      }
   }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */

   /* Program Source address register
    * This address is already programmed into DXE Descriptor
    * But register also upadte */
   status = wpalWriteRegister(channelEntry->channelRegister.chDXESadrlRegAddr,
                              WLANDXE_U32_SWAP_ENDIAN(firstDesc->dxedesc.dxe_short_desc.srcMemAddrL));
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame Program src address register fail");
      return status;
   }

   /* If descriptor format is SHORT */
   if(channelEntry->channelConfig.useShortDescFmt)
   {
      status = wpalWriteRegister(channelEntry->channelRegister.chDXESadrhRegAddr,
                                 0);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame Program dest address register fail");
         return status;
      }
   }
   else
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame LONG Descriptor Format!!!");
   }

   /* Linked list Descriptor pointer */
   status = wpalWriteRegister(channelEntry->channelRegister.chDXEDesclRegAddr,
                              channelEntry->headCtrlBlk->linkedDescPhyAddr);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame Write DESC Address register fail");
      return status;
   }

   /* If descriptor format is SHORT */
   if(channelEntry->channelConfig.useShortDescFmt)
   {
      status = wpalWriteRegister(channelEntry->channelRegister.chDXEDeschRegAddr,
                                 0);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXPushFrame Program dest address register fail");
         return status;
      }
   }
   else
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame LONG Descriptor Format!!!");
   }

   /* Transfer Size */
   xferSize = WLANDXE_U32_SWAP_ENDIAN(firstDesc->xfrSize);
   status = wpalWriteRegister(channelEntry->channelRegister.chDXESzRegAddr,
                              xferSize);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXPushFrame Write DESC Address register fail");
      return status;
   }

   /* Everything is ready
    * Trigger to start DMA */
   status =
wpalWriteRegister(channelEntry->channelRegister.chDXECtrlRegAddr, channelEntry->extraConfig.chan_mask); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeTXPushFrame Write Channel Ctrl Register fail"); return status; } /* Update channel head as next avaliable linked slot */ channelEntry->headCtrlBlk = currentCtrlBlk; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name dxeTXCompFrame @ Description TX Frame transfer complete event handler @ Parameters WLANDXE_CtrlBlkType *dxeCtrlBlk, DXE host driver main control block WLANDXE_ChannelCBType *channelEntry Channel specific control block @ Return PAL_STATUS_T ===========================================================================*/ static wpt_status dxeTXCompFrame ( WLANDXE_CtrlBlkType *hostCtxt, WLANDXE_ChannelCBType *channelEntry ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; WLANDXE_DescCtrlBlkType *currentCtrlBlk = NULL; WLANDXE_DescType *currentDesc = NULL; wpt_uint32 descCtrlValue = 0; unsigned int *lowThreshold = NULL; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity */ if((NULL == hostCtxt) || (NULL == channelEntry)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeTXCompFrame Invalid ARG"); return eWLAN_PAL_STATUS_E_INVAL; } if(NULL == hostCtxt->txCompCB) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeTXCompFrame TXCompCB is not registered"); return eWLAN_PAL_STATUS_SUCCESS; } status = wpalMutexAcquire(&channelEntry->dxeChannelLock); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "dxeTXCompFrame Mutex Acquire fail"); return status; } currentCtrlBlk = channelEntry->tailCtrlBlk; currentDesc = currentCtrlBlk->linkedDesc; if( currentCtrlBlk == channelEntry->headCtrlBlk ) { 
      status = wpalMutexRelease(&channelEntry->dxeChannelLock);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXCompFrame Mutex Release fail");
         return status;
      }
      return eWLAN_PAL_STATUS_SUCCESS;
   }

   /* Reclaim completed descriptors from tail toward head */
   while(1)
   {
//      HDXE_ASSERT(WLAN_PAL_IS_STATUS_SUCCESS(WLAN_RivaValidateDesc(currentDesc)));
      descCtrlValue = currentDesc->descCtrl.ctrl;
      if((descCtrlValue & WLANDXE_DESC_CTRL_VALID))
      {
         /* caught up with head, bail out */
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
                  "dxeTXCompFrame caught up with head - next DESC has VALID set");
         break;
      }

      if(currentCtrlBlk->xfrFrame == NULL)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "Invalid transfer frame");
         HDXE_ASSERT(0);
         break;
      }
      channelEntry->numFreeDesc++;
      channelEntry->numRsvdDesc--;

      /* Send Frame TX Complete notification with frame start fragment location */
      /* EOP marks the last fragment of a frame; the client callback fires once
       * per frame, here.  NOTE(review): descCtrlValue is endian-swapped before
       * the EOP test but not before the VALID test above - confirm the two
       * masks are defined for the matching byte orders. */
      if(WLANDXE_U32_SWAP_ENDIAN(descCtrlValue) & WLANDXE_DESC_CTRL_EOP)
      {
         hostCtxt->txCompletedFrames--;
#ifdef FEATURE_R33D
         wpalFreeTxFrame(currentCtrlBlk->shadowBufferVa);
#else
         status = wpalUnlockPacket(currentCtrlBlk->xfrFrame);
         if (eWLAN_PAL_STATUS_SUCCESS != status)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "dxeRXFrameReady unable to unlock packet");
            /* NOTE(review): status is overwritten by the mutex-release result
             * below, so the original unlock failure code is lost to callers. */
            status = wpalMutexRelease(&channelEntry->dxeChannelLock);
            if(eWLAN_PAL_STATUS_SUCCESS != status)
            {
               HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                        "dxeTXCompFrame Mutex Release fail");
            }
            return status;
         }
#endif /* FEATURE_R33D */
         hostCtxt->txCompCB(hostCtxt->clientCtxt,
                            currentCtrlBlk->xfrFrame,
                            eWLAN_PAL_STATUS_SUCCESS);
         channelEntry->numFragmentCurrentChain = 0;
      }
      currentCtrlBlk = currentCtrlBlk->nextCtrlBlk;
      currentDesc    = currentCtrlBlk->linkedDesc;

      /* Break condition
       * Head control block is the control block must be programed for the next TX
       * so, head control block is not programmed control block yet
       * if loop encounte head control block, stop to complete
       * in theory, COMP CB must be called already ???
       */
      if(currentCtrlBlk == channelEntry->headCtrlBlk)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
                  "dxeTXCompFrame caught up with head ptr");
         break;
      }
      /* VALID Bit check ???? */
   }

   /* Tail and Head Control block must be same */
   channelEntry->tailCtrlBlk = currentCtrlBlk;

   /* Select the low-resource threshold belonging to this channel's priority */
   lowThreshold = channelEntry->channelType == WDTS_CHANNEL_TX_LOW_PRI?
      &(hostCtxt->txCompInt.txLowResourceThreshold_LoPriCh):
      &(hostCtxt->txCompInt.txLowResourceThreshold_HiPriCh);

   /* If specific channel hit low resource condition send notification to upper layer */
   if((eWLAN_PAL_TRUE == channelEntry->hitLowResource) &&
      (channelEntry->numFreeDesc > *lowThreshold))
   {
      /* Change it back if we raised it for fetching a remaining packet from TL */
      if(WLANDXE_TX_LOW_RES_THRESHOLD > *lowThreshold)
      {
         *lowThreshold = WLANDXE_TX_LOW_RES_THRESHOLD;
      }

      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "DXE TX %d channel recovered from low resource",
               channelEntry->channelType);
      hostCtxt->lowResourceCB(hostCtxt->clientCtxt,
                              channelEntry->channelType,
                              eWLAN_PAL_TRUE);
      channelEntry->hitLowResource = eWLAN_PAL_FALSE;
      wpalTimerStop(&channelEntry->healthMonitorTimer);
   }

   status = wpalMutexRelease(&channelEntry->dxeChannelLock);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXCompFrame Mutex Release fail");
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
 @  Function Name
      dxeTXEventHandler

 @  Description
      If DXE HW sends TX related interrupt, this event handler will be called.
      Handle higher priority channel first.
      Figureout why interrupt happen and call appropriate final even handler,
      TX complete or error happen.

 @  Parameters
      void               *msgPtr
                          Even MSG

 @  Return
      PAL_STATUS_T
===========================================================================*/
void dxeTXEventHandler
(
   wpt_msg   *msgPtr
)
{
   wpt_msg   *msgContent =
      (wpt_msg *)msgPtr;
   WLANDXE_CtrlBlkType      *dxeCtxt    = NULL;
   wpt_status                status     = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint32                intSrc     = 0;
   wpt_uint32                chStat     = 0;
   WLANDXE_ChannelCBType    *channelCb  = NULL;
   wpt_uint8                 bEnableISR = 0;
   /* Counts back-to-back TX COMP interrupts taken while the host believes
    * RIVA is in IMPS; reset as soon as a non-IMPS interrupt is handled. */
   static wpt_uint8          successiveIntWithIMPS;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   dxeCtxt = (WLANDXE_CtrlBlkType *)(msgContent->pContext);
   dxeCtxt->ucTxMsgCnt = 0;

   if(eWLAN_PAL_TRUE == dxeCtxt->driverReloadInProcessing)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "wlan: TX COMP WLAN Driver re-loading in progress");
      return;
   }

   /* Return from here if the RIVA is in IMPS, to avoid register access */
   if(WLANDXE_POWER_STATE_IMPS == dxeCtxt->hostPowerState)
   {
      successiveIntWithIMPS++;
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXEventHandler IMPS TX COMP INT successiveIntWithIMPS %d",
               successiveIntWithIMPS);
      /* Reclaim completed frames on both TX rings; dxeTXCompFrame touches
       * descriptor memory only, not DXE registers. */
      status = dxeTXCompFrame(dxeCtxt,
                              &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI]);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXEventHandler IMPS HC COMP interrupt fail");
      }
      status = dxeTXCompFrame(dxeCtxt,
                              &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI]);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXEventHandler IMPS LC COMP interrupt fail");
      }

      /* Re-enable the interrupt only on the first IMPS occurrence and only
       * if frames remain un-reclaimed */
      if(((dxeCtxt->txCompletedFrames) &&
          (eWLAN_PAL_FALSE == dxeCtxt->txIntEnable)) &&
         (successiveIntWithIMPS == 1))
      {
         dxeCtxt->txIntEnable = eWLAN_PAL_TRUE;
         wpalEnableInterrupt(DXE_INTERRUPT_TX_COMPLE);
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "TX COMP INT Enabled, remain TX frame count on ring %d",
                  dxeCtxt->txCompletedFrames);
         /*Kicking the DXE after the TX Complete interrupt was enabled - to avoid
           the posibility of a race*/
         dxePsComplete(dxeCtxt, eWLAN_PAL_TRUE);
      }
      else
      {
         dxeCtxt->txIntEnable = eWLAN_PAL_FALSE;
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "TX COMP INT NOT Enabled, RIVA still wake up? remain TX frame count on ring %d, successiveIntWithIMPS %d",
                  dxeCtxt->txCompletedFrames, successiveIntWithIMPS);
      }
      return;
   }
   successiveIntWithIMPS = 0;

   if((!dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI].extraConfig.chEnabled) ||
      (!dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].extraConfig.chEnabled))
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "DXE already stopped in TX event handler. Just return");
      return;
   }

   /* Disable device interrupt */
   /* Read whole interrupt mask register and exclusive only this channel int */
   status = wpalReadRegister(WLANDXE_INT_SRC_RAW_ADDRESS,
                             &intSrc);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXCompleteEventHandler Read INT_DONE_SRC register fail");
      return;
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_MED,
            "TX Event Handler INT Source 0x%x", intSrc);

   /* Test High Priority Channel is the INT source or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI];
   if(intSrc & (1 << channelCb->assignedDMAChannel))
   {
      status = dxeChannelCleanInt(channelCb, &chStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXEventHandler INT Clean up fail");
         return;
      }

      if(WLANDXE_CH_STAT_INT_ERR_MASK & chStat)
      {
         /* Channel reported an error: dump state and trigger driver reload */
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : 0x%x Error Reported, Reload Driver",
                  channelType[channelCb->channelType], chStat);
         dxeErrChannelDebug(channelCb, chStat);
         dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
         wpalWlanReload();
         dxeStartSSRTimer(dxeCtxt);
      }
      else if(WLANDXE_CH_STAT_INT_DONE_MASK & chStat)
      {
         /* Handle TX complete for high priority channel */
         status = dxeTXCompFrame(dxeCtxt, channelCb);
         bEnableISR = 1;
      }
      else if(WLANDXE_CH_STAT_INT_ED_MASK & chStat)
      {
         /* Handle TX complete for high priority channel */
         status = dxeTXCompFrame(dxeCtxt, channelCb);
         bEnableISR = 1;
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXEventHandler TX HI status=%x", chStat);
      }

      if(WLANDXE_CH_STAT_MASKED_MASK & chStat)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_HIGH,
                  "dxeTXEventHandler TX HIGH Channel Masked Unmask it!!!!");
      }
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_HIGH,
               "TX HIGH STAT 0x%x RESRVD %d", chStat, channelCb->numRsvdDesc);
   }

   /* Test Low Priority Channel interrupt is enabled or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI];
   if(intSrc & (1 << channelCb->assignedDMAChannel))
   {
      status = dxeChannelCleanInt(channelCb, &chStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXEventHandler INT Clean up fail");
         return;
      }

      if(WLANDXE_CH_STAT_INT_ERR_MASK & chStat)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : 0x%x Error Reported, Reload Driver",
                  channelType[channelCb->channelType], chStat);
         dxeErrChannelDebug(channelCb, chStat);
         dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
         wpalWlanReload();
         dxeStartSSRTimer(dxeCtxt);
      }
      else if(WLANDXE_CH_STAT_INT_DONE_MASK & chStat)
      {
         /* Handle TX complete for low priority channel */
         status = dxeTXCompFrame(dxeCtxt, channelCb);
         bEnableISR = 1;
      }
      else if(WLANDXE_CH_STAT_INT_ED_MASK & chStat)
      {
         /* Handle TX complete for low priority channel */
         status = dxeTXCompFrame(dxeCtxt, channelCb);
         bEnableISR = 1;
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeTXEventHandler TX LO status=%x", chStat);
      }

      if(WLANDXE_CH_STAT_MASKED_MASK & chStat)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_HIGH,
                  "dxeTXEventHandler TX Low Channel Masked Unmask it!!!!");
      }
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
               "TX LOW STAT 0x%x RESRVD %d", chStat, channelCb->numRsvdDesc);
   }

#ifdef WLANDXE_TEST_CHANNEL_ENABLE
   /* Test H2H TX Channel interrupt is enabled or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_H2H_TEST_TX];
   if(intSrc & (1 << channelCb->assignedDMAChannel))
   {
      status = wpalReadRegister(channelCb->channelRegister.chDXEStatusRegAddr,
                                &chStat);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeChannelCleanInt Read CH STAT register fail");
         return;
      }

      if(WLANDXE_CH_STAT_INT_ERR_MASK & chStat)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : 0x%x Error Reported, Reload Driver",
                  channelType[channelCb->channelType], chStat);
         dxeErrChannelDebug(channelCb, chStat);
         dxeCtxt->driverReloadInProcessing = eWLAN_PAL_TRUE;
         wpalWlanReload();
         dxeStartSSRTimer(dxeCtxt);
      }
      else if(WLANDXE_CH_STAT_INT_DONE_MASK & chStat)
      {
         /* Handle TX complete for high priority channel */
         status = dxeTXCompFrame(dxeCtxt, channelCb);
         if(eWLAN_PAL_STATUS_SUCCESS != status)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                     "dxeTXEventHandler INT Clean up fail");
            return;
         }
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "unexpected channel state %d", chStat);
      }
   }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */

   /* Re-arm the TX complete interrupt if work was done or frames remain */
   if((bEnableISR || (dxeCtxt->txCompletedFrames)) &&
      (eWLAN_PAL_FALSE == dxeCtxt->txIntEnable))
   {
      dxeCtxt->txIntEnable = eWLAN_PAL_TRUE;
      wpalEnableInterrupt(DXE_INTERRUPT_TX_COMPLE);
      if(0 != dxeCtxt->txCompletedFrames)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "TX COMP INT Enabled, remain TX frame count on ring %d",
                  dxeCtxt->txCompletedFrames);
      }
   }
   /*Kicking the DXE after the TX Complete interrupt was enabled - to avoid
     the posibility of a race*/
   dxePsComplete(dxeCtxt, eWLAN_PAL_TRUE);

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return;
}

/*==========================================================================
 @  Function Name
      dxeTXCompleteProcessing

 @  Description
      If DXE HW sends TX related interrupt, this event handler will be called.
      Handle higher priority channel first.
      Figureout why interrupt happen and call appropriate final even handler,
      TX complete or
      error happen.

 @  Parameters
      dxeCtxt   DXE context

 @  Return
      PAL_STATUS_T
===========================================================================*/
void dxeTXCompleteProcessing
(
   WLANDXE_CtrlBlkType *dxeCtxt
)
{
   wpt_status                status    = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_ChannelCBType    *channelCb = NULL;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Test High Priority Channel is the INT source or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI];

   /* Handle TX complete for high priority channel */
   status = dxeTXCompFrame(dxeCtxt, channelCb);

   /* Test Low Priority Channel interrupt is enabled or not */
   channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI];

   /* Handle TX complete for low priority channel */
   status = dxeTXCompFrame(dxeCtxt, channelCb);

   /* Re-enable the TX complete interrupt when frames are still pending or
    * the host is in full power */
   if((eWLAN_PAL_FALSE == dxeCtxt->txIntEnable) &&
      ((dxeCtxt->txCompletedFrames > 0) ||
       (WLANDXE_POWER_STATE_FULL == dxeCtxt->hostPowerState)))
   {
      dxeCtxt->txIntEnable = eWLAN_PAL_TRUE;
      wpalEnableInterrupt(DXE_INTERRUPT_TX_COMPLE);
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "%s %s : %d, %s : %d", __func__,
               channelType[dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI].channelType],
               dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_HIGH_PRI].numRsvdDesc,
               channelType[dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].channelType],
               dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].numRsvdDesc);

      if((WLANDXE_POWER_STATE_FULL != dxeCtxt->hostPowerState) &&
         (eWLAN_PAL_FALSE == tempDxeCtrlBlk->smsmToggled))
      {
         /* After TX Comp processing, still remaining frame on the DXE TX ring
          * And when push frame, RING was not empty marked
          * Then when push frame, no SMSM toggle happen
          * To avoid permanent TX stall, SMSM toggle is needed at here
          * With this toggle, host should gaurantee SMSM state should be changed */
         dxeNotifySmsm(eWLAN_PAL_TRUE, dxeCtxt->txRingsEmpty);
      }
   }

   /*Kicking the DXE after the TX Complete interrupt was enabled - to avoid
     the posibility of a race*/
   dxePsComplete(dxeCtxt, eWLAN_PAL_FALSE);

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return;
}

/*==========================================================================
 @  Function Name
      dxeTXReSyncDesc

 @  Description
      When STA comeout from IMPS, check DXE TX next transfer candidate
      descriptor And HW programmed descriptor.
      If any async happen between HW/SW TX stall will happen

 @  Parameters
      void    *msgPtr
               Message pointer to sync with TX thread

 @  Return
      NONE
===========================================================================*/
void dxeTXReSyncDesc
(
   wpt_msg   *msgPtr
)
{
   wpt_msg                  *msgContent = (wpt_msg *)msgPtr;
   WLANDXE_CtrlBlkType      *pDxeCtrlBlk;
   wpt_uint32                nextDescReg;
   WLANDXE_ChannelCBType    *channelEntry;
   WLANDXE_DescCtrlBlkType  *validCtrlBlk;
   wpt_uint32                descLoop;
   wpt_uint32                channelLoop;

   if(NULL == msgContent)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXReSyncDesc Invalid Control Block");
      return;
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "dxeTXReSyncDesc Try to re-sync TX channel if any problem");
   pDxeCtrlBlk = (WLANDXE_CtrlBlkType *)(msgContent->pContext);

   /* Pass 1: for each TX channel compare the HW next-descriptor register with
    * the SW view of the ring and re-program the register on a mismatch */
   for(channelLoop = WDTS_CHANNEL_TX_LOW_PRI;
       channelLoop < WDTS_CHANNEL_RX_LOW_PRI;
       channelLoop++)
   {
      channelEntry = &pDxeCtrlBlk->dxeChannel[channelLoop];
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
               "%11s : Try to detect TX descriptor async",
               channelType[channelEntry->channelType]);
      wpalReadRegister(channelEntry->channelRegister.chDXEDesclRegAddr,
                       &nextDescReg);

      /* Async detect without TX pending frame */
      if(channelEntry->tailCtrlBlk == channelEntry->headCtrlBlk)
      {
         if(nextDescReg != channelEntry->tailCtrlBlk->linkedDescPhyAddr)
         {
            HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                     "TX Async no Pending frame");
            dxeChannelMonitor("!!! TX Async no Pending frame !!!",
                              channelEntry, NULL);
            dxeChannelRegisterDump(channelEntry,
                                   "!!! TX Async no Pending frame !!!", NULL);
            wpalWriteRegister(channelEntry->channelRegister.chDXEDesclRegAddr,
                              channelEntry->tailCtrlBlk->linkedDescPhyAddr);
         }
      }
      /* Async detect with some TX pending frames
       * next descriptor register should sync with first valid descriptor */
      else
      {
         validCtrlBlk = channelEntry->tailCtrlBlk;
         for(descLoop = 0; descLoop < channelEntry->numDesc; descLoop++)
         {
            if(validCtrlBlk->linkedDesc->descCtrl.ctrl & WLANDXE_DESC_CTRL_VALID)
            {
               if(nextDescReg != validCtrlBlk->linkedDescPhyAddr)
               {
                  HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                           "TX Async");
                  dxeChannelMonitor("!!! TX Async !!!", channelEntry, NULL);
                  dxeChannelRegisterDump(channelEntry, "!!! TX Async !!!", NULL);
                  wpalWriteRegister(channelEntry->channelRegister.chDXEDesclRegAddr,
                                    validCtrlBlk->linkedDescPhyAddr);
               }
               break;
            }
            validCtrlBlk = (WLANDXE_DescCtrlBlkType *)validCtrlBlk->nextCtrlBlk;
            if(validCtrlBlk == channelEntry->headCtrlBlk->nextCtrlBlk)
            {
               /* Finished to test till head control blcok, but could not find valid descriptor
                * from head to tail all descriptors are invalidated
                * host point of view head descriptor is next TX candidate
                * So, next descriptor control have to be programmed with head descriptor
                * check
                * NOTE(review): the comparison below uses headCtrlBlk but the
                * write uses validCtrlBlk (== headCtrlBlk->nextCtrlBlk) -
                * confirm this asymmetry is intentional. */
               if(nextDescReg != channelEntry->headCtrlBlk->linkedDescPhyAddr)
               {
                  HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                           "TX Async with not completed transferred frames, next descriptor must be head");
                  dxeChannelMonitor("!!! TX Async !!!", channelEntry, NULL);
                  dxeChannelRegisterDump(channelEntry, "!!! TX Async !!!", NULL);
                  wpalWriteRegister(channelEntry->channelRegister.chDXEDesclRegAddr,
                                    validCtrlBlk->linkedDescPhyAddr);
               }
               break;
            }
         }
      }
   }

   /* HW/SW descriptor resync is done.
    * Next if there are any valid descriptor in chain, Push to HW again */
   for(channelLoop = WDTS_CHANNEL_TX_LOW_PRI;
       channelLoop < WDTS_CHANNEL_RX_LOW_PRI;
       channelLoop++)
   {
      channelEntry = &pDxeCtrlBlk->dxeChannel[channelLoop];
      if(channelEntry->tailCtrlBlk == channelEntry->headCtrlBlk)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
                  "%11s : No TX Pending frame",
                  channelType[channelEntry->channelType]);
         /* No Pending frame, Do nothing */
      }
      else
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                  "%11s : TX Pending frame, process it",
                  channelType[channelEntry->channelType]);
         validCtrlBlk = channelEntry->tailCtrlBlk;
         for(descLoop = 0; descLoop < channelEntry->numDesc; descLoop++)
         {
            if(validCtrlBlk->linkedDesc->descCtrl.ctrl & WLANDXE_DESC_CTRL_VALID)
            {
               HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
                        "%11s : when exit IMPS found valid descriptor",
                        channelType[channelEntry->channelType]);

               /* Found valid descriptor, kick DXE */
               wpalWriteRegister(channelEntry->channelRegister.chDXECtrlRegAddr,
                                 channelEntry->extraConfig.chan_mask);
               break;
            }
            validCtrlBlk = (WLANDXE_DescCtrlBlkType *)validCtrlBlk->nextCtrlBlk;
            if(validCtrlBlk == channelEntry->headCtrlBlk->nextCtrlBlk)
            {
               /* Finished to test till head control blcok, but could not find valid descriptor
                * from head to tail all descriptors are invalidated */
               break;
            }
         }
      }
   }

   wpalMemoryFree(msgPtr);
   return;
}

/*==========================================================================
 @  Function Name
      dxeDebugTxDescReSync

 @  Description
      Check DXE Tx channel state and correct it in case Tx Data stall is
      detected by calling %dxeTXReSyncDesc.
      Also ensure that WCN SS is not power collapsed before calling
      %dxeTXReSyncDesc

 @  Parameters
      void    *msgPtr
               Message pointer to sync with TX thread

 @  Return
      NONE
===========================================================================*/
void dxeDebugTxDescReSync
(
   wpt_msg   *msgPtr
)
{
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
            "%s: Check for DXE TX Async",__func__);

   /* Make wake up HW */
   /* Toggle SMSM before the register accesses in dxeTXReSyncDesc; the 10 ms
    * sleep gives the wakeup time to take effect.  NOTE(review): exact SMSM
    * handshake semantics live in dxeNotifySmsm - confirm the toggle order. */
   dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);
   dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE);

   wpalSleep(10);

   dxeTXReSyncDesc(msgPtr);
}

/*==========================================================================
 @  Function Name
      dxeTXISR

 @  Description
      TX interrupt ISR.
      Platform will call this function if INT is happen.
      This function must be registered into platform interrupt module.

 @  Parameters
      void    *hostCtxt
               DXE host driver control context,
               pre registerd during interrupt registration

 @  Return
      PAL_STATUS_T
===========================================================================*/
static void dxeTXISR
(
   void    *hostCtxt
)
{
   WLANDXE_CtrlBlkType      *dxeCtxt = (WLANDXE_CtrlBlkType *)hostCtxt;
   wpt_status                status  = eWLAN_PAL_STATUS_SUCCESS;
#ifdef FEATURE_R33D
   wpt_uint32                regValue;
#endif /* FEATURE_R33D */

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Return from here if the RIVA is in IMPS, to avoid register access */
   if(WLANDXE_POWER_STATE_DOWN == dxeCtxt->hostPowerState)
   {
      dxeCtxt->txIntEnable = eWLAN_PAL_FALSE;
      /* Disable interrupt at here,
         IMPS or IMPS Pending state should not access RIVA register */
      status = wpalDisableInterrupt(DXE_INTERRUPT_TX_COMPLE);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "dxeRXFrameReadyISR Disable RX ready interrupt fail");
         return;
      }
      dxeCtxt->txIntDisabledByIMPS = eWLAN_PAL_TRUE;
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
               "%s Riva is in %d, return from here ", __func__, dxeCtxt->hostPowerState);
      return;
   }

#ifdef FEATURE_R33D
   status = wpalReadRegister(WLANDXE_INT_SRC_RAW_ADDRESS,
                             &regValue);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXCompISR Read INT_SRC_RAW fail");
      return;
   }
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "INT_SRC_RAW 0x%x", regValue);
   if(0 == regValue)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "This is not DXE Interrupt, Reject it");
      return;
   }
#endif /* FEATURE_R33D */

   /* Disable TX Complete Interrupt at here */
   status = wpalDisableInterrupt(DXE_INTERRUPT_TX_COMPLE);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXCompISR Disable TX complete interrupt fail");
      return;
   }
   dxeCtxt->txIntEnable = eWLAN_PAL_FALSE;

   /* ucTxMsgCnt acts as a "TX event already queued" flag so at most one
    * TX complete message is outstanding on the TX thread at a time */
   if( dxeCtxt->ucTxMsgCnt )
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO,
               "Avoiding serializing TX Complete event");
      return;
   }
   dxeCtxt->ucTxMsgCnt = 1;

   /* Serialize TX complete interrupt upon TX thread */
   if(NULL == dxeCtxt->txIsrMsg)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
               "Invalid message");
      HDXE_ASSERT(0);
      return;
   }
   status = wpalPostTxMsg(WDI_GET_PAL_CTX(),
                          dxeCtxt->txIsrMsg);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
               "dxeTXCompISR interrupt serialize fail status=%d", status);
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return;
}

/*-------------------------------------------------------------------------
 * Global Function
 *-------------------------------------------------------------------------*/
/*==========================================================================
 @  Function Name
      WLANDXE_Open

 @  Description
      Open host DXE driver, allocate DXE resources.
      Allocate, DXE local control block, DXE descriptor pool,
      DXE descriptor control block pool.

 @  Parameters
      pVoid      pAdapter : Driver global control block pointer

 @  Return
      pVoid      DXE local module control
      block pointer
===========================================================================*/
void *WLANDXE_Open
(
   void
)
{
   wpt_status               status  = eWLAN_PAL_STATUS_SUCCESS;
   unsigned int             idx;
   WLANDXE_ChannelCBType   *currentChannel = NULL;
   int                      smsmInitState;
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
   wpt_uint32               sIdx;
   WLANDXE_ChannelCBType   *channel    = NULL;
   WLANDXE_DescCtrlBlkType *crntDescCB = NULL;
   WLANDXE_DescCtrlBlkType *nextDescCB = NULL;
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* This is temporary allocation */
   /* tempDxeCtrlBlk is the module-scope DXE control block; callers receive it
    * back as the opaque pDXEContext handle used by the other WLANDXE_* APIs. */
   tempDxeCtrlBlk = (WLANDXE_CtrlBlkType *)wpalMemoryAllocate(sizeof(WLANDXE_CtrlBlkType));
   if(NULL == tempDxeCtrlBlk)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Open Control Block Alloc Fail");
      return NULL;
   }
   wpalMemoryZero(tempDxeCtrlBlk, sizeof(WLANDXE_CtrlBlkType));

   status = dxeCommonDefaultConfig(tempDxeCtrlBlk);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Open Common Configuration Fail");
      WLANDXE_Close(tempDxeCtrlBlk);
      return NULL;
   }

   /* Per-channel setup: default config, control-block pool, channel lock,
    * health-monitor timer and its serialized message */
   for(idx = 0; idx < WDTS_CHANNEL_MAX; idx++)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "WLANDXE_Open Channel %s Open Start", channelType[idx]);
      currentChannel = &tempDxeCtrlBlk->dxeChannel[idx];
      /* idx enumerates WDTS_ChannelType; record the channel's own type */
      if(idx == WDTS_CHANNEL_TX_LOW_PRI)
      {
         currentChannel->channelType = WDTS_CHANNEL_TX_LOW_PRI;
      }
      else if(idx == WDTS_CHANNEL_TX_HIGH_PRI)
      {
         currentChannel->channelType = WDTS_CHANNEL_TX_HIGH_PRI;
      }
      else if(idx == WDTS_CHANNEL_RX_LOW_PRI)
      {
         currentChannel->channelType = WDTS_CHANNEL_RX_LOW_PRI;
      }
      else if(idx == WDTS_CHANNEL_RX_HIGH_PRI)
      {
         currentChannel->channelType = WDTS_CHANNEL_RX_HIGH_PRI;
      }
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
      else if(idx == WDTS_CHANNEL_H2H_TEST_TX)
      {
         currentChannel->channelType = WDTS_CHANNEL_H2H_TEST_TX;
      }
      else if(idx == WDTS_CHANNEL_H2H_TEST_RX)
      {
         currentChannel->channelType = WDTS_CHANNEL_H2H_TEST_RX;
      }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */

      /* Config individual channels from channel default setup table */
      status = dxeChannelDefaultConfig(tempDxeCtrlBlk, currentChannel);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "WLANDXE_Open Channel Basic Configuration Fail for channel %d", idx);
         WLANDXE_Close(tempDxeCtrlBlk);
         return NULL;
      }

      /* Allocate DXE Control Block will be used by host DXE driver */
      status = dxeCtrlBlkAlloc(tempDxeCtrlBlk, currentChannel);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "WLANDXE_Open Alloc DXE Control Block Fail for channel %d", idx);
         WLANDXE_Close(tempDxeCtrlBlk);
         return NULL;
      }

      status = wpalMutexInit(&currentChannel->dxeChannelLock);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "WLANDXE_Open Lock Init Fail for channel %d", idx);
         WLANDXE_Close(tempDxeCtrlBlk);
         return NULL;
      }

      status = wpalTimerInit(&currentChannel->healthMonitorTimer,
                             dxeHealthMonitorTimeout,
                             (void *)currentChannel);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "WLANDXE_Open Health Monitor timer init fail %d", idx);
         WLANDXE_Close(tempDxeCtrlBlk);
         return NULL;
      }

      currentChannel->healthMonitorMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg));
      if(NULL == currentChannel->healthMonitorMsg)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
                  "WLANDXE_Open Health Monitor MSG Alloc fail %d", idx);
         WLANDXE_Close(tempDxeCtrlBlk);
         return NULL;
      }
      wpalMemoryZero(currentChannel->healthMonitorMsg, sizeof(wpt_msg));
      currentChannel->healthMonitorMsg->callback = dxeTXHealthMonitor;
      currentChannel->healthMonitorMsg->pContext = (void *)currentChannel;

      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "WLANDXE_Open Channel %s Open Success", channelType[idx]);
   }

   /* Allocate and Init RX READY ISR Serialize Buffer */
   tempDxeCtrlBlk->rxIsrMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg));
   if(NULL == tempDxeCtrlBlk->rxIsrMsg)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Open Alloc RX ISR Fail");
      WLANDXE_Close(tempDxeCtrlBlk);
      return NULL;
   }
   wpalMemoryZero(tempDxeCtrlBlk->rxIsrMsg, sizeof(wpt_msg));
   tempDxeCtrlBlk->rxIsrMsg->callback = dxeRXEventHandler;
   tempDxeCtrlBlk->rxIsrMsg->pContext = (void *)tempDxeCtrlBlk;

   /* Allocate and Init TX COMP ISR Serialize Buffer */
   tempDxeCtrlBlk->txIsrMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg));
   if(NULL == tempDxeCtrlBlk->txIsrMsg)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Open Alloc TX ISR Fail");
      WLANDXE_Close(tempDxeCtrlBlk);
      return NULL;
   }
   wpalMemoryZero(tempDxeCtrlBlk->txIsrMsg, sizeof(wpt_msg));
   tempDxeCtrlBlk->txIsrMsg->callback = dxeTXEventHandler;
   tempDxeCtrlBlk->txIsrMsg->pContext = (void *)tempDxeCtrlBlk;

   /* Allocate and Init RX Packet Available Serialize Message Buffer */
   tempDxeCtrlBlk->rxPktAvailMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg));
   if(NULL == tempDxeCtrlBlk->rxPktAvailMsg)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Open Alloc RX Packet Available Message Fail");
      WLANDXE_Close(tempDxeCtrlBlk);
      return NULL;
   }
   wpalMemoryZero(tempDxeCtrlBlk->rxPktAvailMsg, sizeof(wpt_msg));
   tempDxeCtrlBlk->rxPktAvailMsg->callback = dxeRXPacketAvailableEventHandler;
   tempDxeCtrlBlk->rxPktAvailMsg->pContext = (void *)tempDxeCtrlBlk;

   tempDxeCtrlBlk->freeRXPacket = NULL;
   tempDxeCtrlBlk->dxeCookie    = WLANDXE_CTXT_COOKIE;
   tempDxeCtrlBlk->rxIntDisabledByIMPS = eWLAN_PAL_FALSE;
   tempDxeCtrlBlk->txIntDisabledByIMPS = eWLAN_PAL_FALSE;
   tempDxeCtrlBlk->driverReloadInProcessing = eWLAN_PAL_FALSE;
   tempDxeCtrlBlk->smsmToggled = eWLAN_PAL_FALSE;

   /* Initialize SMSM state
    * Init State is
    *    Clear TX Enable
    *    RING EMPTY STATE */
   smsmInitState = wpalNotifySmsm(WPAL_SMSM_WLAN_TX_ENABLE,
                                  WPAL_SMSM_WLAN_TX_RINGS_EMPTY);
   if(0 != smsmInitState)
   {
      /* SMSM init failed: unwind per-channel resources and the serialized
       * messages allocated above.  NOTE(review): rxPktAvailMsg is not freed
       * on this path - confirm whether that is an intentional omission. */
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "SMSM Channel init fail %d", smsmInitState);
      for(idx = 0; idx < WDTS_CHANNEL_MAX; idx++)
      {
         dxeChannelClose(tempDxeCtrlBlk, &tempDxeCtrlBlk->dxeChannel[idx]);
      }
      wpalMemoryFree(tempDxeCtrlBlk->rxIsrMsg);
      wpalMemoryFree(tempDxeCtrlBlk->txIsrMsg);
      wpalMemoryFree(tempDxeCtrlBlk);
      return NULL;
   }

#ifdef WLAN_DXE_LOW_RESOURCE_TIMER
   wpalTimerInit(&tempDxeCtrlBlk->rxResourceAvailableTimer,
                 dxeRXResourceAvailableTimerExpHandler,
                 tempDxeCtrlBlk);
#endif

   wpalTimerInit(&tempDxeCtrlBlk->dxeSSRTimer,
                 dxeSSRTimerExpHandler,
                 tempDxeCtrlBlk);

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
            "WLANDXE_Open Success");
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return (void *)tempDxeCtrlBlk;
}

/*==========================================================================
 @  Function Name
      WLANDXE_ClientRegistration

 @  Description
      Make callback functions registration into DXE driver from DXE driver
      client.

 @  Parameters
      pVoid                       pDXEContext : DXE module control block
      WDTS_RxFrameReadyCbType     rxFrameReadyCB : RX Frame ready CB function pointer
      WDTS_TxCompleteCbType       txCompleteCB : TX complete CB function pointer
      WDTS_LowResourceCbType      lowResourceCB : Low DXE resource notification CB function pointer
      void                       *userContext : DXE Cliennt control block

 @  Return
      wpt_status
===========================================================================*/
wpt_status WLANDXE_ClientRegistration
(
   void                       *pDXEContext,
   WLANDXE_RxFrameReadyCbType  rxFrameReadyCB,
   WLANDXE_TxCompleteCbType    txCompleteCB,
   WLANDXE_LowResourceCbType   lowResourceCB,
   void                       *userContext
)
{
   wpt_status              status = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_CtrlBlkType    *dxeCtxt;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Sanity */
   if(NULL == pDXEContext)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_ClientRegistration Invalid DXE CB");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if(NULL == rxFrameReadyCB)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_ClientRegistration Invalid RX READY CB");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if(NULL == txCompleteCB)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_ClientRegistration Invalid txCompleteCB");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if(NULL == lowResourceCB)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_ClientRegistration Invalid lowResourceCB");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if(NULL == userContext)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_ClientRegistration Invalid userContext");
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   dxeCtxt = (WLANDXE_CtrlBlkType *)pDXEContext;

   /* Assign */
   dxeCtxt->rxReadyCB     = rxFrameReadyCB;
   dxeCtxt->txCompCB      = txCompleteCB;
   dxeCtxt->lowResourceCB = lowResourceCB;
   dxeCtxt->clientCtxt    = userContext;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
 @  Function Name
      WLANDXE_Start

 @  Description
      Start Host DXE driver.
      Initialize DXE channels and start channel.

 @  Parameters
      pVoid      pDXEContext : DXE module control block

 @  Return
      wpt_status
===========================================================================*/
wpt_status WLANDXE_Start
(
   void  *pDXEContext
)
{
   wpt_status               status  = eWLAN_PAL_STATUS_SUCCESS;
   wpt_uint32               idx;
   WLANDXE_CtrlBlkType     *dxeCtxt = NULL;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Sanity */
   if(NULL == pDXEContext)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Start Invalid DXE CB");
      return eWLAN_PAL_STATUS_E_INVAL;
   }
   dxeCtxt = (WLANDXE_CtrlBlkType *)pDXEContext;

   /* WLANDXE_Start called means DXE engine already initiates
    * And DXE HW is reset and init finished
    * But here to make sure HW is initialized, reset again */
   status = dxeEngineCoreStart(dxeCtxt);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Start DXE HW init Fail");
      return status;
   }

   /* Individual Channel Start */
   for(idx = 0; idx < WDTS_CHANNEL_MAX; idx++)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "WLANDXE_Start Channel %s Start", channelType[idx]);

      /* Allocate DXE descriptor will be shared by Host driver and DXE engine */
      /* Make connection between DXE descriptor and DXE control block */
      status = dxeDescAllocAndLink(tempDxeCtrlBlk, &dxeCtxt->dxeChannel[idx]);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "WLANDXE_Start Alloc DXE Descriptor Fail for channel %d", idx);
         return status;
      }

      /* Program each channel register with configuration arguments */
      status = dxeChannelInitProgram(dxeCtxt,
                                     &dxeCtxt->dxeChannel[idx]);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "WLANDXE_Start %d Program DMA channel Fail", idx);
         return status;
      }

      /* ??? Trigger to start DMA channel
       * This must be seperated from ???
       */
      status = dxeChannelStart(dxeCtxt,
                               &dxeCtxt->dxeChannel[idx]);
      if(eWLAN_PAL_STATUS_SUCCESS != status)
      {
         HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                  "WLANDXE_Start %d Channel Start Fail", idx);
         return status;
      }
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN,
               "WLANDXE_Start Channel %s Start Success", channelType[idx]);
   }

   /* Register ISR to OS */
   /* Register TX complete interrupt into platform */
   status = wpalRegisterInterrupt(DXE_INTERRUPT_TX_COMPLE,
                                  dxeTXISR,
                                  dxeCtxt);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Start TX comp interrupt registration Fail");
      return status;
   }

   /* Register RX ready interrupt into platform */
   status = wpalRegisterInterrupt(DXE_INTERRUPT_RX_READY,
                                  dxeRXISR,
                                  dxeCtxt);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "WLANDXE_Start RX Ready interrupt registration Fail");
      return status;
   }

   /* Enable system level ISR */
   /* Enable RX ready Interrupt at here */
   status = wpalEnableInterrupt(DXE_INTERRUPT_RX_READY);
   if(eWLAN_PAL_STATUS_SUCCESS != status)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "dxeTXCompleteEventHandler Enable TX complete interrupt fail");
      return status;
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
 @  Function Name
      WLANDXE_TXFrame

 @  Description
      Trigger frame transmit from host to RIVA.

 @  Parameters
      pVoid            pDXEContext : DXE Control Block
      wpt_packet       pPacket : transmit packet structure
      WDTS_ChannelType channel : TX channel

 @  Return
      wpt_status
===========================================================================*/
wpt_status WLANDXE_TxFrame
(
   void                 *pDXEContext,
   wpt_packet           *pPacket,
   WDTS_ChannelType      channel
)
{
   wpt_status                  status         = eWLAN_PAL_STATUS_SUCCESS;
   WLANDXE_ChannelCBType      *currentChannel = NULL;
   WLANDXE_CtrlBlkType
*dxeCtxt = NULL; unsigned int *lowThreshold = NULL; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity */ if(NULL == pDXEContext) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_Start Invalid DXE CB"); return eWLAN_PAL_STATUS_E_INVAL; } if(NULL == pPacket) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_Start Invalid pPacket"); return eWLAN_PAL_STATUS_E_INVAL; } if((WDTS_CHANNEL_MAX < channel) || (WDTS_CHANNEL_MAX == channel)) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_Start Invalid channel"); return eWLAN_PAL_STATUS_E_INVAL; } dxeCtxt = (WLANDXE_CtrlBlkType *)pDXEContext; currentChannel = &dxeCtxt->dxeChannel[channel]; status = wpalMutexAcquire(&currentChannel->dxeChannelLock); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_TxFrame Mutex Acquire fail"); return status; } lowThreshold = currentChannel->channelType == WDTS_CHANNEL_TX_LOW_PRI? 
&(dxeCtxt->txCompInt.txLowResourceThreshold_LoPriCh): &(dxeCtxt->txCompInt.txLowResourceThreshold_HiPriCh); /* Decide have to activate TX complete event or not */ switch(dxeCtxt->txCompInt.txIntEnable) { /* TX complete interrupt will be activated when low DXE resource */ case WLANDXE_TX_COMP_INT_LR_THRESHOLD: if((currentChannel->numFreeDesc <= *lowThreshold) && (eWLAN_PAL_FALSE == dxeCtxt->txIntEnable)) { dxeCtxt->txIntEnable = eWLAN_PAL_TRUE; dxeCtxt->lowResourceCB(dxeCtxt->clientCtxt, channel, eWLAN_PAL_FALSE); } break; /* TX complete interrupt will be activated n number of frames transferred */ case WLANDXE_TX_COMP_INT_PER_K_FRAMES: if(channel == WDTS_CHANNEL_TX_LOW_PRI) { currentChannel->numFrameBeforeInt++; } break; /* TX complete interrupt will be activated periodically */ case WLANDXE_TX_COMP_INT_TIMER: break; } dxeCtxt->txCompletedFrames++; /* Update DXE descriptor, this is frame based * if a frame consist of N fragments, N Descriptor will be programed */ status = dxeTXPushFrame(currentChannel, pPacket); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_TxFrame TX Push Frame fail"); status = wpalMutexRelease(&currentChannel->dxeChannelLock); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_TxFrame Mutex Release fail"); } return status; } /* If specific channel hit low resource condition, send notification to upper layer */ if(currentChannel->numFreeDesc <= *lowThreshold) { dxeCtxt->lowResourceCB(dxeCtxt->clientCtxt, channel, eWLAN_PAL_FALSE); currentChannel->hitLowResource = eWLAN_PAL_TRUE; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_WARN, "%11s : Low Resource currentChannel->numRsvdDesc %d", channelType[currentChannel->channelType], currentChannel->numRsvdDesc); if (WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN == dxeCtxt->rivaPowerState) { dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE); dxeNotifySmsm(eWLAN_PAL_TRUE, 
eWLAN_PAL_FALSE); } wpalTimerStart(&currentChannel->healthMonitorTimer, T_WLANDXE_PERIODIC_HEALTH_M_TIME); } status = wpalMutexRelease(&currentChannel->dxeChannelLock); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_TxFrame Mutex Release fail"); } HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name WLANDXE_CompleteTX @ Description Informs DXE that the current series of Tx packets is complete @ Parameters pContext pDXEContext : DXE Control Block ucTxResReq TX resource number required by TL/WDI @ Return wpt_status ===========================================================================*/ wpt_status WLANDXE_CompleteTX ( void* pContext, wpt_uint32 ucTxResReq ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; WLANDXE_CtrlBlkType *dxeCtxt = (WLANDXE_CtrlBlkType *)(pContext); WLANDXE_ChannelCBType *channelCb = NULL; wpt_boolean inLowRes; /* Sanity Check */ if( NULL == pContext ) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_CompleteTX invalid param"); return eWLAN_PAL_STATUS_E_INVAL; } channelCb = &dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI]; inLowRes = channelCb->hitLowResource; if(WLANDXE_TX_LOW_RES_THRESHOLD < ucTxResReq) { /* Raise threshold temporarily if necessary */ dxeCtxt->txCompInt.txLowResourceThreshold_LoPriCh = ucTxResReq; if(eWLAN_PAL_FALSE == inLowRes) { /* Put the channel to low resource condition */ dxeCtxt->lowResourceCB(dxeCtxt->clientCtxt, WDTS_CHANNEL_TX_LOW_PRI, eWLAN_PAL_FALSE); inLowRes = channelCb->hitLowResource = eWLAN_PAL_TRUE; wpalTimerStart(&channelCb->healthMonitorTimer, T_WLANDXE_PERIODIC_HEALTH_M_TIME); } } /*Try to reclaim resources*/ dxeTXCompleteProcessing(dxeCtxt); /* In previous WLANTL_GetFrames call, TL didn't fetch a packet because its fragment size is larger than DXE free resource. 
*/ if(0 < ucTxResReq) { /* DXE successfully claimed enough free DXE resouces for next fetch. */ if(WLANDXE_GetFreeTxDataResNumber(dxeCtxt) >= ucTxResReq) { /* DXE has not been in low resource condition. DXE forces to kick off TX tranmit */ if((eWLAN_PAL_FALSE == inLowRes) && (eWLAN_PAL_FALSE == channelCb->hitLowResource)) { dxeCtxt->lowResourceCB(dxeCtxt->clientCtxt, WDTS_CHANNEL_TX_LOW_PRI, eWLAN_PAL_FALSE); dxeCtxt->lowResourceCB(dxeCtxt->clientCtxt, WDTS_CHANNEL_TX_LOW_PRI, eWLAN_PAL_TRUE); channelCb->hitLowResource = eWLAN_PAL_FALSE; } } else { /* DXE doesn't have enough free DXE resources. Put the channel to low resource condition. */ if(eWLAN_PAL_FALSE == channelCb->hitLowResource) { /* Put the channel to low resource condition */ dxeCtxt->lowResourceCB(dxeCtxt->clientCtxt, WDTS_CHANNEL_TX_LOW_PRI, eWLAN_PAL_FALSE); channelCb->hitLowResource = eWLAN_PAL_TRUE; wpalTimerStart(&channelCb->healthMonitorTimer, T_WLANDXE_PERIODIC_HEALTH_M_TIME); } } } return status; } /*========================================================================== @ Function Name WLANDXE_Stop @ Description Stop DXE channels and DXE engine operations Disable all channel interrupt Stop all channel operation @ Parameters pVoid pDXEContext : DXE Control Block @ Return wpt_status ===========================================================================*/ wpt_status WLANDXE_Stop ( void *pDXEContext ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 idx; WLANDXE_CtrlBlkType *dxeCtxt = NULL; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity */ if(NULL == pDXEContext) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_Stop Invalid DXE CB"); return eWLAN_PAL_STATUS_E_INVAL; } dxeCtxt = (WLANDXE_CtrlBlkType *)pDXEContext; for(idx = 0; idx < WDTS_CHANNEL_MAX; idx++) { if(VOS_TIMER_STATE_RUNNING == wpalTimerGetCurStatus(&dxeCtxt->dxeChannel[idx].healthMonitorTimer)) { 
wpalTimerStop(&dxeCtxt->dxeChannel[idx].healthMonitorTimer); } status = dxeChannelStop(dxeCtxt, &dxeCtxt->dxeChannel[idx]); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_Stop Channel %d Stop Fail", idx); } } /* During Stop unregister interrupt */ wpalUnRegisterInterrupt(DXE_INTERRUPT_TX_COMPLE); wpalUnRegisterInterrupt(DXE_INTERRUPT_RX_READY); #ifdef WLAN_DXE_LOW_RESOURCE_TIMER if(VOS_TIMER_STATE_STOPPED != wpalTimerGetCurStatus(&dxeCtxt->rxResourceAvailableTimer)) { wpalTimerStop(&dxeCtxt->rxResourceAvailableTimer); } #endif HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Exit", __func__); return status; } /*========================================================================== @ Function Name WLANDXE_Close @ Description Close DXE channels Free DXE related resources DXE descriptor free Descriptor control block free Pre allocated RX buffer free @ Parameters pVoid pDXEContext : DXE Control Block @ Return wpt_status ===========================================================================*/ wpt_status WLANDXE_Close ( void *pDXEContext ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; wpt_uint32 idx; WLANDXE_CtrlBlkType *dxeCtxt = NULL; #ifdef WLANDXE_TEST_CHANNEL_ENABLE wpt_uint32 sIdx; WLANDXE_ChannelCBType *channel = NULL; WLANDXE_DescCtrlBlkType *crntDescCB = NULL; WLANDXE_DescCtrlBlkType *nextDescCB = NULL; #endif /* WLANDXE_TEST_CHANNEL_ENABLE */ HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); /* Sanity */ if(NULL == pDXEContext) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_Stop Invalid DXE CB"); return eWLAN_PAL_STATUS_E_INVAL; } dxeCtxt = (WLANDXE_CtrlBlkType *)pDXEContext; #ifdef WLAN_DXE_LOW_RESOURCE_TIMER wpalTimerDelete(&dxeCtxt->rxResourceAvailableTimer); #endif wpalTimerDelete(&dxeCtxt->dxeSSRTimer); for(idx = 0; idx < WDTS_CHANNEL_MAX; idx++) { 
      /* Per-channel teardown: lock, health-monitor timer, pending health
       * message (if any), then the channel itself. */
      wpalMutexDelete(&dxeCtxt->dxeChannel[idx].dxeChannelLock);
      wpalTimerDelete(&dxeCtxt->dxeChannel[idx].healthMonitorTimer);
      if(NULL != dxeCtxt->dxeChannel[idx].healthMonitorMsg)
      {
         wpalMemoryFree(dxeCtxt->dxeChannel[idx].healthMonitorMsg);
      }
      dxeChannelClose(dxeCtxt, &dxeCtxt->dxeChannel[idx]);
#ifdef WLANDXE_TEST_CHANNEL_ENABLE
      /* Test builds also walk and free the descriptor control-block list;
       * the walk stops early if the list is NULL-terminated before numDesc. */
      channel    = &dxeCtxt->dxeChannel[idx];
      crntDescCB = channel->headCtrlBlk;
      for(sIdx = 0; sIdx < channel->numDesc; sIdx++)
      {
         nextDescCB = (WLANDXE_DescCtrlBlkType *)crntDescCB->nextCtrlBlk;
         wpalMemoryFree((void *)crntDescCB);
         crntDescCB = nextDescCB;
         if(NULL == crntDescCB)
         {
            break;
         }
      }
#endif /* WLANDXE_TEST_CHANNEL_ENABLE */
   }

   /* Free the serialized ISR/packet-available messages allocated at open */
   if(NULL != dxeCtxt->rxIsrMsg)
   {
      wpalMemoryFree(dxeCtxt->rxIsrMsg);
   }
   if(NULL != dxeCtxt->txIsrMsg)
   {
      wpalMemoryFree(dxeCtxt->txIsrMsg);
   }
   if(NULL != dxeCtxt->rxPktAvailMsg)
   {
      wpalMemoryFree(dxeCtxt->rxPktAvailMsg);
   }

   /* Finally free the control block itself; pDXEContext is dangling for the
    * caller after this point. */
   wpalMemoryFree(pDXEContext);

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
  @  Function Name
      WLANDXE_TriggerTX

  @  Description
      TBD -- currently a stub: logs entry/exit and always returns SUCCESS.

  @  Parameters
      pVoid  pDXEContext : DXE Control Block (unused by the stub)

  @  Return
      wpt_status
===========================================================================*/
wpt_status WLANDXE_TriggerTX
(
   void *pDXEContext
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* TBD */

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
  @  Function Name
      dxeTxThreadSetPowerStateEventHandler

  @  Description
      If WDI sends set power state req, this event handler will be called
      in Tx thread context

  @  Parameters
      void  *msgPtr : Event MSG

  @  Return
      None
===========================================================================*/
void dxeTxThreadSetPowerStateEventHandler
(
   wpt_msg *msgPtr
) { wpt_msg *msgContent = (wpt_msg *)msgPtr; WLANDXE_CtrlBlkType *dxeCtxt; wpt_status status = eWLAN_PAL_STATUS_SUCCESS; WLANDXE_PowerStateType reqPowerState; wpt_int8 i; WLANDXE_ChannelCBType *channelEntry; wpt_log_data_stall_channel_type channelLog; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); dxeCtxt = (WLANDXE_CtrlBlkType *)(msgContent->pContext); reqPowerState = (WLANDXE_PowerStateType)msgContent->val; dxeCtxt->setPowerStateCb = (WLANDXE_SetPowerStateCbType)msgContent->ptr; switch(reqPowerState) { case WLANDXE_POWER_STATE_BMPS: if(WLANDXE_RIVA_POWER_STATE_ACTIVE == dxeCtxt->rivaPowerState) { //don't block MC waiting for num_rsvd to become 0 since it may take a while //based on amount of TX and RX activity - during this time any received // management frames will remain un-processed consuming RX buffers dxeCtxt->rivaPowerState = WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN; dxeCtxt->hostPowerState = reqPowerState; } else { status = eWLAN_PAL_STATUS_E_INVAL; } break; case WLANDXE_POWER_STATE_IMPS: if(WLANDXE_RIVA_POWER_STATE_ACTIVE == dxeCtxt->rivaPowerState) { for(i = WDTS_CHANNEL_TX_LOW_PRI; i < WDTS_CHANNEL_RX_LOW_PRI; i++) { channelEntry = &dxeCtxt->dxeChannel[i]; if(channelEntry->tailCtrlBlk != channelEntry->headCtrlBlk) { status = eWLAN_PAL_STATUS_E_FAILURE; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%11s : %s :TX Pending frame", channelType[channelEntry->channelType], __func__); dxeChannelMonitor("DXE_IMP_ERR", channelEntry, &channelLog); dxeDescriptorDump(channelEntry, channelEntry->headCtrlBlk->linkedDesc, 0); dxeChannelRegisterDump(channelEntry, "DXE_IMPS_ERR", &channelLog); dxeChannelAllDescDump(channelEntry, channelEntry->channelType, &channelLog); } } if (eWLAN_PAL_STATUS_SUCCESS == status) { dxeCtxt->rivaPowerState = WLANDXE_RIVA_POWER_STATE_IMPS_UNKNOWN; dxeCtxt->hostPowerState = WLANDXE_POWER_STATE_IMPS; } } else { status = eWLAN_PAL_STATUS_E_INVAL; } break; case WLANDXE_POWER_STATE_FULL: 
         /* Riva was in an unknown BMPS state; going FULL marks it active */
         if(WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN == dxeCtxt->rivaPowerState)
         {
            dxeCtxt->rivaPowerState = WLANDXE_RIVA_POWER_STATE_ACTIVE;
         }
         dxeCtxt->hostPowerState = reqPowerState;
         dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE);
         break;

      case WLANDXE_POWER_STATE_DOWN:
         WLANDXE_Stop((void *)dxeCtxt);
         break;

      default:
         //assert
         break;
   }

   /* Invoke the client's set-power-state callback unless the transition is
    * still pending (BMPS_PENDING), in which case completing now would be
    * premature. */
   if(WLANDXE_POWER_STATE_BMPS_PENDING != dxeCtxt->hostPowerState)
   {
      dxeCtxt->setPowerStateCb(status,
                               dxeCtxt->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].descBottomLocPhyAddr);
   }
   else
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL,
               "%s State of DXE is WLANDXE_POWER_STATE_BMPS_PENDING, so cannot proceed",
               __func__);
   }

   /* Free MSG buffer */
   wpalMemoryFree(msgPtr);

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return;
}

/*==========================================================================
  @  Function Name
      dxeRxThreadSetPowerStateEventHandler

  @  Description
      If WDI sends set power state req, this event handler will be called
      in Rx thread context.  It does no work itself; it re-posts the same
      message to the Tx thread so no DXE register is touched while RIVA may
      be in powersave.

  @  Parameters
      void  *msgPtr : Event MSG

  @  Return
      None
===========================================================================*/
void dxeRxThreadSetPowerStateEventHandler
(
   wpt_msg *msgPtr
)
{
   wpt_status status = eWLAN_PAL_STATUS_SUCCESS;

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   /* Now serialise the message through Tx thread also to make sure
    * no register access when RIVA is in powersave */
   /* Use the same message pointer, just change the callback function */
   msgPtr->callback = dxeTxThreadSetPowerStateEventHandler;
   status = wpalPostTxMsg(WDI_GET_PAL_CTX(),
                          msgPtr);
   if ( eWLAN_PAL_STATUS_SUCCESS != status )
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "Tx thread Set power state req serialize fail status=%d",
               status);
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
}

/*==========================================================================
  @  Function Name
WLANDXE_SetPowerState @ Description From Client let DXE knows what is the WLAN HW(RIVA) power state @ Parameters pVoid pDXEContext : DXE Control Block WLANDXE_PowerStateType powerState @ Return wpt_status ===========================================================================*/ wpt_status WLANDXE_SetPowerState ( void *pDXEContext, WDTS_PowerStateType powerState, WDTS_SetPSCbType cBack ) { wpt_status status = eWLAN_PAL_STATUS_SUCCESS; WLANDXE_CtrlBlkType *pDxeCtrlBlk; WLANDXE_PowerStateType hostPowerState; wpt_msg *rxCompMsg; wpt_msg *txDescReSyncMsg; HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW, "%s Enter", __func__); if(NULL == pDXEContext) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "NULL pDXEContext passed by caller"); return eWLAN_PAL_STATUS_E_FAILURE; } pDxeCtrlBlk = (WLANDXE_CtrlBlkType *)pDXEContext; switch(powerState) { case WDTS_POWER_STATE_FULL: if(WLANDXE_POWER_STATE_IMPS == pDxeCtrlBlk->hostPowerState) { txDescReSyncMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg)); if(NULL == txDescReSyncMsg) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_SetPowerState, TX Resync MSG MEM alloc Fail"); } else { txDescReSyncMsg->callback = dxeTXReSyncDesc; txDescReSyncMsg->pContext = pDxeCtrlBlk; status = wpalPostTxMsg(WDI_GET_PAL_CTX(), txDescReSyncMsg); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_SetPowerState, Post TX re-sync MSG fail"); } } } hostPowerState = WLANDXE_POWER_STATE_FULL; break; case WDTS_POWER_STATE_BMPS: pDxeCtrlBlk->hostPowerState = WLANDXE_POWER_STATE_BMPS; hostPowerState = WLANDXE_POWER_STATE_BMPS; break; case WDTS_POWER_STATE_IMPS: hostPowerState = WLANDXE_POWER_STATE_IMPS; break; case WDTS_POWER_STATE_DOWN: pDxeCtrlBlk->hostPowerState = WLANDXE_POWER_STATE_DOWN; hostPowerState = WLANDXE_POWER_STATE_DOWN; break; default: hostPowerState = WLANDXE_POWER_STATE_MAX; } // A callback i.e. 
ACK back is needed only when we want to enable BMPS // and the data/management path is active because we want to ensure // DXE registers are not accessed when RIVA may be power-collapsed. So // we need a callback in enter_bmps_req (the request to RIVA is sent // only after ACK back from TX thread). A callback is not needed in // finish_scan_req during BMPS since data-path is resumed only in // finish_scan_rsp and no management frames are sent in between. No // callback is needed when going from BMPS enabled to BMPS suspended/ // disabled when it is known that RIVA is awake and cannot enter power // collapse autonomously so no callback is needed in exit_bmps_rsp or // init_scan_rsp if ( cBack ) { //serialize through Rx thread rxCompMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg)); if(NULL == rxCompMsg) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_SetPowerState, MSG MEM alloc Fail"); return eWLAN_PAL_STATUS_E_RESOURCES; } /* Event type, where it must be defined???? 
*/ /* THIS MUST BE CLEARED ASAP txCompMsg->type = TX_COMPLETE; */ rxCompMsg->callback = dxeRxThreadSetPowerStateEventHandler; rxCompMsg->pContext = pDxeCtrlBlk; rxCompMsg->val = hostPowerState; rxCompMsg->ptr = cBack; status = wpalPostRxMsg(WDI_GET_PAL_CTX(), rxCompMsg); if ( eWLAN_PAL_STATUS_SUCCESS != status ) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "Rx thread Set power state req serialize fail status=%d", status); } } else { if ( WLANDXE_POWER_STATE_FULL == hostPowerState ) { if( WLANDXE_POWER_STATE_BMPS == pDxeCtrlBlk->hostPowerState ) { dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE); } else if( WLANDXE_POWER_STATE_IMPS == pDxeCtrlBlk->hostPowerState ) { /* Requested Full power from exit IMPS, reenable the interrupts*/ if(eWLAN_PAL_TRUE == pDxeCtrlBlk->rxIntDisabledByIMPS) { pDxeCtrlBlk->rxIntDisabledByIMPS = eWLAN_PAL_FALSE; /* Enable RX interrupt at here, if new PS is not IMPS */ status = wpalEnableInterrupt(DXE_INTERRUPT_RX_READY); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "%s Enable RX ready interrupt fail", __func__); return status; } } if(eWLAN_PAL_TRUE == pDxeCtrlBlk->txIntDisabledByIMPS) { pDxeCtrlBlk->txIntDisabledByIMPS = eWLAN_PAL_FALSE; pDxeCtrlBlk->txIntEnable = eWLAN_PAL_TRUE; /* Enable RX interrupt at here, if new PS is not IMPS */ status = wpalEnableInterrupt(DXE_INTERRUPT_TX_COMPLE); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "%s Enable TX comp interrupt fail", __func__); return status; } } } pDxeCtrlBlk->hostPowerState = hostPowerState; pDxeCtrlBlk->rivaPowerState = WLANDXE_RIVA_POWER_STATE_ACTIVE; } else if ( hostPowerState == WLANDXE_POWER_STATE_BMPS ) { pDxeCtrlBlk->hostPowerState = hostPowerState; pDxeCtrlBlk->rivaPowerState = WLANDXE_RIVA_POWER_STATE_BMPS_UNKNOWN; } else if ( hostPowerState == WLANDXE_POWER_STATE_IMPS ) { pDxeCtrlBlk->hostPowerState = WLANDXE_POWER_STATE_IMPS; } else { 
      /* Unexpected host power state requested with no callback */
      HDXE_ASSERT(0);
   }
   }

   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Exit", __func__);
   return status;
}

/*==========================================================================
  @  Function Name
      WLANDXE_GetFreeTxDataResNumber

  @  Description
      Returns free descriptor numbers for the TX data channel.
      NOTE(review): the original header said "TX high priority", but the
      code reads WDTS_CHANNEL_TX_LOW_PRI -- the comment has been corrected
      to match the code.

  @  Parameters
      pVoid  pDXEContext : DXE Control Block

  @  Return
      wpt_uint32  Free descriptor count of the TX low-priority channel
                  (0 if pDXEContext is NULL)
===========================================================================*/
wpt_uint32 WLANDXE_GetFreeTxDataResNumber
(
   void *pDXEContext
)
{
   HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_INFO_LOW,
            "%s Enter", __func__);

   if(NULL == pDXEContext)
   {
      HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
               "NULL parameter passed by caller");
      return (0);
   }

   return ((WLANDXE_CtrlBlkType *)pDXEContext)->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].numFreeDesc;
}

/*==========================================================================
  @  Function Name
      WLANDXE_ChannelDebug

  @  Description
      Display DXE Channel debugging information.
      User may request to display DXE channel snapshot,
      or if the host driver detects any abnormal stall it may display.

  @  Parameters
      displaySnapshot : Display DXE snapshot option
      debugFlags      : Enable stall-detect features defined by
                        WPAL_DeviceDebugFlags.
                        These features may affect data performance.
@ Return NONE ===========================================================================*/ void WLANDXE_ChannelDebug ( wpt_boolean displaySnapshot, wpt_uint8 debugFlags ) { wpt_msg *channelDebugMsg; wpt_msg *txDescReSyncMsg ; wpt_uint32 regValue, regValueLocal = 0; wpt_status status = eWLAN_PAL_STATUS_SUCCESS; /* Debug Type 1, Display current snapshot */ if(displaySnapshot) { /* Whatever RIVA power condition try to wakeup RIVA through SMSM * This will not simply wakeup RIVA * Just incase TX not wanted stuck, Trigger TX again */ dxeNotifySmsm(eWLAN_PAL_FALSE, eWLAN_PAL_TRUE); dxeNotifySmsm(eWLAN_PAL_TRUE, eWLAN_PAL_FALSE); /* Get free BD count */ wpalSleep(10); wpalReadRegister(WLANDXE_BMU_AVAILABLE_BD_PDU, &regValue); #ifdef WCN_PRONTO wpalReadRegister(WLANDXE_BMU_AVAILABLE_BD_PDU_LOCAL, &regValueLocal); #endif HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_FATAL, "===== DXE Dump Start HPS %d, FWS %d, TX PFC %d, ABD %d, ABD LOCAL %d =====", tempDxeCtrlBlk->hostPowerState, tempDxeCtrlBlk->rivaPowerState, tempDxeCtrlBlk->txCompletedFrames, regValue, regValueLocal); wpalPacketStallUpdateInfo((wpt_uint32 *)&tempDxeCtrlBlk->rivaPowerState, &regValue, NULL, 0); channelDebugMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg)); if(NULL == channelDebugMsg) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "WLANDXE_ChannelDebug, MSG MEM alloc Fail"); return ; } channelDebugMsg->callback = dxeRxThreadChannelDebugHandler; status = wpalPostRxMsg(WDI_GET_PAL_CTX(), channelDebugMsg); if ( eWLAN_PAL_STATUS_SUCCESS != status ) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "Tx thread Set power state req serialize fail status=%d", status); } } if(debugFlags & WPAL_DEBUG_TX_DESC_RESYNC) { txDescReSyncMsg = (wpt_msg *)wpalMemoryAllocate(sizeof(wpt_msg)); if(NULL == txDescReSyncMsg) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "%s: Resync MSG MEM alloc Fail",__func__); } else { txDescReSyncMsg->callback = dxeDebugTxDescReSync; 
txDescReSyncMsg->pContext = tempDxeCtrlBlk; status = wpalPostTxMsg(WDI_GET_PAL_CTX(), txDescReSyncMsg); if(eWLAN_PAL_STATUS_SUCCESS != status) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "%s: Post TX re-sync MSG fail",__func__); } } } if(debugFlags & WPAL_DEBUG_START_HEALTH_TIMER) { HDXE_MSG(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR, "DXE TX Stall detect"); /* Start Stall detect timer and detect stall */ wpalTimerStart(&tempDxeCtrlBlk->dxeChannel[WDTS_CHANNEL_TX_LOW_PRI].healthMonitorTimer, T_WLANDXE_PERIODIC_HEALTH_M_TIME); } return; }
gpl-2.0
omegamoon/rockchip-rk3188-mk908
sound/core/pcm_native.c
80
96990
/* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/mm.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/pm_qos_params.h> #include <linux/uio.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/timer.h> #include <sound/minors.h> #include <asm/io.h> #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) #include <dma-coherence.h> #endif /* * Compatibility */ struct snd_pcm_hw_params_old { unsigned int flags; unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT - SNDRV_PCM_HW_PARAM_ACCESS + 1]; struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME - SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1]; unsigned int rmask; unsigned int cmask; unsigned int info; unsigned int msbits; unsigned int rate_num; unsigned int rate_den; snd_pcm_uframes_t fifo_size; unsigned char reserved[64]; }; #ifdef CONFIG_SND_SUPPORT_OLD_API #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old) #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old) static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct 
snd_pcm_hw_params_old __user * _oparams);
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
#endif
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);

/*
 * Global PCM link lock/semaphore protecting substream link groups.
 */
DEFINE_RWLOCK(snd_pcm_link_rwlock);
EXPORT_SYMBOL(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);

/* Temporarily lift the user/kernel address-space limit so kernel buffers can
 * be passed to copy-from/to-user based helpers; returns the previous
 * segment, which the caller must restore via snd_leave_user(). */
static inline mm_segment_t snd_enter_user(void)
{
	mm_segment_t fs = get_fs();
	set_fs(get_ds());
	return fs;
}

/* Restore the address-space limit saved by snd_enter_user(). */
static inline void snd_leave_user(mm_segment_t fs)
{
	set_fs(fs);
}

/* Fill *info with identification data for the given substream (card/device
 * numbers, ids, names, subdevice availability).  If a runtime is attached,
 * also copies sync info and lets the driver's ioctl op amend the record.
 * Always returns 0. */
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
	struct snd_pcm_runtime *runtime;
	struct snd_pcm *pcm = substream->pcm;
	struct snd_pcm_str *pstr = substream->pstr;

	memset(info, 0, sizeof(*info));
	info->card = pcm->card->number;
	info->device = pcm->device;
	info->stream = substream->stream;
	info->subdevice = substream->number;
	strlcpy(info->id, pcm->id, sizeof(info->id));
	strlcpy(info->name, pcm->name, sizeof(info->name));
	info->dev_class = pcm->dev_class;
	info->dev_subclass = pcm->dev_subclass;
	info->subdevices_count = pstr->substream_count;
	info->subdevices_avail = pstr->substream_count -
					pstr->substream_opened;
	strlcpy(info->subname, substream->name, sizeof(info->subname));
	runtime = substream->runtime;
	/* AB: FIXME!!! This is definitely nonsense */
	if (runtime) {
		info->sync = runtime->sync;
		substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
	}
	return 0;
}

/* User-space wrapper for snd_pcm_info(): allocates a kernel-side info
 * struct, fills it, and copies it back to _info.  Returns 0 on success,
 * -ENOMEM on allocation failure, -EFAULT on copy failure. */
int snd_pcm_info_user(struct snd_pcm_substream *substream,
		      struct snd_pcm_info __user * _info)
{
	struct snd_pcm_info *info;
	int err;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!
info) return -ENOMEM; err = snd_pcm_info(substream, info); if (err >= 0) { if (copy_to_user(_info, info, sizeof(*info))) err = -EFAULT; } kfree(info); return err; } #undef RULES_DEBUG #ifdef RULES_DEBUG #define HW_PARAM(v) [SNDRV_PCM_HW_PARAM_##v] = #v static const char * const snd_pcm_hw_param_names[] = { HW_PARAM(ACCESS), HW_PARAM(FORMAT), HW_PARAM(SUBFORMAT), HW_PARAM(SAMPLE_BITS), HW_PARAM(FRAME_BITS), HW_PARAM(CHANNELS), HW_PARAM(RATE), HW_PARAM(PERIOD_TIME), HW_PARAM(PERIOD_SIZE), HW_PARAM(PERIOD_BYTES), HW_PARAM(PERIODS), HW_PARAM(BUFFER_TIME), HW_PARAM(BUFFER_SIZE), HW_PARAM(BUFFER_BYTES), HW_PARAM(TICK_TIME), }; #endif int snd_pcm_hw_refine(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { unsigned int k; struct snd_pcm_hardware *hw; struct snd_interval *i = NULL; struct snd_mask *m = NULL; struct snd_pcm_hw_constraints *constrs = &substream->runtime->hw_constraints; unsigned int rstamps[constrs->rules_num]; unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1]; unsigned int stamp = 2; int changed, again; params->info = 0; params->fifo_size = 0; if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS)) params->msbits = 0; if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) { params->rate_num = 0; params->rate_den = 0; } for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) { m = hw_param_mask(params, k); if (snd_mask_empty(m)) return -EINVAL; if (!(params->rmask & (1 << k))) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "%s = ", snd_pcm_hw_param_names[k]); printk("%04x%04x%04x%04x -> ", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); #endif changed = snd_mask_refine(m, constrs_mask(constrs, k)); #ifdef RULES_DEBUG printk("%04x%04x%04x%04x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); #endif if (changed) params->cmask |= 1 << k; if (changed < 0) return changed; } for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) { i = hw_param_interval(params, k); if 
(snd_interval_empty(i)) return -EINVAL; if (!(params->rmask & (1 << k))) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "%s = ", snd_pcm_hw_param_names[k]); if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); printk(" -> "); #endif changed = snd_interval_refine(i, constrs_interval(constrs, k)); #ifdef RULES_DEBUG if (i->empty) printk("empty\n"); else printk("%c%u %u%c\n", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); #endif if (changed) params->cmask |= 1 << k; if (changed < 0) return changed; } for (k = 0; k < constrs->rules_num; k++) rstamps[k] = 0; for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0; do { again = 0; for (k = 0; k < constrs->rules_num; k++) { struct snd_pcm_hw_rule *r = &constrs->rules[k]; unsigned int d; int doit = 0; if (r->cond && !(r->cond & params->flags)) continue; for (d = 0; r->deps[d] >= 0; d++) { if (vstamps[r->deps[d]] > rstamps[k]) { doit = 1; break; } } if (!doit) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "Rule %d [%p]: ", k, r->func); if (r->var >= 0) { printk("%s = ", snd_pcm_hw_param_names[r->var]); if (hw_is_mask(r->var)) { m = hw_param_mask(params, r->var); printk("%x", *m->bits); } else { i = hw_param_interval(params, r->var); if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); } } #endif changed = r->func(params, r); #ifdef RULES_DEBUG if (r->var >= 0) { printk(" -> "); if (hw_is_mask(r->var)) printk("%x", *m->bits); else { if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? 
')' : ']'); } } printk("\n"); #endif rstamps[k] = stamp; if (changed && r->var >= 0) { params->cmask |= (1 << r->var); vstamps[r->var] = stamp; again = 1; } if (changed < 0) return changed; stamp++; } } while (again); if (!params->msbits) { i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); if (snd_interval_single(i)) params->msbits = snd_interval_value(i); } if (!params->rate_den) { i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); if (snd_interval_single(i)) { params->rate_num = snd_interval_value(i); params->rate_den = 1; } } hw = &substream->runtime->hw; if (!params->info) params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES; if (!params->fifo_size) { m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); if (snd_mask_min(m) == snd_mask_max(m) && snd_interval_min(i) == snd_interval_max(i)) { changed = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_FIFO_SIZE, params); if (changed < 0) return changed; } } params->rmask = 0; return 0; } EXPORT_SYMBOL(snd_pcm_hw_refine); static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params __user * _params) { struct snd_pcm_hw_params *params; int err; params = memdup_user(_params, sizeof(*params)); if (IS_ERR(params)) return PTR_ERR(params); err = snd_pcm_hw_refine(substream, params); if (copy_to_user(_params, params, sizeof(*params))) { if (!err) err = -EFAULT; } kfree(params); return err; } static int period_to_usecs(struct snd_pcm_runtime *runtime) { int usecs; if (! 
runtime->rate)
		return -1; /* invalid */
	/* take 75% of period time as the deadline */
	usecs = (750000 / runtime->rate) * runtime->period_size;
	usecs += ((750000 % runtime->rate) * runtime->period_size) /
		 runtime->rate;
	return usecs;
}

/*
 * Set the PCM state of this substream, unless the device has been
 * disconnected in the meantime (DISCONNECTED is a final state).
 */
static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
{
	snd_pcm_stream_lock_irq(substream);
	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
		substream->runtime->status->state = state;
	snd_pcm_stream_unlock_irq(substream);
}

/*
 * hw_params ioctl: refine the parameter space against the constraints,
 * choose one configuration, program the driver via ops->hw_params(), and
 * mirror the result into the runtime.  On success the substream moves to
 * the SETUP state; on failure it is forced back to OPEN so the
 * application has to retry with corrected parameters.
 */
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;

	/* only allowed before the stream has been prepared/started */
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);

	/* refuse to reconfigure while the buffer is mmapped
	 * (the OSS emulation layer is exempt from this check)
	 */
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
	if (!substream->oss.oss)
#endif
		if (atomic_read(&substream->mmap_count))
			return -EBADFD;

	/* re-refine everything, then collapse to a single configuration */
	params->rmask = ~0U;
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto _error;

	err = snd_pcm_hw_params_choose(substream, params);
	if (err < 0)
		goto _error;

	if (substream->ops->hw_params != NULL) {
		err = substream->ops->hw_params(substream, params);
		if (err < 0)
			goto _error;
	}

	/* mirror the chosen configuration into the runtime */
	runtime->access = params_access(params);
	runtime->format = params_format(params);
	runtime->subformat = params_subformat(params);
	runtime->channels = params_channels(params);
	runtime->rate = params_rate(params);
	runtime->period_size = params_period_size(params);
	runtime->periods = params_periods(params);
	runtime->buffer_size = params_buffer_size(params);
	runtime->info = params->info;
	runtime->rate_num = params->rate_num;
	runtime->rate_den = params->rate_den;
	/* period wakeups may be suppressed only when both the hardware
	 * advertises support and the application requested it
	 */
	runtime->no_period_wakeup = (params->info &
				     SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
		(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);

	bits = snd_pcm_format_physical_width(runtime->format);
	runtime->sample_bits = bits;
	bits *= runtime->channels;
	runtime->frame_bits = bits;
	/* byte_align/min_align: smallest frame count whose size is a
	 * whole number of bytes
	 */
	frames = 1;
	while (bits % 8 != 0) {
		bits *= 2;
		frames *= 2;
	}
	runtime->byte_align = bits / 8;
	runtime->min_align = frames;

	/* Default sw params */
	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	runtime->period_step = 1;
	runtime->control->avail_min = runtime->period_size;
	runtime->start_threshold = 1;
	runtime->stop_threshold = runtime->buffer_size;
	runtime->silence_threshold = 0;
	runtime->silence_size = 0;
	/* boundary: largest doubling of buffer_size that still fits a long */
	runtime->boundary = runtime->buffer_size;
	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;

	snd_pcm_timer_resolution_change(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);

	/* re-register the CPU DMA latency request for the new period time */
	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if ((usecs = period_to_usecs(runtime)) >= 0)
		pm_qos_add_request(&substream->latency_pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY, usecs);
	return 0;
 _error:
	/* hardware might be unusable from this time,
	 * so we force application to retry to set
	 * the correct hardware parameter settings
	 */
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	if (substream->ops->hw_free != NULL)
		substream->ops->hw_free(substream);
	return err;
}

/*
 * hw_params ioctl entry point: copy the params struct from/to user space
 * around snd_pcm_hw_params().  A copy-out failure yields -EFAULT but
 * never masks an earlier driver error.
 */
static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);
	err = snd_pcm_hw_params(substream, params);
	if (copy_to_user(_params, params, sizeof(*params))) {
		if (!err)
			err = -EFAULT;
	}
	kfree(params);
	return err;
}

/*
 * hw_free ioctl: release the driver's hardware resources and drop back
 * to the OPEN state.  Only valid from SETUP/PREPARED and never while
 * the buffer is mmapped.
 */
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: break; default: snd_pcm_stream_unlock_irq(substream); return -EBADFD; } snd_pcm_stream_unlock_irq(substream); if (atomic_read(&substream->mmap_count)) return -EBADFD; if (substream->ops->hw_free) result = substream->ops->hw_free(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); pm_qos_remove_request(&substream->latency_pm_qos_req); return result; } static int snd_pcm_sw_params(struct snd_pcm_substream *substream, struct snd_pcm_sw_params *params) { struct snd_pcm_runtime *runtime; int err; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; snd_pcm_stream_lock_irq(substream); if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { snd_pcm_stream_unlock_irq(substream); return -EBADFD; } snd_pcm_stream_unlock_irq(substream); if (params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST) return -EINVAL; if (params->avail_min == 0) return -EINVAL; if (params->silence_size >= runtime->boundary) { if (params->silence_threshold != 0) return -EINVAL; } else { if (params->silence_size > params->silence_threshold) return -EINVAL; if (params->silence_threshold > runtime->buffer_size) return -EINVAL; } err = 0; snd_pcm_stream_lock_irq(substream); runtime->tstamp_mode = params->tstamp_mode; runtime->period_step = params->period_step; runtime->control->avail_min = params->avail_min; runtime->start_threshold = params->start_threshold; runtime->stop_threshold = params->stop_threshold; runtime->silence_threshold = params->silence_threshold; runtime->silence_size = params->silence_size; params->boundary = runtime->boundary; if (snd_pcm_running(substream)) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); err = snd_pcm_update_state(substream, runtime); } snd_pcm_stream_unlock_irq(substream); return err; } static int snd_pcm_sw_params_user(struct 
snd_pcm_substream *substream, struct snd_pcm_sw_params __user * _params) { struct snd_pcm_sw_params params; int err; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; err = snd_pcm_sw_params(substream, &params); if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } int snd_pcm_status(struct snd_pcm_substream *substream, struct snd_pcm_status *status) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_stream_lock_irq(substream); status->state = runtime->status->state; status->suspended_state = runtime->status->suspended_state; if (status->state == SNDRV_PCM_STATE_OPEN) goto _end; status->trigger_tstamp = runtime->trigger_tstamp; if (snd_pcm_running(substream)) { snd_pcm_update_hw_ptr(substream); if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { status->tstamp = runtime->status->tstamp; goto _tstamp_end; } } snd_pcm_gettime(runtime, &status->tstamp); _tstamp_end: status->appl_ptr = runtime->control->appl_ptr; status->hw_ptr = runtime->status->hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { status->avail = snd_pcm_playback_avail(runtime); if (runtime->status->state == SNDRV_PCM_STATE_RUNNING || runtime->status->state == SNDRV_PCM_STATE_DRAINING) { status->delay = runtime->buffer_size - status->avail; status->delay += runtime->delay; } else status->delay = 0; } else { status->avail = snd_pcm_capture_avail(runtime); if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) status->delay = status->avail + runtime->delay; else status->delay = 0; } status->avail_max = runtime->avail_max; status->overrange = runtime->overrange; runtime->avail_max = 0; runtime->overrange = 0; _end: snd_pcm_stream_unlock_irq(substream); return 0; } static int snd_pcm_status_user(struct snd_pcm_substream *substream, struct snd_pcm_status __user * _status) { struct snd_pcm_status status; int res; memset(&status, 0, sizeof(status)); res = snd_pcm_status(substream, &status); if (res < 0) return res; if 
(copy_to_user(_status, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

/*
 * channel_info ioctl: query the buffer layout of one channel.  Valid
 * only after hw_params (state != OPEN); the actual answer is supplied
 * by the driver's ioctl callback.
 */
static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
				struct snd_pcm_channel_info * info)
{
	struct snd_pcm_runtime *runtime;
	unsigned int channel;

	channel = info->channel;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (channel >= runtime->channels)
		return -EINVAL;
	memset(info, 0, sizeof(*info));
	info->channel = channel;
	return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO,
				     info);
}

/* user-space copy wrapper around snd_pcm_channel_info() */
static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
				     struct snd_pcm_channel_info __user * _info)
{
	struct snd_pcm_channel_info info;
	int res;

	if (copy_from_user(&info, _info, sizeof(info)))
		return -EFAULT;
	res = snd_pcm_channel_info(substream, &info);
	if (res < 0)
		return res;
	if (copy_to_user(_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * Record the trigger timestamp.  Only the designated trigger master takes
 * a fresh timestamp; every other member of a linked group copies it from
 * the master (recursing once so the master's stamp exists first).  The
 * trigger_master pointer is consumed (reset to NULL) in the process.
 */
static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master == NULL)
		return;
	if (runtime->trigger_master == substream) {
		snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
	} else {
		snd_pcm_trigger_tstamp(runtime->trigger_master);
		runtime->trigger_tstamp =
			runtime->trigger_master->runtime->trigger_tstamp;
	}
	runtime->trigger_master = NULL;
}

/* one state transition (start/stop/pause/...) split into phases so it
 * can be applied atomically across a group of linked substreams
 */
struct action_ops {
	int (*pre_action)(struct snd_pcm_substream *substream, int state);
	int (*do_action)(struct snd_pcm_substream *substream, int state);
	void (*undo_action)(struct snd_pcm_substream *substream, int state);
	void (*post_action)(struct snd_pcm_substream *substream, int state);
};

/*
 * This function is the core handler for a group of linked streams.
 * Note: the stream state might be changed also on failure
 * Note2: call with calling stream lock + link lock
 */
static int snd_pcm_action_group(struct action_ops *ops,
				struct snd_pcm_substream *substream,
				int state, int do_lock)
{
	struct snd_pcm_substream *s = NULL;
	struct snd_pcm_substream *s1;
	int res = 0;

	/* phase 1: lock each member (caller's own lock is already held)
	 * and run the pre-checks
	 */
	snd_pcm_group_for_each_entry(s, substream) {
		if (do_lock && s != substream)
			spin_lock_nested(&s->self_group.lock,
					 SINGLE_DEPTH_NESTING);
		res = ops->pre_action(s, state);
		if (res < 0)
			goto _unlock;
	}
	/* phase 2: perform the action; on failure undo the members that
	 * already succeeded, then release all locks (s = NULL)
	 */
	snd_pcm_group_for_each_entry(s, substream) {
		res = ops->do_action(s, state);
		if (res < 0) {
			if (ops->undo_action) {
				snd_pcm_group_for_each_entry(s1, substream) {
					if (s1 == s) /* failed stream */
						break;
					ops->undo_action(s1, state);
				}
			}
			s = NULL; /* unlock all */
			goto _unlock;
		}
	}
	/* phase 3: commit */
	snd_pcm_group_for_each_entry(s, substream) {
		ops->post_action(s, state);
	}
 _unlock:
	if (do_lock) {
		/* unlock streams up to and including s (or all if s == NULL);
		 * the caller's own lock stays held
		 */
		snd_pcm_group_for_each_entry(s1, substream) {
			if (s1 != substream)
				spin_unlock(&s1->self_group.lock);
			if (s1 == s)	/* end */
				break;
		}
	}
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action_single(struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 int state)
{
	int res;

	res = ops->pre_action(substream, state);
	if (res < 0)
		return res;
	res = ops->do_action(substream, state);
	if (res == 0)
		ops->post_action(substream, state);
	else if (ops->undo_action)
		ops->undo_action(substream, state);
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action(struct action_ops *ops,
			  struct snd_pcm_substream *substream,
			  int state)
{
	int res;

	if (snd_pcm_stream_linked(substream)) {
		/* the group lock must be taken before the (already held)
		 * stream lock; if trylock fails, drop and re-acquire both
		 * in the correct order to avoid an ABBA deadlock
		 */
		if (!spin_trylock(&substream->group->lock)) {
			spin_unlock(&substream->self_group.lock);
			spin_lock(&substream->group->lock);
			spin_lock(&substream->self_group.lock);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		spin_unlock(&substream->group->lock);
	} else {
		res = snd_pcm_action_single(ops, substream, state);
	}
	return res;
}

/*
 * Note: don't use any locks before
 */
static int snd_pcm_action_lock_irq(struct action_ops *ops,
				   struct snd_pcm_substream *substream,
				   int state)
{
	int res;

	read_lock_irq(&snd_pcm_link_rwlock);
	if
(snd_pcm_stream_linked(substream)) { spin_lock(&substream->group->lock); spin_lock(&substream->self_group.lock); res = snd_pcm_action_group(ops, substream, state, 1); spin_unlock(&substream->self_group.lock); spin_unlock(&substream->group->lock); } else { spin_lock(&substream->self_group.lock); res = snd_pcm_action_single(ops, substream, state); spin_unlock(&substream->self_group.lock); } read_unlock_irq(&snd_pcm_link_rwlock); return res; } /* */ static int snd_pcm_action_nonatomic(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; down_read(&snd_pcm_link_rwsem); if (snd_pcm_stream_linked(substream)) res = snd_pcm_action_group(ops, substream, state, 0); else res = snd_pcm_action_single(ops, substream, state); up_read(&snd_pcm_link_rwsem); return res; } /* * start callbacks */ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state != SNDRV_PCM_STATE_PREPARED) return -EBADFD; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !snd_pcm_playback_data(substream)) return -EPIPE; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master != substream) return 0; return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START); } static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master == substream) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); } static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); runtime->hw_ptr_jiffies = jiffies; runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) / runtime->rate; runtime->status->state = state; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) 
snd_pcm_playback_silence(substream, ULONG_MAX); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTART, &runtime->trigger_tstamp); } static struct action_ops snd_pcm_action_start = { .pre_action = snd_pcm_pre_start, .do_action = snd_pcm_do_start, .undo_action = snd_pcm_undo_start, .post_action = snd_pcm_post_start }; /** * snd_pcm_start - start all linked streams * @substream: the PCM substream instance */ int snd_pcm_start(struct snd_pcm_substream *substream) { return snd_pcm_action(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING); } /* * stop callbacks */ static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master == substream && snd_pcm_running(substream)) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); return 0; /* unconditonally stop all substreams */ } static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state != state) { snd_pcm_trigger_tstamp(substream); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTOP, &runtime->trigger_tstamp); runtime->status->state = state; } wake_up(&runtime->sleep); wake_up(&runtime->tsleep); } static struct action_ops snd_pcm_action_stop = { .pre_action = snd_pcm_pre_stop, .do_action = snd_pcm_do_stop, .post_action = snd_pcm_post_stop }; /** * snd_pcm_stop - try to stop all running streams in the substream group * @substream: the PCM substream instance * @state: PCM state after stopping the stream * * The state of each stream is then changed to the given state unconditionally. 
 */
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}

EXPORT_SYMBOL(snd_pcm_stop);

/**
 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
 * @substream: the PCM substream
 *
 * After stopping, the state is changed to SETUP.
 * Unlike snd_pcm_stop(), this affects only the given stream.
 */
int snd_pcm_drain_done(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
				     SNDRV_PCM_STATE_SETUP);
}

/*
 * pause callbacks
 */
static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* pausing requires explicit hardware support */
	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
		return -ENOSYS;
	/* push: enter pause (must be RUNNING); !push: release (must be PAUSED) */
	if (push) {
		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
			return -EBADFD;
	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	/* some drivers might use hw_ptr to recover from the pause -
	   update the hw_ptr now */
	if (push)
		snd_pcm_update_hw_ptr(substream);
	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
	 * a delta between the current jiffies, this gives a large
	 * enough delta, effectively to skip the check once.
	 */
	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
	return substream->ops->trigger(substream,
				       push ?
				       SNDRV_PCM_TRIGGER_PAUSE_PUSH :
				       SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}

static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
{
	/* revert the pause/release the master stream just issued */
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream,
					push ?
					SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
					SNDRV_PCM_TRIGGER_PAUSE_PUSH);
}

static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_trigger_tstamp(substream);
	if (push) {
		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
		if (substream->timer)
			snd_timer_notify(substream->timer,
					 SNDRV_TIMER_EVENT_MPAUSE,
					 &runtime->trigger_tstamp);
		/* release anyone sleeping on buffer space/data */
		wake_up(&runtime->sleep);
		wake_up(&runtime->tsleep);
	} else {
		runtime->status->state = SNDRV_PCM_STATE_RUNNING;
		if (substream->timer)
			snd_timer_notify(substream->timer,
					 SNDRV_TIMER_EVENT_MCONTINUE,
					 &runtime->trigger_tstamp);
	}
}

static struct action_ops snd_pcm_action_pause = {
	.pre_action = snd_pcm_pre_pause,
	.do_action = snd_pcm_do_pause,
	.undo_action = snd_pcm_undo_pause,
	.post_action = snd_pcm_post_pause
};

/*
 * Push/release the pause for all linked streams.
 */
static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
{
	return snd_pcm_action(&snd_pcm_action_pause, substream, push);
}

/*
 * set volume.
 * add by qiuen
 *
 * NOTE(review): vendor extension, not part of mainline ALSA.  It
 * overwrites substream->number (the subdevice index that snd_pcm_info()
 * reports to user space) with the volume value before forwarding it to
 * the driver's trigger callback - confirm with the vendor driver that
 * this side effect is intentional.
 */
static int snd_pcm_vol(struct snd_pcm_substream *substream, int push)
{
	substream->number = push;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_VOLUME);
}

#ifdef CONFIG_PM
/* suspend */

static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* already suspended: nothing to do */
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
		return -EBUSY;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master != substream)
		return 0;
	if (!
snd_pcm_running(substream)) return 0; substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); return 0; /* suspend unconditionally */ } static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND, &runtime->trigger_tstamp); runtime->status->suspended_state = runtime->status->state; runtime->status->state = SNDRV_PCM_STATE_SUSPENDED; wake_up(&runtime->sleep); wake_up(&runtime->tsleep); } static struct action_ops snd_pcm_action_suspend = { .pre_action = snd_pcm_pre_suspend, .do_action = snd_pcm_do_suspend, .post_action = snd_pcm_post_suspend }; /** * snd_pcm_suspend - trigger SUSPEND to all linked streams * @substream: the PCM substream * * After this call, all streams are changed to SUSPENDED state. */ int snd_pcm_suspend(struct snd_pcm_substream *substream) { int err; unsigned long flags; if (! substream) return 0; snd_pcm_stream_lock_irqsave(substream, flags); err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0); snd_pcm_stream_unlock_irqrestore(substream, flags); return err; } EXPORT_SYMBOL(snd_pcm_suspend); /** * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm * @pcm: the PCM instance * * After this call, all streams are changed to SUSPENDED state. */ int snd_pcm_suspend_all(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; int stream, err = 0; if (! 
pcm) return 0; for (stream = 0; stream < 2; stream++) { for (substream = pcm->streams[stream].substream; substream; substream = substream->next) { /* FIXME: the open/close code should lock this as well */ if (substream->runtime == NULL) continue; err = snd_pcm_suspend(substream); if (err < 0 && err != -EBUSY) return err; } } return 0; } EXPORT_SYMBOL(snd_pcm_suspend_all); /* resume */ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (!(runtime->info & SNDRV_PCM_INFO_RESUME)) return -ENOSYS; runtime->trigger_master = substream; return 0; } static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->trigger_master != substream) return 0; /* DMA not running previously? */ if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING && (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING || substream->stream != SNDRV_PCM_STREAM_PLAYBACK)) return 0; return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME); } static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state) { if (substream->runtime->trigger_master == substream && snd_pcm_running(substream)) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); } static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_trigger_tstamp(substream); if (substream->timer) snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME, &runtime->trigger_tstamp); runtime->status->state = runtime->status->suspended_state; } static struct action_ops snd_pcm_action_resume = { .pre_action = snd_pcm_pre_resume, .do_action = snd_pcm_do_resume, .undo_action = snd_pcm_undo_resume, .post_action = snd_pcm_post_resume }; static int snd_pcm_resume(struct snd_pcm_substream *substream) { struct snd_card *card = substream->pcm->card; int res; 
snd_power_lock(card); if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0) res = snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0); snd_power_unlock(card); return res; } #else static int snd_pcm_resume(struct snd_pcm_substream *substream) { return -ENOSYS; } #endif /* CONFIG_PM */ /* * xrun ioctl * * Change the RUNNING stream(s) to XRUN state. */ static int snd_pcm_xrun(struct snd_pcm_substream *substream) { struct snd_card *card = substream->pcm->card; struct snd_pcm_runtime *runtime = substream->runtime; int result; snd_power_lock(card); if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result < 0) goto _unlock; } snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_XRUN: result = 0; /* already there */ break; case SNDRV_PCM_STATE_RUNNING: result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); break; default: result = -EBADFD; } snd_pcm_stream_unlock_irq(substream); _unlock: snd_power_unlock(card); return result; } /* * reset ioctl */ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; switch (runtime->status->state) { case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: case SNDRV_PCM_STATE_SUSPENDED: return 0; default: return -EBADFD; } } static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL); if (err < 0) return err; runtime->hw_ptr_base = 0; runtime->hw_ptr_interrupt = runtime->status->hw_ptr - runtime->status->hw_ptr % runtime->period_size; runtime->silence_start = runtime->status->hw_ptr; runtime->silence_filled = 0; return 0; } static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; 
runtime->control->appl_ptr = runtime->status->hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); } static struct action_ops snd_pcm_action_reset = { .pre_action = snd_pcm_pre_reset, .do_action = snd_pcm_do_reset, .post_action = snd_pcm_post_reset }; static int snd_pcm_reset(struct snd_pcm_substream *substream) { return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0); } /* * prepare ioctl */ /* we use the second argument for updating f_flags */ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream, int f_flags) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN || runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) return -EBADFD; if (snd_pcm_running(substream)) return -EBUSY; substream->f_flags = f_flags; return 0; } static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state) { int err; err = substream->ops->prepare(substream); if (err < 0) return err; return snd_pcm_do_reset(substream, 0); } static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; runtime->control->appl_ptr = runtime->status->hw_ptr; snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED); } static struct action_ops snd_pcm_action_prepare = { .pre_action = snd_pcm_pre_prepare, .do_action = snd_pcm_do_prepare, .post_action = snd_pcm_post_prepare }; /** * snd_pcm_prepare - prepare the PCM substream to be triggerable * @substream: the PCM substream instance * @file: file to refer f_flags */ static int snd_pcm_prepare(struct snd_pcm_substream *substream, struct file *file) { int res; struct snd_card *card = substream->pcm->card; int f_flags; if (file) f_flags = file->f_flags; else f_flags = substream->f_flags; snd_power_lock(card); if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0) res = 
snd_pcm_action_nonatomic(&snd_pcm_action_prepare, substream, f_flags); snd_power_unlock(card); return res; } /* * drain ioctl */ static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state) { substream->runtime->trigger_master = substream; return 0; } static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: /* start playback stream if possible */ if (! snd_pcm_playback_empty(substream)) { snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); } break; case SNDRV_PCM_STATE_RUNNING: runtime->status->state = SNDRV_PCM_STATE_DRAINING; break; default: break; } } else { /* stop running stream */ if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) { int new_state = snd_pcm_capture_avail(runtime) > 0 ? SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP; snd_pcm_do_stop(substream, new_state); snd_pcm_post_stop(substream, new_state); } } return 0; } static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state) { } static struct action_ops snd_pcm_action_drain_init = { .pre_action = snd_pcm_pre_drain_init, .do_action = snd_pcm_do_drain_init, .post_action = snd_pcm_post_drain_init }; static int snd_pcm_drop(struct snd_pcm_substream *substream); /* * Drain the stream(s). * When the substream is linked, sync until the draining of all playback streams * is finished. * After this call, all streams are supposed to be either SETUP or DRAINING * (capture only) state. 
*/
static int snd_pcm_drain(struct snd_pcm_substream *substream,
			 struct file *file)
{
	struct snd_card *card;
	struct snd_pcm_runtime *runtime;
	struct snd_pcm_substream *s;
	wait_queue_t wait;
	int result = 0;
	int nonblock = 0;

	card = substream->pcm->card;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	snd_power_lock(card);
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
		result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
		if (result < 0) {
			snd_power_unlock(card);
			return result;
		}
	}

	/* f_flags of the file wins over the substream's saved flags */
	if (file) {
		if (file->f_flags & O_NONBLOCK)
			nonblock = 1;
	} else if (substream->f_flags & O_NONBLOCK)
		nonblock = 1;

	down_read(&snd_pcm_link_rwsem);
	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	/* pre-start/stop - all running streams are changed to DRAINING state */
	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
	if (result < 0)
		goto unlock;
	/* in non-blocking, we don't wait in ioctl but let caller poll */
	if (nonblock) {
		result = -EAGAIN;
		goto unlock;
	}

	for (;;) {
		long tout;
		struct snd_pcm_runtime *to_check;
		if (signal_pending(current)) {
			result = -ERESTARTSYS;
			break;
		}
		/* find a playback substream of the group still DRAINING */
		to_check = NULL;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
				continue;
			runtime = s->runtime;
			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				to_check = runtime;
				break;
			}
		}
		if (!to_check)
			break; /* all drained */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&to_check->sleep, &wait);
		/* drop all locks before sleeping; reacquired below */
		snd_pcm_stream_unlock_irq(substream);
		up_read(&snd_pcm_link_rwsem);
		snd_power_unlock(card);
		/* runtime == to_check here: size the timeout by two periods */
		if (runtime->no_period_wakeup)
			tout = MAX_SCHEDULE_TIMEOUT;
		else {
			tout = 10;
			if (runtime->rate) {
				long t = runtime->period_size * 2 / runtime->rate;
				tout = max(t, tout);
			}
			tout = msecs_to_jiffies(tout * 1000);
		}
		tout = schedule_timeout_interruptible(tout);
		snd_power_lock(card);
		down_read(&snd_pcm_link_rwsem);
		snd_pcm_stream_lock_irq(substream);
		remove_wait_queue(&to_check->sleep, &wait);
		if (card->shutdown) {
			result = -ENODEV;
			break;
		}
		if (tout == 0) {
			/* timed out without a wakeup: suspended or stuck DMA/IRQ */
			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
				result = -ESTRPIPE;
			else {
				snd_printd("playback drain error (DMA or IRQ trouble?)\n");
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				result = -EIO;
			}
			break;
		}
	}
 unlock:
	snd_pcm_stream_unlock_irq(substream);
	up_read(&snd_pcm_link_rwsem);
	snd_power_unlock(card);
	return result;
}

/*
 * drop ioctl
 *
 * Immediately put all linked substreams into SETUP state.
 */
static int snd_pcm_drop(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
	    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
		return -EBADFD;

	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
	snd_pcm_stream_unlock_irq(substream);

	return result;
}

/*
 * Look up the struct file of a PCM char device from a user fd; NULL when the
 * fd is not an ALSA PCM device.
 * WARNING: Don't forget to fput back the file
 */
static struct file *snd_pcm_file_fd(int fd)
{
	struct file *file;
	struct inode *inode;
	unsigned int minor;

	file = fget(fd);
	if (!file)
		return NULL;
	inode = file->f_path.dentry->d_inode;
	if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major) {
		fput(file);
		return NULL;
	}
	minor = iminor(inode);
	if (!snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) &&
	    !snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE)) {
		fput(file);
		return NULL;
	}
	return file;
}

/*
 * PCM link handling
 */
static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
{
	int res = 0;
	struct file *file;
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream1;
	file = snd_pcm_file_fd(fd);
	if (!file)
		return -EBADFD;
	pcm_file = file->private_data;
	substream1 = pcm_file->substream;
	down_write(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	/* both substreams must be in the same (non-OPEN) state */
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    substream->runtime->status->state != substream1->runtime->status->state) {
		res = -EBADFD;
		goto _end;
	}
	if (snd_pcm_stream_linked(substream1)) {
		res = -EALREADY;
		goto _end;
	}
	/* first link: promote self to a heap-allocated group */
	if (!snd_pcm_stream_linked(substream)) {
		/* GFP_ATOMIC: allocated under write_lock_irq */
		substream->group = kmalloc(sizeof(struct snd_pcm_group), GFP_ATOMIC);
		if (substream->group == NULL) {
			res = -ENOMEM;
			goto _end;
		}
		spin_lock_init(&substream->group->lock);
		INIT_LIST_HEAD(&substream->group->substreams);
		list_add_tail(&substream->link_list, &substream->group->substreams);
		substream->group->count = 1;
	}
	list_add_tail(&substream1->link_list, &substream->group->substreams);
	substream->group->count++;
	substream1->group = substream->group;
 _end:
	write_unlock_irq(&snd_pcm_link_rwlock);
	up_write(&snd_pcm_link_rwsem);
	snd_card_unref(substream1->pcm->card);
	fput(file);
	return res;
}

/* detach a substream back to its embedded single-member group */
static void relink_to_local(struct snd_pcm_substream *substream)
{
	substream->group = &substream->self_group;
	INIT_LIST_HEAD(&substream->self_group.substreams);
	list_add_tail(&substream->link_list, &substream->self_group.substreams);
}

/*
 * Remove the substream from its link group; when only one member would be
 * left, dissolve the group and free it.
 */
static int snd_pcm_unlink(struct snd_pcm_substream *substream)
{
	struct snd_pcm_substream *s;
	int res = 0;

	down_write(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	if (!snd_pcm_stream_linked(substream)) {
		res = -EALREADY;
		goto _end;
	}
	list_del(&substream->link_list);
	substream->group->count--;
	if (substream->group->count == 1) { /* detach the last stream, too */
		snd_pcm_group_for_each_entry(s, substream) {
			relink_to_local(s);
			break;
		}
		kfree(substream->group);
	}
	relink_to_local(substream);
 _end:
	write_unlock_irq(&snd_pcm_link_rwlock);
	up_write(&snd_pcm_link_rwsem);
	return res;
}

/*
 * hw configurator
 */
static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params, struct
snd_pcm_hw_rule *rule)
{
	/* refine rule->var to deps[0] * deps[1] */
	struct snd_interval t;
	snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
			 hw_param_interval_c(params, rule->deps[1]), &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/* refine rule->var to deps[0] / deps[1] */
static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
			       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
			 hw_param_interval_c(params, rule->deps[1]), &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/* refine rule->var to deps[0] * deps[1] / k (k passed via rule->private) */
static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
			     hw_param_interval_c(params, rule->deps[1]),
			     (unsigned long) rule->private, &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/* refine rule->var to deps[0] * k / deps[1] (k passed via rule->private) */
static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
			     (unsigned long) rule->private,
			     hw_param_interval_c(params, rule->deps[1]), &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/*
 * Drop every format whose physical sample width falls outside the
 * SAMPLE_BITS interval (deps[0]).
 */
static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int k;
	struct snd_interval *i = hw_param_interval(params, rule->deps[0]);
	struct snd_mask m;
	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
	snd_mask_any(&m);
	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
		int bits;
		if (!snd_mask_test(mask, k))
			continue;
		bits = snd_pcm_format_physical_width(k);
		if (bits <= 0)
			continue; /* ignore invalid formats */
		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
			snd_mask_reset(&m, k);
	}
	return snd_mask_refine(mask, &m);
}

/*
 * Refine SAMPLE_BITS to the [min,max] physical widths of the formats still
 * allowed by the FORMAT mask.
 */
static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
				       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	unsigned int k;
	t.min = UINT_MAX;
	t.max = 0;
	t.openmin = 0;
	t.openmax = 0;
	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
		int bits;
		if (!snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
			continue;
		bits = snd_pcm_format_physical_width(k);
		if (bits <= 0)
			continue; /* ignore invalid formats */
		if (t.min > (unsigned)bits)
			t.min = bits;
		if (t.max < (unsigned)bits)
			t.max = bits;
	}
	t.integer = 1;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/* the table below must stay index-aligned with the SNDRV_PCM_RATE_* bits */
#if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
#error "Change this table"
#endif

static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
				48000, 64000, 88200, 96000, 176400, 192000 };

const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
};

/* restrict RATE to the known rates allowed by hw->rates bitmask */
static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hardware *hw = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var),
				 snd_pcm_known_rates.count,
				 snd_pcm_known_rates.list, hw->rates);
}

/* cap BUFFER_BYTES at the substream's preallocation limit */
static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
					    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	struct snd_pcm_substream *substream = rule->private;
	t.min = 0;
	t.max = substream->buffer_bytes_max;
	t.openmin = 0;
	t.openmax = 0;
	t.integer = 1;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/*
 * Install the generic inter-parameter constraint rules (format/bits/
 * channels/rate/period/buffer relations) on a freshly opened substream.
 */
int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hw_constraints *constrs =
&runtime->hw_constraints;
	int k, err;

	/* start from "anything goes" for all masks and intervals */
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
		snd_mask_any(constrs_mask(constrs, k));
	}

	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
		snd_interval_any(constrs_interval(constrs, k));
	}

	/* these parameters are inherently integral */
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));

	/* FORMAT <-> SAMPLE_BITS */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
				  snd_pcm_hw_rule_format, NULL,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
				  snd_pcm_hw_rule_sample_bits, NULL,
				  SNDRV_PCM_HW_PARAM_FORMAT,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
	if (err < 0)
		return err;
	/* SAMPLE_BITS = FRAME_BITS / CHANNELS */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (err < 0)
		return err;
	/* FRAME_BITS = SAMPLE_BITS * CHANNELS = PERIOD_BYTES*8/PERIOD_SIZE = BUFFER_BYTES*8/BUFFER_SIZE */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  snd_pcm_hw_rule_mul, NULL,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
	if (err < 0)
		return err;
	/* CHANNELS = FRAME_BITS / SAMPLE_BITS */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
	if (err < 0)
		return err;
	/* RATE = SIZE * 1e6 / TIME(us), for both period and buffer */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
	if (err < 0)
		return err;
	/* PERIODS = BUFFER_SIZE / PERIOD_SIZE */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
	if (err < 0)
		return err;
	/* PERIOD_SIZE from buffer/periods, bytes, or time */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_PERIODS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_PERIOD_TIME,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	/* BUFFER_SIZE from period*periods, bytes, or time */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  snd_pcm_hw_rule_mul, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_PERIODS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	/* BYTES = SIZE * FRAME_BITS / 8 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				  snd_pcm_hw_rule_muldivk, (void*) 8,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				  snd_pcm_hw_rule_muldivk, (void*) 8,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	/* TIME(us) = SIZE * 1e6 / RATE */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	return 0;
}

/*
 * Apply the driver-declared snd_pcm_hardware limits (access, formats,
 * channels, rates, sizes) as constraints on the runtime.
 */
int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hardware *hw = &runtime->hw;
	int err;
	unsigned int mask = 0;

	/* translate hw->info flags into the ACCESS bitmask */
	if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
		mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
	if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
		mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
	if (hw->info & SNDRV_PCM_INFO_MMAP) {
		if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
		if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
		if (hw->info & SNDRV_PCM_INFO_COMPLEX)
			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
	}
	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
					   hw->channels_min, hw->channels_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
					   hw->rate_min, hw->rate_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					   hw->period_bytes_min, hw->period_bytes_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
					   hw->periods_min, hw->periods_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   hw->period_bytes_min, hw->buffer_bytes_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				  snd_pcm_hw_rule_buffer_bytes_max, substream,
				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
	if (err < 0)
		return err;

	/* FIXME: remove */
	if (runtime->dma_bytes) {
		err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
		if (err < 0)
			return -EINVAL;
	}

	if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
		err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
					  snd_pcm_hw_rule_rate, hw,
					  SNDRV_PCM_HW_PARAM_RATE, -1);
		if (err < 0)
			return err;
	}

	/* FIXME: this belong to lowlevel */
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);

	return 0;
}

/* pcm_release hook installed on the first opener: drops any link */
static void pcm_release_private(struct snd_pcm_substream *substream)
{
	snd_pcm_unlink(substream);
}

/*
 * Drop one reference on the substream; on the last reference stop the
 * stream, free hw resources, close the lowlevel driver and detach.
 */
void snd_pcm_release_substream(struct snd_pcm_substream *substream)
{
	substream->ref_count--;
	if (substream->ref_count > 0)
		return;

	snd_pcm_drop(substream);
	if (substream->hw_opened) {
		if (substream->ops->hw_free != NULL)
			substream->ops->hw_free(substream);
		substream->ops->close(substream);
		substream->hw_opened = 0;
	}
	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if (substream->pcm_release) {
		substream->pcm_release(substream);
		substream->pcm_release = NULL;
	}
	snd_pcm_detach_substream(substream);
}

EXPORT_SYMBOL(snd_pcm_release_substream);

/*
 * Attach and open a substream of the given stream direction; on re-open of
 * an already-attached substream only the refcount is bumped.
 */
int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
			   struct file *file,
			   struct snd_pcm_substream **rsubstream)
{
	struct snd_pcm_substream *substream;
	int err;

	err = snd_pcm_attach_substream(pcm,
*file, struct snd_pcm *pcm, int stream)
{
	int err;
	struct snd_pcm_file *pcm_file;
	wait_queue_t wait;

	if (pcm == NULL) {
		err = -ENODEV;
		goto __error1;
	}
	err = snd_card_file_add(pcm->card, file);
	if (err < 0)
		goto __error1;
	if (!try_module_get(pcm->card->module)) {
		err = -EFAULT;
		goto __error2;
	}
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&pcm->open_wait, &wait);
	mutex_lock(&pcm->open_mutex);
	/* retry on -EAGAIN (substream busy) unless O_NONBLOCK */
	while (1) {
		err = snd_pcm_open_file(file, pcm, stream, &pcm_file);
		if (err >= 0)
			break;
		if (err == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK) {
				err = -EBUSY;
				break;
			}
		} else
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&pcm->open_mutex);
		schedule();
		mutex_lock(&pcm->open_mutex);
		if (pcm->card->shutdown) {
			err = -ENODEV;
			break;
		}
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
	}
	remove_wait_queue(&pcm->open_wait, &wait);
	mutex_unlock(&pcm->open_mutex);
	if (err < 0)
		goto __error;
	return err;

	/* error unwinding: module ref, card file entry */
      __error:
	module_put(pcm->card->module);
      __error2:
	snd_card_file_remove(pcm->card, file);
      __error1:
	return err;
}

/* release() for both playback and capture device files */
static int snd_pcm_release(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	struct snd_pcm_substream *substream;
	struct snd_pcm_file *pcm_file;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (snd_BUG_ON(!substream))
		return -ENXIO;
	pcm = substream->pcm;
	mutex_lock(&pcm->open_mutex);
	snd_pcm_release_substream(substream);
	kfree(pcm_file);
	mutex_unlock(&pcm->open_mutex);
	wake_up(&pcm->open_wait);	/* let a blocked opener retry */
	module_put(pcm->card->module);
	snd_card_file_remove(pcm->card, file);
	return 0;
}

/*
 * Move the playback appl_ptr backwards by up to 'frames' (bounded by the
 * queued hw_avail); returns the number of frames rewound or a negative error.
 */
static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
						 snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t appl_ptr;
	snd_pcm_sframes_t ret;
	snd_pcm_sframes_t hw_avail;

	if (frames == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
		break;
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_RUNNING:
		if
(snd_pcm_update_hw_ptr(substream) >= 0)
			break;
		/* Fall through */
	case SNDRV_PCM_STATE_XRUN:
		ret = -EPIPE;
		goto __end;
	case SNDRV_PCM_STATE_SUSPENDED:
		ret = -ESTRPIPE;
		goto __end;
	default:
		ret = -EBADFD;
		goto __end;
	}

	hw_avail = snd_pcm_playback_hw_avail(runtime);
	if (hw_avail <= 0) {
		ret = 0;
		goto __end;
	}
	if (frames > (snd_pcm_uframes_t)hw_avail)
		frames = hw_avail;
	appl_ptr = runtime->control->appl_ptr - frames;
	if (appl_ptr < 0)
		appl_ptr += runtime->boundary;	/* wrap around the boundary */
	runtime->control->appl_ptr = appl_ptr;
	ret = frames;
 __end:
	snd_pcm_stream_unlock_irq(substream);
	return ret;
}

/*
 * Move the capture appl_ptr backwards by up to 'frames' (bounded by
 * hw_avail); returns frames rewound or a negative error.
 */
static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream,
						snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t appl_ptr;
	snd_pcm_sframes_t ret;
	snd_pcm_sframes_t hw_avail;

	if (frames == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_DRAINING:
		break;
	case SNDRV_PCM_STATE_RUNNING:
		if (snd_pcm_update_hw_ptr(substream) >= 0)
			break;
		/* Fall through */
	case SNDRV_PCM_STATE_XRUN:
		ret = -EPIPE;
		goto __end;
	case SNDRV_PCM_STATE_SUSPENDED:
		ret = -ESTRPIPE;
		goto __end;
	default:
		ret = -EBADFD;
		goto __end;
	}

	hw_avail = snd_pcm_capture_hw_avail(runtime);
	if (hw_avail <= 0) {
		ret = 0;
		goto __end;
	}
	if (frames > (snd_pcm_uframes_t)hw_avail)
		frames = hw_avail;
	appl_ptr = runtime->control->appl_ptr - frames;
	if (appl_ptr < 0)
		appl_ptr += runtime->boundary;
	runtime->control->appl_ptr = appl_ptr;
	ret = frames;
 __end:
	snd_pcm_stream_unlock_irq(substream);
	return ret;
}

/*
 * Advance the playback appl_ptr by up to 'frames' (bounded by the writable
 * avail); returns frames skipped or a negative error.
 */
static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream,
						  snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t appl_ptr;
	snd_pcm_sframes_t ret;
	snd_pcm_sframes_t avail;

	if (frames == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		break;
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_RUNNING:
		if (snd_pcm_update_hw_ptr(substream) >= 0)
			break;
		/* Fall through */
	case SNDRV_PCM_STATE_XRUN:
		ret = -EPIPE;
		goto __end;
	case SNDRV_PCM_STATE_SUSPENDED:
		ret = -ESTRPIPE;
		goto __end;
	default:
		ret = -EBADFD;
		goto __end;
	}

	avail = snd_pcm_playback_avail(runtime);
	if (avail <= 0) {
		ret = 0;
		goto __end;
	}
	if (frames > (snd_pcm_uframes_t)avail)
		frames = avail;
	appl_ptr = runtime->control->appl_ptr + frames;
	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
		appl_ptr -= runtime->boundary;
	runtime->control->appl_ptr = appl_ptr;
	ret = frames;
 __end:
	snd_pcm_stream_unlock_irq(substream);
	return ret;
}

/*
 * Advance the capture appl_ptr by up to 'frames' (bounded by the readable
 * avail); returns frames skipped or a negative error.
 */
static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *substream,
						 snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t appl_ptr;
	snd_pcm_sframes_t ret;
	snd_pcm_sframes_t avail;

	if (frames == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		break;
	case SNDRV_PCM_STATE_RUNNING:
		if (snd_pcm_update_hw_ptr(substream) >= 0)
			break;
		/* Fall through */
	case SNDRV_PCM_STATE_XRUN:
		ret = -EPIPE;
		goto __end;
	case SNDRV_PCM_STATE_SUSPENDED:
		ret = -ESTRPIPE;
		goto __end;
	default:
		ret = -EBADFD;
		goto __end;
	}

	avail = snd_pcm_capture_avail(runtime);
	if (avail <= 0) {
		ret = 0;
		goto __end;
	}
	if (frames > (snd_pcm_uframes_t)avail)
		frames = avail;
	appl_ptr = runtime->control->appl_ptr + frames;
	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
		appl_ptr -= runtime->boundary;
	runtime->control->appl_ptr = appl_ptr;
	ret = frames;
 __end:
	snd_pcm_stream_unlock_irq(substream);
	return ret;
}

/*
 * Force a refresh of the hw pointer from the hardware.  DRAINING is valid
 * only for playback.
 */
static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_DRAINING:
		if
(substream->stream == SNDRV_PCM_STREAM_CAPTURE) goto __badfd; case SNDRV_PCM_STATE_RUNNING: if ((err = snd_pcm_update_hw_ptr(substream)) < 0) break; /* Fall through */ case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_SUSPENDED: err = 0; break; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; break; default: __badfd: err = -EBADFD; break; } snd_pcm_stream_unlock_irq(substream); return err; } static int snd_pcm_delay(struct snd_pcm_substream *substream, snd_pcm_sframes_t __user *res) { struct snd_pcm_runtime *runtime = substream->runtime; int err; snd_pcm_sframes_t n = 0; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_DRAINING: if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) goto __badfd; case SNDRV_PCM_STATE_RUNNING: if ((err = snd_pcm_update_hw_ptr(substream)) < 0) break; /* Fall through */ case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_SUSPENDED: err = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) n = snd_pcm_playback_hw_avail(runtime); else n = snd_pcm_capture_avail(runtime); n += runtime->delay; break; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; break; default: __badfd: err = -EBADFD; break; } snd_pcm_stream_unlock_irq(substream); if (!err) if (put_user(n, res)) err = -EFAULT; return err; } static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, struct snd_pcm_sync_ptr __user *_sync_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_sync_ptr sync_ptr; volatile struct snd_pcm_mmap_status *status; volatile struct snd_pcm_mmap_control *control; int err; memset(&sync_ptr, 0, sizeof(sync_ptr)); if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags))) return -EFAULT; if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control))) return -EFAULT; status = runtime->status; control = runtime->control; if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) { err = snd_pcm_hwsync(substream); if (err < 0) return err; } 
snd_pcm_stream_lock_irq(substream); if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) control->appl_ptr = sync_ptr.c.control.appl_ptr; else sync_ptr.c.control.appl_ptr = control->appl_ptr; if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) control->avail_min = sync_ptr.c.control.avail_min; else sync_ptr.c.control.avail_min = control->avail_min; sync_ptr.s.status.state = status->state; sync_ptr.s.status.hw_ptr = status->hw_ptr; sync_ptr.s.status.tstamp = status->tstamp; sync_ptr.s.status.suspended_state = status->suspended_state; snd_pcm_stream_unlock_irq(substream); if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) return -EFAULT; return 0; } static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg) { struct snd_pcm_runtime *runtime = substream->runtime; int arg; if (get_user(arg, _arg)) return -EFAULT; if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST) return -EINVAL; runtime->tstamp_type = SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY; if (arg == SNDRV_PCM_TSTAMP_TYPE_MONOTONIC) runtime->tstamp_type = SNDRV_PCM_TSTAMP_TYPE_MONOTONIC; return 0; } static int snd_pcm_common_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { switch (cmd) { case SNDRV_PCM_IOCTL_PVERSION: return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? 
-EFAULT : 0;
	case SNDRV_PCM_IOCTL_INFO:
		return snd_pcm_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
		return 0;
	case SNDRV_PCM_IOCTL_TTSTAMP:
		return snd_pcm_tstamp(substream, arg);
	case SNDRV_PCM_IOCTL_HW_REFINE:
		return snd_pcm_hw_refine_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_FREE:
		return snd_pcm_hw_free(substream);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_STATUS:
		return snd_pcm_status_user(substream, arg);
	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
		return snd_pcm_channel_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, file);
	case SNDRV_PCM_IOCTL_RESET:
		return snd_pcm_reset(substream);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING);
	case SNDRV_PCM_IOCTL_LINK:
		/* arg carries the fd of the substream to link with */
		return snd_pcm_link(substream, (int)(unsigned long) arg);
	case SNDRV_PCM_IOCTL_UNLINK:
		return snd_pcm_unlink(substream);
	case SNDRV_PCM_IOCTL_RESUME:
		return snd_pcm_resume(substream);
	case SNDRV_PCM_IOCTL_XRUN:
		return snd_pcm_xrun(substream);
	case SNDRV_PCM_IOCTL_HWSYNC:
		return snd_pcm_hwsync(substream);
	case SNDRV_PCM_IOCTL_DELAY:
		return snd_pcm_delay(substream, arg);
	case SNDRV_PCM_IOCTL_SYNC_PTR:
		return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
		return snd_pcm_hw_refine_old_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
		return snd_pcm_hw_params_old_user(substream, arg);
#endif
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, file);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	/*add by qiuen for volume*/
	case SNDRV_PCM_IOCTL_VOL:	/* vendor extension (Rockchip) */
		snd_pcm_vol(substream, (int)(unsigned long)arg);
		return 0;
	/**********end***********/
	case SNDRV_PCM_IOCTL_PAUSE:
	{
		int res;
		snd_pcm_stream_lock_irq(substream);
		res = snd_pcm_pause(substream, (int)(unsigned long)arg);
		snd_pcm_stream_unlock_irq(substream);
		return res;
	}
	}
	snd_printd("unknown ioctl = 0x%x\n", cmd);
	return -ENOTTY;
}

#ifdef CONFIG_FB_MIRRORING
/* vendor hook: taps playback data for display mirroring */
int (*audio_data_to_mirroring)(void* data,int size,int channel) = NULL;
EXPORT_SYMBOL(audio_data_to_mirroring);
#endif

/*
 * ioctl dispatcher for playback substreams: handles the write/rewind/
 * forward transfer commands here, delegates the rest to the common path.
 */
static int snd_pcm_playback_ioctl1(struct file *file,
				   struct snd_pcm_substream *substream,
				   unsigned int cmd, void __user *arg)
{
	if (snd_BUG_ON(!substream))
		return -ENXIO;
	if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
		return -EINVAL;
	switch (cmd) {
	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
	{
		struct snd_xferi xferi;
		struct snd_xferi __user *_xferi = arg;
		struct snd_pcm_runtime *runtime = substream->runtime;
		snd_pcm_sframes_t result;
		if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
			return -EBADFD;
		if (put_user(0, &_xferi->result))
			return -EFAULT;
		if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
			return -EFAULT;
#ifdef CONFIG_FB_MIRRORING
		/* size assumes 16-bit stereo (4 bytes/frame) — TODO confirm */
		if(audio_data_to_mirroring!=NULL)
			audio_data_to_mirroring(xferi.buf, xferi.frames*4,2);
#endif
		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
		__put_user(result, &_xferi->result);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
	{
		struct snd_xfern xfern;
		struct snd_xfern __user *_xfern = arg;
		struct snd_pcm_runtime *runtime = substream->runtime;
		void __user **bufs;
		snd_pcm_sframes_t result;
		if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
			return -EBADFD;
		if (runtime->channels > 128)
			return -EINVAL;
		if (put_user(0, &_xfern->result))
			return -EFAULT;
		if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
			return -EFAULT;
		/* copy per-channel buffer pointer array from user space */
		bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
		if (IS_ERR(bufs))
			return PTR_ERR(bufs);
		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
		kfree(bufs);
		__put_user(result, &_xfern->result);
		return result < 0 ?
result : 0;
	}
	case SNDRV_PCM_IOCTL_REWIND:
	{
		snd_pcm_uframes_t frames;
		snd_pcm_uframes_t __user *_frames = arg;
		snd_pcm_sframes_t result;
		if (get_user(frames, _frames))
			return -EFAULT;
		if (put_user(0, _frames))
			return -EFAULT;
		result = snd_pcm_playback_rewind(substream, frames);
		__put_user(result, _frames);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_FORWARD:
	{
		snd_pcm_uframes_t frames;
		snd_pcm_uframes_t __user *_frames = arg;
		snd_pcm_sframes_t result;
		if (get_user(frames, _frames))
			return -EFAULT;
		if (put_user(0, _frames))
			return -EFAULT;
		result = snd_pcm_playback_forward(substream, frames);
		__put_user(result, _frames);
		return result < 0 ? result : 0;
	}
	}
	return snd_pcm_common_ioctl1(file, substream, cmd, arg);
}

/*
 * ioctl dispatcher for capture substreams: handles the read/rewind/forward
 * transfer commands here, delegates the rest to the common path.
 */
static int snd_pcm_capture_ioctl1(struct file *file,
				  struct snd_pcm_substream *substream,
				  unsigned int cmd, void __user *arg)
{
	if (snd_BUG_ON(!substream))
		return -ENXIO;
	if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE))
		return -EINVAL;
	switch (cmd) {
	case SNDRV_PCM_IOCTL_READI_FRAMES:
	{
		struct snd_xferi xferi;
		struct snd_xferi __user *_xferi = arg;
		struct snd_pcm_runtime *runtime = substream->runtime;
		snd_pcm_sframes_t result;
		if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
			return -EBADFD;
		if (put_user(0, &_xferi->result))
			return -EFAULT;
		if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
			return -EFAULT;
		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
		__put_user(result, &_xferi->result);
		return result < 0 ?
result : 0;
	}
	case SNDRV_PCM_IOCTL_READN_FRAMES:
	{
		struct snd_xfern xfern;
		struct snd_xfern __user *_xfern = arg;
		struct snd_pcm_runtime *runtime = substream->runtime;
		void *bufs;
		snd_pcm_sframes_t result;
		if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
			return -EBADFD;
		if (runtime->channels > 128)
			return -EINVAL;
		if (put_user(0, &_xfern->result))
			return -EFAULT;
		if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
			return -EFAULT;
		/* copy per-channel buffer pointer array from user space */
		bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
		if (IS_ERR(bufs))
			return PTR_ERR(bufs);
		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
		kfree(bufs);
		__put_user(result, &_xfern->result);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_REWIND:
	{
		snd_pcm_uframes_t frames;
		snd_pcm_uframes_t __user *_frames = arg;
		snd_pcm_sframes_t result;
		if (get_user(frames, _frames))
			return -EFAULT;
		if (put_user(0, _frames))
			return -EFAULT;
		result = snd_pcm_capture_rewind(substream, frames);
		__put_user(result, _frames);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_FORWARD:
	{
		snd_pcm_uframes_t frames;
		snd_pcm_uframes_t __user *_frames = arg;
		snd_pcm_sframes_t result;
		if (get_user(frames, _frames))
			return -EFAULT;
		if (put_user(0, _frames))
			return -EFAULT;
		result = snd_pcm_capture_forward(substream, frames);
		__put_user(result, _frames);
		return result < 0 ?
result : 0; } } return snd_pcm_common_ioctl1(file, substream, cmd, arg); } static long snd_pcm_playback_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_pcm_file *pcm_file; pcm_file = file->private_data; if (((cmd >> 8) & 0xff) != 'A') return -ENOTTY; return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd, (void __user *)arg); } static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_pcm_file *pcm_file; pcm_file = file->private_data; if (((cmd >> 8) & 0xff) != 'A') return -ENOTTY; return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd, (void __user *)arg); } int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { mm_segment_t fs; int result; fs = snd_enter_user(); switch (substream->stream) { case SNDRV_PCM_STREAM_PLAYBACK: result = snd_pcm_playback_ioctl1(NULL, substream, cmd, (void __user *)arg); break; case SNDRV_PCM_STREAM_CAPTURE: result = snd_pcm_capture_ioctl1(NULL, substream, cmd, (void __user *)arg); break; default: result = -EINVAL; break; } snd_leave_user(fs); return result; } EXPORT_SYMBOL(snd_pcm_kernel_ioctl); static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count, loff_t * offset) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!frame_aligned(runtime, count)) return -EINVAL; count = bytes_to_frames(runtime, count); result = snd_pcm_lib_read(substream, buf, count); if (result > 0) result = frames_to_bytes(runtime, result); return result; } static ssize_t snd_pcm_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct 
snd_pcm_runtime *runtime; snd_pcm_sframes_t result; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!frame_aligned(runtime, count)) return -EINVAL; count = bytes_to_frames(runtime, count); result = snd_pcm_lib_write(substream, buf, count); if (result > 0) result = frames_to_bytes(runtime, result); return result; } static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; unsigned long i; void __user **bufs; snd_pcm_uframes_t frames; pcm_file = iocb->ki_filp->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (nr_segs > 1024 || nr_segs != runtime->channels) return -EINVAL; if (!frame_aligned(runtime, iov->iov_len)) return -EINVAL; frames = bytes_to_samples(runtime, iov->iov_len); bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL); if (bufs == NULL) return -ENOMEM; for (i = 0; i < nr_segs; ++i) bufs[i] = iov[i].iov_base; result = snd_pcm_lib_readv(substream, bufs, frames); if (result > 0) result = frames_to_bytes(runtime, result); kfree(bufs); return result; } static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t result; unsigned long i; void __user **bufs; snd_pcm_uframes_t frames; pcm_file = iocb->ki_filp->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if 
(nr_segs > 128 || nr_segs != runtime->channels || !frame_aligned(runtime, iov->iov_len)) return -EINVAL; frames = bytes_to_samples(runtime, iov->iov_len); bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL); if (bufs == NULL) return -ENOMEM; for (i = 0; i < nr_segs; ++i) bufs[i] = iov[i].iov_base; result = snd_pcm_lib_writev(substream, bufs, frames); if (result > 0) result = frames_to_bytes(runtime, result); kfree(bufs); return result; } static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; unsigned int mask; snd_pcm_uframes_t avail; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(substream); avail = snd_pcm_playback_avail(runtime); switch (runtime->status->state) { case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: if (avail >= runtime->control->avail_min) { mask = POLLOUT | POLLWRNORM; break; } /* Fall through */ case SNDRV_PCM_STATE_DRAINING: mask = 0; break; default: mask = POLLOUT | POLLWRNORM | POLLERR; break; } snd_pcm_stream_unlock_irq(substream); return mask; } static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait) { struct snd_pcm_file *pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; unsigned int mask; snd_pcm_uframes_t avail; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(substream); avail = snd_pcm_capture_avail(runtime); switch (runtime->status->state) { case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: if (avail >= runtime->control->avail_min) { mask = POLLIN | POLLRDNORM; 
break; } mask = 0; break; case SNDRV_PCM_STATE_DRAINING: if (avail > 0) { mask = POLLIN | POLLRDNORM; break; } /* Fall through */ default: mask = POLLIN | POLLRDNORM | POLLERR; break; } snd_pcm_stream_unlock_irq(substream); return mask; } /* * mmap support */ /* * Only on coherent architectures, we can mmap the status and the control records * for effcient data transfer. On others, we have to use HWSYNC ioctl... */ #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) /* * mmap status record */ static int snd_pcm_mmap_status_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; vmf->page = virt_to_page(runtime->status); get_page(vmf->page); return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_status = { .fault = snd_pcm_mmap_status_fault, }; static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { long size; if (!(area->vm_flags & VM_READ)) return -EINVAL; size = area->vm_end - area->vm_start; if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_status; area->vm_private_data = substream; area->vm_flags |= VM_RESERVED; return 0; } /* * mmap control record */ static int snd_pcm_mmap_control_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; vmf->page = virt_to_page(runtime->control); get_page(vmf->page); return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_control = { .fault = snd_pcm_mmap_control_fault, }; static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { long size; if (!(area->vm_flags & 
VM_READ)) return -EINVAL; size = area->vm_end - area->vm_start; if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_control; area->vm_private_data = substream; area->vm_flags |= VM_RESERVED; return 0; } #else /* ! coherent mmap */ /* * don't support mmap for status and control records. */ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { return -ENXIO; } static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { return -ENXIO; } #endif /* coherent mmap */ static inline struct page * snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) { void *vaddr = substream->runtime->dma_area + ofs; #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) return virt_to_page(CAC_ADDR(vaddr)); #endif #if defined(CONFIG_PPC32) && defined(CONFIG_NOT_COHERENT_CACHE) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) { dma_addr_t addr = substream->runtime->dma_addr + ofs; addr -= get_dma_offset(substream->dma_buffer.dev.dev); /* assume dma_handle set via pfn_to_phys() in * mm/dma-noncoherent.c */ return pfn_to_page(addr >> PAGE_SHIFT); } #endif return virt_to_page(vaddr); } /* * fault callback for mmapping a RAM page */ static int snd_pcm_mmap_data_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; unsigned long offset; struct page * page; size_t dma_bytes; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; offset = vmf->pgoff << PAGE_SHIFT; dma_bytes = PAGE_ALIGN(runtime->dma_bytes); if (offset > dma_bytes - PAGE_SIZE) return VM_FAULT_SIGBUS; if (substream->ops->page) page = substream->ops->page(substream, offset); else page = snd_pcm_default_page_ops(substream, offset); if (!page) 
return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_data = { .open = snd_pcm_mmap_data_open, .close = snd_pcm_mmap_data_close, }; static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { .open = snd_pcm_mmap_data_open, .close = snd_pcm_mmap_data_close, .fault = snd_pcm_mmap_data_fault, }; #ifndef ARCH_HAS_DMA_MMAP_COHERENT /* This should be defined / handled globally! */ #ifdef CONFIG_ARM #define ARCH_HAS_DMA_MMAP_COHERENT #endif #endif /* * mmap the DMA buffer on RAM */ static int snd_pcm_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area) { area->vm_flags |= VM_RESERVED; #ifdef ARCH_HAS_DMA_MMAP_COHERENT if (!substream->ops->page && substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) return dma_mmap_coherent(substream->dma_buffer.dev.dev, area, substream->runtime->dma_area, substream->runtime->dma_addr, area->vm_end - area->vm_start); #elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV && !plat_device_is_coherent(substream->dma_buffer.dev.dev)) area->vm_page_prot = pgprot_noncached(area->vm_page_prot); #endif /* ARCH_HAS_DMA_MMAP_COHERENT */ /* mmap with fault handler */ area->vm_ops = &snd_pcm_vm_ops_data_fault; return 0; } /* * mmap the DMA buffer on I/O memory area */ #if SNDRV_PCM_INFO_MMAP_IOMEM int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) { long size; unsigned long offset; area->vm_page_prot = pgprot_noncached(area->vm_page_prot); area->vm_flags |= VM_IO; size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; if (io_remap_pfn_range(area, area->vm_start, (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, size, area->vm_page_prot)) return -EAGAIN; return 0; } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); #endif /* SNDRV_PCM_INFO_MMAP */ /* * mmap DMA buffer */ int snd_pcm_mmap_data(struct snd_pcm_substream 
*substream, struct file *file, struct vm_area_struct *area) { struct snd_pcm_runtime *runtime; long size; unsigned long offset; size_t dma_bytes; int err; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (!(area->vm_flags & (VM_WRITE|VM_READ))) return -EINVAL; } else { if (!(area->vm_flags & VM_READ)) return -EINVAL; } runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) return -ENXIO; if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; dma_bytes = PAGE_ALIGN(runtime->dma_bytes); if ((size_t)size > dma_bytes) return -EINVAL; if (offset > dma_bytes - size) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_data; area->vm_private_data = substream; if (substream->ops->mmap) err = substream->ops->mmap(substream, area); else err = snd_pcm_default_mmap(substream, area); if (!err) atomic_inc(&substream->mmap_count); return err; } EXPORT_SYMBOL(snd_pcm_mmap_data); static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area) { struct snd_pcm_file * pcm_file; struct snd_pcm_substream *substream; unsigned long offset; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; offset = area->vm_pgoff << PAGE_SHIFT; switch (offset) { case SNDRV_PCM_MMAP_OFFSET_STATUS: if (pcm_file->no_compat_mmap) return -ENXIO; return snd_pcm_mmap_status(substream, file, area); case SNDRV_PCM_MMAP_OFFSET_CONTROL: if (pcm_file->no_compat_mmap) return -ENXIO; return snd_pcm_mmap_control(substream, file, area); default: return snd_pcm_mmap_data(substream, file, area); } return 0; } static int snd_pcm_fasync(int fd, struct file * file, int on) { struct snd_pcm_file * pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; pcm_file = file->private_data; substream = 
pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; return fasync_helper(fd, file, on, &runtime->fasync); } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT #include "pcm_compat.c" #else #define snd_pcm_ioctl_compat NULL #endif /* * To be removed helpers to keep binary compatibility */ #ifdef CONFIG_SND_SUPPORT_OLD_API #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5)) #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5)) static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params, struct snd_pcm_hw_params_old *oparams) { unsigned int i; memset(params, 0, sizeof(*params)); params->flags = oparams->flags; for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) params->masks[i].bits[0] = oparams->masks[i]; memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals)); params->rmask = __OLD_TO_NEW_MASK(oparams->rmask); params->cmask = __OLD_TO_NEW_MASK(oparams->cmask); params->info = oparams->info; params->msbits = oparams->msbits; params->rate_num = oparams->rate_num; params->rate_den = oparams->rate_den; params->fifo_size = oparams->fifo_size; } static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams, struct snd_pcm_hw_params *params) { unsigned int i; memset(oparams, 0, sizeof(*oparams)); oparams->flags = params->flags; for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) oparams->masks[i] = params->masks[i].bits[0]; memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals)); oparams->rmask = __NEW_TO_OLD_MASK(params->rmask); oparams->cmask = __NEW_TO_OLD_MASK(params->cmask); oparams->info = params->info; oparams->msbits = params->msbits; oparams->rate_num = params->rate_num; oparams->rate_den = params->rate_den; oparams->fifo_size = params->fifo_size; } static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams) { struct snd_pcm_hw_params *params; struct snd_pcm_hw_params_old *oparams = 
NULL; int err; params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); goto out; } snd_pcm_hw_convert_from_old_params(params, oparams); err = snd_pcm_hw_refine(substream, params); snd_pcm_hw_convert_to_old_params(oparams, params); if (copy_to_user(_oparams, oparams, sizeof(*oparams))) { if (!err) err = -EFAULT; } kfree(oparams); out: kfree(params); return err; } static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams) { struct snd_pcm_hw_params *params; struct snd_pcm_hw_params_old *oparams = NULL; int err; params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); goto out; } snd_pcm_hw_convert_from_old_params(params, oparams); err = snd_pcm_hw_params(substream, params); snd_pcm_hw_convert_to_old_params(oparams, params); if (copy_to_user(_oparams, oparams, sizeof(*oparams))) { if (!err) err = -EFAULT; } kfree(oparams); out: kfree(params); return err; } #endif /* CONFIG_SND_SUPPORT_OLD_API */ #ifndef CONFIG_MMU static unsigned long snd_pcm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct snd_pcm_file *pcm_file = file->private_data; struct snd_pcm_substream *substream = pcm_file->substream; struct snd_pcm_runtime *runtime = substream->runtime; unsigned long offset = pgoff << PAGE_SHIFT; switch (offset) { case SNDRV_PCM_MMAP_OFFSET_STATUS: return (unsigned long)runtime->status; case SNDRV_PCM_MMAP_OFFSET_CONTROL: return (unsigned long)runtime->control; default: return (unsigned long)runtime->dma_area + offset; } } #else # define snd_pcm_get_unmapped_area NULL #endif /* * Register section */ const struct file_operations snd_pcm_f_ops[2] = { { .owner = THIS_MODULE, .write = snd_pcm_write, .aio_write 
= snd_pcm_aio_write, .open = snd_pcm_playback_open, .release = snd_pcm_release, .llseek = no_llseek, .poll = snd_pcm_playback_poll, .unlocked_ioctl = snd_pcm_playback_ioctl, .compat_ioctl = snd_pcm_ioctl_compat, .mmap = snd_pcm_mmap, .fasync = snd_pcm_fasync, .get_unmapped_area = snd_pcm_get_unmapped_area, }, { .owner = THIS_MODULE, .read = snd_pcm_read, .aio_read = snd_pcm_aio_read, .open = snd_pcm_capture_open, .release = snd_pcm_release, .llseek = no_llseek, .poll = snd_pcm_capture_poll, .unlocked_ioctl = snd_pcm_capture_ioctl, .compat_ioctl = snd_pcm_ioctl_compat, .mmap = snd_pcm_mmap, .fasync = snd_pcm_fasync, .get_unmapped_area = snd_pcm_get_unmapped_area, } };
gpl-2.0
aosp/kernel
sound/soc/msm/msm7k-pcm.c
80
15473
/* linux/sound/soc/msm/msm7k-pcm.c * * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <asm/dma.h> #include <linux/dma-mapping.h> #include "msm-pcm.h" #define SND_DRIVER "snd_msm" #define MAX_PCM_DEVICES SNDRV_CARDS #define MAX_PCM_SUBSTREAMS 1 struct snd_msm { struct snd_card *card; struct snd_pcm *pcm; }; int copy_count; struct audio_locks the_locks; EXPORT_SYMBOL(the_locks); struct msm_volume msm_vol_ctl; EXPORT_SYMBOL(msm_vol_ctl); static unsigned convert_dsp_samp_index(unsigned index) { switch (index) { case 48000: return AUDREC_CMD_SAMP_RATE_INDX_48000; case 44100: return AUDREC_CMD_SAMP_RATE_INDX_44100; case 32000: return AUDREC_CMD_SAMP_RATE_INDX_32000; case 24000: return AUDREC_CMD_SAMP_RATE_INDX_24000; case 22050: return AUDREC_CMD_SAMP_RATE_INDX_22050; case 16000: return AUDREC_CMD_SAMP_RATE_INDX_16000; case 12000: return AUDREC_CMD_SAMP_RATE_INDX_12000; case 11025: return AUDREC_CMD_SAMP_RATE_INDX_11025; case 8000: return 
AUDREC_CMD_SAMP_RATE_INDX_8000; default: return AUDREC_CMD_SAMP_RATE_INDX_44100; } } static unsigned convert_samp_rate(unsigned hz) { switch (hz) { case 48000: return RPC_AUD_DEF_SAMPLE_RATE_48000; case 44100: return RPC_AUD_DEF_SAMPLE_RATE_44100; case 32000: return RPC_AUD_DEF_SAMPLE_RATE_32000; case 24000: return RPC_AUD_DEF_SAMPLE_RATE_24000; case 22050: return RPC_AUD_DEF_SAMPLE_RATE_22050; case 16000: return RPC_AUD_DEF_SAMPLE_RATE_16000; case 12000: return RPC_AUD_DEF_SAMPLE_RATE_12000; case 11025: return RPC_AUD_DEF_SAMPLE_RATE_11025; case 8000: return RPC_AUD_DEF_SAMPLE_RATE_8000; default: return RPC_AUD_DEF_SAMPLE_RATE_44100; } } static struct snd_pcm_hardware msm_pcm_playback_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED, .formats = USE_FORMATS, .rates = USE_RATE, .rate_min = USE_RATE_MIN, .rate_max = USE_RATE_MAX, .channels_min = USE_CHANNELS_MIN, .channels_max = USE_CHANNELS_MAX, .buffer_bytes_max = MAX_BUFFER_PLAYBACK_SIZE, .period_bytes_min = 64, .period_bytes_max = MAX_PERIOD_SIZE, .periods_min = USE_PERIODS_MIN, .periods_max = USE_PERIODS_MAX, .fifo_size = 0, }; static struct snd_pcm_hardware msm_pcm_capture_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED, .formats = USE_FORMATS, .rates = USE_RATE, .rate_min = USE_RATE_MIN, .rate_max = USE_RATE_MAX, .channels_min = USE_CHANNELS_MIN, .channels_max = USE_CHANNELS_MAX, .buffer_bytes_max = MAX_BUFFER_CAPTURE_SIZE, .period_bytes_min = CAPTURE_SIZE, .period_bytes_max = CAPTURE_SIZE, .periods_min = USE_PERIODS_MIN, .periods_max = USE_PERIODS_MAX, .fifo_size = 0, }; /* Conventional and unconventional sample rate supported */ static unsigned int supported_sample_rates[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(supported_sample_rates), .list = supported_sample_rates, .mask = 0, }; static void playback_event_handler(void *data) { struct msm_audio *prtd = data; 
snd_pcm_period_elapsed(prtd->playback_substream); } static void capture_event_handler(void *data) { struct msm_audio *prtd = data; snd_pcm_period_elapsed(prtd->capture_substream); } static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; prtd->pcm_buf_pos = 0; /* rate and channels are sent to audio driver */ prtd->out_sample_rate = runtime->rate; prtd->out_channel_mode = runtime->channels; return 0; } static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; struct audmgr_config cfg; int rc; prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; prtd->pcm_buf_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = convert_samp_rate(runtime->rate); prtd->samp_rate_index = convert_dsp_samp_index(runtime->rate); prtd->channel_mode = (runtime->channels - 1); prtd->buffer_size = prtd->channel_mode ? 
STEREO_DATA_SIZE : \ MONO_DATA_SIZE; if (prtd->enabled == 1) return 0; prtd->type = AUDREC_CMD_TYPE_0_INDEX_WAV; cfg.tx_rate = convert_samp_rate(runtime->rate); cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; cfg.def_method = RPC_AUD_DEF_METHOD_RECORD; cfg.codec = RPC_AUD_DEF_CODEC_PCM; cfg.snd_method = RPC_SND_METHOD_MIDI; rc = audmgr_enable(&prtd->audmgr, &cfg); if (rc < 0) return rc; if (msm_adsp_enable(prtd->audpre)) { audmgr_disable(&prtd->audmgr); return -ENODEV; } if (msm_adsp_enable(prtd->audrec)) { msm_adsp_disable(prtd->audpre); audmgr_disable(&prtd->audmgr); return -ENODEV; } prtd->enabled = 1; alsa_rec_dsp_enable(prtd, 1); return 0; } static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: break; default: ret = -EINVAL; } return ret; } static snd_pcm_uframes_t msm_pcm_playback_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; if (prtd->pcm_irq_pos == prtd->pcm_size) prtd->pcm_irq_pos = 0; return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); } static int msm_pcm_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int rc = 0, rc1 = 0, rc2 = 0; int fbytes = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = substream->runtime->private_data; int monofbytes = 0; char *bufferp = NULL; fbytes = frames_to_bytes(runtime, frames); monofbytes = fbytes / 2; if (runtime->channels == 2) { rc = alsa_buffer_read(prtd, buf, fbytes, NULL); } else { bufferp = buf; rc1 = alsa_buffer_read(prtd, bufferp, monofbytes, NULL); bufferp = buf + monofbytes ; rc2 = alsa_buffer_read(prtd, bufferp, monofbytes, NULL); rc = rc1 + rc2; } 
prtd->pcm_buf_pos += fbytes; return rc; } static snd_pcm_uframes_t msm_pcm_capture_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); } static int msm_pcm_capture_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; alsa_audrec_disable(prtd); audmgr_close(&prtd->audmgr); msm_adsp_put(prtd->audrec); msm_adsp_put(prtd->audpre); kfree(prtd); return 0; } struct msm_audio_event_callbacks snd_msm_audio_ops = { .playback = playback_event_handler, .capture = capture_event_handler, }; static int msm_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd; int ret = 0; prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL); if (prtd == NULL) { ret = -ENOMEM; return ret; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { msm_vol_ctl.update = 1; /* Update Volume, with Cached value */ runtime->hw = msm_pcm_playback_hardware; prtd->dir = SNDRV_PCM_STREAM_PLAYBACK; prtd->playback_substream = substream; prtd->eos_ack = 0; } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { runtime->hw = msm_pcm_capture_hardware; prtd->dir = SNDRV_PCM_STREAM_CAPTURE; prtd->capture_substream = substream; } ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) goto out; /* Ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto out; prtd->ops = &snd_msm_audio_ops; prtd->out[0].used = BUF_INVALID_LEN; prtd->out_head = 1; /* point to second buffer on startup */ runtime->private_data = prtd; ret = alsa_adsp_configure(prtd); if (ret) goto out; copy_count = 0; return 0; out: kfree(prtd); return ret; } static int 
msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int rc = 1; int fbytes = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; fbytes = frames_to_bytes(runtime, frames); rc = alsa_send_buffer(prtd, buf, fbytes, NULL); ++copy_count; prtd->pcm_buf_pos += fbytes; if (copy_count == 1) { mutex_lock(&the_locks.lock); alsa_audio_configure(prtd); mutex_unlock(&the_locks.lock); } if ((prtd->running) && (msm_vol_ctl.update)) { rc = msm_audio_volume_update(PCMPLAYBACK_DECODERID, msm_vol_ctl.volume, msm_vol_ctl.pan); msm_vol_ctl.update = 0; } return rc; } static int msm_pcm_playback_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int rc = 0; pr_debug("%s()\n", __func__); /* pcm dmamiss message is sent continously * when decoder is starved so no race * condition concern */ if (prtd->enabled) rc = wait_event_interruptible(the_locks.eos_wait, prtd->eos_ack); alsa_audio_disable(prtd); audmgr_close(&prtd->audmgr); kfree(prtd); return 0; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_close(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_close(substream); return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = 
msm_pcm_playback_prepare(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_prepare(substream); return ret; } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { snd_pcm_uframes_t ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_pointer(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_pointer(substream); return ret; } int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; if (substream->pcm->device & 1) { runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED; runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED; } return 0; } static struct snd_pcm_ops msm_pcm_ops = { .open = msm_pcm_open, .copy = msm_pcm_copy, .hw_params = msm_pcm_hw_params, .close = msm_pcm_close, .ioctl = snd_pcm_lib_ioctl, .prepare = msm_pcm_prepare, .trigger = msm_pcm_trigger, .pointer = msm_pcm_pointer, }; static int msm_pcm_remove(struct platform_device *devptr) { #if 0 struct snd_soc_device *socdev = platform_get_drvdata(devptr); snd_soc_free_pcms(socdev); kfree(socdev->codec); platform_set_drvdata(devptr, NULL); return 0; #endif printk("DISABLED %s\n", __func__); return -1; } static int pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size; if (!stream) size = PLAYBACK_DMASZ; else size = CAPTURE_DMASZ; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_coherent(pcm->card->dev, size, &buf->addr, GFP_KERNEL); if (!buf->area) return -ENOMEM; buf->bytes = size; return 0; } static void msm_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = 
pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_coherent(pcm->card->dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; } } static int msm_pcm_new(struct snd_card *card, struct snd_soc_dai *codec_dai, struct snd_pcm *pcm) { int ret; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_32BIT_MASK; ret = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_PLAYBACK, 1); if (ret) return ret; ret = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_CAPTURE, 1); if (ret) return ret; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &msm_pcm_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &msm_pcm_ops); ret = pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) return ret; ret = pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) msm_pcm_free_dma_buffers(pcm); return ret; } struct snd_soc_platform msm_soc_platform = { .name = "msm-audio", .remove = msm_pcm_remove, .pcm_ops = &msm_pcm_ops, .pcm_new = msm_pcm_new, .pcm_free = msm_pcm_free_dma_buffers, }; EXPORT_SYMBOL(msm_soc_platform); static int __init msm_soc_platform_init(void) { return snd_soc_register_platform(&msm_soc_platform); } module_init(msm_soc_platform_init); static void __exit msm_soc_platform_exit(void) { snd_soc_unregister_platform(&msm_soc_platform); } module_exit(msm_soc_platform_exit); MODULE_DESCRIPTION("PCM module platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
LonoCloud/coreos-linux
net/mac80211/ibss.c
80
48530
/* * IBSS mode implementation * Copyright 2003-2008, Jouni Malinen <j@w1.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2009, Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #define IEEE80211_SCAN_INTERVAL (2 * HZ) #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) #define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ) #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 static struct beacon_data * ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, const int beacon_int, const u32 basic_rates, const u16 capability, u64 tsf, struct cfg80211_chan_def *chandef, bool *have_higher_than_11mbit, struct cfg80211_csa_settings *csa_settings) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; int rates_n = 0, i, ri; struct ieee80211_mgmt *mgmt; u8 *pos; struct ieee80211_supported_band *sband; u32 rate_flags, rates = 0, rates_added = 0; struct beacon_data *presp; int frame_len; int shift; /* Build IBSS probe response */ frame_len = sizeof(struct ieee80211_hdr_3addr) + 12 /* struct ieee80211_mgmt.u.beacon */ + 2 + IEEE80211_MAX_SSID_LEN /* max SSID */ + 2 + 8 /* max Supported Rates */ + 3 /* max DS params */ + 4 /* IBSS params */ + 5 /* Channel Switch Announcement */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct 
ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + ifibss->ie_len; presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); if (!presp) return NULL; presp->head = (void *)(presp + 1); mgmt = (void *) presp->head; mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); eth_broadcast_addr(mgmt->da); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); mgmt->u.beacon.timestamp = cpu_to_le64(tsf); mgmt->u.beacon.capab_info = cpu_to_le16(capability); pos = (u8 *)mgmt + offsetof(struct ieee80211_mgmt, u.beacon.variable); *pos++ = WLAN_EID_SSID; *pos++ = ifibss->ssid_len; memcpy(pos, ifibss->ssid, ifibss->ssid_len); pos += ifibss->ssid_len; sband = local->hw.wiphy->bands[chandef->chan->band]; rate_flags = ieee80211_chandef_rate_flags(chandef); shift = ieee80211_chandef_get_shift(chandef); rates_n = 0; if (have_higher_than_11mbit) *have_higher_than_11mbit = false; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (sband->bitrates[i].bitrate > 110 && have_higher_than_11mbit) *have_higher_than_11mbit = true; rates |= BIT(i); rates_n++; } *pos++ = WLAN_EID_SUPP_RATES; *pos++ = min_t(int, 8, rates_n); for (ri = 0; ri < sband->n_bitrates; ri++) { int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5 * (1 << shift)); u8 basic = 0; if (!(rates & BIT(ri))) continue; if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; if (++rates_added == 8) { ri++; /* continue at next rate for EXT_SUPP_RATES */ break; } } if (sband->band == IEEE80211_BAND_2GHZ) { *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = ieee80211_frequency_to_channel( chandef->chan->center_freq); } *pos++ = WLAN_EID_IBSS_PARAMS; *pos++ = 2; /* FIX: set ATIM window based on scan results */ *pos++ = 0; *pos++ = 0; if (csa_settings) { *pos++ = WLAN_EID_CHANNEL_SWITCH; *pos++ = 3; *pos++ = csa_settings->block_tx ? 
1 : 0; *pos++ = ieee80211_frequency_to_channel( csa_settings->chandef.chan->center_freq); presp->csa_counter_offsets[0] = (pos - presp->head); *pos++ = csa_settings->count; } /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */ if (rates_n > 8) { *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = rates_n - 8; for (; ri < sband->n_bitrates; ri++) { int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5 * (1 << shift)); u8 basic = 0; if (!(rates & BIT(ri))) continue; if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; } } if (ifibss->ie_len) { memcpy(pos, ifibss->ie, ifibss->ie_len); pos += ifibss->ie_len; } /* add HT capability and information IEs */ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT && chandef->width != NL80211_CHAN_WIDTH_5 && chandef->width != NL80211_CHAN_WIDTH_10 && sband->ht_cap.ht_supported) { struct ieee80211_sta_ht_cap ht_cap; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); /* * Note: According to 802.11n-2009 9.13.3.1, HT Protection * field and RIFS Mode are reserved in IBSS mode, therefore * keep them at 0 */ pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, chandef, 0); } if (local->hw.queues >= IEEE80211_NUM_ACS) pos = ieee80211_add_wmm_info_ie(pos, 0); /* U-APSD not in use */ presp->head_len = pos - presp->head; if (WARN_ON(presp->head_len > frame_len)) goto error; return presp; error: kfree(presp); return NULL; } static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const int beacon_int, struct cfg80211_chan_def *req_chandef, const u32 basic_rates, const u16 capability, u64 tsf, bool creator) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct cfg80211_bss *bss; u32 bss_change; struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; struct beacon_data *presp; enum 
nl80211_bss_scan_width scan_width; bool have_higher_than_11mbit; bool radar_required; int err; sdata_assert_lock(sdata); /* Reset own TSF to allow time synchronization work. */ drv_reset_tsf(local, sdata); if (!ether_addr_equal(ifibss->bssid, bssid)) sta_info_flush(sdata); /* if merging, indicate to driver that we leave the old IBSS */ if (sdata->vif.bss_conf.ibss_joined) { sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; netif_carrier_off(sdata->dev); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS | BSS_CHANGED_BEACON_ENABLED); drv_leave_ibss(local, sdata); } presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(ifibss->presp, NULL); if (presp) kfree_rcu(presp, rcu_head); sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; /* make a copy of the chandef, it could be modified below. */ chandef = *req_chandef; chan = chandef.chan; if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef, NL80211_IFTYPE_ADHOC)) { if (chandef.width == NL80211_CHAN_WIDTH_5 || chandef.width == NL80211_CHAN_WIDTH_10 || chandef.width == NL80211_CHAN_WIDTH_20_NOHT || chandef.width == NL80211_CHAN_WIDTH_20) { sdata_info(sdata, "Failed to join IBSS, beacons forbidden\n"); return; } chandef.width = NL80211_CHAN_WIDTH_20; chandef.center_freq1 = chan->center_freq; /* check again for downgraded chandef */ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef, NL80211_IFTYPE_ADHOC)) { sdata_info(sdata, "Failed to join IBSS, beacons forbidden\n"); return; } } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &chandef, NL80211_IFTYPE_ADHOC); if (err < 0) { sdata_info(sdata, "Failed to join IBSS, invalid chandef\n"); return; } if (err > 0 && !ifibss->userspace_handles_dfs) { sdata_info(sdata, "Failed to join IBSS, DFS channel without control program\n"); return; } radar_required = err; mutex_lock(&local->mtx); if 
(ieee80211_vif_use_channel(sdata, &chandef, ifibss->fixed_channel ? IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE)) { sdata_info(sdata, "Failed to join IBSS, no channel context\n"); mutex_unlock(&local->mtx); return; } sdata->radar_required = radar_required; mutex_unlock(&local->mtx); memcpy(ifibss->bssid, bssid, ETH_ALEN); presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates, capability, tsf, &chandef, &have_higher_than_11mbit, NULL); if (!presp) return; rcu_assign_pointer(ifibss->presp, presp); mgmt = (void *)presp->head; sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.beacon_int = beacon_int; sdata->vif.bss_conf.basic_rates = basic_rates; sdata->vif.bss_conf.ssid_len = ifibss->ssid_len; memcpy(sdata->vif.bss_conf.ssid, ifibss->ssid, ifibss->ssid_len); bss_change = BSS_CHANGED_BEACON_INT; bss_change |= ieee80211_reset_erp_info(sdata); bss_change |= BSS_CHANGED_BSSID; bss_change |= BSS_CHANGED_BEACON; bss_change |= BSS_CHANGED_BEACON_ENABLED; bss_change |= BSS_CHANGED_BASIC_RATES; bss_change |= BSS_CHANGED_HT; bss_change |= BSS_CHANGED_IBSS; bss_change |= BSS_CHANGED_SSID; /* * In 5 GHz/802.11a, we can always use short slot time. * (IEEE 802.11-2012 18.3.8.7) * * In 2.4GHz, we must always use long slots in IBSS for compatibility * reasons. * (IEEE 802.11-2012 19.4.5) * * HT follows these specifications (IEEE 802.11-2012 20.3.18) */ sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ; bss_change |= BSS_CHANGED_ERP_SLOT; /* cf. 
IEEE 802.11 9.2.12 */ if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; ieee80211_set_wmm_default(sdata, true); sdata->vif.bss_conf.ibss_joined = true; sdata->vif.bss_conf.ibss_creator = creator; err = drv_join_ibss(local, sdata); if (err) { sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; sdata->vif.bss_conf.ssid_len = 0; RCU_INIT_POINTER(ifibss->presp, NULL); kfree_rcu(presp, rcu_head); mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n", err); return; } ieee80211_bss_info_change_notify(sdata, bss_change); ifibss->state = IEEE80211_IBSS_MLME_JOINED; mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); scan_width = cfg80211_chandef_to_scan_width(&chandef); bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan, scan_width, mgmt, presp->head_len, 0, GFP_KERNEL); cfg80211_put_bss(local->hw.wiphy, bss); netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL); } static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss *bss) { struct cfg80211_bss *cbss = container_of((void *)bss, struct cfg80211_bss, priv); struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; u32 basic_rates; int i, j; u16 beacon_int = cbss->beacon_interval; const struct cfg80211_bss_ies *ies; enum nl80211_channel_type chan_type; u64 tsf; u32 rate_flags; int shift; sdata_assert_lock(sdata); if (beacon_int < 10) beacon_int = 10; switch (sdata->u.ibss.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); cfg80211_chandef_create(&chandef, cbss->channel, chan_type); 
break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: cfg80211_chandef_create(&chandef, cbss->channel, NL80211_CHAN_WIDTH_20_NOHT); chandef.width = sdata->u.ibss.chandef.width; break; default: /* fall back to 20 MHz for unsupported modes */ cfg80211_chandef_create(&chandef, cbss->channel, NL80211_CHAN_WIDTH_20_NOHT); break; } sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef); shift = ieee80211_vif_get_shift(&sdata->vif); basic_rates = 0; for (i = 0; i < bss->supp_rates_len; i++) { int rate = bss->supp_rates[i] & 0x7f; bool is_basic = !!(bss->supp_rates[i] & 0x80); for (j = 0; j < sband->n_bitrates; j++) { int brate; if ((rate_flags & sband->bitrates[j].flags) != rate_flags) continue; brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, 5 * (1 << shift)); if (brate == rate) { if (is_basic) basic_rates |= BIT(j); break; } } } rcu_read_lock(); ies = rcu_dereference(cbss->ies); tsf = ies->tsf; rcu_read_unlock(); __ieee80211_sta_join_ibss(sdata, cbss->bssid, beacon_int, &chandef, basic_rates, cbss->capability, tsf, false); } int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct beacon_data *presp, *old_presp; struct cfg80211_bss *cbss; const struct cfg80211_bss_ies *ies; u16 capability; u64 tsf; int ret = 0; sdata_assert_lock(sdata); capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY, capability); if (WARN_ON(!cbss)) { ret = -EINVAL; goto out; } rcu_read_lock(); ies = rcu_dereference(cbss->ies); tsf = ies->tsf; rcu_read_unlock(); cfg80211_put_bss(sdata->local->hw.wiphy, cbss); old_presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); presp = 
ieee80211_ibss_build_presp(sdata, sdata->vif.bss_conf.beacon_int, sdata->vif.bss_conf.basic_rates, capability, tsf, &ifibss->chandef, NULL, csa_settings); if (!presp) { ret = -ENOMEM; goto out; } rcu_assign_pointer(ifibss->presp, presp); if (old_presp) kfree_rcu(old_presp, rcu_head); return BSS_CHANGED_BEACON; out: return ret; } int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct cfg80211_bss *cbss; int err, changed = 0; u16 capability; sdata_assert_lock(sdata); /* update cfg80211 bss information with the new channel */ if (!is_zero_ether_addr(ifibss->bssid)) { capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY, capability); /* XXX: should not really modify cfg80211 data */ if (cbss) { cbss->channel = sdata->csa_chandef.chan; cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } ifibss->chandef = sdata->csa_chandef; /* generate the beacon */ err = ieee80211_ibss_csa_beacon(sdata, NULL); if (err < 0) return err; changed |= err; return changed; } void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; cancel_work_sync(&ifibss->csa_connection_drop_work); } static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 addr[ETH_ALEN]; memcpy(addr, sta->sta.addr, ETH_ALEN); ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr); sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); /* authorize the station only if the network is not RSN protected. 
If * not wait for the userspace to authorize it */ if (!sta->sdata->u.ibss.control_port) sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); rate_control_rate_init(sta); /* If it fails, maybe we raced another insertion? */ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); return sta; } static struct sta_info * ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) __acquires(RCU) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; enum nl80211_bss_scan_width scan_width; int band; /* * XXX: Consider removing the least recently used entry and * allow new one to be added. */ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", sdata->name, addr); rcu_read_lock(); return NULL; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) { rcu_read_lock(); return NULL; } if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); return NULL; } rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) return NULL; band = chanctx_conf->def.chan->band; scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_KERNEL); if (!sta) { rcu_read_lock(); return NULL; } sta->last_rx = jiffies; /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); return ieee80211_ibss_finish_sta(sta); } static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; int active = 0; struct sta_info *sta; sdata_assert_lock(sdata); rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sta->sdata == 
sdata && time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, jiffies)) { active++; break; } } rcu_read_unlock(); return active; } static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct beacon_data *presp; struct sta_info *sta; u16 capability; if (!is_zero_ether_addr(ifibss->bssid)) { capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY, capability); if (cbss) { cfg80211_unlink_bss(local->hw.wiphy, cbss); cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } ifibss->state = IEEE80211_IBSS_MLME_SEARCH; sta_info_flush(sdata); spin_lock_bh(&ifibss->incomplete_lock); while (!list_empty(&ifibss->incomplete_stations)) { sta = list_first_entry(&ifibss->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifibss->incomplete_lock); sta_info_free(local, sta); spin_lock_bh(&ifibss->incomplete_lock); } spin_unlock_bh(&ifibss->incomplete_lock); netif_carrier_off(sdata->dev); sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; sdata->vif.bss_conf.ssid_len = 0; /* remove beacon */ presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); if (presp) kfree_rcu(presp, rcu_head); clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_IBSS); drv_leave_ibss(local, sdata); mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&local->mtx); } static void ieee80211_csa_connection_drop_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = 
container_of(work, struct ieee80211_sub_if_data, u.ibss.csa_connection_drop_work); sdata_lock(sdata); ieee80211_ibss_disconnect(sdata); synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); /* trigger a scan to find another IBSS network to join */ ieee80211_queue_work(&sdata->local->hw, &sdata->work); sdata_unlock(sdata); } static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; int err; /* if the current channel is a DFS channel, mark the channel as * unavailable. */ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &ifibss->chandef, NL80211_IFTYPE_ADHOC); if (err > 0) cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef, GFP_ATOMIC); } static bool ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, bool beacon) { struct cfg80211_csa_settings params; struct ieee80211_csa_ie csa_ie; struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; enum nl80211_channel_type ch_type; int err; u32 sta_flags; sdata_assert_lock(sdata); sta_flags = IEEE80211_STA_DISABLE_VHT; switch (ifibss->chandef.width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: sta_flags |= IEEE80211_STA_DISABLE_HT; /* fall through */ case NL80211_CHAN_WIDTH_20: sta_flags |= IEEE80211_STA_DISABLE_40MHZ; break; default: break; } memset(&params, 0, sizeof(params)); memset(&csa_ie, 0, sizeof(csa_ie)); err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, ifibss->chandef.chan->band, sta_flags, ifibss->bssid, &csa_ie); /* can't switch to destination channel, fail */ if (err < 0) goto disconnect; /* did not contain a CSA */ if (err) return false; /* channel switch is not supported, disconnect */ if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) goto disconnect; params.count = csa_ie.count; params.chandef = csa_ie.chandef; switch (ifibss->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case 
NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: /* keep our current HT mode (HT20/HT40+/HT40-), even if * another mode has been announced. The mode is not adopted * within the beacon while doing CSA and we should therefore * keep the mode which we announce. */ ch_type = cfg80211_get_chandef_type(&ifibss->chandef); cfg80211_chandef_create(&params.chandef, params.chandef.chan, ch_type); break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: if (params.chandef.width != ifibss->chandef.width) { sdata_info(sdata, "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifibss->bssid, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); goto disconnect; } break; default: /* should not happen, sta_flags should prevent VHT modes. */ WARN_ON(1); goto disconnect; } if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef, NL80211_IFTYPE_ADHOC)) { sdata_info(sdata, "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifibss->bssid, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); goto disconnect; } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &params.chandef, NL80211_IFTYPE_ADHOC); if (err < 0) goto disconnect; if (err > 0 && !ifibss->userspace_handles_dfs) { /* IBSS-DFS only allowed with a control program */ goto disconnect; } params.radar_required = err; if (cfg80211_chandef_identical(&params.chandef, &sdata->vif.bss_conf.chandef)) { ibss_dbg(sdata, "received csa with an identical chandef, ignoring\n"); return true; } /* all checks done, now perform the channel switch. 
*/ ibss_dbg(sdata, "received channel switch announcement to go to channel %d MHz\n", params.chandef.chan->center_freq); params.block_tx = !!csa_ie.mode; if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev, &params)) goto disconnect; ieee80211_ibss_csa_mark_radar(sdata); return true; disconnect: ibss_dbg(sdata, "Can't handle channel switch, disconnect\n"); ieee80211_queue_work(&sdata->local->hw, &ifibss->csa_connection_drop_work); ieee80211_ibss_csa_mark_radar(sdata); return true; } static void ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems) { int required_len; if (len < IEEE80211_MIN_ACTION_SIZE + 1) return; /* CSA is the only action we handle for now */ if (mgmt->u.action.u.measurement.action_code != WLAN_ACTION_SPCT_CHL_SWITCH) return; required_len = IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.chan_switch); if (len < required_len) return; if (!sdata->vif.csa_active) ieee80211_ibss_process_chanswitch(sdata, elems, false); } static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code); if (len < IEEE80211_DEAUTH_FRAME_LEN) return; ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", mgmt->sa, mgmt->da, mgmt->bssid, reason); sta_info_destroy_addr(sdata, mgmt->sa); } static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u16 auth_alg, auth_transaction; sdata_assert_lock(sdata); if (len < 24 + 6) return; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) return; /* * IEEE 802.11 standard 
does not require authentication in IBSS * networks and most implementations do not seem to use it. * However, try to reply to authentication attempts if someone * has actually implemented this. */ ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct ieee80211_bss *bss; struct sta_info *sta; struct ieee80211_channel *channel; u64 beacon_timestamp, rx_timestamp; u32 supp_rates = 0; enum ieee80211_band band = rx_status->band; enum nl80211_bss_scan_width scan_width; struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; bool rates_updated = false; channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); if (!channel) return; if (sdata->vif.type == NL80211_IFTYPE_ADHOC && ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (elems->supp_rates) { supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); if (sta) { u32 prev_rates; prev_rates = sta->sta.supp_rates[band]; /* make sure mandatory rates are always added */ scan_width = NL80211_BSS_CHAN_WIDTH_20; if (rx_status->flag & RX_FLAG_5MHZ) scan_width = NL80211_BSS_CHAN_WIDTH_5; if (rx_status->flag & RX_FLAG_10MHZ) scan_width = NL80211_BSS_CHAN_WIDTH_10; sta->sta.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); if (sta->sta.supp_rates[band] != prev_rates) { ibss_dbg(sdata, "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n", sta->sta.addr, prev_rates, sta->sta.supp_rates[band]); rates_updated = true; } } else { rcu_read_unlock(); sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); } } if (sta && elems->wmm_info) set_sta_flag(sta, WLAN_STA_WME); 
if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) { /* we both use HT */ struct ieee80211_ht_cap htcap_ie; struct cfg80211_chan_def chandef; ieee80211_ht_oper_to_chandef(channel, elems->ht_operation, &chandef); memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie)); /* * fall back to HT20 if we don't use or use * the other extension channel */ if (chandef.center_freq1 != sdata->u.ibss.chandef.center_freq1) htcap_ie.cap_info &= cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40); rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap( sdata, sband, &htcap_ie, sta); } if (sta && rates_updated) { drv_sta_rc_update(local, sdata, &sta->sta, IEEE80211_RC_SUPP_RATES_CHANGED); rate_control_rate_init(sta); } rcu_read_unlock(); } bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, channel); if (!bss) return; cbss = container_of((void *)bss, struct cfg80211_bss, priv); /* same for beacon and probe response */ beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); /* check if we need to merge IBSS */ /* not an IBSS */ if (!(cbss->capability & WLAN_CAPABILITY_IBSS)) goto put_bss; /* different channel */ if (sdata->u.ibss.fixed_channel && sdata->u.ibss.chandef.chan != cbss->channel) goto put_bss; /* different SSID */ if (elems->ssid_len != sdata->u.ibss.ssid_len || memcmp(elems->ssid, sdata->u.ibss.ssid, sdata->u.ibss.ssid_len)) goto put_bss; /* process channel switch */ if (sdata->vif.csa_active || ieee80211_ibss_process_chanswitch(sdata, elems, true)) goto put_bss; /* same BSSID */ if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) goto put_bss; /* we use a fixed BSSID */ if (sdata->u.ibss.fixed_bssid) goto put_bss; if (ieee80211_have_rx_timestamp(rx_status)) { /* time when timestamp field was received */ rx_timestamp = ieee80211_calculate_rx_timestamp(local, rx_status, len + FCS_LEN, 
24); } else { /* * second best option: get current TSF * (will return -1 if not supported) */ rx_timestamp = drv_get_tsf(local, sdata); } ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", mgmt->sa, mgmt->bssid, (unsigned long long)rx_timestamp, (unsigned long long)beacon_timestamp, (unsigned long long)(rx_timestamp - beacon_timestamp), jiffies); if (beacon_timestamp > rx_timestamp) { ibss_dbg(sdata, "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n", mgmt->bssid); ieee80211_sta_join_ibss(sdata, bss); supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); rcu_read_unlock(); } put_bss: ieee80211_rx_bss_put(local, bss); } void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; enum nl80211_bss_scan_width scan_width; int band; /* * XXX: Consider removing the least recently used entry and * allow new one to be added. 
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", sdata->name, addr); return; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) return; if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return; } band = chanctx_conf->def.chan->band; scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); if (!sta) return; sta->last_rx = jiffies; /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); spin_lock(&ifibss->incomplete_lock); list_add(&sta->list, &ifibss->incomplete_stations); spin_unlock(&ifibss->incomplete_lock); ieee80211_queue_work(&local->hw, &sdata->work); } static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta, *tmp; unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT; unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; mutex_lock(&local->sta_mtx); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { if (sdata != sta->sdata) continue; if (time_after(jiffies, sta->last_rx + exp_time) || (time_after(jiffies, sta->last_rx + exp_rsn_time) && sta->sta_state != IEEE80211_STA_AUTHORIZED)) { sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", sta->sta_state != IEEE80211_STA_AUTHORIZED ? 
"not authorized " : "", sta->sta.addr); WARN_ON(__sta_info_destroy(sta)); } } mutex_unlock(&local->sta_mtx); } /* * This function is called with state == IEEE80211_IBSS_MLME_JOINED */ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; enum nl80211_bss_scan_width scan_width; sdata_assert_lock(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); ieee80211_ibss_sta_expire(sdata); if (time_before(jiffies, ifibss->last_scan_completed + IEEE80211_IBSS_MERGE_INTERVAL)) return; if (ieee80211_sta_active_ibss(sdata)) return; if (ifibss->fixed_channel) return; sdata_info(sdata, "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, NULL, scan_width); } static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; u8 bssid[ETH_ALEN]; u16 capability; int i; sdata_assert_lock(sdata); if (ifibss->fixed_bssid) { memcpy(bssid, ifibss->bssid, ETH_ALEN); } else { /* Generate random, not broadcast, locally administered BSSID. Mix in * own MAC address to make sure that devices that do not have proper * random number generator get different BSSID. 
*/ get_random_bytes(bssid, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) bssid[i] ^= sdata->vif.addr[i]; bssid[0] &= ~0x01; bssid[0] |= 0x02; } sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid); capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; else sdata->drop_unencrypted = 0; __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, &ifibss->chandef, ifibss->basic_rates, capability, 0, true); } /* * This function is called with state == IEEE80211_IBSS_MLME_SEARCH */ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct ieee80211_channel *chan = NULL; const u8 *bssid = NULL; enum nl80211_bss_scan_width scan_width; int active_ibss; u16 capability; sdata_assert_lock(sdata); active_ibss = ieee80211_sta_active_ibss(sdata); ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss); if (active_ibss) return; capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; if (ifibss->fixed_bssid) bssid = ifibss->bssid; if (ifibss->fixed_channel) chan = ifibss->chandef.chan; if (!is_zero_ether_addr(ifibss->bssid)) bssid = ifibss->bssid; cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid, ifibss->ssid, ifibss->ssid_len, WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY, capability); if (cbss) { struct ieee80211_bss *bss; bss = (void *)cbss->priv; ibss_dbg(sdata, "sta_find_ibss: selected %pM current %pM\n", cbss->bssid, ifibss->bssid); sdata_info(sdata, "Selected IBSS BSSID %pM based on configured SSID\n", cbss->bssid); ieee80211_sta_join_ibss(sdata, bss); ieee80211_rx_bss_put(local, bss); return; } /* if a fixed bssid and a fixed freq have been provided create the IBSS * directly and do not waste time scanning */ if (ifibss->fixed_bssid && ifibss->fixed_channel) { sdata_info(sdata, "Created IBSS using preconfigured 
BSSID %pM\n", bssid); ieee80211_sta_create_ibss(sdata); return; } ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n"); /* Selected IBSS not found in current scan results - try to scan */ if (time_after(jiffies, ifibss->last_scan_completed + IEEE80211_SCAN_INTERVAL)) { sdata_info(sdata, "Trigger new scan to find an IBSS to join\n"); scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, chan, scan_width); } else { int interval = IEEE80211_SCAN_INTERVAL; if (time_after(jiffies, ifibss->ibss_join_req + IEEE80211_IBSS_JOIN_TIMEOUT)) ieee80211_sta_create_ibss(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + interval)); } } static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *req) { struct ieee80211_mgmt *mgmt = (void *)req->data; struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; int tx_last_beacon, len = req->len; struct sk_buff *skb; struct beacon_data *presp; u8 *pos, *end; sdata_assert_lock(sdata); presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || len < 24 + 2 || !presp) return; tx_last_beacon = drv_tx_last_beacon(local); ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) && !is_broadcast_ether_addr(mgmt->bssid)) return; end = ((u8 *) mgmt) + len; pos = mgmt->u.probe_req.variable; if (pos[0] != WLAN_EID_SSID || pos + 2 + pos[1] > end) { ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n", mgmt->sa); return; } if (pos[1] != 0 && (pos[1] != ifibss->ssid_len || memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { /* Ignore ProbeReq for foreign SSID */ return; } /* Reply with ProbeResp */ skb = 
dev_alloc_skb(local->tx_headroom + presp->head_len); if (!skb) return; skb_reserve(skb, local->tx_headroom); memcpy(skb_put(skb, presp->head_len), presp->head, presp->head_len); memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN); ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; /* avoid excessive retries for probe request to wildcard SSIDs */ if (pos[1] == 0) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK; ieee80211_tx_skb(sdata, skb); } static void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { size_t baselen; struct ieee802_11_elems elems; BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) != offsetof(typeof(mgmt->u.beacon), variable)); /* * either beacon or probe_resp but the variable field is at the * same offset */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return; ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, false, &elems); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); } void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 fc; struct ieee802_11_elems elems; int ies_len; rx_status = IEEE80211_SKB_RXCB(skb); mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); sdata_lock(sdata); if (!sdata->u.ibss.ssid_len) goto mgmt_out; /* not ready to merge yet */ switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_PROBE_REQ: ieee80211_rx_mgmt_probe_req(sdata, skb); break; case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_probe_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_AUTH: ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: 
ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: switch (mgmt->u.action.category) { case WLAN_CATEGORY_SPECTRUM_MGMT: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); if (ies_len < 0) break; ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, ies_len, true, &elems); if (elems.parse_error) break; ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len, rx_status, &elems); break; } } mgmt_out: sdata_unlock(sdata); } void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct sta_info *sta; sdata_lock(sdata); /* * Work could be scheduled after scan or similar * when we aren't even joined (or trying) with a * network. */ if (!ifibss->ssid_len) goto out; spin_lock_bh(&ifibss->incomplete_lock); while (!list_empty(&ifibss->incomplete_stations)) { sta = list_first_entry(&ifibss->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifibss->incomplete_lock); ieee80211_ibss_finish_sta(sta); rcu_read_unlock(); spin_lock_bh(&ifibss->incomplete_lock); } spin_unlock_bh(&ifibss->incomplete_lock); switch (ifibss->state) { case IEEE80211_IBSS_MLME_SEARCH: ieee80211_sta_find_ibss(sdata); break; case IEEE80211_IBSS_MLME_JOINED: ieee80211_sta_merge_ibss(sdata); break; default: WARN_ON(1); break; } out: sdata_unlock(sdata); } static void ieee80211_ibss_timer(unsigned long data) { struct ieee80211_sub_if_data *sdata = (struct ieee80211_sub_if_data *) data; ieee80211_queue_work(&sdata->local->hw, &sdata->work); } void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; setup_timer(&ifibss->timer, ieee80211_ibss_timer, (unsigned long) sdata); INIT_LIST_HEAD(&ifibss->incomplete_stations); spin_lock_init(&ifibss->incomplete_lock); INIT_WORK(&ifibss->csa_connection_drop_work, ieee80211_csa_connection_drop_work); } /* scan finished notification */ void 
ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type != NL80211_IFTYPE_ADHOC) continue; sdata->u.ibss.last_scan_completed = jiffies; ieee80211_queue_work(&local->hw, &sdata->work); } mutex_unlock(&local->iflist_mtx); } int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { u32 changed = 0; u32 rate_flags; struct ieee80211_supported_band *sband; enum ieee80211_chanctx_mode chanmode; struct ieee80211_local *local = sdata->local; int radar_detect_width = 0; int i; int ret; ret = cfg80211_chandef_dfs_required(local->hw.wiphy, &params->chandef, sdata->wdev.iftype); if (ret < 0) return ret; if (ret > 0) { if (!params->userspace_handles_dfs) return -EINVAL; radar_detect_width = BIT(params->chandef.width); } chanmode = (params->channel_fixed && !ret) ? 
IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE; mutex_lock(&local->chanctx_mtx); ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode, radar_detect_width); mutex_unlock(&local->chanctx_mtx); if (ret < 0) return ret; if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); sdata->u.ibss.fixed_bssid = true; } else sdata->u.ibss.fixed_bssid = false; sdata->u.ibss.privacy = params->privacy; sdata->u.ibss.control_port = params->control_port; sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs; sdata->u.ibss.basic_rates = params->basic_rates; sdata->u.ibss.last_scan_completed = jiffies; /* fix basic_rates if channel does not support these rates */ rate_flags = ieee80211_chandef_rate_flags(&params->chandef); sband = local->hw.wiphy->bands[params->chandef.chan->band]; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) sdata->u.ibss.basic_rates &= ~BIT(i); } memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, sizeof(params->mcast_rate)); sdata->vif.bss_conf.beacon_int = params->beacon_interval; sdata->u.ibss.chandef = params->chandef; sdata->u.ibss.fixed_channel = params->channel_fixed; if (params->ie) { sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, GFP_KERNEL); if (sdata->u.ibss.ie) sdata->u.ibss.ie_len = params->ie_len; } sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; sdata->u.ibss.ibss_join_req = jiffies; memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; memcpy(&sdata->u.ibss.ht_capa, &params->ht_capa, sizeof(sdata->u.ibss.ht_capa)); memcpy(&sdata->u.ibss.ht_capa_mask, &params->ht_capa_mask, sizeof(sdata->u.ibss.ht_capa_mask)); /* * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is * reserved, but an HT STA shall protect HT transmissions as though * the HT Protection field were set to non-HT mixed mode. 
* * In an IBSS, the RIFS Mode field of the HT Operation element is * also reserved, but an HT STA shall operate as though this field * were set to 1. */ sdata->vif.bss_conf.ht_operation_mode |= IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_PARAM_RIFS_MODE; changed |= BSS_CHANGED_HT; ieee80211_bss_info_change_notify(sdata, changed); sdata->smps_mode = IEEE80211_SMPS_OFF; sdata->needed_rx_chains = local->rx_chains; ieee80211_queue_work(&local->hw, &sdata->work); return 0; } int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; ieee80211_ibss_disconnect(sdata); ifibss->ssid_len = 0; memset(ifibss->bssid, 0, ETH_ALEN); /* remove beacon */ kfree(sdata->u.ibss.ie); /* on the next join, re-program HT parameters */ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask)); synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); del_timer_sync(&sdata->u.ibss.timer); return 0; }
gpl-2.0
bemolxd/android_kernel_x2xtreme-test
drivers/misc/mediatek/lcm/s6d0170/s6d0170.c
80
8251
#include <linux/string.h> #include "lcm_drv.h" // --------------------------------------------------------------------------- // Local Constants // --------------------------------------------------------------------------- #define FRAME_WIDTH (240) #define FRAME_HEIGHT (320) #define LCM_ID (0x0170) // --------------------------------------------------------------------------- // Local Variables // --------------------------------------------------------------------------- static LCM_UTIL_FUNCS lcm_util = {0}; #define SET_RESET_PIN(v) (lcm_util.set_reset_pin((v))) #define UDELAY(n) (lcm_util.udelay(n)) #define MDELAY(n) (lcm_util.mdelay(n)) // --------------------------------------------------------------------------- // Local Functions // --------------------------------------------------------------------------- static __inline unsigned int to_18_bit_inst_format(unsigned int val) { return ((val & 0xFF00) << 2) | ((val & 0x00FF) << 1); } static __inline unsigned int to_16_bit_inst_format(unsigned int val) { return (((val >> 2) & 0xFF00) | ((val >> 1) & 0x00FF)); } static __inline void send_ctrl_cmd(unsigned int cmd) { lcm_util.send_cmd(to_18_bit_inst_format(cmd)); } static __inline void send_data_cmd(unsigned int data) { lcm_util.send_data(to_18_bit_inst_format(data)); } static __inline unsigned int read_data_cmd(void) { unsigned int data; data = to_16_bit_inst_format(lcm_util.read_data()); return data; } static __inline void set_lcm_register(unsigned int regIndex, unsigned int regData) { send_ctrl_cmd(regIndex); send_data_cmd(regData); } static void init_lcm_registers(void) { send_ctrl_cmd(0x2300); /* Power Supply Setting */ set_lcm_register(0x11, 0x0000); set_lcm_register(0x12, 0x0000); set_lcm_register(0x13, 0x0000); set_lcm_register(0x14, 0x0000); UDELAY(10); set_lcm_register(0x11, 0x0010); set_lcm_register(0x12, 0x3222); set_lcm_register(0x13, 0x204E); set_lcm_register(0x14, 0x0248); set_lcm_register(0x10, 0x0700); UDELAY(10); set_lcm_register(0x11, 0x0112); 
UDELAY(10); set_lcm_register(0x11, 0x0312); UDELAY(10); set_lcm_register(0x11, 0x0712); UDELAY(10); set_lcm_register(0x11, 0x0F1B); UDELAY(10); set_lcm_register(0x11, 0x0F3B); UDELAY(30); /* Display Contron Register Setup */ set_lcm_register(0x01, 0x0136); set_lcm_register(0x02, 0x0000); set_lcm_register(0x03, 0x9000); set_lcm_register(0x07, 0x0104); set_lcm_register(0x08, 0x00E2); set_lcm_register(0x0B, 0x1100); set_lcm_register(0x0C, 0x0000); set_lcm_register(0x0F, 0x0001); // OSC. freq. UDELAY(40); set_lcm_register(0x15, 0x0031); set_lcm_register(0x46, 0x00EF); set_lcm_register(0x47, 0x0000); set_lcm_register(0x48, 0x01AF); set_lcm_register(0x49, 0x0000); // Gamma (R) set_lcm_register(0x50, 0x0000); set_lcm_register(0x51, 0x030c); set_lcm_register(0x52, 0x0801); set_lcm_register(0x53, 0x0109); set_lcm_register(0x54, 0x0b01); set_lcm_register(0x55, 0x0200); set_lcm_register(0x56, 0x020d); set_lcm_register(0x57, 0x0e00); set_lcm_register(0x58, 0x0002); set_lcm_register(0x59, 0x010b); // Gamma (G) set_lcm_register(0x60, 0x0B00); set_lcm_register(0x61, 0x000D); set_lcm_register(0x62, 0x0000); set_lcm_register(0x63, 0x0002); set_lcm_register(0x64, 0x0604); set_lcm_register(0x65, 0x0000); set_lcm_register(0x66, 0x000C); set_lcm_register(0x67, 0x060F); set_lcm_register(0x68, 0x0F0F); set_lcm_register(0x69, 0x0A06); // Gamma (B) set_lcm_register(0x70, 0x0B00); set_lcm_register(0x71, 0x000D); set_lcm_register(0x72, 0x0000); set_lcm_register(0x73, 0x0002); set_lcm_register(0x74, 0x0604); set_lcm_register(0x75, 0x0000); set_lcm_register(0x76, 0x000C); set_lcm_register(0x77, 0x060F); set_lcm_register(0x78, 0x0F0F); set_lcm_register(0x79, 0x0A06); set_lcm_register(0x80, 0x0101); // Display Sequence set_lcm_register(0x07, 0x0116); UDELAY(40); set_lcm_register(0x07, 0x1117); set_lcm_register(0x13, 0x2055); // Power Control 1(R10h) // SAP: Fast DSTB1F: Off DSTB: Off STB: Off set_lcm_register(0x10, 0x0700); // Blank Period Control(R08h) // FP: 2 BP: 2 set_lcm_register(0x08, 
0x0022); // Frame Cycle Control(R0Bh) // NO: 2 INCLK SDT: 2 INCLK DIV: fosc/1 RTN: 17 INCLK set_lcm_register(0x0B, 0x2201); } // --------------------------------------------------------------------------- // LCM Driver Implementations // --------------------------------------------------------------------------- static void lcm_set_util_funcs(const LCM_UTIL_FUNCS *util) { memcpy(&lcm_util, util, sizeof(LCM_UTIL_FUNCS)); } static void lcm_get_params(LCM_PARAMS *params) { memset(params, 0, sizeof(LCM_PARAMS)); params->type = LCM_TYPE_DBI; params->ctrl = LCM_CTRL_PARALLEL_DBI; params->width = FRAME_WIDTH; params->height = FRAME_HEIGHT; params->dbi.port = 0; params->dbi.clock_freq = LCM_DBI_CLOCK_FREQ_52M; params->dbi.data_width = LCM_DBI_DATA_WIDTH_18BITS; params->dbi.data_format.color_order = LCM_COLOR_ORDER_RGB; params->dbi.data_format.trans_seq = LCM_DBI_TRANS_SEQ_MSB_FIRST; params->dbi.data_format.padding = LCM_DBI_PADDING_ON_LSB; params->dbi.data_format.format = LCM_DBI_FORMAT_RGB666; params->dbi.data_format.width = LCM_DBI_DATA_WIDTH_18BITS; params->dbi.cpu_write_bits = LCM_DBI_CPU_WRITE_32_BITS; params->dbi.io_driving_current = 0; // enable tearing-free params->dbi.te_mode = LCM_DBI_TE_MODE_VSYNC_ONLY; params->dbi.te_edge_polarity = LCM_POLARITY_RISING; params->dbi.parallel.write_setup = 0; params->dbi.parallel.write_hold = 1; params->dbi.parallel.write_wait = 2; params->dbi.parallel.read_setup = 2; params->dbi.parallel.read_latency = 11; params->dbi.parallel.wait_period = 0; } static void lcm_init(void) { SET_RESET_PIN(0); MDELAY(100); SET_RESET_PIN(1); MDELAY(500); init_lcm_registers(); } static void lcm_suspend(void) { set_lcm_register(0x15, 0x0000); set_lcm_register(0x07, 0x0112); UDELAY(15); set_lcm_register(0x07, 0x0110); UDELAY(15); set_lcm_register(0x10, 0x0701); } static void lcm_resume(void) { set_lcm_register(0x10, 0x0700); UDELAY(15); set_lcm_register(0x11, 0x0010); set_lcm_register(0x14, 0x1f56); set_lcm_register(0x10, 0x0700); UDELAY(1); 
set_lcm_register(0x11, 0x0112); UDELAY(1); set_lcm_register(0x11, 0x0312); UDELAY(1); set_lcm_register(0x11, 0x0712); UDELAY(1); set_lcm_register(0x11, 0x0F1B); UDELAY(1); set_lcm_register(0x11, 0x0F3B); UDELAY(3); set_lcm_register(0x15, 0x0031); set_lcm_register(0x07, 0x1116); UDELAY(15); set_lcm_register(0x07, 0x1117); UDELAY(150); } static void lcm_update(unsigned int x, unsigned int y, unsigned int width, unsigned int height) { unsigned int x0 = x; unsigned int y0 = y; unsigned int x1 = x0 + width - 1; unsigned int y1 = y0 + height - 1; set_lcm_register(0x46, x1); set_lcm_register(0x47, x0); set_lcm_register(0x48, y1); set_lcm_register(0x49, y0); send_ctrl_cmd(0x22); } static unsigned int lcm_compare_id(void) { send_ctrl_cmd(0x2300); send_ctrl_cmd(0x05); return (LCM_ID == read_data_cmd())?1:0; } // --------------------------------------------------------------------------- // Get LCM Driver Hooks // --------------------------------------------------------------------------- LCM_DRIVER s6d0170_lcm_drv = { .name = "s6d0170", .set_util_funcs = lcm_set_util_funcs, .get_params = lcm_get_params, .init = lcm_init, .suspend = lcm_suspend, .resume = lcm_resume, .update = lcm_update, .compare_id = lcm_compare_id };
gpl-2.0
OliverG96/android_kernel_samsung_golden
drivers/misc/dispdev/dispdev.c
80
14586
/* * Copyright (C) ST-Ericsson SA 2011 * * Display output device driver * * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> * for ST-Ericsson. * * License terms: GNU General Public License (GPL), version 2. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/idr.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/ioctl.h> #include <linux/dispdev.h> #include <linux/hwmem.h> #include <video/mcde_dss.h> #define DENSITY_CHECK (16) #define MAX_BUFFERS 4 static LIST_HEAD(dev_list); static DEFINE_MUTEX(dev_list_lock); enum buffer_state { BUF_UNUSED = 0, BUF_QUEUED, BUF_ACTIVATED, /*TODO:waitfordone BUF_DEACTIVATED,*/ BUF_FREE, BUF_DEQUEUED, }; struct dispdev_buffer { struct hwmem_alloc *alloc; u32 size; enum buffer_state state; u32 paddr; /* if pinned */ }; struct dispdev { bool open; struct mutex lock; struct miscdevice mdev; struct list_head list; struct mcde_display_device *ddev; struct mcde_overlay *ovly; struct mcde_overlay *parent_ovly; struct dispdev_config config; bool overlay; struct dispdev_buffer buffers[MAX_BUFFERS]; wait_queue_head_t waitq_dq; /* * For the rotation use case * buffers_need_update is used to ensure that a set_config that * changes width or height is followed by a unregister_buffer. */ bool buffers_need_update; /* * For the overlay startup use case. * first_update is used to handle the first update after a set_config. * In this case a queue_buffer will arrive after set_config and not a * unregister_buffer as in the rotation use case. 
*/ bool first_update; char name[sizeof(DISPDEV_DEFAULT_DEVICE_PREFIX) + 3]; }; static int find_buf(struct dispdev *dd, enum buffer_state state) { int i; for (i = 0; i < MAX_BUFFERS; i++) if (dd->buffers[i].state == state) return i; return -1; } int dispdev_open(struct inode *inode, struct file *file) { int ret; struct dispdev *dd = NULL; mutex_lock(&dev_list_lock); list_for_each_entry(dd, &dev_list, list) if (dd->mdev.minor == iminor(inode)) break; if (&dd->list == &dev_list) { mutex_unlock(&dev_list_lock); return -ENODEV; } if (dd->open) { mutex_unlock(&dev_list_lock); return -EBUSY; } dd->open = true; mutex_unlock(&dev_list_lock); ret = mcde_dss_enable_overlay(dd->ovly); if (ret) return ret; file->private_data = dd; return 0; } int dispdev_release(struct inode *inode, struct file *file) { int i; struct dispdev *dd = NULL; mutex_lock(&dev_list_lock); list_for_each_entry(dd, &dev_list, list) if (dd->mdev.minor == iminor(inode)) break; mutex_unlock(&dev_list_lock); if (&dd->list == &dev_list) return -ENODEV; /* TODO: Make sure it waits for completion */ mcde_dss_disable_overlay(dd->ovly); for (i = 0; i < MAX_BUFFERS; i++) { if (dd->buffers[i].paddr) hwmem_unpin(dd->buffers[i].alloc); if (dd->buffers[i].alloc) hwmem_release(dd->buffers[i].alloc); dd->buffers[i].alloc = NULL; dd->buffers[i].state = BUF_UNUSED; dd->buffers[i].size = 0; dd->buffers[i].paddr = 0; } dd->open = false; wake_up(&dd->waitq_dq); return 0; } static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt) { switch (fmt) { default: case DISPDEV_FMT_RGB565: return MCDE_OVLYPIXFMT_RGB565; case DISPDEV_FMT_RGB888: return MCDE_OVLYPIXFMT_RGB888; case DISPDEV_FMT_RGBA8888: return MCDE_OVLYPIXFMT_RGBA8888; case DISPDEV_FMT_RGBX8888: return MCDE_OVLYPIXFMT_RGBX8888; case DISPDEV_FMT_YUV422: return MCDE_OVLYPIXFMT_YCbCr422; } } static void get_ovly_info(struct dispdev_config *cfg, struct mcde_overlay_info *info) { info->paddr = 0; info->stride = cfg->stride; info->fmt = get_ovly_fmt(cfg->format); 
info->src_x = 0; info->src_y = 0; info->dst_x = cfg->x; info->dst_y = cfg->y; info->dst_z = cfg->z; info->w = cfg->width; info->h = cfg->height; } static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg) { int ret = 0; if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0) return 0; /* * Only update MCDE if format, stride, width and height * is the same. Otherwise just store the new config and update * MCDE in the next queue buffer. This because the buffer that is * active can be have the wrong format, width ... */ if (cfg->format == dd->config.format && cfg->stride == dd->config.stride && cfg->width == dd->config.width && cfg->height == dd->config.height) { int buf_index; if (!dd->buffers_need_update) { buf_index = find_buf(dd, BUF_ACTIVATED); if (buf_index >= 0) { struct mcde_overlay_info info; struct dispdev_buffer *buf; buf = &dd->buffers[buf_index]; get_ovly_info(cfg, &info); info.paddr = buf->paddr; ret = mcde_dss_apply_overlay(dd->ovly, &info); if (!ret) mcde_dss_update_overlay(dd->ovly, false); } } } else { dd->buffers_need_update = true; } dd->config = *cfg; return ret; } static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name) { int ret; struct dispdev_buffer *buf; enum hwmem_mem_type memtype; enum hwmem_access access; ret = find_buf(dd, BUF_UNUSED); if (ret < 0) return -ENOMEM; buf = &dd->buffers[ret]; buf->alloc = hwmem_resolve_by_name(hwmem_name); if (IS_ERR(buf->alloc)) { ret = PTR_ERR(buf->alloc); goto resolve_failed; } hwmem_get_info(buf->alloc, &buf->size, &memtype, &access); if (!(access & HWMEM_ACCESS_READ) || memtype == HWMEM_MEM_SCATTERED_SYS) { ret = -EACCES; goto invalid_mem; } buf->state = BUF_FREE; goto out; invalid_mem: hwmem_release(buf->alloc); resolve_failed: out: return ret; } static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx) { struct dispdev_buffer *buf = &dd->buffers[buf_idx]; if (buf_idx >= ARRAY_SIZE(dd->buffers)) return -EINVAL; if (buf->state == BUF_UNUSED) return 
-EINVAL; if (dd->buffers_need_update) dd->buffers_need_update = false; if (buf->state == BUF_ACTIVATED) { /* Disable the overlay */ struct mcde_overlay_info info; /* TODO Wait for frame done */ get_ovly_info(&dd->config, &info); mcde_dss_apply_overlay(dd->ovly, &info); mcde_dss_update_overlay(dd->ovly, false); hwmem_unpin(dd->buffers[buf_idx].alloc); } hwmem_release(buf->alloc); buf->state = BUF_UNUSED; buf->alloc = NULL; buf->size = 0; buf->paddr = 0; dd->first_update = false; return 0; } /** * @brief Check if the buffer is transparent or black (ARGB = X000) * Note: Only for ARGB32. * Worst case: a ~full transparent buffer * Results: ~2200us @800Mhz for a WVGA screen, with DENSITY_CHECK=8 * ~520us @800Mhz for a WVGA screen, with DENSITY_CHECK=16 * * @param w witdh * @param h height * @param addr buffer addr * * @return 1 if the buffer is transparent, else 0 */ static int is_transparent(int w, int h, u32 *addr) { int i, j; u32 *c, *next_line; u32 sum; next_line = addr; sum = 0; /* TODO Optimize me */ for (j = 0; j < h; j += DENSITY_CHECK) { c = next_line; for (i = 0; i < w; i += DENSITY_CHECK) { sum += ((*c) & 0x00FFFFFF); c += DENSITY_CHECK; } if (sum) return 0; /* Not "transparent" */ next_line += (w * DENSITY_CHECK); } return 1; /* "Transparent" */ } static int dispdev_queue_buffer(struct dispdev *dd, struct dispdev_buffer_info *buffer) { int ret, i; struct mcde_overlay_info info; struct hwmem_mem_chunk mem_chunk; size_t mem_chunk_length = 1; struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 }; struct hwmem_alloc *alloc; u32 buf_idx = buffer->buf_idx; if (buf_idx >= ARRAY_SIZE(dd->buffers) || dd->buffers[buf_idx].state != BUF_DEQUEUED) return -EINVAL; alloc = dd->buffers[buf_idx].alloc; get_ovly_info(&dd->config, &info); ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length); if (ret) { dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret); return -EINVAL; } rgn.size = rgn.end = dd->buffers[buf_idx].size; ret = hwmem_set_domain(alloc, 
HWMEM_ACCESS_READ, HWMEM_DOMAIN_SYNC, &rgn); if (ret) dev_warn(dd->mdev.this_device, "Set domain failed, %d\n", ret); i = find_buf(dd, BUF_ACTIVATED); if (i >= 0) { dd->buffers[i].state = BUF_FREE; wake_up(&dd->waitq_dq); } if (!dd->first_update) { dd->first_update = true; dd->buffers_need_update = false; } dd->buffers[buf_idx].paddr = mem_chunk.paddr; if (buffer->display_update && !dd->buffers_need_update && dd->config.width == buffer->buf_cfg.width && dd->config.height == buffer->buf_cfg.height && dd->config.format == buffer->buf_cfg.format && dd->config.stride == buffer->buf_cfg.stride) { info.paddr = mem_chunk.paddr; mcde_dss_apply_overlay(dd->ovly, &info); mcde_dss_update_overlay(dd->ovly, false); } else if (buffer->display_update) { dd->buffers_need_update = true; } /* Disable the MCDE FB overlay */ if ((dd->parent_ovly->state != NULL) && (dd->ddev->check_transparency)) { dd->ddev->check_transparency--; mcde_dss_get_overlay_info(dd->parent_ovly, &info); if (dd->ddev->check_transparency == 0) { if (is_transparent(info.w, info.h, info.vaddr)) { mcde_dss_disable_overlay(dd->parent_ovly); dev_info(dd->mdev.this_device, "Disable overlay\n"); } } } dd->buffers[buf_idx].state = BUF_ACTIVATED; return 0; } static int dispdev_dequeue_buffer(struct dispdev *dd) { int i; i = find_buf(dd, BUF_FREE); if (i < 0) { if (find_buf(dd, BUF_ACTIVATED) < 0) return -EINVAL; mutex_unlock(&dd->lock); wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0); mutex_lock(&dd->lock); } hwmem_unpin(dd->buffers[i].alloc); dd->buffers[i].state = BUF_DEQUEUED; dd->buffers[i].paddr = 0; return i; } long dispdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; struct dispdev *dd = (struct dispdev *)file->private_data; mutex_lock(&dd->lock); switch (cmd) { case DISPDEV_SET_CONFIG_IOC: { struct dispdev_config cfg; if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg))) ret = -EFAULT; else ret = dispdev_set_config(dd, &cfg); } break; case DISPDEV_GET_CONFIG_IOC: ret 
= copy_to_user((void __user *)arg, &dd->config, sizeof(dd->config)); if (ret) ret = -EFAULT; break; case DISPDEV_REGISTER_BUFFER_IOC: ret = dispdev_register_buffer(dd, (s32)arg); break; case DISPDEV_UNREGISTER_BUFFER_IOC: ret = dispdev_unregister_buffer(dd, (u32)arg); break; case DISPDEV_QUEUE_BUFFER_IOC: { struct dispdev_buffer_info buffer; if (copy_from_user(&buffer, (void __user *)arg, sizeof(buffer))) ret = -EFAULT; else ret = dispdev_queue_buffer(dd, &buffer); break; } case DISPDEV_DEQUEUE_BUFFER_IOC: ret = dispdev_dequeue_buffer(dd); break; default: ret = -ENOSYS; } mutex_unlock(&dd->lock); return ret; } static const struct file_operations dispdev_fops = { .open = dispdev_open, .release = dispdev_release, .unlocked_ioctl = dispdev_ioctl, }; static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev, const char *name, bool overlay) { mutex_init(&dd->lock); INIT_LIST_HEAD(&dd->list); dd->ddev = ddev; dd->overlay = overlay; mcde_dss_get_native_resolution(ddev, &dd->config.width, &dd->config.height); dd->config.format = DISPDEV_FMT_RGB565; dd->config.stride = sizeof(u16) * w; dd->config.x = 0; dd->config.y = 0; dd->config.z = 0; dd->buffers_need_update = false; dd->first_update = false; init_waitqueue_head(&dd->waitq_dq); dd->mdev.minor = MISC_DYNAMIC_MINOR; dd->mdev.name = name; dd->mdev.fops = &dispdev_fops; pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name, dd->config.width, dd->config.height, dd->config.format, dd->config.stride); } int dispdev_create(struct mcde_display_device *ddev, bool overlay, struct mcde_overlay *parent_ovly) { int ret = 0; struct dispdev *dd; struct mcde_overlay_info info = {0}; static int counter; dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL); if (!dd) return -ENOMEM; snprintf(dd->name, sizeof(dd->name), "%s%d", DISPDEV_DEFAULT_DEVICE_PREFIX, counter++); init_dispdev(dd, ddev, dd->name, overlay); get_ovly_info(&dd->config, &info); if (!overlay) { ret = mcde_dss_enable_display(ddev); if (ret) 
goto fail_enable_display; ret = mcde_dss_set_pixel_format(ddev, info.fmt); if (ret) goto fail_enable_display; ret = mcde_dss_apply_channel(ddev); if (ret) goto fail_enable_display; } /* Save the MCDE FB overlay */ dd->parent_ovly = parent_ovly; dd->ovly = mcde_dss_create_overlay(ddev, &info); if (!dd->ovly) { ret = -ENOMEM; goto fail_create_ovly; } ret = misc_register(&dd->mdev); if (ret) goto fail_register_misc; mutex_lock(&dev_list_lock); list_add_tail(&dd->list, &dev_list); mutex_unlock(&dev_list_lock); goto out; fail_register_misc: mcde_dss_destroy_overlay(dd->ovly); fail_create_ovly: if (!overlay) mcde_dss_disable_display(ddev); fail_set_video_mode: fail_enable_display: kfree(dd); out: return ret; } void dispdev_destroy(struct mcde_display_device *ddev) { struct dispdev *dd; struct dispdev *tmp; mutex_lock(&dev_list_lock); list_for_each_entry_safe(dd, tmp, &dev_list, list) { if (dd->ddev == ddev) { list_del(&dd->list); misc_deregister(&dd->mdev); mcde_dss_destroy_overlay(dd->ovly); /* * TODO: Uncomment when DSS has reference * counting of enable/disable */ /* mcde_dss_disable_display(dd->ddev); */ kfree(dd); break; } } mutex_unlock(&dev_list_lock); } static void dispdev_destroy_all(void) { struct dispdev *dd; struct dispdev *tmp; mutex_lock(&dev_list_lock); list_for_each_entry_safe(dd, tmp, &dev_list, list) { list_del(&dd->list); misc_deregister(&dd->mdev); mcde_dss_destroy_overlay(dd->ovly); /* * TODO: Uncomment when DSS has reference * counting of enable/disable */ /* mcde_dss_disable_display(dd->ddev); */ kfree(dd); } mutex_unlock(&dev_list_lock); mutex_destroy(&dev_list_lock); } static int __init dispdev_init(void) { pr_info("%s\n", __func__); mutex_init(&dev_list_lock); return 0; } module_init(dispdev_init); static void __exit dispdev_exit(void) { dispdev_destroy_all(); pr_info("%s\n", __func__); } module_exit(dispdev_exit); MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Display output 
device driver");
gpl-2.0
punitvara/linux-1
drivers/net/phy/mdio-mux-mmioreg.c
592
4155
/*
 * Simple memory-mapped device MDIO MUX driver
 *
 * Author: Timur Tabi <timur@freescale.com>
 *
 * Copyright 2012 Freescale Semiconductor, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>

struct mdio_mux_mmioreg_state {
	void *mux_handle;	/* opaque handle returned by mdio_mux_init() */
	phys_addr_t phys;	/* physical address of the 8-bit mux register */
	uint8_t mask;		/* bits of the register owned by this mux */
};

/*
 * MDIO multiplexing switch function
 *
 * This function is called by the mdio-mux layer when it thinks the mdio bus
 * multiplexer needs to switch.
 *
 * 'current_child' is the current value of the mux register (masked via
 * s->mask).
 *
 * 'desired_child' is the value of the 'reg' property of the target child MDIO
 * node.
 *
 * The first time this function is called, current_child == -1.
 *
 * If current_child == desired_child, then the mux is already set to the
 * correct bus.
 */
static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
				      void *data)
{
	struct mdio_mux_mmioreg_state *s = data;

	if (current_child ^ desired_child) {
		void __iomem *p = ioremap(s->phys, 1);
		uint8_t x, y;

		if (!p)
			return -ENOMEM;

		x = ioread8(p);
		y = (x & ~s->mask) | desired_child;
		if (x != y) {
			iowrite8(y, p);
			pr_debug("%s: %02x -> %02x\n", __func__, x, y);
		}

		iounmap(p);
	}

	return 0;
}

/*
 * Probe: validate the device-tree description (one 8-bit register, a
 * 'mux-mask' that fits in 8 bits, and child 'reg' values that stay inside
 * that mask), then hand the bus over to the mdio-mux core.
 */
static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
{
	struct device_node *np2, *np = pdev->dev.of_node;
	struct mdio_mux_mmioreg_state *s;
	struct resource res;
	const __be32 *iprop;
	int len, ret;

	dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	ret = of_address_to_resource(np, 0, &res);
	if (ret) {
		dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
			np->full_name);
		return ret;
	}
	s->phys = res.start;

	if (resource_size(&res) != sizeof(uint8_t)) {
		dev_err(&pdev->dev, "only 8-bit registers are supported\n");
		return -EINVAL;
	}

	iprop = of_get_property(np, "mux-mask", &len);
	if (!iprop || len != sizeof(uint32_t)) {
		dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
		return -ENODEV;
	}
	if (be32_to_cpup(iprop) > 255) {
		dev_err(&pdev->dev, "only 8-bit registers are supported\n");
		return -EINVAL;
	}
	s->mask = be32_to_cpup(iprop);

	/*
	 * Verify that the 'reg' property of each child MDIO bus does not
	 * set any bits outside of the 'mask'.
	 */
	for_each_available_child_of_node(np, np2) {
		iprop = of_get_property(np2, "reg", &len);
		if (!iprop || len != sizeof(uint32_t)) {
			dev_err(&pdev->dev, "mdio-mux child node %s is "
				"missing a 'reg' property\n", np2->full_name);
			/*
			 * BUGFIX: the OF child iterator holds a reference
			 * on np2; drop it before bailing out of the loop.
			 */
			of_node_put(np2);
			return -ENODEV;
		}
		if (be32_to_cpup(iprop) & ~s->mask) {
			dev_err(&pdev->dev, "mdio-mux child node %s has "
				"a 'reg' value with unmasked bits\n",
				np2->full_name);
			of_node_put(np2);	/* see BUGFIX note above */
			return -ENODEV;
		}
	}

	ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
			    &s->mux_handle, s);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
			np->full_name);
		return ret;
	}

	pdev->dev.platform_data = s;

	return 0;
}

static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
{
	struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);

	mdio_mux_uninit(s->mux_handle);

	return 0;
}

static const struct of_device_id mdio_mux_mmioreg_match[] = {
	{
		.compatible = "mdio-mux-mmioreg",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);

static struct platform_driver mdio_mux_mmioreg_driver = {
	.driver = {
		.name		= "mdio-mux-mmioreg",
		.of_match_table = mdio_mux_mmioreg_match,
	},
	.probe		= mdio_mux_mmioreg_probe,
	.remove		= mdio_mux_mmioreg_remove,
};

module_platform_driver(mdio_mux_mmioreg_driver);

MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
shukiz/VAR-SOM-AM33-Kernel-3-14
drivers/net/irda/ali-ircc.c
848
57516
/********************************************************************* * * Filename: ali-ircc.h * Version: 0.5 * Description: Driver for the ALI M1535D and M1543C FIR Controller * Status: Experimental. * Author: Benjamin Kong <benjamin_kong@ali.com.tw> * Created at: 2000/10/16 03:46PM * Modified at: 2001/1/3 02:55PM * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw> * Modified at: 2003/11/6 and support for ALi south-bridge chipsets M1563 * Modified by: Clear Zhang <clear_zhang@ali.com.tw> * * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw> * All Rights Reserved * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ********************************************************************/ #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/rtnetlink.h> #include <linux/serial_reg.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <net/irda/wrapper.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include "ali-ircc.h" #define CHIP_IO_EXTENT 8 #define BROKEN_DONGLE_ID #define ALI_IRCC_DRIVER_NAME "ali-ircc" /* Power Management */ static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state); static int ali_ircc_resume(struct platform_device *dev); static struct platform_driver ali_ircc_driver = { .suspend = ali_ircc_suspend, .resume = ali_ircc_resume, .driver = { .name = ALI_IRCC_DRIVER_NAME, .owner = THIS_MODULE, }, }; /* Module parameters */ static int qos_mtt_bits = 0x07; /* 1 ms or more */ /* Use BIOS settions by 
default, but user may supply module parameters */ static unsigned int io[] = { ~0, ~0, ~0, ~0 }; static unsigned int irq[] = { 0, 0, 0, 0 }; static unsigned int dma[] = { 0, 0, 0, 0 }; static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info); static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info); static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info); /* These are the currently known ALi south-bridge chipsets, the only one difference * is that M1543C doesn't support HP HDSL-3600 */ static ali_chip_t chips[] = { { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 }, { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 }, { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 }, { NULL } }; /* Max 4 instances for now */ static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL }; /* Dongle Types */ static char *dongle_types[] = { "TFDS6000", "HP HSDL-3600", "HP HSDL-1100", "No dongle connected", }; /* Some prototypes */ static int ali_ircc_open(int i, chipio_t *info); static int ali_ircc_close(struct ali_ircc_cb *self); static int ali_ircc_setup(chipio_t *info); static int ali_ircc_is_receiving(struct ali_ircc_cb *self); static int ali_ircc_net_open(struct net_device *dev); static int ali_ircc_net_close(struct net_device *dev); static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud); /* SIR function */ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self); static void ali_ircc_sir_receive(struct ali_ircc_cb *self); static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self); static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len); static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed); /* FIR 
function */ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev); static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed); static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self); static int ali_ircc_dma_receive(struct ali_ircc_cb *self); static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self); static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self); static void ali_ircc_dma_xmit(struct ali_ircc_cb *self); /* My Function */ static int ali_ircc_read_dongle_id (int i, chipio_t *info); static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed); /* ALi chip function */ static void SIR2FIR(int iobase); static void FIR2SIR(int iobase); static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable); /* * Function ali_ircc_init () * * Initialize chip. Find out whay kinds of chips we are dealing with * and their configuration registers address */ static int __init ali_ircc_init(void) { ali_chip_t *chip; chipio_t info; int ret; int cfg, cfg_base; int reg, revision; int i = 0; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); ret = platform_driver_register(&ali_ircc_driver); if (ret) { IRDA_ERROR("%s, Can't register driver!\n", ALI_IRCC_DRIVER_NAME); return ret; } ret = -ENODEV; /* Probe for all the ALi chipsets we know about */ for (chip= chips; chip->name; chip++, i++) { IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name); /* Try all config registers for this chip */ for (cfg=0; cfg<2; cfg++) { cfg_base = chip->cfg[cfg]; if (!cfg_base) continue; memset(&info, 0, sizeof(chipio_t)); info.cfg_base = cfg_base; info.fir_base = io[i]; info.dma = dma[i]; info.irq = irq[i]; /* Enter Configuration */ outb(chip->entr1, cfg_base); outb(chip->entr2, cfg_base); /* Select Logical Device 5 Registers (UART2) */ outb(0x07, cfg_base); outb(0x05, cfg_base+1); /* Read Chip Identification Register */ outb(chip->cid_index, cfg_base); reg 
= inb(cfg_base+1); if (reg == chip->cid_value) { IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base); outb(0x1F, cfg_base); revision = inb(cfg_base+1); IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__, chip->name, revision); /* * If the user supplies the base address, then * we init the chip, if not we probe the values * set by the BIOS */ if (io[i] < 2000) { chip->init(chip, &info); } else { chip->probe(chip, &info); } if (ali_ircc_open(i, &info) == 0) ret = 0; i++; } else { IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base); } /* Exit configuration */ outb(0xbb, cfg_base); } } IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); if (ret) platform_driver_unregister(&ali_ircc_driver); return ret; } /* * Function ali_ircc_cleanup () * * Close all configured chips * */ static void __exit ali_ircc_cleanup(void) { int i; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); for (i=0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) ali_ircc_close(dev_self[i]); } platform_driver_unregister(&ali_ircc_driver); IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); } static const struct net_device_ops ali_ircc_sir_ops = { .ndo_open = ali_ircc_net_open, .ndo_stop = ali_ircc_net_close, .ndo_start_xmit = ali_ircc_sir_hard_xmit, .ndo_do_ioctl = ali_ircc_net_ioctl, }; static const struct net_device_ops ali_ircc_fir_ops = { .ndo_open = ali_ircc_net_open, .ndo_stop = ali_ircc_net_close, .ndo_start_xmit = ali_ircc_fir_hard_xmit, .ndo_do_ioctl = ali_ircc_net_ioctl, }; /* * Function ali_ircc_open (int i, chipio_t *inf) * * Open driver instance * */ static int ali_ircc_open(int i, chipio_t *info) { struct net_device *dev; struct ali_ircc_cb *self; int dongle_id; int err; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); if (i >= ARRAY_SIZE(dev_self)) { IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__); return 
-ENOMEM; } /* Set FIR FIFO and DMA Threshold */ if ((ali_ircc_setup(info)) == -1) return -1; dev = alloc_irdadev(sizeof(*self)); if (dev == NULL) { IRDA_ERROR("%s(), can't allocate memory for control block!\n", __func__); return -ENOMEM; } self = netdev_priv(dev); self->netdev = dev; spin_lock_init(&self->lock); /* Need to store self somewhere */ dev_self[i] = self; self->index = i; /* Initialize IO */ self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */ self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */ self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */ self->io.irq = info->irq; self->io.fir_ext = CHIP_IO_EXTENT; self->io.dma = info->dma; self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */ /* Reserve the ioports that we need */ if (!request_region(self->io.fir_base, self->io.fir_ext, ALI_IRCC_DRIVER_NAME)) { IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__, self->io.fir_base); err = -ENODEV; goto err_out1; } /* Initialize QoS for this device */ irda_init_max_qos_capabilies(&self->qos); /* The only value we must override it the baudrate */ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM self->qos.min_turn_time.bits = qos_mtt_bits; irda_qos_bits_to_value(&self->qos); /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */ self->rx_buff.truesize = 14384; self->tx_buff.truesize = 14384; /* Allocate memory if needed */ self->rx_buff.head = dma_zalloc_coherent(NULL, self->rx_buff.truesize, &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) { err = -ENOMEM; goto err_out2; } self->tx_buff.head = dma_zalloc_coherent(NULL, self->tx_buff.truesize, &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) { err = -ENOMEM; goto err_out3; } self->rx_buff.in_frame = FALSE; self->rx_buff.state = OUTSIDE_FRAME; self->tx_buff.data = self->tx_buff.head; 
self->rx_buff.data = self->rx_buff.head; /* Reset Tx queue info */ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; /* Override the network functions we need to use */ dev->netdev_ops = &ali_ircc_sir_ops; err = register_netdev(dev); if (err) { IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); goto err_out4; } IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); /* Check dongle id */ dongle_id = ali_ircc_read_dongle_id(i, info); IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__, ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]); self->io.dongle_id = dongle_id; IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; err_out4: dma_free_coherent(NULL, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma); err_out3: dma_free_coherent(NULL, self->rx_buff.truesize, self->rx_buff.head, self->rx_buff_dma); err_out2: release_region(self->io.fir_base, self->io.fir_ext); err_out1: dev_self[i] = NULL; free_netdev(dev); return err; } /* * Function ali_ircc_close (self) * * Close driver instance * */ static int __exit ali_ircc_close(struct ali_ircc_cb *self) { int iobase; IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__); IRDA_ASSERT(self != NULL, return -1;); iobase = self->io.fir_base; /* Remove netdevice */ unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) dma_free_coherent(NULL, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma); if (self->rx_buff.head) dma_free_coherent(NULL, self->rx_buff.truesize, self->rx_buff.head, self->rx_buff_dma); dev_self[self->index] = NULL; free_netdev(self->netdev); IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; } /* * Function ali_ircc_init_43 (chip, info) * * Initialize the ALi 
M1543 chip. */ static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info) { /* All controller information like I/O address, DMA channel, IRQ * are set by BIOS */ return 0; } /* * Function ali_ircc_init_53 (chip, info) * * Initialize the ALi M1535 chip. */ static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info) { /* All controller information like I/O address, DMA channel, IRQ * are set by BIOS */ return 0; } /* * Function ali_ircc_probe_53 (chip, info) * * Probes for the ALi M1535D or M1535 */ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) { int cfg_base = info->cfg_base; int hi, low, reg; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Enter Configuration */ outb(chip->entr1, cfg_base); outb(chip->entr2, cfg_base); /* Select Logical Device 5 Registers (UART2) */ outb(0x07, cfg_base); outb(0x05, cfg_base+1); /* Read address control register */ outb(0x60, cfg_base); hi = inb(cfg_base+1); outb(0x61, cfg_base); low = inb(cfg_base+1); info->fir_base = (hi<<8) + low; info->sir_base = info->fir_base; IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); /* Read IRQ control register */ outb(0x70, cfg_base); reg = inb(cfg_base+1); info->irq = reg & 0x0f; IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); /* Read DMA channel */ outb(0x74, cfg_base); reg = inb(cfg_base+1); info->dma = reg & 0x07; if(info->dma == 0x04) IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__); else IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); /* Read Enabled Status */ outb(0x30, cfg_base); reg = inb(cfg_base+1); info->enabled = (reg & 0x80) && (reg & 0x01); IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled); /* Read Power Status */ outb(0x22, cfg_base); reg = inb(cfg_base+1); info->suspended = (reg & 0x20); IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended); /* Exit configuration */ outb(0xbb, cfg_base); IRDA_DEBUG(2, "%s(), ----------------- End 
-----------------\n", __func__); return 0; } /* * Function ali_ircc_setup (info) * * Set FIR FIFO and DMA Threshold * Returns non-negative on success. * */ static int ali_ircc_setup(chipio_t *info) { unsigned char tmp; int version; int iobase = info->fir_base; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Locking comments : * Most operations here need to be protected. We are called before * the device instance is created in ali_ircc_open(), therefore * nobody can bother us - Jean II */ /* Switch to FIR space */ SIR2FIR(iobase); /* Master Reset */ outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM /* Read FIR ID Version Register */ switch_bank(iobase, BANK3); version = inb(iobase+FIR_ID_VR); /* Should be 0x00 in the M1535/M1535D */ if(version != 0x00) { IRDA_ERROR("%s, Wrong chip version %02x\n", ALI_IRCC_DRIVER_NAME, version); return -1; } /* Set FIR FIFO Threshold Register */ switch_bank(iobase, BANK1); outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR); /* Set FIR DMA Threshold Register */ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR); /* CRC enable */ switch_bank(iobase, BANK2); outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR); /* NDIS driver set TX Length here BANK2 Alias 3, Alias4*/ /* Switch to Bank 0 */ switch_bank(iobase, BANK0); tmp = inb(iobase+FIR_LCR_B); tmp &=~0x20; // disable SIP tmp |= 0x80; // these two steps make RX mode tmp &= 0xbf; outb(tmp, iobase+FIR_LCR_B); /* Disable Interrupt */ outb(0x00, iobase+FIR_IER); /* Switch to SIR space */ FIR2SIR(iobase); IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", ALI_IRCC_DRIVER_NAME); /* Enable receive interrupts */ // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM // Turn on the interrupts in ali_ircc_net_open IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return 0; } /* * Function ali_ircc_read_dongle_id (int index, info) * * Try to read dongle identification. 
This procedure needs to be executed * once after power-on/reset. It also needs to be used whenever you suspect * that the user may have plugged/unplugged the IrDA Dongle. */ static int ali_ircc_read_dongle_id (int i, chipio_t *info) { int dongle_id, reg; int cfg_base = info->cfg_base; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Enter Configuration */ outb(chips[i].entr1, cfg_base); outb(chips[i].entr2, cfg_base); /* Select Logical Device 5 Registers (UART2) */ outb(0x07, cfg_base); outb(0x05, cfg_base+1); /* Read Dongle ID */ outb(0xf0, cfg_base); reg = inb(cfg_base+1); dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01); IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__, dongle_id, dongle_types[dongle_id]); /* Exit configuration */ outb(0xbb, cfg_base); IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return dongle_id; } /* * Function ali_ircc_interrupt (irq, dev_id, regs) * * An interrupt from the chip has arrived. 
Time to do some work * */ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct ali_ircc_cb *self; int ret; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); self = netdev_priv(dev); spin_lock(&self->lock); /* Dispatch interrupt handler for the current speed */ if (self->io.speed > 115200) ret = ali_ircc_fir_interrupt(self); else ret = ali_ircc_sir_interrupt(self); spin_unlock(&self->lock); IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return ret; } /* * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self) * * Handle MIR/FIR interrupt * */ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) { __u8 eir, OldMessageCount; int iobase, tmp; IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__); iobase = self->io.fir_base; switch_bank(iobase, BANK0); self->InterruptID = inb(iobase+FIR_IIR); self->BusStatus = inb(iobase+FIR_BSR); OldMessageCount = (self->LineStatus + 1) & 0x07; self->LineStatus = inb(iobase+FIR_LSR); //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM eir = self->InterruptID & self->ier; /* Mask out the interesting ones */ IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID); IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus); IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier); IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir); /* Disable interrupts */ SetCOMInterrupts(self, FALSE); /* Tx or Rx Interrupt */ if (eir & IIR_EOM) { if (self->io.direction == IO_XMIT) /* TX */ { IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__); if(ali_ircc_dma_xmit_complete(self)) { if (irda_device_txqueue_empty(self->netdev)) { /* Prepare for receive */ ali_ircc_dma_receive(self); self->ier = IER_EOM; } } else { self->ier = IER_EOM; } } else /* RX */ { IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__); if(OldMessageCount > ((self->LineStatus+1) 
& 0x07)) { self->rcvFramesOverflow = TRUE; IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__); } if (ali_ircc_dma_receive_complete(self)) { IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__); self->ier = IER_EOM; } else { IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__); self->ier = IER_EOM | IER_TIMER; } } } /* Timer Interrupt */ else if (eir & IIR_TIMER) { if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__); } /* Disable Timer */ switch_bank(iobase, BANK1); tmp = inb(iobase+FIR_CR); outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR); /* Check if this is a Tx timer interrupt */ if (self->io.direction == IO_XMIT) { ali_ircc_dma_xmit(self); /* Interrupt on EOM */ self->ier = IER_EOM; } else /* Rx */ { if(ali_ircc_dma_receive_complete(self)) { self->ier = IER_EOM; } else { self->ier = IER_EOM | IER_TIMER; } } } /* Restore Interrupt */ SetCOMInterrupts(self, TRUE); IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__); return IRQ_RETVAL(eir); } /* * Function ali_ircc_sir_interrupt (irq, self, eir) * * Handle SIR interrupt * */ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) { int iobase; int iir, lsr; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); iobase = self->io.sir_base; iir = inb(iobase+UART_IIR) & UART_IIR_ID; if (iir) { /* Clear interrupt */ lsr = inb(iobase+UART_LSR); IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__, iir, lsr, iobase); switch (iir) { case UART_IIR_RLSI: IRDA_DEBUG(2, "%s(), RLSI\n", __func__); break; case UART_IIR_RDI: /* Receive interrupt */ ali_ircc_sir_receive(self); break; case UART_IIR_THRI: if (lsr & UART_LSR_THRE) { /* Transmitter ready for data */ ali_ircc_sir_write_wakeup(self); } break; default: IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir); break; } } 
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return IRQ_RETVAL(iir); } /* * Function ali_ircc_sir_receive (self) * * Receive one frame from the infrared port * */ static void ali_ircc_sir_receive(struct ali_ircc_cb *self) { int boguscount = 0; int iobase; IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); IRDA_ASSERT(self != NULL, return;); iobase = self->io.sir_base; /* * Receive all characters in Rx FIFO, unwrap and unstuff them. * async_unwrap_char will deliver all found frames */ do { async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, inb(iobase+UART_RX)); /* Make sure we don't stay here too long */ if (boguscount++ > 32) { IRDA_DEBUG(2,"%s(), breaking!\n", __func__); break; } } while (inb(iobase+UART_LSR) & UART_LSR_DR); IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } /* * Function ali_ircc_sir_write_wakeup (tty) * * Called by the driver when there's room for more data. If we have * more packets to send, we send them here. * */ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) { int actual = 0; int iobase; IRDA_ASSERT(self != NULL, return;); IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); iobase = self->io.sir_base; /* Finished with frame? */ if (self->tx_buff.len > 0) { /* Write data left in transmit buffer */ actual = ali_ircc_sir_write(iobase, self->io.fifo_size, self->tx_buff.data, self->tx_buff.len); self->tx_buff.data += actual; self->tx_buff.len -= actual; } else { if (self->new_speed) { /* We must wait until all data are gone */ while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT)) IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ ); IRDA_DEBUG(1, "%s(), Changing speed! 
self->new_speed = %d\n", __func__ , self->new_speed); ali_ircc_change_speed(self, self->new_speed); self->new_speed = 0; // benjamin 2000/11/10 06:32PM if (self->io.speed > 115200) { IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ ); self->ier = IER_EOM; // SetCOMInterrupts(self, TRUE); return; } } else { netif_wake_queue(self->netdev); } self->netdev->stats.tx_packets++; /* Turn on receive interrupts */ outb(UART_IER_RDI, iobase+UART_IER); } IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) { struct net_device *dev = self->netdev; int iobase; IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud); /* This function *must* be called with irq off and spin-lock. * - Jean II */ iobase = self->io.fir_base; SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM /* Go to MIR, FIR Speed */ if (baud > 115200) { ali_ircc_fir_change_speed(self, baud); /* Install FIR xmit handler*/ dev->netdev_ops = &ali_ircc_fir_ops; /* Enable Interuupt */ self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM /* Be ready for incoming frames */ ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete } /* Go to SIR Speed */ else { ali_ircc_sir_change_speed(self, baud); /* Install SIR xmit handler*/ dev->netdev_ops = &ali_ircc_sir_ops; } SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM netif_wake_queue(self->netdev); IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) { int iobase; struct ali_ircc_cb *self = priv; struct net_device *dev; IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(self != NULL, return;); dev = self->netdev; iobase = self->io.fir_base; IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ 
,self->io.speed,baud); /* Come from SIR speed */ if(self->io.speed <=115200) { SIR2FIR(iobase); } /* Update accounting for new speed */ self->io.speed = baud; // Set Dongle Speed mode ali_ircc_change_dongle_speed(self, baud); IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } /* * Function ali_sir_change_speed (self, speed) * * Set speed of IrDA port to specified baudrate * */ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) { struct ali_ircc_cb *self = priv; unsigned long flags; int iobase; int fcr; /* FIFO control reg */ int lcr; /* Line control reg */ int divisor; IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed); IRDA_ASSERT(self != NULL, return;); iobase = self->io.sir_base; /* Come from MIR or FIR speed */ if(self->io.speed >115200) { // Set Dongle Speed mode first ali_ircc_change_dongle_speed(self, speed); FIR2SIR(iobase); } // Clear Line and Auxiluary status registers 2000/11/24 11:47AM inb(iobase+UART_LSR); inb(iobase+UART_SCR); /* Update accounting for new speed */ self->io.speed = speed; spin_lock_irqsave(&self->lock, flags); divisor = 115200/speed; fcr = UART_FCR_ENABLE_FIFO; /* * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and * almost 1,7 ms at 19200 bps. At speeds above that we can just forget * about this timeout since it will always be fast enough. 
	 */
	if (self->io.speed < 38400)
		fcr |= UART_FCR_TRIGGER_1;
	else
		fcr |= UART_FCR_TRIGGER_14;

	/* IrDA ports use 8N1 */
	lcr = UART_LCR_WLEN8;

	outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
	outb(divisor & 0xff,      iobase+UART_DLL); /* Set speed */
	outb(divisor >> 8,        iobase+UART_DLM);
	outb(lcr,                 iobase+UART_LCR); /* Set 8N1 */
	outb(fcr,                 iobase+UART_FCR); /* Enable FIFO's */

	/* without this, the connection will be broken after come back from FIR speed,
	   but with this, the SIR connection is harder to established */
	outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);

	spin_unlock_irqrestore(&self->lock, flags);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}

/*
 * Function ali_ircc_change_dongle_speed (priv, speed)
 *
 *    Program the attached dongle for the new speed. The bit-banged
 *    SD/MODE / IRTX waveforms below follow each dongle vendor's
 *    signalling sequence (see ASCII timing diagrams).
 *    Caller already holds the lock.
 */
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
{
	struct ali_ircc_cb *self = priv;
	int iobase,dongle_id;
	int tmp = 0;

	IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );

	iobase = self->io.fir_base; 	/* or iobase = self->io.sir_base; */
	dongle_id = self->io.dongle_id;

	/* We are already locked, no need to do it again */

	IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);

	switch_bank(iobase, BANK2);
	tmp = inb(iobase+FIR_IRDA_CR);

	/* IBM type dongle */
	if(dongle_id == 0)
	{
		if(speed == 4000000)
		{
			//	   __ __
			// SD/MODE __|     |__ __
			//               __ __
			// IRTX    __ __|     |__
			//         T1 T2 T3 T4 T5

			tmp &= ~IRDA_CR_HDLC;	// HDLC=0
			tmp |= IRDA_CR_CRC;	// CRC=1

			switch_bank(iobase, BANK2);
			outb(tmp, iobase+FIR_IRDA_CR);

			// T1 -> SD/MODE:0 IRTX:0
			tmp &= ~0x09;
			tmp |= 0x02;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// T2 -> SD/MODE:1 IRTX:0
			tmp &= ~0x01;
			tmp |= 0x0a;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// T3 -> SD/MODE:1 IRTX:1
			tmp |= 0x0b;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// T4 -> SD/MODE:0 IRTX:1
			tmp &= ~0x08;
			tmp |= 0x03;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// T5 -> SD/MODE:0 IRTX:0
			tmp &= ~0x09;
			tmp |= 0x02;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// reset -> Normal TX output Signal
			outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
		}
		else /* speed <=1152000 */
		{
			//	   __
			// SD/MODE __|  |__
			//
			// IRTX    ________
			//         T1 T2 T3

			/* MIR 115200, 57600 */
			if (speed==1152000)
			{
				tmp |= 0xA0;		//HDLC=1, 1.152Mbps=1
			}
			else
			{
				tmp &=~0x80;		//HDLC 0.576Mbps
				tmp |= 0x20;		//HDLC=1,
			}

			tmp |= IRDA_CR_CRC;		// CRC=1

			switch_bank(iobase, BANK2);
			outb(tmp, iobase+FIR_IRDA_CR);

			/* MIR 115200, 57600 */

			//switch_bank(iobase, BANK2);
			// T1 -> SD/MODE:0 IRTX:0
			tmp &= ~0x09;
			tmp |= 0x02;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// T2 -> SD/MODE:1 IRTX:0
			tmp &= ~0x01;
			tmp |= 0x0a;
			outb(tmp, iobase+FIR_IRDA_CR);

			// T3 -> SD/MODE:0 IRTX:0
			tmp &= ~0x09;
			tmp |= 0x02;
			outb(tmp, iobase+FIR_IRDA_CR);
			udelay(2);

			// reset -> Normal TX output Signal
			outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
		}
	}
	else if (dongle_id == 1) /* HP HDSL-3600 */
	{
		switch(speed)
		{
		case 4000000:
			tmp &= ~IRDA_CR_HDLC;	// HDLC=0
			break;
		case 1152000:
			tmp |= 0xA0;		// HDLC=1, 1.152Mbps=1
			break;
		case 576000:
			tmp &=~0x80;		// HDLC 0.576Mbps
			tmp |= 0x20;		// HDLC=1,
			break;
		}

		tmp |= IRDA_CR_CRC;		// CRC=1

		switch_bank(iobase, BANK2);
		outb(tmp, iobase+FIR_IRDA_CR);
	}
	else /* HP HDSL-1100 */
	{
		if(speed <= 115200) /* SIR */
		{
			tmp &= ~IRDA_CR_FIR_SIN;	// HP sin select = 0

			switch_bank(iobase, BANK2);
			outb(tmp, iobase+FIR_IRDA_CR);
		}
		else /* MIR FIR */
		{
			switch(speed)
			{
			case 4000000:
				tmp &= ~IRDA_CR_HDLC;	// HDLC=0
				break;
			case 1152000:
				tmp |= 0xA0;		// HDLC=1, 1.152Mbps=1
				break;
			case 576000:
				tmp &=~0x80;		// HDLC 0.576Mbps
				tmp |= 0x20;		// HDLC=1,
				break;
			}

			tmp |= IRDA_CR_CRC;		// CRC=1
			tmp |= IRDA_CR_FIR_SIN;		// HP sin select = 1

			switch_bank(iobase, BANK2);
			outb(tmp, iobase+FIR_IRDA_CR);
		}
	}

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}

/*
 * Function ali_ircc_sir_write (driver)
 *
 *    Fill Tx FIFO with transmit data
 *
 */
static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
{
	int actual = 0;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__
);

	/* Tx FIFO should be empty! */
	if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
		IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
		return 0;
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual], iobase+UART_TX);

		actual++;
	}

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
	return actual;
}

/*
 * Function ali_ircc_net_open (dev)
 *
 *    Start the device
 *
 */
static int ali_ircc_net_open(struct net_device *dev)
{
	struct ali_ircc_cb *self;
	int iobase;
	char hwname[32];

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Request IRQ and install Interrupt Handler */
	if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
	{
		IRDA_WARNING("%s, unable to allocate irq=%d\n",
			     ALI_IRCC_DRIVER_NAME, self->io.irq);
		return -EAGAIN;
	}

	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on
	 * failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		IRDA_WARNING("%s, unable to allocate dma=%d\n",
			     ALI_IRCC_DRIVER_NAME, self->io.dma);
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Turn on interrups */
	outb(UART_IER_RDI , iobase+UART_IER);

	/* Ready to play! */
	netif_start_queue(dev); //benjamin by irport

	/* Give self a hardware name; 32 bytes is ample for this fixed format */
	sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return 0;
}

/*
 * Function ali_ircc_net_close (dev)
 *
 *    Stop the device
 *
 */
static int ali_ircc_net_close(struct net_device *dev)
{
	struct ali_ircc_cb *self;
	//int iobase;

	IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Disable interrupts */
	SetCOMInterrupts(self, FALSE);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return 0;
}

/*
 * Function ali_ircc_fir_hard_xmit (skb, dev)
 *
 *    Transmit the frame
 *
 */
static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
						struct net_device *dev)
{
	struct ali_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__u32 speed;
	int mtt, diff;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);

	/* Make sure tests *& speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Note : you should make sure that speed changes are not going
	 * to corrupt any outgoing frame.
Look at nsc-ircc for the gory
	 * details - Jean II */

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			ali_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Register and copy this frame to DMA memory */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
	self->tx_fifo.tail += skb->len;

	dev->stats.tx_bytes += skb->len;

	skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
		      skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	/* Start transmit only if there is currently no transmit going on */
	if (self->tx_fifo.len == 1)
	{
		/* Check if we must wait the min turn time or not */
		mtt = irda_get_mtt(skb);

		if (mtt)
		{
			/* Check how much time we have used already */
			do_gettimeofday(&self->now);

			diff = self->now.tv_usec - self->stamp.tv_usec;
			/* self->stamp is set from ali_ircc_dma_receive_complete() */

			IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);

			if (diff < 0)
				diff += 1000000;

			/* Check if the mtt is larger than the time we have
			 * already used by all the protocol processing
			 */
			if (mtt > diff)
			{
				mtt -= diff;

				/*
				 * Use the h/w timer if the delay is larger than
				 * 500 us, and udelay for smaller values which
				 * should be acceptable
				 */
				if (mtt > 500)
				{
					/* Adjust for timer resolution */
					mtt = (mtt+250) / 500; 	/* 4 discard, 5 get advanced, Let's round off */

					IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);

					/* Setup timer */
					if (mtt == 1) /* 500 us */
					{
						switch_bank(iobase, BANK1);
						outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
					}
					else if (mtt == 2) /* 1 ms */
					{
						switch_bank(iobase, BANK1);
						outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
					}
					else /* > 2ms -> 4ms */
					{
						switch_bank(iobase, BANK1);
						outb(TIMER_IIR_2ms,
						     iobase+FIR_TIMER_IIR);
					}

					/* Start timer */
					outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
					self->io.direction = IO_XMIT;

					/* Enable timer interrupt */
					self->ier = IER_TIMER;
					SetCOMInterrupts(self, TRUE);

					/* Timer will take care of the rest */
					goto out;
				}
				else
					udelay(mtt);
			} // if (if (mtt > diff)
		}// if (mtt)

		/* Enable EOM interrupt */
		self->ier = IER_EOM;
		SetCOMInterrupts(self, TRUE);

		/* Transmit frame */
		ali_ircc_dma_xmit(self);
	} // if (self->tx_fifo.len == 1)

 out:

	/* Not busy transmitting anymore if window is not full */
	if (self->tx_fifo.free < MAX_TX_WINDOW)
		netif_wake_queue(self->netdev);

	/* Restore bank register */
	switch_bank(iobase, BANK0);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return NETDEV_TX_OK;
}

/*
 * Function ali_ircc_dma_xmit (self)
 *
 *    Program the controller and start a DMA transmit of the frame at
 *    the current tx_fifo slot. Caller holds the lock.
 */
static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
{
	int iobase, tmp;
	unsigned char FIFO_OPTI, Hi, Lo;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	iobase = self->io.fir_base;

	/* FIFO threshold , this method comes from NDIS5 code */
	if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
		FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
	else
		FIFO_OPTI = TX_FIFO_Threshold;

	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);

	self->io.direction = IO_XMIT;

	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len,
		       DMA_TX_MODE);

	/* Reset Tx FIFO */
	switch_bank(iobase, BANK0);
	outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);

	/* Set Tx FIFO threshold */
	if (self->fifo_opti_buf!=FIFO_OPTI)
	{
		switch_bank(iobase, BANK1);
		outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
		self->fifo_opti_buf=FIFO_OPTI;
	}

	/* Set Tx DMA threshold */
	switch_bank(iobase, BANK1);
	outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);

	/* Set max Tx frame size */
	Hi =
(self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
	Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
	switch_bank(iobase, BANK2);
	outb(Hi, iobase+FIR_TX_DSR_HI);
	outb(Lo, iobase+FIR_TX_DSR_LO);

	/* Disable SIP , Disable Brick Wall (we don't support in TX mode),
	   Change to TX mode */
	switch_bank(iobase, BANK0);
	tmp = inb(iobase+FIR_LCR_B);
	tmp &= ~0x20; // Disable SIP
	outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
	IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));

	outb(0, iobase+FIR_LSR);

	/* Enable DMA and Burst Mode */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}

/*
 * Function ali_ircc_dma_xmit_complete (self)
 *
 *    Finish the current DMA transmit: account errors/packets, apply a
 *    pending speed change, and chain the next queued frame if any.
 *    Returns TRUE when no more frames are pending, FALSE otherwise.
 */
static int  ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	iobase = self->io.fir_base;

	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);

	/* Check for underrun! */
	switch_bank(iobase, BANK0);
	if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
	{
		IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
	}
	else
	{
		self->netdev->stats.tx_packets++;
	}

	/* Check if we need to change the speed */
	if (self->new_speed)
	{
		ali_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	self->tx_fifo.ptr++;
	self->tx_fifo.len--;

	/* Any frames to be sent back-to-back? */
	if (self->tx_fifo.len)
	{
		ali_ircc_dma_xmit(self);

		/* Not finished yet! */
		ret = FALSE;
	}
	else
	{	/* Reset Tx FIFO info */
		self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
		self->tx_fifo.tail = self->tx_buff.head;
	}

	/* Make sure we have room for more frames */
	if (self->tx_fifo.free < MAX_TX_WINDOW) {
		/* Not busy transmitting anymore */
		/* Tell the network layer, that we can accept more frames */
		netif_wake_queue(self->netdev);
	}

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return ret;
}

/*
 * Function ali_ircc_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
{
	int iobase, tmp;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	iobase = self->io.fir_base;

	/* Reset Tx FIFO info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);

	/* Reset Message Count */
	switch_bank(iobase, BANK0);
	outb(0x07, iobase+FIR_LSR);

	self->rcvFramesOverflow = FALSE;

	self->LineStatus = inb(iobase+FIR_LSR) ;

	/* Reset Rx FIFO info */
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Rx FIFO */
	// switch_bank(iobase, BANK0);
	outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);

	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_RX_MODE);

	/* Set Receive Mode,Brick Wall */
	//switch_bank(iobase, BANK0);
	tmp = inb(iobase+FIR_LCR_B);
	outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM

	IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));

	/* Set Rx Threshold */
	switch_bank(iobase, BANK1);
	outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);

	/* Enable DMA and Burst Mode */
	// switch_bank(iobase, BANK1);
	outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return 0;
}

/*
 * Function ali_ircc_dma_receive_complete (self)
 *
 *    Drain the hardware status FIFO: collect per-frame status/length
 *    entries, then deliver good frames to the network stack and account
 *    the bad ones. Returns FALSE when it must be retried later (DMA not
 *    finished, or out of memory), TRUE when fully drained.
 */
static int  ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	__u8 status, MessageCount;
	int len, i, iobase, val;

	IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );

	st_fifo = &self->st_fifo;
	iobase = self->io.fir_base;

	switch_bank(iobase, BANK0);
	MessageCount = inb(iobase+ FIR_LSR)&0x07;

	if (MessageCount > 0)
		IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount);

	for (i=0; i<=MessageCount; i++)
	{
		/* Bank 0 */
		switch_bank(iobase, BANK0);
		status = inb(iobase+FIR_LSR);

		switch_bank(iobase, BANK2);
		len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;
		len = len << 8;
		len |= inb(iobase+FIR_RX_DSR_LO);

		IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
		IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);

		if (st_fifo->tail >= MAX_RX_WINDOW) {
			IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
			continue;
		}

		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
	}

	for (i=0; i<=MessageCount; i++)
	{
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->pending_bytes -= len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
		{
			IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );

			/* Skip frame */
			self->netdev->stats.rx_errors++;

			self->rx_buff.data += len;

			if (status & LSR_FIFO_UR)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
			}
			if (status & LSR_FRAME_ERROR)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
			}

			if (status & LSR_CRC_ERROR)
			{
				self->netdev->stats.rx_crc_errors++;
				IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
			}

			if(self->rcvFramesOverflow)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
			}
			if(len == 0)
			{
				self->netdev->stats.rx_frame_errors++;
				IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
			}
		}
		else
		{
			if (st_fifo->pending_bytes < 32)
			{
				switch_bank(iobase, BANK0);
				val = inb(iobase+FIR_BSR);
				if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
				{
					IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );

					/* Put this entry back in fifo */
					st_fifo->head--;
					st_fifo->len++;
					st_fifo->pending_bytes += len;
					st_fifo->entries[st_fifo->head].status = status;
					st_fifo->entries[st_fifo->head].len = len;

					/*
					 * DMA not finished yet, so try again
					 * later, set timer value, resolution
					 * 500 us
					 */

					switch_bank(iobase, BANK1);
					outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM

					/* Enable Timer */
					outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);

					return FALSE; /* I'll be back! */
				}
			}

			/*
			 * Remember the time we received this frame, so we can
			 * reduce the min turn time a bit since we will know
			 * how much time we have used for protocol processing
			 */
			do_gettimeofday(&self->stamp);

			skb = dev_alloc_skb(len+1);
			if (skb == NULL)
			{
				IRDA_WARNING("%s(), memory squeeze, "
					     "dropping frame.\n",
					     __func__);
				self->netdev->stats.rx_dropped++;

				return FALSE;
			}

			/* Make sure IP header gets aligned */
			skb_reserve(skb, 1);

			/* Copy frame without CRC, CRC is removed by hardware*/
			skb_put(skb, len);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len);

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}

	switch_bank(iobase, BANK0);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
	return TRUE;
}

/*
 * Function ali_ircc_sir_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 */
static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
						struct net_device *dev)
{
	struct ali_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__u32 speed;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);

	iobase = self->io.sir_base;

	netif_stop_queue(dev);

	/* Make sure tests *& speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Note : you should make sure that speed changes are not going
	 * to corrupt any outgoing frame.
Look at nsc-ircc for the gory
	 * details - Jean II */

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			ali_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Init tx buffer */
	self->tx_buff.data = self->tx_buff.head;

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
					   self->tx_buff.truesize);

	self->netdev->stats.tx_bytes += self->tx_buff.len;

	/* Turn on transmit finished interrupt. Will fire immediately! */
	outb(UART_IER_THRI, iobase+UART_IER);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);

	dev_kfree_skb(skb);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return NETDEV_TX_OK;
}

/*
 * Function ali_ircc_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct ali_ircc_cb *self;
	unsigned long flags;
	int ret = 0;

	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
		/*
		 * This function will also be used by IrLAP to change the
		 * speed, so we still must allow for speed change within
		 * interrupt context.
		 */
		if (!in_interrupt() && !capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_irqsave(&self->lock, flags);
		ali_ircc_change_speed(self, irq->ifr_baudrate);
		spin_unlock_irqrestore(&self->lock, flags);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
		/* This is protected */
		irq->ifr_receiving = ali_ircc_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return ret;
}

/*
 * Function ali_ircc_is_receiving (self)
 *
 *    Return TRUE is we are currently receiving a frame
 *
 */
static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
{
	unsigned long flags;
	int status = FALSE;
	int iobase;

	IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );

	IRDA_ASSERT(self != NULL, return FALSE;);

	spin_lock_irqsave(&self->lock, flags);

	if (self->io.speed > 115200)
	{
		iobase = self->io.fir_base;

		switch_bank(iobase, BANK1);
		if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
		{
			/* We are receiving something */
			IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
			status = TRUE;
		}
		switch_bank(iobase, BANK0);
	}
	else
	{
		status = (self->rx_buff.state != OUTSIDE_FRAME);
	}

	spin_unlock_irqrestore(&self->lock, flags);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );

	return status;
}

/* Power management: close the device on suspend (idempotent via io.suspended) */
static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct ali_ircc_cb *self = platform_get_drvdata(dev);

	IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);

	if (self->io.suspended)
		return 0;

	ali_ircc_net_close(self->netdev);

	self->io.suspended = 1;

	return 0;
}

/* Power management: re-open the device on resume */
static int ali_ircc_resume(struct platform_device *dev)
{
	struct ali_ircc_cb *self = platform_get_drvdata(dev);

	if (!self->io.suspended)
		return
0;

	ali_ircc_net_open(self->netdev);

	IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);

	self->io.suspended = 0;

	return 0;
}

/* ALi Chip Function */

/*
 * Function SetCOMInterrupts (self, enable)
 *
 *    Program the interrupt mask. The mask depends on the current mode
 *    (FIR/MIR uses self->ier, SIR uses fixed UART IER bits) and on the
 *    transfer direction; enable==FALSE masks everything.
 */
static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
{
	unsigned char newMask;

	int iobase = self->io.fir_base; /* or sir_base */

	IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);

	/* Enable the interrupt which we wish to */
	if (enable){
		if (self->io.direction == IO_XMIT)
		{
			if (self->io.speed > 115200) /* FIR, MIR */
			{
				newMask = self->ier;
			}
			else /* SIR */
			{
				newMask = UART_IER_THRI | UART_IER_RDI;
			}
		}
		else {
			if (self->io.speed > 115200) /* FIR, MIR */
			{
				newMask = self->ier;
			}
			else /* SIR */
			{
				newMask = UART_IER_RDI;
			}
		}
	}
	else /* Disable all the interrupts */
	{
		newMask = 0x00;
	}

	//SIR and FIR has different registers
	if (self->io.speed > 115200)
	{
		switch_bank(iobase, BANK0);
		outb(newMask, iobase+FIR_IER);
	}
	else
		outb(newMask, iobase+UART_IER);

	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}

/* Switch the chip from SIR (UART) mode to FIR mode */
static void SIR2FIR(int iobase)
{
	//unsigned char tmp;

	IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );

	/* Already protected (change_speed() or setup()), no need to lock.
	 * Jean II */

	outb(0x28, iobase+UART_MCR);
	outb(0x68, iobase+UART_MCR);
	outb(0x88, iobase+UART_MCR);

	outb(0x60, iobase+FIR_MCR); 	/*  Master Reset */
	outb(0x20, iobase+FIR_MCR); 	/*  Master Interrupt Enable */

	//tmp = inb(iobase+FIR_LCR_B);	/* SIP enable */
	//tmp |= 0x20;
	//outb(tmp, iobase+FIR_LCR_B);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}

/* Switch the chip from FIR mode back to SIR (UART) mode and drain
 * stale UART status by reading RX/LSR/MSR once. */
static void FIR2SIR(int iobase)
{
	unsigned char val;

	IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );

	/* Already protected (change_speed() or setup()), no need to lock.
	 * Jean II */

	outb(0x20, iobase+FIR_MCR);	/* IRQ to low */
	outb(0x00, iobase+UART_IER);

	outb(0xA0, iobase+FIR_MCR);	/* Don't set master reset */
	outb(0x00, iobase+UART_FCR);
	outb(0x07, iobase+UART_FCR);

	val = inb(iobase+UART_RX);
	val = inb(iobase+UART_LSR);
	val = inb(iobase+UART_MSR);

	IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}

MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
MODULE_DESCRIPTION("ALi FIR Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);

module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
module_param_array(dma, int, NULL, 0);
MODULE_PARM_DESC(dma, "DMA channels");

module_init(ali_ircc_init);
module_exit(ali_ircc_cleanup);
gpl-2.0
todorez/galileo-linux-stable
drivers/dma/mic_x100_dma.c
848
21073
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC X100 DMA Driver.
 *
 * Adapted from IOAT dma driver.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/seq_file.h>

#include "mic_x100_dma.h"

#define MIC_DMA_MAX_XFER_SIZE_CARD  (1 * 1024 * 1024 -\
				       MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST  (1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT	60
#define MIC_DMA_MEMCPY_LEN_SHIFT 46
#define MIC_DMA_STAT_INTR_SHIFT 59

/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
enum mic_dma_desc_format_type {
	MIC_DMA_MEMCPY = 1,
	MIC_DMA_STATUS,
};

/* Advance a h/w ring index by one, wrapping at MIC_DMA_DESC_RX_SIZE */
static inline u32 mic_dma_hw_ring_inc(u32 val)
{
	return (val + 1) % MIC_DMA_DESC_RX_SIZE;
}

/* Step a h/w ring index back by one, wrapping at zero */
static inline u32 mic_dma_hw_ring_dec(u32 val)
{
	return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
}

static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
{
	ch->head = mic_dma_hw_ring_inc(ch->head);
}

/* Prepare a memcpy desc */
static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
	dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
{
	u64 qw0, qw1;

	qw0 = src_phys;
	qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
	qw1 = MIC_DMA_MEMCPY;
	qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
	qw1 |= dst_phys;
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

/* Prepare a status desc. with @data to be written at @dst_phys */
static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
	dma_addr_t dst_phys, bool generate_intr)
{
	u64 qw0, qw1;

	qw0 = data;
	qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
	if (generate_intr)
		qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

/*
 * Complete finished transactions: walk from last_tail up to the h/w
 * completion count, running and clearing each tx callback.
 */
static void mic_dma_cleanup(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	u32 tail;
	u32 last_tail;

	spin_lock(&ch->cleanup_lock);
	tail = mic_dma_read_cmp_cnt(ch);
	/*
	 * This is the barrier pair for smp_wmb() in fn.
	 * mic_dma_tx_submit_unlock. It's required so that we read the
	 * updated cookie value from tx->cookie.
	 */
	smp_rmb();
	for (last_tail = ch->last_tail; tail != last_tail;) {
		tx = &ch->tx_array[last_tail];
		if (tx->cookie) {
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
		last_tail = mic_dma_hw_ring_inc(last_tail);
	}
	/* finish all completion callbacks before incrementing tail */
	smp_mb();
	ch->last_tail = last_tail;
	spin_unlock(&ch->cleanup_lock);
}

/*
 * Number of usable entries between head and tail; one slot is
 * deliberately left unused (count - 1), presumably to distinguish a
 * full ring from an empty one.
 */
static u32 mic_dma_ring_count(u32 head, u32 tail)
{
	u32 count;

	if (head >= tail)
		count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
	else
		count = tail - head;
	return count - 1;
}

/* Returns the num.
of free descriptors on success, -ENOMEM on failure */
static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
{
	struct device *dev = mic_dma_ch_to_device(ch);
	u32 count;

	count = mic_dma_ring_count(ch->head, ch->last_tail);
	if (count < required) {
		/* Reclaim completed descriptors and retry once */
		mic_dma_cleanup(ch);
		count = mic_dma_ring_count(ch->head, ch->last_tail);
	}

	if (count < required) {
		dev_dbg(dev, "Not enough desc space");
		dev_dbg(dev, "%s %d required=%u, avail=%u\n",
			__func__, __LINE__, required, count);
		return -ENOMEM;
	} else {
		return count;
	}
}

/* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
				    dma_addr_t dst, size_t len)
{
	size_t current_transfer_len;
	size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
	/* 3 is added to make sure we have enough space for status desc */
	int num_desc = len / max_xfer_size + 3;
	int ret;

	if (len % max_xfer_size)
		num_desc++;

	ret = mic_dma_avail_desc_ring_space(ch, num_desc);
	if (ret < 0)
		return ret;
	/* Split the copy into max_xfer_size sized chunks */
	do {
		current_transfer_len = min(len, max_xfer_size);
		mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
				    src, dst, current_transfer_len);
		mic_dma_hw_ring_inc_head(ch);
		len -= current_transfer_len;
		dst = dst + current_transfer_len;
		src = src + current_transfer_len;
	} while (len > 0);
	return 0;
}

/* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
static void mic_dma_prog_intr(struct mic_dma_chan *ch)
{
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, false);
	mic_dma_hw_ring_inc_head(ch);
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, true);
	mic_dma_hw_ring_inc_head(ch);
}

/* Wrapper function to program memcpy descriptors/status descriptors */
static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
			  dma_addr_t dst, size_t len)
{
	if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len))
		return -ENOMEM;
	/* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
	if (flags & DMA_PREP_FENCE) {
		mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
					 ch->status_dest_micpa, false);
		mic_dma_hw_ring_inc_head(ch);
	}

	if (flags & DMA_PREP_INTERRUPT)
		mic_dma_prog_intr(ch);

	return 0;
}

static inline void mic_dma_issue_pending(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	spin_lock(&mic_ch->issue_lock);
	/*
	 * Write to head triggers h/w to act on the descriptors.
	 * On MIC, writing the same head value twice causes
	 * a h/w error. On second write, h/w assumes we filled
	 * the entire ring & overwrote some of the descriptors.
	 */
	if (mic_ch->issued == mic_ch->submitted)
		goto out;
	mic_ch->issued = mic_ch->submitted;
	/*
	 * make descriptor updates visible before advancing head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();
	mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
out:
	spin_unlock(&mic_ch->issue_lock);
}

/* Kick the h/w once enough unissued descriptors have accumulated */
static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
{
	if (mic_dma_ring_count(ch->issued, ch->submitted)
			> mic_dma_pending_level)
		mic_dma_issue_pending(&ch->api_ch);
}

/*
 * tx_submit hook: assigns the cookie and publishes the new submitted
 * index. Releases prep_lock taken by the matching prep_*_lock call.
 */
static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
	dma_cookie_t cookie;

	dma_cookie_assign(tx);
	cookie = tx->cookie;
	/*
	 * We need an smp write barrier here because another CPU might see
	 * an update to submitted and update h/w head even before we
	 * assigned a cookie to this tx.
	 */
	smp_wmb();
	mic_ch->submitted = mic_ch->head;
	spin_unlock(&mic_ch->prep_lock);
	mic_dma_update_pending(mic_ch);
	return cookie;
}

/* Hand out the tx descriptor for the last programmed ring slot */
static inline struct dma_async_tx_descriptor *
allocate_tx(struct mic_dma_chan *ch)
{
	u32 idx = mic_dma_hw_ring_dec(ch->head);
	struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];

	dma_async_tx_descriptor_init(tx, &ch->api_ch);
	tx->tx_submit = mic_dma_tx_submit_unlock;
	return tx;
}

/*
 * Prepare a memcpy descriptor to be added to the ring.
* Note that the temporary descriptor adds an extra overhead of copying the * descriptor to ring. So, we copy directly to the descriptor ring */ static struct dma_async_tx_descriptor * mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); struct device *dev = mic_dma_ch_to_device(mic_ch); int result; if (!len && !flags) return NULL; spin_lock(&mic_ch->prep_lock); result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len); if (result >= 0) return allocate_tx(mic_ch); dev_err(dev, "Error enqueueing dma, error=%d\n", result); spin_unlock(&mic_ch->prep_lock); return NULL; } static struct dma_async_tx_descriptor * mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags) { struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); int ret; spin_lock(&mic_ch->prep_lock); ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); if (!ret) return allocate_tx(mic_ch); spin_unlock(&mic_ch->prep_lock); return NULL; } /* Return the status of the transaction */ static enum dma_status mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate)) mic_dma_cleanup(mic_ch); return dma_cookie_status(ch, cookie, txstate); } static irqreturn_t mic_dma_thread_fn(int irq, void *data) { mic_dma_cleanup((struct mic_dma_chan *)data); return IRQ_HANDLED; } static irqreturn_t mic_dma_intr_handler(int irq, void *data) { struct mic_dma_chan *ch = ((struct mic_dma_chan *)data); mic_dma_ack_interrupt(ch); return IRQ_WAKE_THREAD; } static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch) { u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring); struct device *dev = &to_mbus_device(ch)->dev; desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES); ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL); if (!ch->desc_ring) return 
-ENOMEM; ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring, desc_ring_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ch->desc_ring_micpa)) goto map_error; ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array)); if (!ch->tx_array) goto tx_error; return 0; tx_error: dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size, DMA_BIDIRECTIONAL); map_error: kfree(ch->desc_ring); return -ENOMEM; } static void mic_dma_free_desc_ring(struct mic_dma_chan *ch) { u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring); vfree(ch->tx_array); desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES); dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa, desc_ring_size, DMA_BIDIRECTIONAL); kfree(ch->desc_ring); ch->desc_ring = NULL; } static void mic_dma_free_status_dest(struct mic_dma_chan *ch) { dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa, L1_CACHE_BYTES, DMA_BIDIRECTIONAL); kfree(ch->status_dest); } static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch) { struct device *dev = &to_mbus_device(ch)->dev; ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL); if (!ch->status_dest) return -ENOMEM; ch->status_dest_micpa = dma_map_single(dev, ch->status_dest, L1_CACHE_BYTES, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ch->status_dest_micpa)) { kfree(ch->status_dest); ch->status_dest = NULL; return -ENOMEM; } return 0; } static int mic_dma_check_chan(struct mic_dma_chan *ch) { if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) || mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) { mic_dma_disable_chan(ch); mic_dma_chan_mask_intr(ch); dev_err(mic_dma_ch_to_device(ch), "%s %d error setting up mic dma chan %d\n", __func__, __LINE__, ch->ch_num); return -EBUSY; } return 0; } static int mic_dma_chan_setup(struct mic_dma_chan *ch) { if (MIC_DMA_CHAN_MIC == ch->owner) mic_dma_chan_set_owner(ch); mic_dma_disable_chan(ch); mic_dma_chan_mask_intr(ch); mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0); 
mic_dma_chan_set_desc_ring(ch); ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR); ch->head = ch->last_tail; ch->issued = 0; mic_dma_chan_unmask_intr(ch); mic_dma_enable_chan(ch); return mic_dma_check_chan(ch); } static void mic_dma_chan_destroy(struct mic_dma_chan *ch) { mic_dma_disable_chan(ch); mic_dma_chan_mask_intr(ch); } static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev) { dma_async_device_unregister(&mic_dma_dev->dma_dev); } static int mic_dma_setup_irq(struct mic_dma_chan *ch) { ch->cookie = to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch), mic_dma_intr_handler, mic_dma_thread_fn, "mic dma_channel", ch, ch->ch_num); if (IS_ERR(ch->cookie)) return IS_ERR(ch->cookie); return 0; } static inline void mic_dma_free_irq(struct mic_dma_chan *ch) { to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch); } static int mic_dma_chan_init(struct mic_dma_chan *ch) { int ret = mic_dma_alloc_desc_ring(ch); if (ret) goto ring_error; ret = mic_dma_alloc_status_dest(ch); if (ret) goto status_error; ret = mic_dma_chan_setup(ch); if (ret) goto chan_error; return ret; chan_error: mic_dma_free_status_dest(ch); status_error: mic_dma_free_desc_ring(ch); ring_error: return ret; } static int mic_dma_drain_chan(struct mic_dma_chan *ch) { struct dma_async_tx_descriptor *tx; int err = 0; dma_cookie_t cookie; tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE); if (!tx) { err = -ENOMEM; goto error; } cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) err = -ENOMEM; else err = dma_sync_wait(&ch->api_ch, cookie); if (err) { dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n", __func__, __LINE__, ch->ch_num); err = -EIO; } error: mic_dma_cleanup(ch); return err; } static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch) { mic_dma_chan_destroy(ch); mic_dma_cleanup(ch); mic_dma_free_status_dest(ch); mic_dma_free_desc_ring(ch); } static int mic_dma_init(struct mic_dma_device *mic_dma_dev, enum 
mic_dma_chan_owner owner) { int i, first_chan = mic_dma_dev->start_ch; struct mic_dma_chan *ch; int ret; for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) { unsigned long data; ch = &mic_dma_dev->mic_ch[i]; data = (unsigned long)ch; ch->ch_num = i; ch->owner = owner; spin_lock_init(&ch->cleanup_lock); spin_lock_init(&ch->prep_lock); spin_lock_init(&ch->issue_lock); ret = mic_dma_setup_irq(ch); if (ret) goto error; } return 0; error: for (i = i - 1; i >= first_chan; i--) mic_dma_free_irq(ch); return ret; } static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev) { int i, first_chan = mic_dma_dev->start_ch; struct mic_dma_chan *ch; for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) { ch = &mic_dma_dev->mic_ch[i]; mic_dma_free_irq(ch); } } static int mic_dma_alloc_chan_resources(struct dma_chan *ch) { int ret = mic_dma_chan_init(to_mic_dma_chan(ch)); if (ret) return ret; return MIC_DMA_DESC_RX_SIZE; } static void mic_dma_free_chan_resources(struct dma_chan *ch) { struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); mic_dma_drain_chan(mic_ch); mic_dma_chan_uninit(mic_ch); } /* Set the fn. 
handlers and register the dma device with dma api */ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev, enum mic_dma_chan_owner owner) { int i, first_chan = mic_dma_dev->start_ch; dma_cap_zero(mic_dma_dev->dma_dev.cap_mask); /* * This dma engine is not capable of host memory to host memory * transfers */ dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask); if (MIC_DMA_CHAN_HOST == owner) dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask); mic_dma_dev->dma_dev.device_alloc_chan_resources = mic_dma_alloc_chan_resources; mic_dma_dev->dma_dev.device_free_chan_resources = mic_dma_free_chan_resources; mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status; mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock; mic_dma_dev->dma_dev.device_prep_dma_interrupt = mic_dma_prep_interrupt_lock; mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending; mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT; INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels); for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) { mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev; dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch); list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node, &mic_dma_dev->dma_dev.channels); } return dma_async_device_register(&mic_dma_dev->dma_dev); } /* * Initializes dma channels and registers the dma device with the * dma engine api. 
*/ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev, enum mic_dma_chan_owner owner) { struct mic_dma_device *mic_dma_dev; int ret; struct device *dev = &mbdev->dev; mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL); if (!mic_dma_dev) { ret = -ENOMEM; goto alloc_error; } mic_dma_dev->mbdev = mbdev; mic_dma_dev->dma_dev.dev = dev; mic_dma_dev->mmio = mbdev->mmio_va; if (MIC_DMA_CHAN_HOST == owner) { mic_dma_dev->start_ch = 0; mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST; } else { mic_dma_dev->start_ch = 4; mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD; } ret = mic_dma_init(mic_dma_dev, owner); if (ret) goto init_error; ret = mic_dma_register_dma_device(mic_dma_dev, owner); if (ret) goto reg_error; return mic_dma_dev; reg_error: mic_dma_uninit(mic_dma_dev); init_error: kfree(mic_dma_dev); mic_dma_dev = NULL; alloc_error: dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); return mic_dma_dev; } static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) { mic_dma_unregister_dma_device(mic_dma_dev); mic_dma_uninit(mic_dma_dev); kfree(mic_dma_dev); } /* DEBUGFS CODE */ static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) { struct mic_dma_device *mic_dma_dev = s->private; int i, chan_num, first_chan = mic_dma_dev->start_ch; struct mic_dma_chan *ch; seq_printf(s, "SBOX_DCR: %#x\n", mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan], MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR)); seq_puts(s, "DMA Channel Registers\n"); seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s", "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO"); seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT"); for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) { ch = &mic_dma_dev->mic_ch[i]; chan_num = ch->ch_num; seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x", chan_num, mic_dma_read_reg(ch, MIC_DMA_REG_DCAR), mic_dma_read_reg(ch, MIC_DMA_REG_DTPR), mic_dma_read_reg(ch, MIC_DMA_REG_DHPR), 
mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI)); seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n", mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO), mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR), mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK), mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT)); } return 0; } static int mic_dma_reg_debug_open(struct inode *inode, struct file *file) { return single_open(file, mic_dma_reg_seq_show, inode->i_private); } static int mic_dma_reg_debug_release(struct inode *inode, struct file *file) { return single_release(inode, file); } static const struct file_operations mic_dma_reg_ops = { .owner = THIS_MODULE, .open = mic_dma_reg_debug_open, .read = seq_read, .llseek = seq_lseek, .release = mic_dma_reg_debug_release }; /* Debugfs parent dir */ static struct dentry *mic_dma_dbg; static int mic_dma_driver_probe(struct mbus_device *mbdev) { struct mic_dma_device *mic_dma_dev; enum mic_dma_chan_owner owner; if (MBUS_DEV_DMA_MIC == mbdev->id.device) owner = MIC_DMA_CHAN_MIC; else owner = MIC_DMA_CHAN_HOST; mic_dma_dev = mic_dma_dev_reg(mbdev, owner); dev_set_drvdata(&mbdev->dev, mic_dma_dev); if (mic_dma_dbg) { mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev), mic_dma_dbg); if (mic_dma_dev->dbg_dir) debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, mic_dma_dev, &mic_dma_reg_ops); } return 0; } static void mic_dma_driver_remove(struct mbus_device *mbdev) { struct mic_dma_device *mic_dma_dev; mic_dma_dev = dev_get_drvdata(&mbdev->dev); debugfs_remove_recursive(mic_dma_dev->dbg_dir); mic_dma_dev_unreg(mic_dma_dev); } static struct mbus_device_id id_table[] = { {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID}, {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID}, {0}, }; static struct mbus_driver mic_dma_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = mic_dma_driver_probe, .remove = mic_dma_driver_remove, }; static int __init mic_x100_dma_init(void) { int rc = mbus_register_driver(&mic_dma_driver); if (rc) return rc; 
mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); return 0; } static void __exit mic_x100_dma_exit(void) { debugfs_remove_recursive(mic_dma_dbg); mbus_unregister_driver(&mic_dma_driver); } module_init(mic_x100_dma_init); module_exit(mic_x100_dma_exit); MODULE_DEVICE_TABLE(mbus, id_table); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
yseung123/android_kernel_oneplus_msm8994
net/phonet/pn_netlink.c
2128
7720
/* * File: pn_netlink.c * * Phonet netlink interface * * Copyright (C) 2008 Nokia Corporation. * * Authors: Sakari Ailus <sakari.ailus@nokia.com> * Remi Denis-Courmont * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/kernel.h> #include <linux/netlink.h> #include <linux/phonet.h> #include <linux/slab.h> #include <net/sock.h> #include <net/phonet/pn_dev.h> /* Device address handling */ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, u32 portid, u32 seq, int event); void phonet_address_notify(int event, struct net_device *dev, u8 addr) { struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(1), GFP_KERNEL); if (skb == NULL) goto errout; err = fill_addr(skb, dev, addr, 0, 0, event); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, dev_net(dev), 0, RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err); } static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { [IFA_LOCAL] = { .type = NLA_U8 }, }; static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct net_device *dev; struct ifaddrmsg *ifm; int err; u8 pnaddr; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!netlink_capable(skb, 
CAP_SYS_ADMIN)) return -EPERM; ASSERT_RTNL(); err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy); if (err < 0) return err; ifm = nlmsg_data(nlh); if (tb[IFA_LOCAL] == NULL) return -EINVAL; pnaddr = nla_get_u8(tb[IFA_LOCAL]); if (pnaddr & 3) /* Phonet addresses only have 6 high-order bits */ return -EINVAL; dev = __dev_get_by_index(net, ifm->ifa_index); if (dev == NULL) return -ENODEV; if (nlh->nlmsg_type == RTM_NEWADDR) err = phonet_address_add(dev, pnaddr); else err = phonet_address_del(dev, pnaddr); if (!err) phonet_address_notify(nlh->nlmsg_type, dev, pnaddr); return err; } static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, u32 portid, u32 seq, int event) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_PHONET; ifm->ifa_prefixlen = 0; ifm->ifa_flags = IFA_F_PERMANENT; ifm->ifa_scope = RT_SCOPE_LINK; ifm->ifa_index = dev->ifindex; if (nla_put_u8(skb, IFA_LOCAL, addr)) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct phonet_device_list *pndevs; struct phonet_device *pnd; int dev_idx = 0, dev_start_idx = cb->args[0]; int addr_idx = 0, addr_start_idx = cb->args[1]; pndevs = phonet_device_list(sock_net(skb->sk)); rcu_read_lock(); list_for_each_entry_rcu(pnd, &pndevs->list, list) { u8 addr; if (dev_idx > dev_start_idx) addr_start_idx = 0; if (dev_idx++ < dev_start_idx) continue; addr_idx = 0; for_each_set_bit(addr, pnd->addrs, 64) { if (addr_idx++ < addr_start_idx) continue; if (fill_addr(skb, pnd->netdev, addr << 2, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0) goto out; } } out: rcu_read_unlock(); cb->args[0] = dev_idx; cb->args[1] = addr_idx; return skb->len; } /* Routes handling */ static int fill_route(struct sk_buff *skb, 
struct net_device *dev, u8 dst, u32 portid, u32 seq, int event) { struct rtmsg *rtm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0); if (nlh == NULL) return -EMSGSIZE; rtm = nlmsg_data(nlh); rtm->rtm_family = AF_PHONET; rtm->rtm_dst_len = 6; rtm->rtm_src_len = 0; rtm->rtm_tos = 0; rtm->rtm_table = RT_TABLE_MAIN; rtm->rtm_protocol = RTPROT_STATIC; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; if (nla_put_u8(skb, RTA_DST, dst) || nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) { struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(1) + nla_total_size(4), GFP_KERNEL); if (skb == NULL) goto errout; err = fill_route(skb, dev, dst, 0, 0, event); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, dev_net(dev), 0, RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err); } static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { [RTA_DST] = { .type = NLA_U8 }, [RTA_OIF] = { .type = NLA_U32 }, }; static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[RTA_MAX+1]; struct net_device *dev; struct rtmsg *rtm; int err; u8 dst; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; ASSERT_RTNL(); err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); if (err < 0) return err; rtm = nlmsg_data(nlh); if (rtm->rtm_table != RT_TABLE_MAIN || rtm->rtm_type != RTN_UNICAST) return -EINVAL; if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) return -EINVAL; dst = nla_get_u8(tb[RTA_DST]); if (dst & 3) /* Phonet addresses only have 6 high-order bits */ return 
-EINVAL; dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); if (dev == NULL) return -ENODEV; if (nlh->nlmsg_type == RTM_NEWROUTE) err = phonet_route_add(dev, dst); else err = phonet_route_del(dev, dst); if (!err) rtm_phonet_notify(nlh->nlmsg_type, dev, dst); return err; } static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; rcu_read_lock(); for (addr = 0; addr < 64; addr++) { struct net_device *dev; dev = phonet_route_get_rcu(net, addr << 2); if (!dev) continue; if (addr_idx++ < addr_start_idx) continue; if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWROUTE)) goto out; } out: rcu_read_unlock(); cb->args[0] = addr_idx; cb->args[1] = 0; return skb->len; } int __init phonet_netlink_register(void) { int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL, NULL); if (err) return err; /* Further __rtnl_register() cannot fail */ __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, NULL); __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, NULL); __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, NULL); __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, NULL); __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, NULL); return 0; }
gpl-2.0
andip71/boeffla-kernel-samsung-n8000
drivers/staging/tm6000/tm6000-video.c
2384
44343
/* * tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - Fixed module load/unload * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/random.h> #include <linux/version.h> #include <linux/usb.h> #include <linux/videodev2.h> #include <media/v4l2-ioctl.h> #include <media/tuner.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/highmem.h> #include <linux/freezer.h> #include "tm6000-regs.h" #include "tm6000.h" #define BUFFER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ /* Limits minimum and default number of buffers */ #define TM6000_MIN_BUF 4 #define TM6000_DEF_BUF 8 #define TM6000_MAX_ISO_PACKETS 46 /* Max number of ISO packets */ /* Declare static vars that will be used as parameters */ static unsigned int vid_limit = 16; /* Video memory limit, in Mb */ static int video_nr = -1; /* /dev/videoN, -1 for autodetect */ static int radio_nr = -1; /* /dev/radioN, -1 for autodetect */ /* Debug level */ int tm6000_debug; EXPORT_SYMBOL_GPL(tm6000_debug); static const struct 
v4l2_queryctrl no_ctrl = { .name = "42", .flags = V4L2_CTRL_FLAG_DISABLED, }; /* supported controls */ static struct v4l2_queryctrl tm6000_qctrl[] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, .default_value = 54, .flags = 0, }, { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 255, .step = 0x1, .default_value = 119, .flags = 0, }, { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Saturation", .minimum = 0, .maximum = 255, .step = 0x1, .default_value = 112, .flags = 0, }, { .id = V4L2_CID_HUE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Hue", .minimum = -128, .maximum = 127, .step = 0x1, .default_value = 0, .flags = 0, }, /* --- audio --- */ { .id = V4L2_CID_AUDIO_MUTE, .name = "Mute", .minimum = 0, .maximum = 1, .type = V4L2_CTRL_TYPE_BOOLEAN, }, { .id = V4L2_CID_AUDIO_VOLUME, .name = "Volume", .minimum = -15, .maximum = 15, .step = 1, .default_value = 0, .type = V4L2_CTRL_TYPE_INTEGER, } }; static const unsigned int CTRLS = ARRAY_SIZE(tm6000_qctrl); static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)]; static struct tm6000_fmt format[] = { { .name = "4:2:2, packed, YVY2", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, }, { .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .depth = 16, }, { .name = "A/V + VBI mux packet", .fourcc = V4L2_PIX_FMT_TM6000, .depth = 16, } }; static const struct v4l2_queryctrl *ctrl_by_id(unsigned int id) { unsigned int i; for (i = 0; i < CTRLS; i++) if (tm6000_qctrl[i].id == id) return tm6000_qctrl+i; return NULL; } /* ------------------------------------------------------------------ * DMA and thread functions * ------------------------------------------------------------------ */ #define norm_maxw(a) 720 #define norm_maxh(a) 576 #define norm_minw(a) norm_maxw(a) #define norm_minh(a) norm_maxh(a) /* * video-buf generic routine to get the next available buffer */ static inline void 
get_next_buf(struct tm6000_dmaqueue *dma_q, struct tm6000_buffer **buf) { struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); char *outp; if (list_empty(&dma_q->active)) { dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n"); *buf = NULL; return; } *buf = list_entry(dma_q->active.next, struct tm6000_buffer, vb.queue); if (!buf) return; /* Cleans up buffer - Useful for testing for frame/URB loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); return; } /* * Announces that a buffer were filled and request the next */ static inline void buffer_filled(struct tm6000_core *dev, struct tm6000_dmaqueue *dma_q, struct tm6000_buffer *buf) { /* Advice that buffer was filled */ dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); list_del(&buf->vb.queue); wake_up(&buf->vb.done); } const char *tm6000_msg_type[] = { "unknown(0)", /* 0 */ "video", /* 1 */ "audio", /* 2 */ "vbi", /* 3 */ "pts", /* 4 */ "err", /* 5 */ "unknown(6)", /* 6 */ "unknown(7)", /* 7 */ }; /* * Identify the tm5600/6000 buffer header type and properly handles */ static int copy_streams(u8 *data, unsigned long len, struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); u8 *ptr = data, *endp = data+len, c; unsigned long header = 0; int rc = 0; unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0; struct tm6000_buffer *vbuf = NULL; char *voutp = NULL; unsigned int linewidth; if (!dev->radio) { /* get video buffer */ get_next_buf(dma_q, &vbuf); if (!vbuf) return rc; voutp = videobuf_to_vmalloc(&vbuf->vb); if (!voutp) return 0; } for (ptr = data; ptr < endp;) { if (!dev->isoc_ctl.cmd) { /* Header */ if (dev->isoc_ctl.tmp_buf_len > 0) { /* from last urb or packet */ header = dev->isoc_ctl.tmp_buf; if (4 - dev->isoc_ctl.tmp_buf_len > 0) { memcpy((u8 *)&header + dev->isoc_ctl.tmp_buf_len, ptr, 4 - 
dev->isoc_ctl.tmp_buf_len); ptr += 4 - dev->isoc_ctl.tmp_buf_len; } dev->isoc_ctl.tmp_buf_len = 0; } else { if (ptr + 3 >= endp) { /* have incomplete header */ dev->isoc_ctl.tmp_buf_len = endp - ptr; memcpy(&dev->isoc_ctl.tmp_buf, ptr, dev->isoc_ctl.tmp_buf_len); return rc; } /* Seek for sync */ for (; ptr < endp - 3; ptr++) { if (*(ptr + 3) == 0x47) break; } /* Get message header */ header = *(unsigned long *)ptr; ptr += 4; } /* split the header fields */ c = (header >> 24) & 0xff; size = ((header & 0x7e) << 1); if (size > 0) size -= 4; block = (header >> 7) & 0xf; field = (header >> 11) & 0x1; line = (header >> 12) & 0x1ff; cmd = (header >> 21) & 0x7; /* Validates haeder fields */ if (size > TM6000_URB_MSG_LEN) size = TM6000_URB_MSG_LEN; pktsize = TM6000_URB_MSG_LEN; /* calculate position in buffer * and change the buffer */ switch (cmd) { case TM6000_URB_MSG_VIDEO: if (!dev->radio) { if ((dev->isoc_ctl.vfield != field) && (field == 1)) { /* Announces that a new buffer * were filled */ buffer_filled(dev, dma_q, vbuf); dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n"); get_next_buf(dma_q, &vbuf); if (!vbuf) return rc; voutp = videobuf_to_vmalloc(&vbuf->vb); if (!voutp) return rc; memset(voutp, 0, vbuf->vb.size); } linewidth = vbuf->vb.width << 1; pos = ((line << 1) - field - 1) * linewidth + block * TM6000_URB_MSG_LEN; /* Don't allow to write out of the buffer */ if (pos + size > vbuf->vb.size) cmd = TM6000_URB_MSG_ERR; dev->isoc_ctl.vfield = field; } break; case TM6000_URB_MSG_VBI: break; case TM6000_URB_MSG_AUDIO: case TM6000_URB_MSG_PTS: size = pktsize; /* Size is always 180 bytes */ break; } } else { /* Continue the last copy */ cmd = dev->isoc_ctl.cmd; size = dev->isoc_ctl.size; pos = dev->isoc_ctl.pos; pktsize = dev->isoc_ctl.pktsize; field = dev->isoc_ctl.field; } cpysize = (endp - ptr > size) ? 
size : endp - ptr; if (cpysize) { /* copy data in different buffers */ switch (cmd) { case TM6000_URB_MSG_VIDEO: /* Fills video buffer */ if (vbuf) memcpy(&voutp[pos], ptr, cpysize); break; case TM6000_URB_MSG_AUDIO: { int i; for (i = 0; i < cpysize; i += 2) swab16s((u16 *)(ptr + i)); tm6000_call_fillbuf(dev, TM6000_AUDIO, ptr, cpysize); break; } case TM6000_URB_MSG_VBI: /* Need some code to copy vbi buffer */ break; case TM6000_URB_MSG_PTS: { /* Need some code to copy pts */ u32 pts; pts = *(u32 *)ptr; dprintk(dev, V4L2_DEBUG_ISOC, "field %d, PTS %x", field, pts); break; } } } if (ptr + pktsize > endp) { /* End of URB packet, but cmd processing is not * complete. Preserve the state for a next packet */ dev->isoc_ctl.pos = pos + cpysize; dev->isoc_ctl.size = size - cpysize; dev->isoc_ctl.cmd = cmd; dev->isoc_ctl.field = field; dev->isoc_ctl.pktsize = pktsize - (endp - ptr); ptr += endp - ptr; } else { dev->isoc_ctl.cmd = 0; ptr += pktsize; } } return 0; } /* * Identify the tm5600/6000 buffer header type and properly handles */ static int copy_multiplexed(u8 *ptr, unsigned long len, struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); unsigned int pos = dev->isoc_ctl.pos, cpysize; int rc = 1; struct tm6000_buffer *buf; char *outp = NULL; get_next_buf(dma_q, &buf); if (buf) outp = videobuf_to_vmalloc(&buf->vb); if (!outp) return 0; while (len > 0) { cpysize = min(len, buf->vb.size-pos); memcpy(&outp[pos], ptr, cpysize); pos += cpysize; ptr += cpysize; len -= cpysize; if (pos >= buf->vb.size) { pos = 0; /* Announces that a new buffer were filled */ buffer_filled(dev, dma_q, buf); dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n"); get_next_buf(dma_q, &buf); if (!buf) break; outp = videobuf_to_vmalloc(&(buf->vb)); if (!outp) return rc; pos = 0; } } dev->isoc_ctl.pos = pos; return rc; } static inline void print_err_status(struct tm6000_core *dev, int packet, int status) { char *errmsg 
= "Unknown"; switch (status) { case -ENOENT: errmsg = "unlinked synchronuously"; break; case -ECONNRESET: errmsg = "unlinked asynchronuously"; break; case -ENOSR: errmsg = "Buffer error (overrun)"; break; case -EPIPE: errmsg = "Stalled (device not responding)"; break; case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break; case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break; case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; case -ETIME: errmsg = "Device does not respond"; break; } if (packet < 0) { dprintk(dev, V4L2_DEBUG_QUEUE, "URB status %d [%s].\n", status, errmsg); } else { dprintk(dev, V4L2_DEBUG_QUEUE, "URB packet %d, status %d [%s].\n", packet, status, errmsg); } } /* * Controls the isoc copy of each urb packet */ static inline int tm6000_isoc_copy(struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); int i, len = 0, rc = 1, status; char *p; if (urb->status < 0) { print_err_status(dev, -1, urb->status); return 0; } for (i = 0; i < urb->number_of_packets; i++) { status = urb->iso_frame_desc[i].status; if (status < 0) { print_err_status(dev, i, status); continue; } len = urb->iso_frame_desc[i].actual_length; if (len > 0) { p = urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (!urb->iso_frame_desc[i].status) { if ((dev->fourcc) == V4L2_PIX_FMT_TM6000) { rc = copy_multiplexed(p, len, urb); if (rc <= 0) return rc; } else { copy_streams(p, len, urb); } } } } return rc; } /* ------------------------------------------------------------------ * URB control * ------------------------------------------------------------------ */ /* * IRQ callback, called by URB callback */ static void tm6000_irq_callback(struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); int i; if (!dev) return; spin_lock(&dev->slock); tm6000_isoc_copy(urb); spin_unlock(&dev->slock); /* Reset urb 
buffers */ for (i = 0; i < urb->number_of_packets; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) tm6000_err("urb resubmit failed (error=%i)\n", urb->status); } /* * Stop and Deallocate URBs */ static void tm6000_uninit_isoc(struct tm6000_core *dev) { struct urb *urb; int i; dev->isoc_ctl.buf = NULL; for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { urb = dev->isoc_ctl.urb[i]; if (urb) { usb_kill_urb(urb); usb_unlink_urb(urb); if (dev->isoc_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->isoc_ctl.transfer_buffer[i], urb->transfer_dma); } usb_free_urb(urb); dev->isoc_ctl.urb[i] = NULL; } dev->isoc_ctl.transfer_buffer[i] = NULL; } kfree(dev->isoc_ctl.urb); kfree(dev->isoc_ctl.transfer_buffer); dev->isoc_ctl.urb = NULL; dev->isoc_ctl.transfer_buffer = NULL; dev->isoc_ctl.num_bufs = 0; } /* * Allocate URBs and start IRQ */ static int tm6000_prepare_isoc(struct tm6000_core *dev) { struct tm6000_dmaqueue *dma_q = &dev->vidq; int i, j, sb_size, pipe, size, max_packets, num_bufs = 8; struct urb *urb; /* De-allocates all pending stuff */ tm6000_uninit_isoc(dev); /* Stop interrupt USB pipe */ tm6000_ir_int_stop(dev); usb_set_interface(dev->udev, dev->isoc_in.bInterfaceNumber, dev->isoc_in.bAlternateSetting); /* Start interrupt USB pipe */ tm6000_ir_int_start(dev); pipe = usb_rcvisocpipe(dev->udev, dev->isoc_in.endp->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe)); if (size > dev->isoc_in.maxsize) size = dev->isoc_in.maxsize; dev->isoc_ctl.max_pkt_size = size; max_packets = TM6000_MAX_ISO_PACKETS; sb_size = max_packets * size; dev->isoc_ctl.num_bufs = num_bufs; dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL); if (!dev->isoc_ctl.urb) { tm6000_err("cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void 
*)*num_bufs, GFP_KERNEL); if (!dev->isoc_ctl.transfer_buffer) { tm6000_err("cannot allocate memory for usbtransfer\n"); kfree(dev->isoc_ctl.urb); return -ENOMEM; } dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets" " (%d bytes) of %d bytes each to handle %u size\n", max_packets, num_bufs, sb_size, dev->isoc_in.maxsize, size); /* allocate urbs and transfer buffers */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { urb = usb_alloc_urb(max_packets, GFP_KERNEL); if (!urb) { tm6000_err("cannot alloc isoc_ctl.urb %i\n", i); tm6000_uninit_isoc(dev); usb_free_urb(urb); return -ENOMEM; } dev->isoc_ctl.urb[i] = urb; dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->isoc_ctl.transfer_buffer[i]) { tm6000_err("unable to allocate %i bytes for transfer" " buffer %i%s\n", sb_size, i, in_interrupt() ? " while in int" : ""); tm6000_uninit_isoc(dev); return -ENOMEM; } memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size); usb_fill_bulk_urb(urb, dev->udev, pipe, dev->isoc_ctl.transfer_buffer[i], sb_size, tm6000_irq_callback, dma_q); urb->interval = dev->isoc_in.endp->desc.bInterval; urb->number_of_packets = max_packets; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; for (j = 0; j < max_packets; j++) { urb->iso_frame_desc[j].offset = size * j; urb->iso_frame_desc[j].length = size; } } return 0; } static int tm6000_start_thread(struct tm6000_core *dev) { struct tm6000_dmaqueue *dma_q = &dev->vidq; int i; dma_q->frame = 0; dma_q->ini_jiffies = jiffies; init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { int rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC); if (rc) { tm6000_err("submit of urb %i failed (error=%i)\n", i, rc); tm6000_uninit_isoc(dev); return rc; } } return 0; } /* ------------------------------------------------------------------ * Videobuf operations * ------------------------------------------------------------------ 
*/ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct tm6000_fh *fh = vq->priv_data; *size = fh->fmt->depth * fh->width * fh->height >> 3; if (0 == *count) *count = TM6000_DEF_BUF; if (*count < TM6000_MIN_BUF) *count = TM6000_MIN_BUF; while (*size * *count > vid_limit * 1024 * 1024) (*count)--; return 0; } static void free_buffer(struct videobuf_queue *vq, struct tm6000_buffer *buf) { struct tm6000_fh *fh = vq->priv_data; struct tm6000_core *dev = fh->dev; unsigned long flags; if (in_interrupt()) BUG(); /* We used to wait for the buffer to finish here, but this didn't work because, as we were keeping the state as VIDEOBUF_QUEUED, videobuf_queue_cancel marked it as finished for us. (Also, it could wedge forever if the hardware was misconfigured.) This should be safe; by the time we get here, the buffer isn't queued anymore. If we ever start marking the buffers as VIDEOBUF_ACTIVE, it won't be, though. */ spin_lock_irqsave(&dev->slock, flags); if (dev->isoc_ctl.buf == buf) dev->isoc_ctl.buf = NULL; spin_unlock_irqrestore(&dev->slock, flags); videobuf_vmalloc_free(&buf->vb); buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct tm6000_fh *fh = vq->priv_data; struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb); struct tm6000_core *dev = fh->dev; int rc = 0, urb_init = 0; BUG_ON(NULL == fh->fmt); /* FIXME: It assumes depth=2 */ /* The only currently supported format is 16 bits/pixel */ buf->vb.size = fh->fmt->depth*fh->width*fh->height >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; if (buf->fmt != fh->fmt || buf->vb.width != fh->width || buf->vb.height != fh->height || buf->vb.field != field) { buf->fmt = fh->fmt; buf->vb.width = fh->width; buf->vb.height = fh->height; buf->vb.field = field; buf->vb.state = VIDEOBUF_NEEDS_INIT; } if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { if 
(0 != (rc = videobuf_iolock(vq, &buf->vb, NULL))) goto fail; urb_init = 1; } if (!dev->isoc_ctl.num_bufs) urb_init = 1; if (urb_init) { rc = tm6000_prepare_isoc(dev); if (rc < 0) goto fail; rc = tm6000_start_thread(dev); if (rc < 0) goto fail; } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: free_buffer(vq, buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb); struct tm6000_fh *fh = vq->priv_data; struct tm6000_core *dev = fh->dev; struct tm6000_dmaqueue *vidq = &dev->vidq; buf->vb.state = VIDEOBUF_QUEUED; list_add_tail(&buf->vb.queue, &vidq->active); } static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb); free_buffer(vq, buf); } static struct videobuf_queue_ops tm6000_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /* ------------------------------------------------------------------ * IOCTL handling * ------------------------------------------------------------------ */ static bool is_res_read(struct tm6000_core *dev, struct tm6000_fh *fh) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources == fh && dev->is_res_read) return true; return false; } static bool is_res_streaming(struct tm6000_core *dev, struct tm6000_fh *fh) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources == fh) return true; return false; } static bool res_get(struct tm6000_core *dev, struct tm6000_fh *fh, bool is_res_read) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources == fh && dev->is_res_read == is_res_read) return true; /* is it free? 
*/ if (dev->resources) return false; /* grab it */ dev->resources = fh; dev->is_res_read = is_res_read; dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n"); return true; } static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources != fh) return; dev->resources = NULL; dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: put\n"); } /* ------------------------------------------------------------------ * IOCTL vidioc handling * ------------------------------------------------------------------ */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev; strlcpy(cap->driver, "tm6000", sizeof(cap->driver)); strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card)); cap->version = TM6000_VERSION; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE; if (dev->tuner_type != TUNER_ABSENT) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(format))) return -EINVAL; strlcpy(f->description, format[f->index].name, sizeof(f->description)); f->pixelformat = format[f->index].fourcc; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct tm6000_fh *fh = priv; f->fmt.pix.width = fh->width; f->fmt.pix.height = fh->height; f->fmt.pix.field = fh->vb_vidq.field; f->fmt.pix.pixelformat = fh->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * fh->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static struct tm6000_fmt *format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(format); i++) if (format[i].fourcc == fourcc) return format+i; return NULL; } static int vidioc_try_fmt_vid_cap(struct file *file, void 
*priv, struct v4l2_format *f) { struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev; struct tm6000_fmt *fmt; enum v4l2_field field; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) { dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Fourcc format (0x%08x)" " invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) field = V4L2_FIELD_SEQ_TB; else if (V4L2_FIELD_INTERLACED != field) { dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n"); return -EINVAL; } tm6000_get_std_res(dev); f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.width &= ~0x01; f->fmt.pix.field = field; f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } /*FIXME: This seems to be generic enough to be at videodev2 */ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; int ret = vidioc_try_fmt_vid_cap(file, fh, f); if (ret < 0) return ret; fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); fh->width = f->fmt.pix.width; fh->height = f->fmt.pix.height; fh->vb_vidq.field = f->fmt.pix.field; fh->type = f->type; dev->fourcc = f->fmt.pix.pixelformat; tm6000_set_fourcc_format(dev); return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct tm6000_fh *fh = priv; return videobuf_reqbufs(&fh->vb_vidq, p); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct tm6000_fh *fh = priv; return videobuf_querybuf(&fh->vb_vidq, p); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct tm6000_fh *fh = priv; return videobuf_qbuf(&fh->vb_vidq, p); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct tm6000_fh *fh = priv; return videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK); 
} static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (i != fh->type) return -EINVAL; if (!res_get(dev, fh, false)) return -EBUSY; return (videobuf_streamon(&fh->vb_vidq)); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct tm6000_fh *fh=priv; struct tm6000_core *dev = fh->dev; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (i != fh->type) return -EINVAL; videobuf_streamoff(&fh->vb_vidq); res_free(dev,fh); return (0); } static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm) { int rc=0; struct tm6000_fh *fh=priv; struct tm6000_core *dev = fh->dev; dev->norm = *norm; rc = tm6000_init_analog_mode(dev); fh->width = dev->width; fh->height = dev->height; if (rc<0) return rc; v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm); return 0; } static const char *iname [] = { [TM6000_INPUT_TV] = "Television", [TM6000_INPUT_COMPOSITE1] = "Composite 1", [TM6000_INPUT_COMPOSITE2] = "Composite 2", [TM6000_INPUT_SVIDEO] = "S-Video", }; static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; unsigned int n; n = i->index; if (n >= 3) return -EINVAL; if (!dev->vinput[n].type) return -EINVAL; i->index = n; if (dev->vinput[n].type == TM6000_INPUT_TV) i->type = V4L2_INPUT_TYPE_TUNER; else i->type = V4L2_INPUT_TYPE_CAMERA; strcpy(i->name, iname[dev->vinput[n].type]); i->std = TM6000_STD; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; *i = dev->input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; int rc = 0; if (i >= 3) return -EINVAL; if 
(!dev->vinput[i].type) return -EINVAL; dev->input = i; rc = vidioc_s_std(file, priv, &dev->vfd->current_norm); return rc; } /* --- controls ---------------------------------------------- */ static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++) if (qc->id && qc->id == tm6000_qctrl[i].id) { memcpy(qc, &(tm6000_qctrl[i]), sizeof(*qc)); return 0; } return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; int val; /* FIXME: Probably, those won't work! Maybe we need shadow regs */ switch (ctrl->id) { case V4L2_CID_CONTRAST: val = tm6000_get_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0); break; case V4L2_CID_BRIGHTNESS: val = tm6000_get_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0); return 0; case V4L2_CID_SATURATION: val = tm6000_get_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0); return 0; case V4L2_CID_HUE: val = tm6000_get_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, 0); return 0; case V4L2_CID_AUDIO_MUTE: val = dev->ctl_mute; return 0; case V4L2_CID_AUDIO_VOLUME: val = dev->ctl_volume; return 0; default: return -EINVAL; } if (val < 0) return val; ctrl->value = val; return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; u8 val = ctrl->value; switch (ctrl->id) { case V4L2_CID_CONTRAST: tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val); return 0; case V4L2_CID_BRIGHTNESS: tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val); return 0; case V4L2_CID_SATURATION: tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val); return 0; case V4L2_CID_HUE: tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val); return 0; case V4L2_CID_AUDIO_MUTE: dev->ctl_mute = val; tm6000_tvaudio_set_mute(dev, val); return 0; case 
V4L2_CID_AUDIO_VOLUME: dev->ctl_volume = val; tm6000_set_volume(dev, val); return 0; } return -EINVAL; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; t->rxsubchans = V4L2_TUNER_SUB_STEREO; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t); t->audmode = dev->amode; return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (UNSET == dev->tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; dev->amode = t->audmode; dprintk(dev, 3, "audio mode: %x\n", t->audmode); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; f->type = fh->radio ? 
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = dev->freq; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f); return 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; if (unlikely(f->tuner != 0)) return -EINVAL; if (0 == fh->radio && V4L2_TUNER_ANALOG_TV != f->type) return -EINVAL; if (1 == fh->radio && V4L2_TUNER_RADIO != f->type) return -EINVAL; dev->freq = f->frequency; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f); return 0; } static int radio_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct tm6000_fh *fh = file->private_data; struct tm6000_core *dev = fh->dev; strcpy(cap->driver, "tm6000"); strlcpy(cap->card, dev->name, sizeof(dev->name)); sprintf(cap->bus_info, "USB%04x:%04x", le16_to_cpu(dev->udev->descriptor.idVendor), le16_to_cpu(dev->udev->descriptor.idProduct)); cap->version = dev->dev_type; cap->capabilities = V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; return 0; } static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct tm6000_fh *fh = file->private_data; struct tm6000_core *dev = fh->dev; if (0 != t->index) return -EINVAL; memset(t, 0, sizeof(*t)); strcpy(t->name, "Radio"); t->type = V4L2_TUNER_RADIO; t->rxsubchans = V4L2_TUNER_SUB_STEREO; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t); return 0; } static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct tm6000_fh *fh = file->private_data; struct tm6000_core *dev = fh->dev; if (0 != t->index) return -EINVAL; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t); return 0; } static int radio_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (i->index != 0) return -EINVAL; 
if (!dev->rinput.type) return -EINVAL; strcpy(i->name, "Radio"); i->type = V4L2_INPUT_TYPE_TUNER; return 0; } static int radio_g_input(struct file *filp, void *priv, unsigned int *i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (dev->input !=5) return -EINVAL; *i = dev->input -5; return 0; } static int radio_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { memset(a, 0, sizeof(*a)); strcpy(a->name, "Radio"); return 0; } static int radio_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { return 0; } static int radio_s_input(struct file *filp, void *priv, unsigned int i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (i) return -EINVAL; if (!dev->rinput.type) return -EINVAL; dev->input = i + 5; return 0; } static int radio_s_std(struct file *file, void *fh, v4l2_std_id *norm) { return 0; } static int radio_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c) { const struct v4l2_queryctrl *ctrl; if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1) return -EINVAL; if (c->id == V4L2_CID_AUDIO_MUTE) { ctrl = ctrl_by_id(c->id); *c = *ctrl; } else *c = no_ctrl; return 0; } /* ------------------------------------------------------------------ File operations for the device ------------------------------------------------------------------*/ static int tm6000_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct tm6000_core *dev = video_drvdata(file); struct tm6000_fh *fh; enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; int i, rc; int radio = 0; printk(KERN_INFO "tm6000: open called (dev=%s)\n", video_device_node_name(vdev)); dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n", video_device_node_name(vdev)); switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: type = V4L2_BUF_TYPE_VBI_CAPTURE; break; case VFL_TYPE_RADIO: radio = 1; break; } /* If more than one user, mutex should be added 
*/ dev->users++; dprintk(dev, V4L2_DEBUG_OPEN, "open dev=%s type=%s users=%d\n", video_device_node_name(vdev), v4l2_type_names[type], dev->users); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (NULL == fh) { dev->users--; return -ENOMEM; } file->private_data = fh; fh->dev = dev; fh->radio = radio; dev->radio = radio; fh->type = type; dev->fourcc = format[0].fourcc; fh->fmt = format_by_fourcc(dev->fourcc); tm6000_get_std_res (dev); fh->width = dev->width; fh->height = dev->height; dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, " "dev->vidq=0x%08lx\n", (unsigned long)fh,(unsigned long)dev,(unsigned long)&dev->vidq); dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty " "queued=%d\n",list_empty(&dev->vidq.queued)); dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty " "active=%d\n",list_empty(&dev->vidq.active)); /* initialize hardware on analog mode */ rc = tm6000_init_analog_mode(dev); if (rc < 0) return rc; if (dev->mode != TM6000_MODE_ANALOG) { /* Put all controls at a sane state */ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++) qctl_regs[i] = tm6000_qctrl[i].default_value; dev->mode = TM6000_MODE_ANALOG; } videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops, NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED, sizeof(struct tm6000_buffer), fh, &dev->lock); if (fh->radio) { dprintk(dev, V4L2_DEBUG_OPEN, "video_open: setting radio device\n"); dev->input = 5; tm6000_set_audio_rinput(dev); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio); tm6000_prepare_isoc(dev); tm6000_start_thread(dev); } return 0; } static ssize_t tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos) { struct tm6000_fh *fh = file->private_data; if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (!res_get(fh->dev, fh, true)) return -EBUSY; return videobuf_read_stream(&fh->vb_vidq, data, count, pos, 0, file->f_flags & O_NONBLOCK); } return 0; } static unsigned int tm6000_poll(struct file *file, struct 
poll_table_struct *wait) { struct tm6000_fh *fh = file->private_data; struct tm6000_buffer *buf; if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) return POLLERR; if (!!is_res_streaming(fh->dev, fh)) return POLLERR; if (!is_res_read(fh->dev, fh)) { /* streaming capture */ if (list_empty(&fh->vb_vidq.stream)) return POLLERR; buf = list_entry(fh->vb_vidq.stream.next,struct tm6000_buffer,vb.stream); } else { /* read() capture */ return videobuf_poll_stream(file, &fh->vb_vidq, wait); } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) return POLLIN | POLLRDNORM; return 0; } static int tm6000_release(struct file *file) { struct tm6000_fh *fh = file->private_data; struct tm6000_core *dev = fh->dev; struct video_device *vdev = video_devdata(file); dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: close called (dev=%s, users=%d)\n", video_device_node_name(vdev), dev->users); dev->users--; res_free(dev, fh); if (!dev->users) { tm6000_uninit_isoc(dev); videobuf_mmap_free(&fh->vb_vidq); } kfree(fh); return 0; } static int tm6000_mmap(struct file *file, struct vm_area_struct * vma) { struct tm6000_fh *fh = file->private_data; int ret; ret = videobuf_mmap_mapper(&fh->vb_vidq, vma); return ret; } static struct v4l2_file_operations tm6000_fops = { .owner = THIS_MODULE, .open = tm6000_open, .release = tm6000_release, .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ .read = tm6000_read, .poll = tm6000_poll, .mmap = tm6000_mmap, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = 
vidioc_s_ctrl, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, }; static struct video_device tm6000_template = { .name = "tm6000", .fops = &tm6000_fops, .ioctl_ops = &video_ioctl_ops, .release = video_device_release, .tvnorms = TM6000_STD, .current_norm = V4L2_STD_NTSC_M, }; static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = tm6000_open, .release = tm6000_release, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = radio_querycap, .vidioc_g_tuner = radio_g_tuner, .vidioc_enum_input = radio_enum_input, .vidioc_g_audio = radio_g_audio, .vidioc_s_tuner = radio_s_tuner, .vidioc_s_audio = radio_s_audio, .vidioc_s_input = radio_s_input, .vidioc_s_std = radio_s_std, .vidioc_queryctrl = radio_queryctrl, .vidioc_g_input = radio_g_input, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, }; struct video_device tm6000_radio_template = { .name = "tm6000", .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; /* ----------------------------------------------------------------- * Initialization and module stuff * ------------------------------------------------------------------ */ static struct video_device *vdev_init(struct tm6000_core *dev, const struct video_device *template, const char *type_name) { struct video_device *vfd; vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; vfd->v4l2_dev = &dev->v4l2_dev; vfd->release = video_device_release; vfd->debug = tm6000_debug; vfd->lock = &dev->lock; snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name); 
video_set_drvdata(vfd, dev); return vfd; } int tm6000_v4l2_register(struct tm6000_core *dev) { int ret = -1; dev->vfd = vdev_init(dev, &tm6000_template, "video"); if (!dev->vfd) { printk(KERN_INFO "%s: can't register video device\n", dev->name); return -ENOMEM; } /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); INIT_LIST_HEAD(&dev->vidq.queued); ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr); if (ret < 0) { printk(KERN_INFO "%s: can't register video device\n", dev->name); return ret; } printk(KERN_INFO "%s: registered device %s\n", dev->name, video_device_node_name(dev->vfd)); if (dev->caps.has_radio) { dev->radio_dev = vdev_init(dev, &tm6000_radio_template, "radio"); if (!dev->radio_dev) { printk(KERN_INFO "%s: can't register radio device\n", dev->name); return ret; /* FIXME release resource */ } ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO, radio_nr); if (ret < 0) { printk(KERN_INFO "%s: can't register radio device\n", dev->name); return ret; /* FIXME release resource */ } printk(KERN_INFO "%s: registered device %s\n", dev->name, video_device_node_name(dev->radio_dev)); } printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret); return ret; } int tm6000_v4l2_unregister(struct tm6000_core *dev) { video_unregister_device(dev->vfd); if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; } return 0; } int tm6000_v4l2_exit(void) { return 0; } module_param(video_nr, int, 0); MODULE_PARM_DESC(video_nr, "Allow changing video device number"); module_param_named(debug, tm6000_debug, int, 0444); MODULE_PARM_DESC(debug, "activates debug info"); module_param(vid_limit, int, 0644); MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
gpl-2.0
Maroc-OS/android_kernel_bn_encore
drivers/staging/tm6000/tm6000-cards.c
2384
35631
/* * tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/usb.h> #include <linux/version.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include <media/tvaudio.h> #include <media/i2c-addr.h> #include <media/rc-map.h> #include "tm6000.h" #include "tm6000-regs.h" #include "tuner-xc2028.h" #include "xc5000.h" #define TM6000_BOARD_UNKNOWN 0 #define TM5600_BOARD_GENERIC 1 #define TM6000_BOARD_GENERIC 2 #define TM6010_BOARD_GENERIC 3 #define TM5600_BOARD_10MOONS_UT821 4 #define TM5600_BOARD_10MOONS_UT330 5 #define TM6000_BOARD_ADSTECH_DUAL_TV 6 #define TM6000_BOARD_FREECOM_AND_SIMILAR 7 #define TM6000_BOARD_ADSTECH_MINI_DUAL_TV 8 #define TM6010_BOARD_HAUPPAUGE_900H 9 #define TM6010_BOARD_BEHOLD_WANDER 10 #define TM6010_BOARD_BEHOLD_VOYAGER 11 #define TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE 12 #define TM6010_BOARD_TWINHAN_TU501 13 #define TM6010_BOARD_BEHOLD_WANDER_LITE 14 #define TM6010_BOARD_BEHOLD_VOYAGER_LITE 15 #define TM5600_BOARD_TERRATEC_GRABSTER 16 #define is_generic(model) ((model == TM6000_BOARD_UNKNOWN) || \ (model == TM5600_BOARD_GENERIC) || \ (model == TM6000_BOARD_GENERIC) || \ 
(model == TM6010_BOARD_GENERIC)) #define TM6000_MAXBOARDS 16 static unsigned int card[] = {[0 ... (TM6000_MAXBOARDS - 1)] = UNSET }; module_param_array(card, int, NULL, 0444); static unsigned long tm6000_devused; struct tm6000_board { char *name; char eename[16]; /* EEPROM name */ unsigned eename_size; /* size of EEPROM name */ unsigned eename_pos; /* Position where it appears at ROM */ struct tm6000_capabilities caps; enum tm6000_devtype type; /* variant of the chipset */ int tuner_type; /* type of the tuner */ int tuner_addr; /* tuner address */ int demod_addr; /* demodulator address */ struct tm6000_gpio gpio; struct tm6000_input vinput[3]; struct tm6000_input rinput; char *ir_codes; }; struct tm6000_board tm6000_boards[] = { [TM6000_BOARD_UNKNOWN] = { .name = "Unknown tm6000 video grabber", .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_GENERIC] = { .name = "Generic tm5600 board", .type = TM5600, .tuner_type = TUNER_XC2028, .tuner_addr = 0xc2 >> 1, .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_GENERIC] = { .name = "Generic tm6000 board", .tuner_type = TUNER_XC2028, .tuner_addr = 0xc2 >> 1, .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = 
TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_GENERIC] = { .name = "Generic tm6010 board", .type = TM6010, .tuner_type = TUNER_XC2028, .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_10MOONS_UT821] = { .name = "10Moons UT 821", .tuner_type = TUNER_XC2028, .eename = { '1', '0', 'M', 'O', 'O', 'N', 'S', '5', '6', '0', '0', 0xff, 0x45, 0x5b}, .eename_size = 14, .eename_pos = 0x14, .type = TM5600, .tuner_addr = 0xc2 >> 1, .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_10MOONS_UT330] = { .name = "10Moons UT 330", .tuner_type = TUNER_PHILIPS_FQ1216AME_MK4, .tuner_addr = 0xc8 >> 1, .caps = { .has_tuner = 1, .has_dvb = 0, .has_zl10353 = 0, .has_eeprom = 1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, 
.amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_ADSTECH_DUAL_TV] = { .name = "ADSTECH Dual TV USB", .tuner_type = TUNER_XC2028, .tuner_addr = 0xc8 >> 1, .caps = { .has_tuner = 1, .has_tda9874 = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_FREECOM_AND_SIMILAR] = { .name = "Freecom Hybrid Stick / Moka DVB-T Receiver Dual", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 0, .has_remote = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_4, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_ADSTECH_MINI_DUAL_TV] = { .name = "ADSTECH Mini Dual TV USB", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc8 >> 1, .demod_addr = 0x1e >> 1, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 0, }, .gpio = { .tuner_reset = TM6000_GPIO_4, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_HAUPPAUGE_900H] = { .name = "Hauppauge WinTV HVR-900H / WinTV USB2-Stick", .eename = { 'H', 0, 'V', 0, 'R', 0, '9', 0, '0', 0, '0', 0, 'H', 0 }, .eename_size = 14, .eename_pos = 0x42, .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 
>> 1, .demod_addr = 0x1e >> 1, .type = TM6010,
	.caps = {
		.has_tuner    = 1,
		.has_dvb      = 1,
		.has_zl10353  = 1,
		.has_eeprom   = 1,
		.has_remote   = 1,
	},
	.gpio = {
		.tuner_reset  = TM6010_GPIO_2,
		.tuner_on     = TM6010_GPIO_3,
		.demod_reset  = TM6010_GPIO_1,
		.demod_on     = TM6010_GPIO_4,
		.power_led    = TM6010_GPIO_7,
		.dvb_led      = TM6010_GPIO_5,
		.ir           = TM6010_GPIO_0,
	},
	.vinput = { {
		.type = TM6000_INPUT_TV,
		.vmux = TM6000_VMUX_VIDEO_B,
		.amux = TM6000_AMUX_SIF1,
	}, {
		.type = TM6000_INPUT_COMPOSITE1,
		.vmux = TM6000_VMUX_VIDEO_A,
		.amux = TM6000_AMUX_ADC2,
	}, {
		.type = TM6000_INPUT_SVIDEO,
		.vmux = TM6000_VMUX_VIDEO_AB,
		.amux = TM6000_AMUX_ADC2,
	}, },
}, [TM6010_BOARD_BEHOLD_WANDER] = {
	.name         = "Beholder Wander DVB-T/TV/FM USB2.0",
	.tuner_type   = TUNER_XC5000,
	.tuner_addr   = 0xc2 >> 1,
	.demod_addr   = 0x1e >> 1,
	.type         = TM6010,
	.caps = {
		.has_tuner    = 1,
		.has_dvb      = 1,
		.has_zl10353  = 1,
		.has_eeprom   = 1,
		.has_remote   = 1,
		/* fix: was ".has_radio = 1." — '.' typo for ',' (only
		 * compiled because "1." is a double literal) */
		.has_radio    = 1,
	},
	.gpio = {
		.tuner_reset  = TM6010_GPIO_0,
		.demod_reset  = TM6010_GPIO_1,
		.power_led    = TM6010_GPIO_6,
	},
	.vinput = { {
		.type = TM6000_INPUT_TV,
		.vmux = TM6000_VMUX_VIDEO_B,
		.amux = TM6000_AMUX_SIF1,
	}, {
		.type = TM6000_INPUT_COMPOSITE1,
		.vmux = TM6000_VMUX_VIDEO_A,
		.amux = TM6000_AMUX_ADC2,
	}, {
		.type = TM6000_INPUT_SVIDEO,
		.vmux = TM6000_VMUX_VIDEO_AB,
		.amux = TM6000_AMUX_ADC2,
	}, },
	.rinput = {
		.type = TM6000_INPUT_RADIO,
		.amux = TM6000_AMUX_ADC1,
	},
}, [TM6010_BOARD_BEHOLD_VOYAGER] = {
	.name         = "Beholder Voyager TV/FM USB2.0",
	.tuner_type   = TUNER_XC5000,
	.tuner_addr   = 0xc2 >> 1,
	.type         = TM6010,
	.caps = {
		.has_tuner    = 1,
		.has_dvb      = 0,
		.has_zl10353  = 0,
		.has_eeprom   = 1,
		.has_remote   = 1,
		.has_radio    = 1,
	},
	.gpio = {
		.tuner_reset  = TM6010_GPIO_0,
		.power_led    = TM6010_GPIO_6,
	},
	.vinput = { {
		.type = TM6000_INPUT_TV,
		.vmux = TM6000_VMUX_VIDEO_B,
		.amux = TM6000_AMUX_SIF1,
	}, {
		.type = TM6000_INPUT_COMPOSITE1,
		.vmux = TM6000_VMUX_VIDEO_A,
		.amux = TM6000_AMUX_ADC2,
	}, {
		.type = TM6000_INPUT_SVIDEO,
		.vmux = TM6000_VMUX_VIDEO_AB,
		.amux = TM6000_AMUX_ADC2,
	}, },
	.rinput = { .type =
TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, [TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE] = { .name = "Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_TERRATEC_GRABSTER] = { .name = "Terratec Grabster AV 150/250 MX", .type = TM5600, .tuner_type = TUNER_ABSENT, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_TWINHAN_TU501] = { .name = "Twinhan TU501(704D1)", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = 
TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_BEHOLD_WANDER_LITE] = { .name = "Beholder Wander Lite DVB-T/TV/FM USB2.0", .tuner_type = TUNER_XC5000, .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 0, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_0, .demod_reset = TM6010_GPIO_1, .power_led = TM6010_GPIO_6, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, [TM6010_BOARD_BEHOLD_VOYAGER_LITE] = { .name = "Beholder Voyager Lite TV/FM USB2.0", .tuner_type = TUNER_XC5000, .tuner_addr = 0xc2 >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 0, .has_zl10353 = 0, .has_eeprom = 1, .has_remote = 0, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_0, .power_led = TM6010_GPIO_6, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, }; /* table of devices that work with this driver */ struct usb_device_id tm6000_id_table[] = { { USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_GENERIC }, { USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC }, { USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV }, { USB_DEVICE(0x14aa, 0x0620), .driver_info = TM6000_BOARD_FREECOM_AND_SIMILAR }, { USB_DEVICE(0x06e1, 0xb339), .driver_info = TM6000_BOARD_ADSTECH_MINI_DUAL_TV }, { USB_DEVICE(0x2040, 0x6600), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x2040, 0x6601), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x2040, 0x6610), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x2040, 0x6611), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x6000, 0xdec0), .driver_info = 
TM6010_BOARD_BEHOLD_WANDER }, { USB_DEVICE(0x6000, 0xdec1), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER }, { USB_DEVICE(0x0ccd, 0x0086), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE }, { USB_DEVICE(0x0ccd, 0x00A5), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE }, { USB_DEVICE(0x0ccd, 0x0079), .driver_info = TM5600_BOARD_TERRATEC_GRABSTER }, { USB_DEVICE(0x13d3, 0x3240), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x13d3, 0x3241), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x13d3, 0x3243), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x13d3, 0x3264), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x6000, 0xdec2), .driver_info = TM6010_BOARD_BEHOLD_WANDER_LITE }, { USB_DEVICE(0x6000, 0xdec3), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER_LITE }, { }, }; /* Control power led for show some activity */ void tm6000_flash_led(struct tm6000_core *dev, u8 state) { /* Power LED unconfigured */ if (!dev->gpio.power_led) return; /* ON Power LED */ if (state) { switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_WANDER_LITE: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); break; } } /* OFF Power LED */ else { switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_WANDER_LITE: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); break; } } } /* Tuner callback to provide the proper gpio changes 
needed for xc5000 */ int tm6000_xc5000_callback(void *ptr, int component, int command, int arg) { int rc = 0; struct tm6000_core *dev = ptr; if (dev->tuner_type != TUNER_XC5000) return 0; switch (command) { case XC5000_TUNER_RESET: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(15); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); msleep(15); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); break; } return rc; } EXPORT_SYMBOL_GPL(tm6000_xc5000_callback); /* Tuner callback to provide the proper gpio changes needed for xc2028 */ int tm6000_tuner_callback(void *ptr, int component, int command, int arg) { int rc = 0; struct tm6000_core *dev = ptr; if (dev->tuner_type != TUNER_XC2028) return 0; switch (command) { case XC2028_RESET_CLK: tm6000_ir_wait(dev, 0); tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x02, arg); msleep(10); rc = tm6000_i2c_reset(dev, 10); break; case XC2028_TUNER_RESET: /* Reset codes during load firmware */ switch (arg) { case 0: /* newer tuner can faster reset */ switch (dev->model) { case TM5600_BOARD_10MOONS_UT821: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 0x300, 0x01); msleep(10); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 0x300, 0x00); msleep(10); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 0x300, 0x01); break; case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(60); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); msleep(75); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(60); break; default: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 
dev->gpio.tuner_reset, 0x00); msleep(130); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(130); break; } tm6000_ir_wait(dev, 1); break; case 1: tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x02, 0x01); msleep(10); break; case 2: rc = tm6000_i2c_reset(dev, 100); break; } } return rc; } EXPORT_SYMBOL_GPL(tm6000_tuner_callback); int tm6000_cards_setup(struct tm6000_core *dev) { int i, rc; /* * Board-specific initialization sequence. Handles all GPIO * initialization sequences that are board-specific. * Up to now, all found devices use GPIO1 and GPIO4 at the same way. * Probably, they're all based on some reference device. Due to that, * there's a common routine at the end to handle those GPIO's. Devices * that use different pinups or init sequences can just return at * the board-specific session. */ switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: case TM6010_BOARD_GENERIC: /* Turn xceive 3028 on */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_on, 0x01); msleep(15); /* Turn zarlink zl10353 on */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00); msleep(15); /* Reset zarlink zl10353 */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00); msleep(50); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01); msleep(15); /* Turn zarlink zl10353 off */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x01); msleep(15); /* ir ? 
*/ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.ir, 0x01); msleep(15); /* Power led on (blue) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); msleep(15); /* DVB led off (orange) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.dvb_led, 0x01); msleep(15); /* Turn zarlink zl10353 on */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00); msleep(15); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_WANDER_LITE: /* Power led on (blue) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); msleep(15); /* Reset zarlink zl10353 */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00); msleep(50); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01); msleep(15); break; case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: /* Power led on (blue) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); msleep(15); break; default: break; } /* * Default initialization. Most of the devices seem to use GPIO1 * and GPIO4.on the same way, so, this handles the common sequence * used by most devices. 
* If a device uses a different sequence or different GPIO pins for * reset, just add the code at the board-specific part */ if (dev->gpio.tuner_reset) { for (i = 0; i < 2; i++) { rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); if (rc < 0) { printk(KERN_ERR "Error %i doing tuner reset\n", rc); return rc; } msleep(10); /* Just to be conservative */ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); if (rc < 0) { printk(KERN_ERR "Error %i doing tuner reset\n", rc); return rc; } } } else { printk(KERN_ERR "Tuner reset is not configured\n"); return -1; } msleep(50); return 0; }; static void tm6000_config_tuner(struct tm6000_core *dev) { struct tuner_setup tun_setup; /* Load tuner module */ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", dev->tuner_addr, NULL); memset(&tun_setup, 0, sizeof(tun_setup)); tun_setup.type = dev->tuner_type; tun_setup.addr = dev->tuner_addr; tun_setup.mode_mask = 0; if (dev->caps.has_tuner) tun_setup.mode_mask |= (T_ANALOG_TV | T_RADIO); switch (dev->tuner_type) { case TUNER_XC2028: tun_setup.tuner_callback = tm6000_tuner_callback; break; case TUNER_XC5000: tun_setup.tuner_callback = tm6000_xc5000_callback; break; } v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup); switch (dev->tuner_type) { case TUNER_XC2028: { struct v4l2_priv_tun_config xc2028_cfg; struct xc2028_ctrl ctl; memset(&xc2028_cfg, 0, sizeof(xc2028_cfg)); memset(&ctl, 0, sizeof(ctl)); ctl.demod = XC3028_FE_ZARLINK456; xc2028_cfg.tuner = TUNER_XC2028; xc2028_cfg.priv = &ctl; switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: ctl.fname = "xc3028L-v36.fw"; break; default: if (dev->dev_type == TM6010) ctl.fname = "xc3028-v27.fw"; else ctl.fname = "xc3028-v24.fw"; } printk(KERN_INFO "Setting firmware parameters for xc2028\n"); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg); } break; case 
TUNER_XC5000: { struct v4l2_priv_tun_config xc5000_cfg; struct xc5000_config ctl = { .i2c_address = dev->tuner_addr, .if_khz = 4570, .radio_input = XC5000_RADIO_FM1_MONO, }; xc5000_cfg.tuner = TUNER_XC5000; xc5000_cfg.priv = &ctl; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc5000_cfg); } break; default: printk(KERN_INFO "Unknown tuner type. Tuner is not configured.\n"); break; } } static int fill_board_specific_data(struct tm6000_core *dev) { int rc; dev->dev_type = tm6000_boards[dev->model].type; dev->tuner_type = tm6000_boards[dev->model].tuner_type; dev->tuner_addr = tm6000_boards[dev->model].tuner_addr; dev->gpio = tm6000_boards[dev->model].gpio; dev->ir_codes = tm6000_boards[dev->model].ir_codes; dev->demod_addr = tm6000_boards[dev->model].demod_addr; dev->caps = tm6000_boards[dev->model].caps; dev->vinput[0] = tm6000_boards[dev->model].vinput[0]; dev->vinput[1] = tm6000_boards[dev->model].vinput[1]; dev->vinput[2] = tm6000_boards[dev->model].vinput[2]; dev->rinput = tm6000_boards[dev->model].rinput; /* initialize hardware */ rc = tm6000_init(dev); if (rc < 0) return rc; rc = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev); if (rc < 0) return rc; return rc; } static void use_alternative_detection_method(struct tm6000_core *dev) { int i, model = -1; if (!dev->eedata_size) return; for (i = 0; i < ARRAY_SIZE(tm6000_boards); i++) { if (!tm6000_boards[i].eename_size) continue; if (dev->eedata_size < tm6000_boards[i].eename_pos + tm6000_boards[i].eename_size) continue; if (!memcmp(&dev->eedata[tm6000_boards[i].eename_pos], tm6000_boards[i].eename, tm6000_boards[i].eename_size)) { model = i; break; } } if (model < 0) { printk(KERN_INFO "Device has eeprom but is currently unknown\n"); return; } dev->model = model; printk(KERN_INFO "Device identified via eeprom as %s (type = %d)\n", tm6000_boards[model].name, model); } static int tm6000_init_dev(struct tm6000_core *dev) { struct v4l2_frequency f; int rc = 0; mutex_init(&dev->lock); 
mutex_lock(&dev->lock); if (!is_generic(dev->model)) { rc = fill_board_specific_data(dev); if (rc < 0) goto err; /* register i2c bus */ rc = tm6000_i2c_register(dev); if (rc < 0) goto err; } else { /* register i2c bus */ rc = tm6000_i2c_register(dev); if (rc < 0) goto err; use_alternative_detection_method(dev); rc = fill_board_specific_data(dev); if (rc < 0) goto err; } /* Default values for STD and resolutions */ dev->width = 720; dev->height = 480; dev->norm = V4L2_STD_PAL_M; /* Configure tuner */ tm6000_config_tuner(dev); /* Set video standard */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm); /* Set tuner frequency - also loads firmware on xc2028/xc3028 */ f.tuner = 0; f.type = V4L2_TUNER_ANALOG_TV; f.frequency = 3092; /* 193.25 MHz */ dev->freq = f.frequency; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); if (dev->caps.has_tda9874) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tvaudio", I2C_ADDR_TDA9874, NULL); /* register and initialize V4L2 */ rc = tm6000_v4l2_register(dev); if (rc < 0) goto err; tm6000_add_into_devlist(dev); tm6000_init_extension(dev); tm6000_ir_init(dev); mutex_unlock(&dev->lock); return 0; err: mutex_unlock(&dev->lock); return rc; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) static void get_max_endpoint(struct usb_device *udev, struct usb_host_interface *alt, char *msgtype, struct usb_host_endpoint *curr_e, struct tm6000_endpoint *tm_ep) { u16 tmp = le16_to_cpu(curr_e->desc.wMaxPacketSize); unsigned int size = tmp & 0x7ff; if (udev->speed == USB_SPEED_HIGH) size = size * hb_mult(tmp); if (size > tm_ep->maxsize) { tm_ep->endp = curr_e; tm_ep->maxsize = size; tm_ep->bInterfaceNumber = alt->desc.bInterfaceNumber; tm_ep->bAlternateSetting = alt->desc.bAlternateSetting; printk(KERN_INFO "tm6000: %s endpoint: 0x%02x (max size=%u bytes)\n", msgtype, curr_e->desc.bEndpointAddress, size); } } /* * 
tm6000_usb_probe() * checks for supported devices */ static int tm6000_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev; struct tm6000_core *dev = NULL; int i, rc = 0; int nr = 0; char *speed; usbdev = usb_get_dev(interface_to_usbdev(interface)); /* Selects the proper interface */ rc = usb_set_interface(usbdev, 0, 1); if (rc < 0) goto err; /* Check to see next free device and mark as used */ nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS); if (nr >= TM6000_MAXBOARDS) { printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS); usb_put_dev(usbdev); return -ENOMEM; } /* Create and initialize dev struct */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { printk(KERN_ERR "tm6000" ": out of memory!\n"); usb_put_dev(usbdev); return -ENOMEM; } spin_lock_init(&dev->slock); /* Increment usage count */ tm6000_devused |= 1<<nr; snprintf(dev->name, 29, "tm6000 #%d", nr); dev->model = id->driver_info; if ((card[nr] >= 0) && (card[nr] < ARRAY_SIZE(tm6000_boards))) dev->model = card[nr]; dev->udev = usbdev; dev->devno = nr; switch (usbdev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } /* Get endpoints */ for (i = 0; i < interface->num_altsetting; i++) { int ep; for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) { struct usb_host_endpoint *e; int dir_out; e = &interface->altsetting[i].endpoint[ep]; dir_out = ((e->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); printk(KERN_INFO "tm6000: alt %d, interface %i, class %i\n", i, interface->altsetting[i].desc.bInterfaceNumber, interface->altsetting[i].desc.bInterfaceClass); switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_BULK: if (!dir_out) { get_max_endpoint(usbdev, &interface->altsetting[i], "Bulk IN", e, &dev->bulk_in); } else { get_max_endpoint(usbdev, 
&interface->altsetting[i], "Bulk OUT", e, &dev->bulk_out); } break; case USB_ENDPOINT_XFER_ISOC: if (!dir_out) { get_max_endpoint(usbdev, &interface->altsetting[i], "ISOC IN", e, &dev->isoc_in); } else { get_max_endpoint(usbdev, &interface->altsetting[i], "ISOC OUT", e, &dev->isoc_out); } break; case USB_ENDPOINT_XFER_INT: if (!dir_out) { get_max_endpoint(usbdev, &interface->altsetting[i], "INT IN", e, &dev->int_in); } else { get_max_endpoint(usbdev, &interface->altsetting[i], "INT OUT", e, &dev->int_out); } break; } } } printk(KERN_INFO "tm6000: New video device @ %s Mbps (%04x:%04x, ifnum %d)\n", speed, le16_to_cpu(dev->udev->descriptor.idVendor), le16_to_cpu(dev->udev->descriptor.idProduct), interface->altsetting->desc.bInterfaceNumber); /* check if the the device has the iso in endpoint at the correct place */ if (!dev->isoc_in.endp) { printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n"); rc = -ENODEV; goto err; } /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); printk(KERN_INFO "tm6000: Found %s\n", tm6000_boards[dev->model].name); rc = tm6000_init_dev(dev); if (rc < 0) goto err; return 0; err: printk(KERN_ERR "tm6000: Error %d while registering\n", rc); tm6000_devused &= ~(1<<nr); usb_put_dev(usbdev); kfree(dev); return rc; } /* * tm6000_usb_disconnect() * called when the device gets diconencted * video device will be unregistered on v4l2_close in case it is still open */ static void tm6000_usb_disconnect(struct usb_interface *interface) { struct tm6000_core *dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (!dev) return; printk(KERN_INFO "tm6000: disconnecting %s\n", dev->name); tm6000_ir_fini(dev); if (dev->gpio.power_led) { switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: /* Power led off */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); msleep(15); break; case 
TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_WANDER_LITE: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: /* Power led off */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); msleep(15); break; } } tm6000_v4l2_unregister(dev); tm6000_i2c_unregister(dev); v4l2_device_unregister(&dev->v4l2_dev); dev->state |= DEV_DISCONNECTED; usb_put_dev(dev->udev); tm6000_close_extension(dev); tm6000_remove_from_devlist(dev); kfree(dev); } static struct usb_driver tm6000_usb_driver = { .name = "tm6000", .probe = tm6000_usb_probe, .disconnect = tm6000_usb_disconnect, .id_table = tm6000_id_table, }; static int __init tm6000_module_init(void) { int result; printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n", (TM6000_VERSION >> 16) & 0xff, (TM6000_VERSION >> 8) & 0xff, TM6000_VERSION & 0xff); /* register this driver with the USB subsystem */ result = usb_register(&tm6000_usb_driver); if (result) printk(KERN_ERR "tm6000" " usb_register failed. Error number %d.\n", result); return result; } static void __exit tm6000_module_exit(void) { /* deregister at USB subsystem */ usb_deregister(&tm6000_usb_driver); } module_init(tm6000_module_init); module_exit(tm6000_module_exit); MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL");
gpl-2.0
AndroidDeveloperAlliance/kernel_samsung_smdk4210
drivers/mfd/tps65911-comparator.c
2384
4517
/* * tps65910.c -- TI TPS6591x * * Copyright 2010 Texas Instruments Inc. * * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/mfd/tps65910.h> #define COMP 0 #define COMP1 1 #define COMP2 2 /* Comparator 1 voltage selection table in milivolts */ static const u16 COMP_VSEL_TABLE[] = { 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650, 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450, 3500, }; struct comparator { const char *name; int reg; int uV_max; const u16 *vsel_table; }; static struct comparator tps_comparators[] = { { .name = "COMP1", .reg = TPS65911_VMBCH, .uV_max = 3500, .vsel_table = COMP_VSEL_TABLE, }, { .name = "COMP2", .reg = TPS65911_VMBCH2, .uV_max = 3500, .vsel_table = COMP_VSEL_TABLE, }, }; static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage) { struct comparator tps_comp = tps_comparators[id]; int curr_voltage = 0; int ret; u8 index = 0, val; if (id == COMP) return 0; while (curr_voltage < tps_comp.uV_max) { curr_voltage = tps_comp.vsel_table[index]; if (curr_voltage >= voltage) break; else if (curr_voltage < voltage) index ++; } if (curr_voltage > tps_comp.uV_max) return -EINVAL; val = index << 1; ret = tps65910->write(tps65910, tps_comp.reg, 1, &val); return ret; } static int comp_threshold_get(struct tps65910 *tps65910, int id) { struct comparator tps_comp = tps_comparators[id]; int ret; u8 val; if (id == COMP) return 0; ret = tps65910->read(tps65910, tps_comp.reg, 1, &val); if (ret < 0) return ret; val >>= 
1; return tps_comp.vsel_table[val]; } static ssize_t comp_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tps65910 *tps65910 = dev_get_drvdata(dev->parent); struct attribute comp_attr = attr->attr; int id, uVolt; if (!strcmp(comp_attr.name, "comp1_threshold")) id = COMP1; else if (!strcmp(comp_attr.name, "comp2_threshold")) id = COMP2; else return -EINVAL; uVolt = comp_threshold_get(tps65910, id); return sprintf(buf, "%d\n", uVolt); } static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL); static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL); static __devinit int tps65911_comparator_probe(struct platform_device *pdev) { struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); struct tps65910_board *pdata = dev_get_platdata(tps65910->dev); int ret; ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold); if (ret < 0) { dev_err(&pdev->dev, "cannot set COMP1 threshold\n"); return ret; } ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold); if (ret < 0) { dev_err(&pdev->dev, "cannot set COMP2 theshold\n"); return ret; } /* Create sysfs entry */ ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold); if (ret < 0) dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n"); ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold); if (ret < 0) dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n"); return ret; } static __devexit int tps65911_comparator_remove(struct platform_device *pdev) { struct tps65910 *tps65910; tps65910 = dev_get_drvdata(pdev->dev.parent); return 0; } static struct platform_driver tps65911_comparator_driver = { .driver = { .name = "tps65911-comparator", .owner = THIS_MODULE, }, .probe = tps65911_comparator_probe, .remove = __devexit_p(tps65911_comparator_remove), }; static int __init tps65911_comparator_init(void) { return platform_driver_register(&tps65911_comparator_driver); } subsys_initcall(tps65911_comparator_init); static void 
__exit tps65911_comparator_exit(void) { platform_driver_unregister(&tps65911_comparator_driver); } module_exit(tps65911_comparator_exit); MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>"); MODULE_DESCRIPTION("TPS65911 comparator driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:tps65911-comparator");
gpl-2.0
superatmos/i9100g_kernel_ics
drivers/net/mlx4/en_rx.c
2384
25678
/* * Copyright (c) 2007 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> #include <linux/skbuff.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> #include "mlx4_en.h" static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, struct mlx4_en_rx_desc *rx_desc, struct skb_frag_struct *skb_frags, struct mlx4_en_rx_alloc *ring_alloc, int i) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i]; struct page *page; dma_addr_t dma; if (page_alloc->offset == frag_info->last_offset) { /* Allocate new page */ page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER); if (!page) return -ENOMEM; skb_frags[i].page = page_alloc->page; skb_frags[i].page_offset = page_alloc->offset; page_alloc->page = page; page_alloc->offset = frag_info->frag_align; } else { page = page_alloc->page; get_page(page); skb_frags[i].page = page; skb_frags[i].page_offset = page_alloc->offset; page_alloc->offset += frag_info->frag_stride; } dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) + skb_frags[i].page_offset, frag_info->frag_size, PCI_DMA_FROMDEVICE); rx_desc->data[i].addr = cpu_to_be64(dma); return 0; } static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { struct mlx4_en_rx_alloc *page_alloc; int i; for (i = 0; i < priv->num_frags; i++) { page_alloc = &ring->page_alloc[i]; page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER); if (!page_alloc->page) goto out; page_alloc->offset = priv->frag_info[i].frag_align; en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", i, page_alloc->page); } return 0; out: while (i--) { page_alloc = &ring->page_alloc[i]; put_page(page_alloc->page); page_alloc->page = NULL; } return -ENOMEM; } static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { struct mlx4_en_rx_alloc *page_alloc; int i; for (i 
= 0; i < priv->num_frags; i++) { page_alloc = &ring->page_alloc[i]; en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", i, page_count(page_alloc->page)); put_page(page_alloc->page); page_alloc->page = NULL; } } static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; struct skb_frag_struct *skb_frags = ring->rx_info + (index << priv->log_rx_info); int possible_frags; int i; /* Set size and memtype fields */ for (i = 0; i < priv->num_frags; i++) { skb_frags[i].size = priv->frag_info[i].frag_size; rx_desc->data[i].byte_count = cpu_to_be32(priv->frag_info[i].frag_size); rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); } /* If the number of used fragments does not fill up the ring stride, * remaining (unused) fragments must be padded with null address/size * and a special memory key */ possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE; for (i = priv->num_frags; i < possible_frags; i++) { rx_desc->data[i].byte_count = 0; rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD); rx_desc->data[i].addr = 0; } } static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); struct skb_frag_struct *skb_frags = ring->rx_info + (index << priv->log_rx_info); int i; for (i = 0; i < priv->num_frags; i++) if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i)) goto err; return 0; err: while (i--) put_page(skb_frags[i].page); return -ENOMEM; } static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) { *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); } static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { struct mlx4_en_dev *mdev = priv->mdev; struct skb_frag_struct *skb_frags; struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << 
ring->log_stride); dma_addr_t dma; int nr; skb_frags = ring->rx_info + (index << priv->log_rx_info); for (nr = 0; nr < priv->num_frags; nr++) { en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); dma = be64_to_cpu(rx_desc->data[nr].addr); en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma); pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, PCI_DMA_FROMDEVICE); put_page(skb_frags[nr].page); } } static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) { struct mlx4_en_rx_ring *ring; int ring_ind; int buf_ind; int new_size; for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = &priv->rx_ring[ring_ind]; if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { en_err(priv, "Failed to allocate " "enough rx buffers\n"); return -ENOMEM; } else { new_size = rounddown_pow_of_two(ring->actual_size); en_warn(priv, "Only %d buffers allocated " "reducing ring size to %d", ring->actual_size, new_size); goto reduce_rings; } } ring->actual_size++; ring->prod++; } } return 0; reduce_rings: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = &priv->rx_ring[ring_ind]; while (ring->actual_size > new_size) { ring->actual_size--; ring->prod--; mlx4_en_free_rx_desc(priv, ring, ring->actual_size); } } return 0; } static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { int index; en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", ring->cons, ring->prod); /* Unmap and free Rx buffers */ BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); while (ring->cons != ring->prod) { index = ring->cons & ring->size_mask; en_dbg(DRV, priv, "Processing descriptor:%d\n", index); mlx4_en_free_rx_desc(priv, ring, index); ++ring->cons; } } int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, u32 size, u16 stride) { struct mlx4_en_dev *mdev = priv->mdev; int err; 
int tmp; ring->prod = 0; ring->cons = 0; ring->size = size; ring->size_mask = size - 1; ring->stride = stride; ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride + TXBB_SIZE; tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct skb_frag_struct)); ring->rx_info = vmalloc(tmp); if (!ring->rx_info) { en_err(priv, "Failed allocating rx_info ring\n"); return -ENOMEM; } en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", ring->rx_info, tmp); err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 2 * PAGE_SIZE); if (err) goto err_ring; err = mlx4_en_map_buffer(&ring->wqres.buf); if (err) { en_err(priv, "Failed to map RX buffer\n"); goto err_hwq; } ring->buf = ring->wqres.buf.direct.buf; return 0; err_hwq: mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); err_ring: vfree(ring->rx_info); ring->rx_info = NULL; return err; } int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) { struct mlx4_en_rx_ring *ring; int i; int ring_ind; int err; int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + DS_SIZE * priv->num_frags); for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = &priv->rx_ring[ring_ind]; ring->prod = 0; ring->cons = 0; ring->actual_size = 0; ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; ring->stride = stride; if (ring->stride <= TXBB_SIZE) ring->buf += TXBB_SIZE; ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride; memset(ring->buf, 0, ring->buf_size); mlx4_en_update_rx_prod_db(ring); /* Initailize all descriptors */ for (i = 0; i < ring->size; i++) mlx4_en_init_rx_desc(priv, ring, i); /* Initialize page allocators */ err = mlx4_en_init_allocator(priv, ring); if (err) { en_err(priv, "Failed initializing ring allocator\n"); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; ring_ind--; goto err_allocator; } } err = mlx4_en_fill_rx_buffers(priv); if (err) goto err_buffers; for (ring_ind = 0; ring_ind < priv->rx_ring_num; 
ring_ind++) { ring = &priv->rx_ring[ring_ind]; ring->size_mask = ring->actual_size - 1; mlx4_en_update_rx_prod_db(ring); } return 0; err_buffers: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]); ring_ind = priv->rx_ring_num - 1; err_allocator: while (ring_ind >= 0) { if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE) priv->rx_ring[ring_ind].buf -= TXBB_SIZE; mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); ring_ind--; } return err; } void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { struct mlx4_en_dev *mdev = priv->mdev; mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; } void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; mlx4_en_destroy_allocator(priv, ring); } /* Unmap a completed descriptor and free unused pages */ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_desc *rx_desc, struct skb_frag_struct *skb_frags, struct skb_frag_struct *skb_frags_rx, struct mlx4_en_rx_alloc *page_alloc, int length) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_frag_info *frag_info; int nr; dma_addr_t dma; /* Collect used fragments while replacing them in the HW descirptors */ for (nr = 0; nr < priv->num_frags; nr++) { frag_info = &priv->frag_info[nr]; if (length <= frag_info->frag_prefix_size) break; /* Save page reference in skb */ skb_frags_rx[nr].page = skb_frags[nr].page; skb_frags_rx[nr].size = skb_frags[nr].size; skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset; dma = be64_to_cpu(rx_desc->data[nr].addr); /* Allocate a replacement page */ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr)) goto fail; /* Unmap buffer */ pci_unmap_single(mdev->pdev, dma, 
skb_frags_rx[nr].size, PCI_DMA_FROMDEVICE); } /* Adjust size of last fragment to match actual length */ if (nr > 0) skb_frags_rx[nr - 1].size = length - priv->frag_info[nr - 1].frag_prefix_size; return nr; fail: /* Drop all accumulated fragments (which have already been replaced in * the descriptor) of this packet; remaining fragments are reused... */ while (nr > 0) { nr--; put_page(skb_frags_rx[nr].page); } return 0; } static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, struct mlx4_en_rx_desc *rx_desc, struct skb_frag_struct *skb_frags, struct mlx4_en_rx_alloc *page_alloc, unsigned int length) { struct mlx4_en_dev *mdev = priv->mdev; struct sk_buff *skb; void *va; int used_frags; dma_addr_t dma; skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN); if (!skb) { en_dbg(RX_ERR, priv, "Failed allocating skb\n"); return NULL; } skb->dev = priv->dev; skb_reserve(skb, NET_IP_ALIGN); skb->len = length; skb->truesize = length + sizeof(struct sk_buff); /* Get pointer to first fragment so we could copy the headers into the * (linear part of the) skb */ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset; if (length <= SMALL_PACKET_SIZE) { /* We are copying all relevant data to the skb - temporarily * synch buffers for the copy */ dma = be64_to_cpu(rx_desc->data[0].addr); dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, va, length); dma_sync_single_for_device(&mdev->pdev->dev, dma, length, DMA_FROM_DEVICE); skb->tail += length; } else { /* Move relevant fragments to skb */ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags, skb_shinfo(skb)->frags, page_alloc, length); if (unlikely(!used_frags)) { kfree_skb(skb); return NULL; } skb_shinfo(skb)->nr_frags = used_frags; /* Copy headers into the skb linear buffer */ memcpy(skb->data, va, HEADER_COPY_SIZE); skb->tail += HEADER_COPY_SIZE; /* Skip headers in first fragment */ skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; /* Adjust 
size of first fragment */ skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE; skb->data_len = length - HEADER_COPY_SIZE; } return skb; } static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb) { int i; int offset = ETH_HLEN; for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { if (*(skb->data + offset) != (unsigned char) (i & 0xff)) goto out_loopback; } /* Loopback found */ priv->loopback_ok = 1; out_loopback: dev_kfree_skb_any(skb); } int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cqe *cqe; struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; struct skb_frag_struct *skb_frags; struct mlx4_en_rx_desc *rx_desc; struct sk_buff *skb; int index; int nr; unsigned int length; int polled = 0; int ip_summed; if (!priv->port_up) return 0; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ index = cq->mcq.cons_index & ring->size_mask; cqe = &cq->buf[index]; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cq->mcq.cons_index & cq->size)) { skb_frags = ring->rx_info + (index << priv->log_rx_info); rx_desc = ring->buf + (index << ring->log_stride); /* * make sure we read the CQE after we read the ownership bit */ rmb(); /* Drop packet on bad receive or bad checksum */ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { en_err(priv, "CQE completed in error - vendor " "syndrom:%d syndrom:%d\n", ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, ((struct mlx4_err_cqe *) cqe)->syndrome); goto next; } if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); goto next; } /* * Packet is OK - process it. 
*/ length = be32_to_cpu(cqe->byte_cnt); ring->bytes += length; ring->packets++; if (likely(dev->features & NETIF_F_RXCSUM)) { if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && (cqe->checksum == cpu_to_be16(0xffff))) { priv->port_stats.rx_chksum_good++; /* This packet is eligible for LRO if it is: * - DIX Ethernet (type interpretation) * - TCP/IP (v4) * - without IP options * - not an IP fragment */ if (dev->features & NETIF_F_GRO) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) goto next; nr = mlx4_en_complete_rx_desc( priv, rx_desc, skb_frags, skb_shinfo(gro_skb)->frags, ring->page_alloc, length); if (!nr) goto next; skb_shinfo(gro_skb)->nr_frags = nr; gro_skb->len = length; gro_skb->data_len = length; gro_skb->truesize += length; gro_skb->ip_summed = CHECKSUM_UNNECESSARY; if (priv->vlgrp && (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid)); else napi_gro_frags(&cq->napi); goto next; } /* LRO not possible, complete processing here */ ip_summed = CHECKSUM_UNNECESSARY; } else { ip_summed = CHECKSUM_NONE; priv->port_stats.rx_chksum_none++; } } else { ip_summed = CHECKSUM_NONE; priv->port_stats.rx_chksum_none++; } skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc, length); if (!skb) { priv->stats.rx_dropped++; goto next; } if (unlikely(priv->validate_loopback)) { validate_loopback(priv, skb); goto next; } skb->ip_summed = ip_summed; skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); /* Push it up the stack */ if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK)) { vlan_hwaccel_receive_skb(skb, priv->vlgrp, be16_to_cpu(cqe->sl_vid)); } else netif_receive_skb(skb); next: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; cqe = &cq->buf[index]; if (++polled == budget) { /* We are here because we reached the NAPI budget - * flush only pending LRO sessions */ goto out; } } out: 
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); mlx4_cq_set_ci(&cq->mcq); wmb(); /* ensure HW sees CQ consumer before we post new buffers */ ring->cons = cq->mcq.cons_index; ring->prod += polled; /* Polled descriptors were realocated in place */ mlx4_en_update_rx_prod_db(ring); return polled; } void mlx4_en_rx_irq(struct mlx4_cq *mcq) { struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); if (priv->port_up) napi_schedule(&cq->napi); else mlx4_en_arm_cq(priv, cq); } /* Rx CQ polling - called by NAPI */ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) { struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); int done; done = mlx4_en_process_rx_cq(dev, cq, budget); /* If we used up all the quota - we're probably not done yet... */ if (done == budget) INC_PERF_COUNTER(priv->pstats.napi_quota); else { /* Done for now */ napi_complete(napi); mlx4_en_arm_cq(priv, cq); } return done; } /* Calculate the last offset position that accommodates a full fragment * (assuming fagment size = stride-align) */ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) { u16 res = MLX4_EN_ALLOC_SIZE % stride; u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " "res:%d offset:%d\n", stride, align, res, offset); return offset; } static int frag_sizes[] = { FRAG_SZ0, FRAG_SZ1, FRAG_SZ2, FRAG_SZ3 }; void mlx4_en_calc_rx_buf(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE; int buf_size = 0; int i = 0; while (buf_size < eff_mtu) { priv->frag_info[i].frag_size = (eff_mtu > buf_size + frag_sizes[i]) ? 
frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; if (!i) { priv->frag_info[i].frag_align = NET_IP_ALIGN; priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES); } else { priv->frag_info[i].frag_align = 0; priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], SMP_CACHE_BYTES); } priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset( priv, priv->frag_info[i].frag_stride, priv->frag_info[i].frag_align); buf_size += priv->frag_info[i].frag_size; i++; } priv->num_frags = i; priv->rx_skb_size = eff_mtu; priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct)); en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " "num_frags:%d):\n", eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " "stride:%d last_offset:%d\n", i, priv->frag_info[i].frag_size, priv->frag_info[i].frag_prefix_size, priv->frag_info[i].frag_align, priv->frag_info[i].frag_stride, priv->frag_info[i].last_offset); } } /* RSS related functions */ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_rx_ring *ring, enum mlx4_qp_state *state, struct mlx4_qp *qp) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_qp_context *context; int err = 0; context = kmalloc(sizeof *context , GFP_KERNEL); if (!context) { en_err(priv, "Failed to allocate qp context\n"); return -ENOMEM; } err = mlx4_qp_alloc(mdev->dev, qpn, qp); if (err) { en_err(priv, "Failed to allocate qp #%x\n", qpn); goto out; } qp->event = mlx4_en_sqp_event; memset(context, 0, sizeof *context); mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, qpn, ring->cqn, context); context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); if (err) { mlx4_qp_remove(mdev->dev, qp); mlx4_qp_free(mdev->dev, qp); } mlx4_en_update_rx_prod_db(ring); out: kfree(context); return err; } /* 
Allocate rx qp's and configure them according to rss map */ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rss_map *rss_map = &priv->rss_map; struct mlx4_qp_context context; struct mlx4_en_rss_context *rss_context; void *ptr; u8 rss_mask = 0x3f; int i, qpn; int err = 0; int good_qps = 0; en_dbg(DRV, priv, "Configuring rss steering\n"); err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, priv->rx_ring_num, &rss_map->base_qpn); if (err) { en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); return err; } for (i = 0; i < priv->rx_ring_num; i++) { qpn = rss_map->base_qpn + i; err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i], &rss_map->state[i], &rss_map->qps[i]); if (err) goto rss_err; ++good_qps; } /* Configure RSS indirection qp */ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); goto rss_err; } rss_map->indir_qp.event = mlx4_en_sqp_event; mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, priv->rx_ring[0].cqn, &context); ptr = ((void *) &context) + 0x3c; rss_context = (struct mlx4_en_rss_context *) ptr; rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | (rss_map->base_qpn)); rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); rss_context->flags = rss_mask; if (priv->mdev->profile.udp_rss) rss_context->base_qpn_udp = rss_context->default_qpn; err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, &rss_map->indir_qp, &rss_map->indir_state); if (err) goto indir_err; return 0; indir_err: mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); mlx4_qp_free(mdev->dev, &rss_map->indir_qp); rss_err: for (i = 0; i < good_qps; i++) { mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); mlx4_qp_remove(mdev->dev, 
&rss_map->qps[i]); mlx4_qp_free(mdev->dev, &rss_map->qps[i]); } mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); return err; } void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rss_map *rss_map = &priv->rss_map; int i; mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); mlx4_qp_free(mdev->dev, &rss_map->indir_qp); for (i = 0; i < priv->rx_ring_num; i++) { mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); mlx4_qp_free(mdev->dev, &rss_map->qps[i]); } mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); }
gpl-2.0
timduru/kernel-asus-tf101
drivers/staging/vme/devices/vme_user.c
2640
22391
/* * VMEbus User access driver * * Author: Martyn Welch <martyn.welch@ge.com> * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. * * Based on work by: * Tom Armistead and Ajit Prem * Copyright 2004 Motorola Inc. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/cdev.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/ioctl.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/io.h> #include <linux/uaccess.h> #include "../vme.h" #include "vme_user.h" static DEFINE_MUTEX(vme_user_mutex); static char driver_name[] = "vme_user"; static int bus[USER_BUS_MAX]; static unsigned int bus_num; /* Currently Documentation/devices.txt defines the following for VME: * * 221 char VME bus * 0 = /dev/bus/vme/m0 First master image * 1 = /dev/bus/vme/m1 Second master image * 2 = /dev/bus/vme/m2 Third master image * 3 = /dev/bus/vme/m3 Fourth master image * 4 = /dev/bus/vme/s0 First slave image * 5 = /dev/bus/vme/s1 Second slave image * 6 = /dev/bus/vme/s2 Third slave image * 7 = /dev/bus/vme/s3 Fourth slave image * 8 = /dev/bus/vme/ctl Control * * It is expected that all VME bus drivers will use the * same interface. For interface documentation see * http://www.vmelinux.org/. * * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't * even support the tsi148 chipset (which has 8 master and 8 slave windows). 
* We'll run with this or now as far as possible, however it probably makes * sense to get rid of the old mappings and just do everything dynamically. * * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as * defined above and try to support at least some of the interface from * http://www.vmelinux.org/ as an alternative drive can be written providing a * saner interface later. * * The vmelinux.org driver never supported slave images, the devices reserved * for slaves were repurposed to support all 8 master images on the UniverseII! * We shall support 4 masters and 4 slaves with this driver. */ #define VME_MAJOR 221 /* VME Major Device Number */ #define VME_DEVS 9 /* Number of dev entries */ #define MASTER_MINOR 0 #define MASTER_MAX 3 #define SLAVE_MINOR 4 #define SLAVE_MAX 7 #define CONTROL_MINOR 8 #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */ /* * Structure to handle image related parameters. */ typedef struct { void *kern_buf; /* Buffer address in kernel space */ dma_addr_t pci_buf; /* Buffer address in PCI address space */ unsigned long long size_buf; /* Buffer size */ struct semaphore sem; /* Semaphore for locking image */ struct device *device; /* Sysfs device */ struct vme_resource *resource; /* VME resource */ int users; /* Number of current users */ } image_desc_t; static image_desc_t image[VME_DEVS]; typedef struct { unsigned long reads; unsigned long writes; unsigned long ioctls; unsigned long irqs; unsigned long berrs; unsigned long dmaErrors; unsigned long timeouts; unsigned long external; } driver_stats_t; static driver_stats_t statistics; static struct cdev *vme_user_cdev; /* Character device */ static struct class *vme_user_sysfs_class; /* Sysfs class */ static struct device *vme_user_bridge; /* Pointer to bridge device */ static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR, MASTER_MINOR, MASTER_MINOR, SLAVE_MINOR, SLAVE_MINOR, SLAVE_MINOR, SLAVE_MINOR, CONTROL_MINOR }; static int 
vme_user_open(struct inode *, struct file *); static int vme_user_release(struct inode *, struct file *); static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *); static ssize_t vme_user_write(struct file *, const char __user *, size_t, loff_t *); static loff_t vme_user_llseek(struct file *, loff_t, int); static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long); static int __devinit vme_user_probe(struct device *, int, int); static int __devexit vme_user_remove(struct device *, int, int); static struct file_operations vme_user_fops = { .open = vme_user_open, .release = vme_user_release, .read = vme_user_read, .write = vme_user_write, .llseek = vme_user_llseek, .unlocked_ioctl = vme_user_unlocked_ioctl, }; /* * Reset all the statistic counters */ static void reset_counters(void) { statistics.reads = 0; statistics.writes = 0; statistics.ioctls = 0; statistics.irqs = 0; statistics.berrs = 0; statistics.dmaErrors = 0; statistics.timeouts = 0; } static int vme_user_open(struct inode *inode, struct file *file) { int err; unsigned int minor = MINOR(inode->i_rdev); down(&image[minor].sem); /* Only allow device to be opened if a resource is allocated */ if (image[minor].resource == NULL) { printk(KERN_ERR "No resources allocated for device\n"); err = -EINVAL; goto err_res; } /* Increment user count */ image[minor].users++; up(&image[minor].sem); return 0; err_res: up(&image[minor].sem); return err; } static int vme_user_release(struct inode *inode, struct file *file) { unsigned int minor = MINOR(inode->i_rdev); down(&image[minor].sem); /* Decrement user count */ image[minor].users--; up(&image[minor].sem); return 0; } /* * We are going ot alloc a page during init per window for small transfers. * Small transfers will go VME -> buffer -> user space. Larger (more than a * page) transfers will lock the user space buffer into memory and then * transfer the data directly into the user space buffers. 
 */
/*
 * resource_to_user() - read "count" bytes from a VME master window into the
 * user buffer "buf".
 *
 * Data is staged through the per-window kernel bounce buffer
 * (image[minor].kern_buf): VME -> kernel buffer -> user space.  Transfers
 * larger than the bounce buffer are not implemented and fail with -EINVAL.
 * Caller (vme_user_read) holds image[minor].sem and has clamped "count" to
 * the window size.  Returns bytes copied or a negative errno.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			/* NOTE(review): "copied" is adjusted here but then
			 * discarded by the -EINVAL return; -EFAULT would be
			 * the conventional errno for a failed user copy.
			 */
			copied = (copied - retval);
			printk(KERN_INFO "User copy failed\n");
			return -EINVAL;
		}
	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */
		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go user space -> buffer -> VME.  Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
/*
 * resource_from_user() - write "count" bytes from the user buffer "buf" to a
 * VME master window, staged through the kernel bounce buffer.
 * Returns the result of vme_master_write() or -EINVAL for oversize requests.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		/* NOTE(review): on a partial copy this computes
		 * copied = 0 - retval, i.e. a negative length is passed to
		 * vme_master_write(); (count - retval) looks intended.
		 */
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */
		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * buffer_to_user() - copy from a slave-window kernel buffer, at offset
 * *ppos, out to user space.  Returns the number of bytes actually copied.
 */
static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		/* report how many bytes did make it */
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

/*
 * buffer_from_user() - copy from user space into a slave-window kernel
 * buffer at offset *ppos.  Returns the number of bytes actually copied.
 */
static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		/* NOTE(review): message says "to userspace" but this path
		 * copies *from* user space.
		 */
		printk(KERN_WARNING "Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

/*
 * vme_user_read() - file_operations .read handler.  Serialises on the
 * per-minor semaphore, clamps the request to the window size, and
 * dispatches to the master (resource_*) or slave (buffer_*) helper.
 */
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
	loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	down(&image[minor].sem);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	/* NOTE(review): image_size is size_t, so (image_size - 1) wraps if a
	 * window reports size 0 - TODO confirm vme_get_size() never returns 0.
	 */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

/*
 * vme_user_write() - file_operations .write handler; mirror image of
 * vme_user_read() using the *_from_user helpers.
 */
static ssize_t vme_user_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	down(&image[minor].sem);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

/*
 * vme_user_llseek() - file_operations .llseek handler.  Seeks are bounded
 * to [0, image_size); out-of-range or unknown "whence" yields -EINVAL.
 */
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	loff_t absolute = -1;
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	size_t image_size;

	down(&image[minor].sem);
	image_size = vme_get_size(image[minor].resource);

	switch (whence) {
	case SEEK_SET:
		absolute = off;
		break;
	case SEEK_CUR:
		absolute = file->f_pos + off;
		break;
	case SEEK_END:
		absolute = image_size + off;
		break;
	default:
		up(&image[minor].sem);
		return -EINVAL;
		break;	/* unreachable after return; kept from original */
	}

	if ((absolute < 0) || (absolute >= image_size)) {
		up(&image[minor].sem);
		return -EINVAL;
	}

	file->f_pos = absolute;

	up(&image[minor].sem);

	return absolute;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the registers layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level, however we should use the definitions that have
 * already been defined.
 */
/*
 * vme_user_ioctl() - get/set master and slave window attributes.  Called
 * with vme_user_mutex held (see vme_user_unlocked_ioctl below).  Unhandled
 * commands fall through to the -EINVAL at the bottom.
 */
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;
			break;

		case VME_SET_MASTER:

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);

			break;
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;
			break;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);

			break;
		}
		break;
	}

	return -EINVAL;
}

/*
 * vme_user_unlocked_ioctl() - .unlocked_ioctl wrapper that serialises all
 * ioctl traffic with vme_user_mutex.
 */
static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&vme_user_mutex);
	ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
	mutex_unlock(&vme_user_mutex);

	return ret;
}

/*
 * Unallocate a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		printk(KERN_DEBUG "UniverseII:Releasing buffer at %p\n",
			image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource,
			image[num].size_buf, image[num].kern_buf,
			image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

		/* The "} else {" of the enclosing if lives inside the
		 * VME_DEBUG block on purpose: without VME_DEBUG there is
		 * no else branch at all.
		 */
#ifdef VME_DEBUG
	} else {
		printk(KERN_DEBUG "UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.probe = vme_user_probe,
	.remove = __devexit_p(vme_user_remove),
};

/*
 * vme_user_init() - module init: build the bind table from the "bus" module
 * parameters and register against the VME core.  Registration is skipped
 * (-ENODEV) when no buses were supplied.
 */
static int __init vme_user_init(void)
{
	int retval = 0;
	int i;
	struct vme_device_id *ids;

	printk(KERN_INFO "VME User Space Access Driver\n");

	if (bus_num == 0) {
		printk(KERN_ERR "%s: No cards, skipping registration\n",
			driver_name);
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > USER_BUS_MAX) {
		printk(KERN_ERR "%s: Driver only able to handle %d buses\n",
			driver_name, USER_BUS_MAX);
		bus_num = USER_BUS_MAX;
	}

	/* Dynamically create the bind table based on module parameters */
	/* +1 leaves a zeroed terminator entry at the end of the table */
	ids = kmalloc(sizeof(struct vme_device_id) * (bus_num + 1), GFP_KERNEL);
	if (ids == NULL) {
		printk(KERN_ERR "%s: Unable to allocate ID table\n",
			driver_name);
		retval = -ENOMEM;
		goto err_id;
	}

	memset(ids, 0, (sizeof(struct vme_device_id) * (bus_num + 1)));

	for (i = 0; i < bus_num; i++) {
		ids[i].bus = bus[i];
		/*
		 * We register the driver against the slot occupied by *this*
		 * card, since it's really a low level way of controlling
		 * the VME bridge
		 */
		ids[i].slot = VME_SLOT_CURRENT;
	}

	vme_user_driver.bind_table = ids;

	retval = vme_register_driver(&vme_user_driver);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
	kfree(ids);
err_id:
err_nocard:
	return retval;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
/*
 * vme_user_probe() - per-device setup: char-device registration, slave and
 * master window allocation (with bounce buffers), and sysfs device nodes.
 * Unwinds through the err_* labels in reverse order of acquisition.
 */
static int __devinit vme_user_probe(struct device *dev, int cur_bus,
	int cur_slot)
{
	int i, err;
	char name[12];

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		printk(KERN_ERR "%s: Driver can only be loaded for 1 device\n",
			driver_name);
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = dev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		sema_init(&image[i].sem, 1);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		printk(KERN_WARNING "%s: Error getting Major Number %d for "
			"driver.\n", driver_name, VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		/* NOTE(review): "cdev_all" is a typo for cdev_add (runtime
		 * string left untouched here).
		 */
		printk(KERN_WARNING "%s: cdev_all failed\n", driver_name);
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			/* NOTE(review): "err" is not set on this path; a
			 * stale value is returned through the err_slave
			 * unwind.
			 */
			printk(KERN_WARNING "Unable to allocate slave "
				"resource\n");
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources allocate page sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			/* NOTE(review): "err" also left unset here */
			printk(KERN_WARNING "Unable to allocate master "
				"resource\n");
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"master window buffers\n");
			err = -ENOMEM;
			goto err_master_buf;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		printk(KERN_ERR "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		switch (type[i]) {
		case MASTER_MINOR:
			/* "%%d" survives this sprintf as a literal "%d",
			 * which device_create() below then formats with the
			 * minor index.
			 */
			sprintf(name, "bus/vme/m%%d");
			break;
		case CONTROL_MINOR:
			sprintf(name, "bus/vme/ctl");
			break;
		case SLAVE_MINOR:
			sprintf(name, "bus/vme/s%%d");
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
			break;
		}

		image[i].device = device_create(vme_user_sysfs_class, NULL,
			MKDEV(VME_MAJOR, i), NULL, name,
			(type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i);
		if (IS_ERR(image[i].device)) {
			printk(KERN_INFO "%s: Error creating sysfs device\n",
				driver_name);
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

	/* Ensure counter set correctly to destroy all sysfs devices */
	i = VME_DEVS;
err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master_buf:
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
		kfree(image[i].kern_buf);
err_master:
	while (i > MASTER_MINOR) {
		i--;
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

/*
 * vme_user_remove() - tear down everything vme_user_probe() set up, in
 * reverse order.
 */
static int __devexit vme_user_remove(struct device *dev, int cur_bus,
	int cur_slot)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++)
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* disable the window before releasing its buffer */
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);

	/* bind table was kmalloc'd in vme_user_init() */
	kfree(vme_user_driver.bind_table);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);

MODULE_DESCRIPTION("VME User Space Access Driver");
/* NOTE(review): address string appears to be missing a closing '>' */
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);
gpl-2.0
Juansheng/android_kernel_htc_vision
fs/efs/super.c
3152
8660
/*
 * super.c
 *
 * Copyright (c) 1999 Al Smith
 *
 * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/exportfs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>

#include "efs.h"
#include <linux/efs_vh.h>
#include <linux/efs_fs_sb.h>

static int efs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int efs_fill_super(struct super_block *s, void *d, int silent);

/* Mount entry point: EFS lives on a block device. */
static struct dentry *efs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, efs_fill_super);
}

static struct file_system_type efs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "efs",
	.mount		= efs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

/* SGI disklabel partition types, consulted while scanning the volume
 * header in efs_validate_vh(); the {0, NULL} entry terminates the table. */
static struct pt_types sgi_pt_types[] = {
	{0x00,		"SGI vh"},
	{0x01,		"SGI trkrepl"},
	{0x02,		"SGI secrepl"},
	{0x03,		"SGI raw"},
	{0x04,		"SGI bsd"},
	{SGI_SYSV,	"SGI sysv"},
	{0x06,		"SGI vol"},
	{SGI_EFS,	"SGI efs"},
	{0x08,		"SGI lv"},
	{0x09,		"SGI rlv"},
	{0x0A,		"SGI xfs"},
	{0x0B,		"SGI xfslog"},
	{0x0C,		"SGI xlv"},
	{0x82,		"Linux swap"},
	{0x83,		"Linux native"},
	{0,		NULL}
};


static struct kmem_cache *efs_inode_cachep;

/* Allocate an in-core EFS inode from the dedicated slab cache. */
static struct inode *efs_alloc_inode(struct super_block *sb)
{
	struct efs_inode_info *ei;
	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep,
		GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

/* RCU callback: actually free the inode after the grace period. */
static void efs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
}

static void efs_destroy_inode(struct inode *inode)
{
	/* defer the free so RCU walkers never see a recycled inode */
	call_rcu(&inode->i_rcu, efs_i_callback);
}

/* Slab constructor: run once per object when the cache page is populated. */
static void init_once(void *foo)
{
	struct efs_inode_info *ei = (struct efs_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	efs_inode_cachep = kmem_cache_create("efs_inode_cache",
		sizeof(struct efs_inode_info), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, init_once);
	if (efs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(efs_inode_cachep);
}

/* Release the efs_sb_info allocated in efs_fill_super(). */
static void efs_put_super(struct super_block *s)
{
	kfree(s->s_fs_info);
	s->s_fs_info = NULL;
}

/* EFS is read-only: force MS_RDONLY on every remount. */
static int efs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}

static const struct super_operations efs_superblock_operations = {
	.alloc_inode	= efs_alloc_inode,
	.destroy_inode	= efs_destroy_inode,
	.put_super	= efs_put_super,
	.statfs		= efs_statfs,
	.remount_fs	= efs_remount,
};

static const struct export_operations efs_export_ops = {
	.fh_to_dentry	= efs_fh_to_dentry,
	.fh_to_parent	= efs_fh_to_parent,
	.get_parent	= efs_get_parent,
};

static int __init init_efs_fs(void)
{
	int err;
	printk("EFS: "EFS_VERSION" - http://aeschi.ch.eu.org/efs/\n");
	err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&efs_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_efs_fs(void)
{
	unregister_filesystem(&efs_fs_type);
	destroy_inodecache();
}

module_init(init_efs_fs)
module_exit(exit_efs_fs)

/*
 * efs_validate_vh() - validate the SGI volume header (disklabel) and locate
 * the EFS slice.  Returns the first logical block of the last EFS partition
 * found, or 0 when there is no valid disklabel (the caller then probes
 * block 0 directly).
 */
static efs_block_t efs_validate_vh(struct volume_header *vh)
{
	int		i;
	__be32		cs, *ui;
	int		csum;
	efs_block_t	sblock = 0; /* shuts up gcc */
	struct pt_types	*pt_entry;
	int		pt_type, slice = -1;

	if (be32_to_cpu(vh->vh_magic) != VHMAGIC) {
		/*
		 * assume that we're dealing with a partition and allow
		 * read_super() to try and detect a valid superblock
		 * on the next block.
		 */
		return 0;
	}

	/* the 32-bit big-endian words of the header must sum to zero */
	ui = ((__be32 *) (vh + 1)) - 1;
	for (csum = 0; ui >= ((__be32 *) vh);) {
		cs = *ui--;
		csum += be32_to_cpu(cs);
	}
	if (csum) {
		printk(KERN_INFO "EFS: SGI disklabel: checksum bad, label corrupted\n");
		return 0;
	}

#ifdef DEBUG
	printk(KERN_DEBUG "EFS: bf: \"%16s\"\n", vh->vh_bootfile);

	for (i = 0; i < NVDIR; i++) {
		int	j;
		char	name[VDNAMESIZE+1];

		for (j = 0; j < VDNAMESIZE; j++) {
			name[j] = vh->vh_vd[i].vd_name[j];
		}
		name[j] = (char) 0;
		if (name[0]) {
			printk(KERN_DEBUG "EFS: vh: %8s block: 0x%08x size: 0x%08x\n",
				name,
				(int) be32_to_cpu(vh->vh_vd[i].vd_lbn),
				(int) be32_to_cpu(vh->vh_vd[i].vd_nbytes));
		}
	}
#endif

	for (i = 0; i < NPARTAB; i++) {
		pt_type = (int) be32_to_cpu(vh->vh_pt[i].pt_type);
		for (pt_entry = sgi_pt_types; pt_entry->pt_name; pt_entry++) {
			if (pt_type == pt_entry->pt_type)
				break;
		}
#ifdef DEBUG
		if (be32_to_cpu(vh->vh_pt[i].pt_nblks)) {
			printk(KERN_DEBUG "EFS: pt %2d: start: %08d size: %08d type: 0x%02x (%s)\n",
				i,
				(int) be32_to_cpu(vh->vh_pt[i].pt_firstlbn),
				(int) be32_to_cpu(vh->vh_pt[i].pt_nblks),
				pt_type,
				(pt_entry->pt_name) ?
					pt_entry->pt_name : "unknown");
		}
#endif
		/* remember the (last) EFS slice in the table */
		if (IS_EFS(pt_type)) {
			sblock = be32_to_cpu(vh->vh_pt[i].pt_firstlbn);
			slice = i;
		}
	}

	if (slice == -1) {
		printk(KERN_NOTICE "EFS: partition table contained no EFS partitions\n");
#ifdef DEBUG
	} else {
		printk(KERN_INFO "EFS: using slice %d (type %s, offset 0x%x)\n",
			slice,
			(pt_entry->pt_name) ? pt_entry->pt_name : "unknown",
			sblock);
#endif
	}
	return sblock;
}

/*
 * efs_validate_super() - check the on-disk superblock magic and, if valid,
 * copy its fields (byte-swapped) into the in-core efs_sb_info.
 * Returns 0 on success, -1 on bad magic.
 */
static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super)
{

	if (!IS_EFS_MAGIC(be32_to_cpu(super->fs_magic)))
		return -1;

	sb->fs_magic     = be32_to_cpu(super->fs_magic);
	sb->total_blocks = be32_to_cpu(super->fs_size);
	sb->first_block  = be32_to_cpu(super->fs_firstcg);
	sb->group_size   = be32_to_cpu(super->fs_cgfsize);
	sb->data_free    = be32_to_cpu(super->fs_tfree);
	sb->inode_free   = be32_to_cpu(super->fs_tinode);
	sb->inode_blocks = be16_to_cpu(super->fs_cgisize);
	sb->total_groups = be16_to_cpu(super->fs_ncg);

	return 0;
}

/*
 * efs_fill_super() - read the volume header and superblock, force the
 * mount read-only, and instantiate the root inode/dentry.
 */
static int efs_fill_super(struct super_block *s, void *d, int silent)
{
	struct efs_sb_info *sb;
	struct buffer_head *bh;
	struct inode *root;
	int ret = -EINVAL;

	sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
	if (!sb)
		return -ENOMEM;
	s->s_fs_info = sb;

	s->s_magic = EFS_SUPER_MAGIC;
	if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
		printk(KERN_ERR "EFS: device does not support %d byte blocks\n",
			EFS_BLOCKSIZE);
		goto out_no_fs_ul;
	}

	/* read the vh (volume header) block */
	bh = sb_bread(s, 0);

	if (!bh) {
		printk(KERN_ERR "EFS: cannot read volume header\n");
		goto out_no_fs_ul;
	}

	/*
	 * if this returns zero then we didn't find any partition table.
	 * this isn't (yet) an error - just assume for the moment that
	 * the device is valid and go on to search for a superblock.
	 */
	sb->fs_start = efs_validate_vh((struct volume_header *) bh->b_data);
	brelse(bh);

	if (sb->fs_start == -1) {
		goto out_no_fs_ul;
	}

	bh = sb_bread(s, sb->fs_start + EFS_SUPER);
	if (!bh) {
		printk(KERN_ERR "EFS: cannot read superblock\n");
		goto out_no_fs_ul;
	}

	if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) {
#ifdef DEBUG
		printk(KERN_WARNING "EFS: invalid superblock at block %u\n",
			sb->fs_start + EFS_SUPER);
#endif
		brelse(bh);
		goto out_no_fs_ul;
	}
	brelse(bh);

	/* EFS write support does not exist; mount read-only regardless */
	if (!(s->s_flags & MS_RDONLY)) {
#ifdef DEBUG
		printk(KERN_INFO "EFS: forcing read-only mode\n");
#endif
		s->s_flags |= MS_RDONLY;
	}
	s->s_op = &efs_superblock_operations;
	s->s_export_op = &efs_export_ops;
	root = efs_iget(s, EFS_ROOTINODE);
	if (IS_ERR(root)) {
		printk(KERN_ERR "EFS: get root inode failed\n");
		ret = PTR_ERR(root);
		goto out_no_fs;
	}

	s->s_root = d_alloc_root(root);
	if (!(s->s_root)) {
		printk(KERN_ERR "EFS: get root dentry failed\n");
		iput(root);
		ret = -ENOMEM;
		goto out_no_fs;
	}

	return 0;

out_no_fs_ul:
out_no_fs:
	s->s_fs_info = NULL;
	kfree(sb);
	return ret;
}

/*
 * efs_statfs() - report filesystem statistics from the cached superblock
 * counters in efs_sb_info.
 */
static int efs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct efs_sb_info *sbi = SUPER_INFO(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type    = EFS_SUPER_MAGIC;	/* efs magic number */
	buf->f_bsize   = EFS_BLOCKSIZE;		/* blocksize */
	buf->f_blocks  = sbi->total_groups *	/* total data blocks */
			(sbi->group_size - sbi->inode_blocks);
	buf->f_bfree   = sbi->data_free;	/* free data blocks */
	buf->f_bavail  = sbi->data_free;	/* free blocks for non-root */
	buf->f_files   = sbi->total_groups *	/* total inodes */
			sbi->inode_blocks *
			(EFS_BLOCKSIZE / sizeof(struct efs_dinode));
	buf->f_ffree   = sbi->inode_free;	/* free inodes */
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	buf->f_namelen = EFS_MAXNAMELEN;	/* max filename length */

	return 0;
}
gpl-2.0
Stuxnet-Kernel/stuxnet_cancro
drivers/regulator/pmic8058-regulator.c
3408
47110
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/err.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/mfd/pmic8058.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/regulator/pmic8058-regulator.h> /* Regulator types */ #define REGULATOR_TYPE_LDO 0 #define REGULATOR_TYPE_SMPS 1 #define REGULATOR_TYPE_LVS 2 #define REGULATOR_TYPE_NCP 3 /* Common masks */ #define REGULATOR_EN_MASK 0x80 #define REGULATOR_BANK_MASK 0xF0 #define REGULATOR_BANK_SEL(n) ((n) << 4) #define REGULATOR_BANK_WRITE 0x80 #define LDO_TEST_BANKS 7 #define SMPS_TEST_BANKS 8 #define REGULATOR_TEST_BANKS_MAX SMPS_TEST_BANKS /* LDO programming */ /* CTRL register */ #define LDO_ENABLE_MASK 0x80 #define LDO_ENABLE 0x80 #define LDO_PULL_DOWN_ENABLE_MASK 0x40 #define LDO_PULL_DOWN_ENABLE 0x40 #define LDO_CTRL_PM_MASK 0x20 #define LDO_CTRL_PM_HPM 0x00 #define LDO_CTRL_PM_LPM 0x20 #define LDO_CTRL_VPROG_MASK 0x1F /* TEST register bank 0 */ #define LDO_TEST_LPM_MASK 0x40 #define LDO_TEST_LPM_SEL_CTRL 0x00 #define LDO_TEST_LPM_SEL_TCXO 0x40 /* TEST register bank 2 */ #define LDO_TEST_VPROG_UPDATE_MASK 0x08 #define LDO_TEST_RANGE_SEL_MASK 0x04 #define LDO_TEST_FINE_STEP_MASK 0x02 #define LDO_TEST_FINE_STEP_SHIFT 1 /* TEST register bank 4 */ #define LDO_TEST_RANGE_EXT_MASK 0x01 /* TEST register bank 5 */ #define LDO_TEST_PIN_CTRL_MASK 0x0F #define 
LDO_TEST_PIN_CTRL_EN3 0x08 #define LDO_TEST_PIN_CTRL_EN2 0x04 #define LDO_TEST_PIN_CTRL_EN1 0x02 #define LDO_TEST_PIN_CTRL_EN0 0x01 /* TEST register bank 6 */ #define LDO_TEST_PIN_CTRL_LPM_MASK 0x0F /* Allowable voltage ranges */ #define PLDO_LOW_UV_MIN 750000 #define PLDO_LOW_UV_MAX 1537500 #define PLDO_LOW_FINE_STEP_UV 12500 #define PLDO_NORM_UV_MIN 1500000 #define PLDO_NORM_UV_MAX 3075000 #define PLDO_NORM_FINE_STEP_UV 25000 #define PLDO_HIGH_UV_MIN 1750000 #define PLDO_HIGH_UV_MAX 4900000 #define PLDO_HIGH_FINE_STEP_UV 50000 #define NLDO_UV_MIN 750000 #define NLDO_UV_MAX 1537500 #define NLDO_FINE_STEP_UV 12500 /* SMPS masks and values */ /* CTRL register */ /* Legacy mode */ #define SMPS_LEGACY_ENABLE 0x80 #define SMPS_LEGACY_PULL_DOWN_ENABLE 0x40 #define SMPS_LEGACY_VREF_SEL_MASK 0x20 #define SMPS_LEGACY_VPROG_MASK 0x1F /* Advanced mode */ #define SMPS_ADVANCED_BAND_MASK 0xC0 #define SMPS_ADVANCED_BAND_OFF 0x00 #define SMPS_ADVANCED_BAND_1 0x40 #define SMPS_ADVANCED_BAND_2 0x80 #define SMPS_ADVANCED_BAND_3 0xC0 #define SMPS_ADVANCED_VPROG_MASK 0x3F /* Legacy mode voltage ranges */ #define SMPS_MODE1_UV_MIN 1500000 #define SMPS_MODE1_UV_MAX 3050000 #define SMPS_MODE1_UV_STEP 50000 #define SMPS_MODE2_UV_MIN 750000 #define SMPS_MODE2_UV_MAX 1525000 #define SMPS_MODE2_UV_STEP 25000 #define SMPS_MODE3_UV_MIN 375000 #define SMPS_MODE3_UV_MAX 1150000 #define SMPS_MODE3_UV_STEP 25000 /* Advanced mode voltage ranges */ #define SMPS_BAND3_UV_MIN 1500000 #define SMPS_BAND3_UV_MAX 3075000 #define SMPS_BAND3_UV_STEP 25000 #define SMPS_BAND2_UV_MIN 750000 #define SMPS_BAND2_UV_MAX 1537500 #define SMPS_BAND2_UV_STEP 12500 #define SMPS_BAND1_UV_MIN 375000 #define SMPS_BAND1_UV_MAX 1162500 #define SMPS_BAND1_UV_STEP 12500 #define SMPS_UV_MIN SMPS_MODE3_UV_MIN #define SMPS_UV_MAX SMPS_MODE1_UV_MAX /* Test2 register bank 1 */ #define SMPS_LEGACY_VLOW_SEL_MASK 0x01 /* Test2 register bank 6 */ #define SMPS_ADVANCED_PULL_DOWN_ENABLE 0x08 /* Test2 register bank 7 */ #define 
SMPS_ADVANCED_MODE_MASK 0x02 #define SMPS_ADVANCED_MODE 0x02 #define SMPS_LEGACY_MODE 0x00 #define SMPS_IN_ADVANCED_MODE(vreg) \ ((vreg->test_reg[7] & SMPS_ADVANCED_MODE_MASK) == SMPS_ADVANCED_MODE) /* BUCK_SLEEP_CNTRL register */ #define SMPS_PIN_CTRL_MASK 0xF0 #define SMPS_PIN_CTRL_A1 0x80 #define SMPS_PIN_CTRL_A0 0x40 #define SMPS_PIN_CTRL_D1 0x20 #define SMPS_PIN_CTRL_D0 0x10 #define SMPS_PIN_CTRL_LPM_MASK 0x0F #define SMPS_PIN_CTRL_LPM_A1 0x08 #define SMPS_PIN_CTRL_LPM_A0 0x04 #define SMPS_PIN_CTRL_LPM_D1 0x02 #define SMPS_PIN_CTRL_LPM_D0 0x01 /* BUCK_CLOCK_CNTRL register */ #define SMPS_CLK_DIVIDE2 0x40 #define SMPS_CLK_CTRL_MASK 0x30 #define SMPS_CLK_CTRL_FOLLOW_TCXO 0x00 #define SMPS_CLK_CTRL_PWM 0x10 #define SMPS_CLK_CTRL_PFM 0x20 /* LVS masks and values */ /* CTRL register */ #define LVS_ENABLE_MASK 0x80 #define LVS_ENABLE 0x80 #define LVS_PULL_DOWN_ENABLE_MASK 0x40 #define LVS_PULL_DOWN_ENABLE 0x00 #define LVS_PULL_DOWN_DISABLE 0x40 #define LVS_PIN_CTRL_MASK 0x0F #define LVS_PIN_CTRL_EN0 0x08 #define LVS_PIN_CTRL_EN1 0x04 #define LVS_PIN_CTRL_EN2 0x02 #define LVS_PIN_CTRL_EN3 0x01 /* NCP masks and values */ /* CTRL register */ #define NCP_VPROG_MASK 0x1F #define NCP_UV_MIN 1500000 #define NCP_UV_MAX 3050000 #define NCP_UV_STEP 50000 #define GLOBAL_ENABLE_MAX (2) struct pm8058_enable { u16 addr; u8 reg; }; struct pm8058_vreg { struct device *dev; struct pm8058_vreg_pdata *pdata; struct regulator_dev *rdev; struct pm8058_enable *global_enable[GLOBAL_ENABLE_MAX]; int hpm_min_load; int save_uV; unsigned pc_vote; unsigned optimum; unsigned mode_initialized; u16 ctrl_addr; u16 test_addr; u16 clk_ctrl_addr; u16 sleep_ctrl_addr; u8 type; u8 ctrl_reg; u8 test_reg[REGULATOR_TEST_BANKS_MAX]; u8 clk_ctrl_reg; u8 sleep_ctrl_reg; u8 is_nmos; u8 global_enable_mask[GLOBAL_ENABLE_MAX]; }; #define LDO_M2(_id, _ctrl_addr, _test_addr, _is_nmos, _hpm_min_load, \ _en0, _en0_mask, _en1, _en1_mask) \ [PM8058_VREG_ID_##_id] = { \ .ctrl_addr = _ctrl_addr, \ .test_addr = 
_test_addr, \ .type = REGULATOR_TYPE_LDO, \ .hpm_min_load = PM8058_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \ .is_nmos = _is_nmos, \ .global_enable = { \ [0] = _en0, \ [1] = _en1, \ }, \ .global_enable_mask = { \ [0] = _en0_mask, \ [1] = _en1_mask, \ }, \ } #define LDO(_id, _ctrl_addr, _test_addr, _is_nmos, _hpm_min_load, \ _en0, _en0_mask) \ LDO_M2(_id, _ctrl_addr, _test_addr, _is_nmos, _hpm_min_load, \ _en0, _en0_mask, NULL, 0) #define SMPS(_id, _ctrl_addr, _test_addr, _clk_ctrl_addr, _sleep_ctrl_addr, \ _hpm_min_load, _en0, _en0_mask) \ [PM8058_VREG_ID_##_id] = { \ .ctrl_addr = _ctrl_addr, \ .test_addr = _test_addr, \ .clk_ctrl_addr = _clk_ctrl_addr, \ .sleep_ctrl_addr = _sleep_ctrl_addr, \ .type = REGULATOR_TYPE_SMPS, \ .hpm_min_load = PM8058_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \ .global_enable = { \ [0] = _en0, \ [1] = NULL, \ }, \ .global_enable_mask = { \ [0] = _en0_mask, \ [1] = 0, \ }, \ } #define LVS(_id, _ctrl_addr, _en0, _en0_mask) \ [PM8058_VREG_ID_##_id] = { \ .ctrl_addr = _ctrl_addr, \ .type = REGULATOR_TYPE_LVS, \ .global_enable = { \ [0] = _en0, \ [1] = NULL, \ }, \ .global_enable_mask = { \ [0] = _en0_mask, \ [1] = 0, \ }, \ } #define NCP(_id, _ctrl_addr, _test1) \ [PM8058_VREG_ID_##_id] = { \ .ctrl_addr = _ctrl_addr, \ .type = REGULATOR_TYPE_NCP, \ .test_addr = _test1, \ .global_enable = { \ [0] = NULL, \ [1] = NULL, \ }, \ .global_enable_mask = { \ [0] = 0, \ [1] = 0, \ }, \ } #define MASTER_ENABLE_COUNT 6 #define EN_MSM 0 #define EN_PH 1 #define EN_RF 2 #define EN_GRP_5_4 3 #define EN_GRP_3_2 4 #define EN_GRP_1_0 5 /* Master regulator control registers */ static struct pm8058_enable m_en[MASTER_ENABLE_COUNT] = { [EN_MSM] = { .addr = 0x018, /* VREG_EN_MSM */ }, [EN_PH] = { .addr = 0x019, /* VREG_EN_PH */ }, [EN_RF] = { .addr = 0x01A, /* VREG_EN_RF */ }, [EN_GRP_5_4] = { .addr = 0x1C8, /* VREG_EN_MSM_GRP_5-4 */ }, [EN_GRP_3_2] = { .addr = 0x1C9, /* VREG_EN_MSM_GRP_3-2 */ }, [EN_GRP_1_0] = { .addr = 0x1CA, /* VREG_EN_MSM_GRP_1-0 */ }, }; static 
struct pm8058_vreg pm8058_vreg[] = { /* id ctrl test n/p hpm_min m_en m_en_mask */ LDO(L0, 0x009, 0x065, 1, LDO_150, &m_en[EN_GRP_5_4], BIT(3)), LDO(L1, 0x00A, 0x066, 1, LDO_300, &m_en[EN_GRP_5_4], BIT(6) | BIT(2)), LDO(L2, 0x00B, 0x067, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(2)), LDO(L3, 0x00C, 0x068, 0, LDO_150, &m_en[EN_GRP_1_0], BIT(1)), LDO(L4, 0x00D, 0x069, 0, LDO_50, &m_en[EN_MSM], 0), LDO(L5, 0x00E, 0x06A, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(7)), LDO(L6, 0x00F, 0x06B, 0, LDO_50, &m_en[EN_GRP_1_0], BIT(2)), LDO(L7, 0x010, 0x06C, 0, LDO_50, &m_en[EN_GRP_3_2], BIT(3)), LDO(L8, 0x011, 0x06D, 0, LDO_300, &m_en[EN_PH], BIT(7)), LDO(L9, 0x012, 0x06E, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(3)), LDO(L10, 0x013, 0x06F, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(4)), LDO(L11, 0x014, 0x070, 0, LDO_150, &m_en[EN_PH], BIT(4)), LDO(L12, 0x015, 0x071, 0, LDO_150, &m_en[EN_PH], BIT(3)), LDO(L13, 0x016, 0x072, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(1)), LDO(L14, 0x017, 0x073, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(5)), LDO(L15, 0x089, 0x0E5, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(4)), LDO(L16, 0x08A, 0x0E6, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(0)), LDO(L17, 0x08B, 0x0E7, 0, LDO_150, &m_en[EN_RF], BIT(7)), LDO(L18, 0x11D, 0x125, 0, LDO_150, &m_en[EN_RF], BIT(6)), LDO(L19, 0x11E, 0x126, 0, LDO_150, &m_en[EN_RF], BIT(5)), LDO(L20, 0x11F, 0x127, 0, LDO_150, &m_en[EN_RF], BIT(4)), LDO_M2(L21, 0x120, 0x128, 1, LDO_150, &m_en[EN_GRP_5_4], BIT(1), &m_en[EN_GRP_1_0], BIT(6)), LDO(L22, 0x121, 0x129, 1, LDO_300, &m_en[EN_GRP_3_2], BIT(7)), LDO(L23, 0x122, 0x12A, 1, LDO_300, &m_en[EN_GRP_5_4], BIT(0)), LDO(L24, 0x123, 0x12B, 1, LDO_150, &m_en[EN_RF], BIT(3)), LDO(L25, 0x124, 0x12C, 1, LDO_150, &m_en[EN_RF], BIT(2)), /* id ctrl test2 clk sleep hpm_min m_en m_en_mask */ SMPS(S0, 0x004, 0x084, 0x1D1, 0x1D8, SMPS, &m_en[EN_MSM], BIT(7)), SMPS(S1, 0x005, 0x085, 0x1D2, 0x1DB, SMPS, &m_en[EN_MSM], BIT(6)), SMPS(S2, 0x110, 0x119, 0x1D3, 0x1DE, SMPS, &m_en[EN_GRP_5_4], BIT(5)), SMPS(S3, 0x111, 0x11A, 0x1D4, 0x1E1, SMPS, 
&m_en[EN_GRP_5_4], BIT(7) | BIT(4)), SMPS(S4, 0x112, 0x11B, 0x1D5, 0x1E4, SMPS, &m_en[EN_GRP_3_2], BIT(5)), /* id ctrl m_en m_en_mask */ LVS(LVS0, 0x12D, &m_en[EN_RF], BIT(1)), LVS(LVS1, 0x12F, &m_en[EN_GRP_1_0], BIT(0)), /* id ctrl test1 */ NCP(NCP, 0x090, 0x0EC), }; static int pm8058_smps_set_voltage_advanced(struct pm8058_vreg *vreg, int uV, int force_on); static int pm8058_smps_set_voltage_legacy(struct pm8058_vreg *vreg, int uV); static int _pm8058_vreg_is_enabled(struct pm8058_vreg *vreg); static unsigned int pm8058_vreg_get_mode(struct regulator_dev *dev); static void print_write_error(struct pm8058_vreg *vreg, int rc, const char *func); static int pm8058_vreg_write(struct pm8058_vreg *vreg, u16 addr, u8 val, u8 mask, u8 *reg_save) { int rc = 0; u8 reg; reg = (*reg_save & ~mask) | (val & mask); if (reg != *reg_save) rc = pm8xxx_writeb(vreg->dev->parent, addr, reg); if (rc) pr_err("%s: pm8xxx_write failed, rc=%d\n", __func__, rc); else *reg_save = reg; return rc; } static int pm8058_vreg_is_global_enabled(struct pm8058_vreg *vreg) { int ret = 0, i; for (i = 0; (i < GLOBAL_ENABLE_MAX) && !ret && vreg->global_enable[i]; i++) ret = vreg->global_enable[i]->reg & vreg->global_enable_mask[i]; return ret; } static int pm8058_vreg_set_global_enable(struct pm8058_vreg *vreg, int on) { int rc = 0, i; for (i = 0; (i < GLOBAL_ENABLE_MAX) && !rc && vreg->global_enable[i]; i++) rc = pm8058_vreg_write(vreg, vreg->global_enable[i]->addr, (on ? 
vreg->global_enable_mask[i] : 0), vreg->global_enable_mask[i], &vreg->global_enable[i]->reg); return rc; } static int pm8058_vreg_using_pin_ctrl(struct pm8058_vreg *vreg) { int ret = 0; switch (vreg->type) { case REGULATOR_TYPE_LDO: ret = ((vreg->test_reg[5] & LDO_TEST_PIN_CTRL_MASK) << 4) | (vreg->test_reg[6] & LDO_TEST_PIN_CTRL_LPM_MASK); break; case REGULATOR_TYPE_SMPS: ret = vreg->sleep_ctrl_reg & (SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK); break; case REGULATOR_TYPE_LVS: ret = vreg->ctrl_reg & LVS_PIN_CTRL_MASK; break; } return ret; } static int pm8058_vreg_set_pin_ctrl(struct pm8058_vreg *vreg, int on) { int rc = 0, bank; u8 val = 0, mask; unsigned pc = vreg->pdata->pin_ctrl; unsigned pf = vreg->pdata->pin_fn; switch (vreg->type) { case REGULATOR_TYPE_LDO: if (on) { if (pc & PM8058_VREG_PIN_CTRL_D0) val |= LDO_TEST_PIN_CTRL_EN0; if (pc & PM8058_VREG_PIN_CTRL_D1) val |= LDO_TEST_PIN_CTRL_EN1; if (pc & PM8058_VREG_PIN_CTRL_A0) val |= LDO_TEST_PIN_CTRL_EN2; if (pc & PM8058_VREG_PIN_CTRL_A1) val |= LDO_TEST_PIN_CTRL_EN3; bank = (pf == PM8058_VREG_PIN_FN_ENABLE ? 
5 : 6); rc = pm8058_vreg_write(vreg, vreg->test_addr, val | REGULATOR_BANK_SEL(bank) | REGULATOR_BANK_WRITE, LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK, &vreg->test_reg[bank]); if (rc) goto bail; val = LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(0); mask = LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK; rc = pm8058_vreg_write(vreg, vreg->test_addr, val, mask, &vreg->test_reg[0]); if (rc) goto bail; if (pf == PM8058_VREG_PIN_FN_ENABLE) { /* Pin control ON/OFF */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, LDO_CTRL_PM_HPM, LDO_ENABLE_MASK | LDO_CTRL_PM_MASK, &vreg->ctrl_reg); if (rc) goto bail; rc = pm8058_vreg_set_global_enable(vreg, 0); if (rc) goto bail; } else { /* Pin control LPM/HPM */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, LDO_ENABLE | LDO_CTRL_PM_LPM, LDO_ENABLE_MASK | LDO_CTRL_PM_MASK, &vreg->ctrl_reg); if (rc) goto bail; } } else { /* Pin control off */ rc = pm8058_vreg_write(vreg, vreg->test_addr, REGULATOR_BANK_SEL(5) | REGULATOR_BANK_WRITE, LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK, &vreg->test_reg[5]); if (rc) goto bail; rc = pm8058_vreg_write(vreg, vreg->test_addr, REGULATOR_BANK_SEL(6) | REGULATOR_BANK_WRITE, LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK, &vreg->test_reg[6]); if (rc) goto bail; } break; case REGULATOR_TYPE_SMPS: if (on) { if (pf == PM8058_VREG_PIN_FN_ENABLE) { /* Pin control ON/OFF */ if (pc & PM8058_VREG_PIN_CTRL_D0) val |= SMPS_PIN_CTRL_D0; if (pc & PM8058_VREG_PIN_CTRL_D1) val |= SMPS_PIN_CTRL_D1; if (pc & PM8058_VREG_PIN_CTRL_A0) val |= SMPS_PIN_CTRL_A0; if (pc & PM8058_VREG_PIN_CTRL_A1) val |= SMPS_PIN_CTRL_A1; } else { /* Pin control LPM/HPM */ if (pc & PM8058_VREG_PIN_CTRL_D0) val |= SMPS_PIN_CTRL_LPM_D0; if (pc & PM8058_VREG_PIN_CTRL_D1) val |= SMPS_PIN_CTRL_LPM_D1; if (pc & PM8058_VREG_PIN_CTRL_A0) val |= SMPS_PIN_CTRL_LPM_A0; if (pc & PM8058_VREG_PIN_CTRL_A1) val |= SMPS_PIN_CTRL_LPM_A1; } rc = pm8058_vreg_set_global_enable(vreg, 0); if (rc) goto bail; rc = pm8058_smps_set_voltage_legacy(vreg, 
vreg->save_uV); if (rc) goto bail; rc = pm8058_vreg_write(vreg, vreg->sleep_ctrl_addr, val, SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK, &vreg->sleep_ctrl_reg); if (rc) goto bail; rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, (pf == PM8058_VREG_PIN_FN_ENABLE ? 0 : SMPS_LEGACY_ENABLE), SMPS_LEGACY_ENABLE, &vreg->ctrl_reg); if (rc) goto bail; rc = pm8058_vreg_write(vreg, vreg->clk_ctrl_addr, (pf == PM8058_VREG_PIN_FN_ENABLE ? SMPS_CLK_CTRL_PWM : SMPS_CLK_CTRL_PFM), SMPS_CLK_CTRL_MASK, &vreg->clk_ctrl_reg); if (rc) goto bail; } else { /* Pin control off */ if (!SMPS_IN_ADVANCED_MODE(vreg)) { if (_pm8058_vreg_is_enabled(vreg)) val = SMPS_LEGACY_ENABLE; rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, val, SMPS_LEGACY_ENABLE, &vreg->ctrl_reg); if (rc) goto bail; } rc = pm8058_vreg_write(vreg, vreg->sleep_ctrl_addr, 0, SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK, &vreg->sleep_ctrl_reg); if (rc) goto bail; rc = pm8058_smps_set_voltage_advanced(vreg, vreg->save_uV, 0); if (rc) goto bail; } break; case REGULATOR_TYPE_LVS: if (on) { if (pc & PM8058_VREG_PIN_CTRL_D0) val |= LVS_PIN_CTRL_EN0; if (pc & PM8058_VREG_PIN_CTRL_D1) val |= LVS_PIN_CTRL_EN1; if (pc & PM8058_VREG_PIN_CTRL_A0) val |= LVS_PIN_CTRL_EN2; if (pc & PM8058_VREG_PIN_CTRL_A1) val |= LVS_PIN_CTRL_EN3; rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, val, LVS_PIN_CTRL_MASK | LVS_ENABLE_MASK, &vreg->ctrl_reg); if (rc) goto bail; rc = pm8058_vreg_set_global_enable(vreg, 0); if (rc) goto bail; } else { /* Pin control off */ if (_pm8058_vreg_is_enabled(vreg)) val = LVS_ENABLE; rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, val, LVS_ENABLE_MASK | LVS_PIN_CTRL_MASK, &vreg->ctrl_reg); if (rc) goto bail; } break; } bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_vreg_enable(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); int mode; int rc = 0; mode = pm8058_vreg_get_mode(dev); if (mode == REGULATOR_MODE_IDLE) { /* Turn on pin control. 
*/ rc = pm8058_vreg_set_pin_ctrl(vreg, 1); if (rc) goto bail; return rc; } if (vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg)) rc = pm8058_smps_set_voltage_advanced(vreg, vreg->save_uV, 1); else rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, REGULATOR_EN_MASK, REGULATOR_EN_MASK, &vreg->ctrl_reg); bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int _pm8058_vreg_is_enabled(struct pm8058_vreg *vreg) { /* * All regulator types except advanced mode SMPS have enable bit in * bit 7 of the control register. Global enable and pin control also * do not work for advanced mode SMPS. */ if (!(vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg)) && ((vreg->ctrl_reg & REGULATOR_EN_MASK) || pm8058_vreg_is_global_enabled(vreg) || pm8058_vreg_using_pin_ctrl(vreg))) return 1; else if (vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg) && ((vreg->ctrl_reg & SMPS_ADVANCED_BAND_MASK) != SMPS_ADVANCED_BAND_OFF)) return 1; return 0; } static int pm8058_vreg_is_enabled(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); return _pm8058_vreg_is_enabled(vreg); } static int pm8058_vreg_disable(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); int rc = 0; /* Disable in global control register. */ rc = pm8058_vreg_set_global_enable(vreg, 0); if (rc) goto bail; /* Turn off pin control. */ rc = pm8058_vreg_set_pin_ctrl(vreg, 0); if (rc) goto bail; /* Disable in local control register. 
*/ if (vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg)) rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, SMPS_ADVANCED_BAND_OFF, SMPS_ADVANCED_BAND_MASK, &vreg->ctrl_reg); else rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, 0, REGULATOR_EN_MASK, &vreg->ctrl_reg); bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_pldo_set_voltage(struct pm8058_vreg *vreg, int uV) { int vmin, rc = 0; unsigned vprog, fine_step; u8 range_ext, range_sel, fine_step_reg; if (uV < PLDO_LOW_UV_MIN || uV > PLDO_HIGH_UV_MAX) return -EINVAL; if (uV < PLDO_LOW_UV_MAX + PLDO_LOW_FINE_STEP_UV) { vmin = PLDO_LOW_UV_MIN; fine_step = PLDO_LOW_FINE_STEP_UV; range_ext = 0; range_sel = LDO_TEST_RANGE_SEL_MASK; } else if (uV < PLDO_NORM_UV_MAX + PLDO_NORM_FINE_STEP_UV) { vmin = PLDO_NORM_UV_MIN; fine_step = PLDO_NORM_FINE_STEP_UV; range_ext = 0; range_sel = 0; } else { vmin = PLDO_HIGH_UV_MIN; fine_step = PLDO_HIGH_FINE_STEP_UV; range_ext = LDO_TEST_RANGE_EXT_MASK; range_sel = 0; } vprog = (uV - vmin) / fine_step; fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT; vprog >>= 1; /* * Disable program voltage update if range extension, range select, * or fine step have changed and the regulator is enabled. */ if (_pm8058_vreg_is_enabled(vreg) && (((range_ext ^ vreg->test_reg[4]) & LDO_TEST_RANGE_EXT_MASK) || ((range_sel ^ vreg->test_reg[2]) & LDO_TEST_RANGE_SEL_MASK) || ((fine_step_reg ^ vreg->test_reg[2]) & LDO_TEST_FINE_STEP_MASK))) { rc = pm8058_vreg_write(vreg, vreg->test_addr, REGULATOR_BANK_SEL(2) | REGULATOR_BANK_WRITE, REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK, &vreg->test_reg[2]); if (rc) goto bail; } /* Write new voltage. */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, vprog, LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg); if (rc) goto bail; /* Write range extension. 
*/ rc = pm8058_vreg_write(vreg, vreg->test_addr, range_ext | REGULATOR_BANK_SEL(4) | REGULATOR_BANK_WRITE, LDO_TEST_RANGE_EXT_MASK | REGULATOR_BANK_MASK, &vreg->test_reg[4]); if (rc) goto bail; /* Write fine step, range select and program voltage update. */ rc = pm8058_vreg_write(vreg, vreg->test_addr, fine_step_reg | range_sel | REGULATOR_BANK_SEL(2) | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK, LDO_TEST_FINE_STEP_MASK | LDO_TEST_RANGE_SEL_MASK | REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK, &vreg->test_reg[2]); bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_nldo_set_voltage(struct pm8058_vreg *vreg, int uV) { unsigned vprog, fine_step_reg; int rc; if (uV < NLDO_UV_MIN || uV > NLDO_UV_MAX) return -EINVAL; vprog = (uV - NLDO_UV_MIN) / NLDO_FINE_STEP_UV; fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT; vprog >>= 1; /* Write new voltage. */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, vprog, LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg); if (rc) goto bail; /* Write fine step. 
*/ rc = pm8058_vreg_write(vreg, vreg->test_addr, fine_step_reg | REGULATOR_BANK_SEL(2) | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK, LDO_TEST_FINE_STEP_MASK | REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK, &vreg->test_reg[2]); bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_ldo_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned *selector) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); if (vreg->is_nmos) return pm8058_nldo_set_voltage(vreg, min_uV); else return pm8058_pldo_set_voltage(vreg, min_uV); } static int pm8058_pldo_get_voltage(struct pm8058_vreg *vreg) { int vmin, fine_step; u8 range_ext, range_sel, vprog, fine_step_reg; fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK; range_sel = vreg->test_reg[2] & LDO_TEST_RANGE_SEL_MASK; range_ext = vreg->test_reg[4] & LDO_TEST_RANGE_EXT_MASK; vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK; vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT); if (range_sel) { /* low range mode */ fine_step = PLDO_LOW_FINE_STEP_UV; vmin = PLDO_LOW_UV_MIN; } else if (!range_ext) { /* normal mode */ fine_step = PLDO_NORM_FINE_STEP_UV; vmin = PLDO_NORM_UV_MIN; } else { /* high range mode */ fine_step = PLDO_HIGH_FINE_STEP_UV; vmin = PLDO_HIGH_UV_MIN; } return fine_step * vprog + vmin; } static int pm8058_nldo_get_voltage(struct pm8058_vreg *vreg) { u8 vprog, fine_step_reg; fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK; vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK; vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT); return NLDO_FINE_STEP_UV * vprog + NLDO_UV_MIN; } static int pm8058_ldo_get_voltage(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); if (vreg->is_nmos) return pm8058_nldo_get_voltage(vreg); else return pm8058_pldo_get_voltage(vreg); } static int pm8058_smps_get_voltage_advanced(struct pm8058_vreg *vreg) { u8 vprog, band; int uV = 0; vprog = vreg->ctrl_reg & 
SMPS_ADVANCED_VPROG_MASK; band = vreg->ctrl_reg & SMPS_ADVANCED_BAND_MASK; if (band == SMPS_ADVANCED_BAND_1) uV = vprog * SMPS_BAND1_UV_STEP + SMPS_BAND1_UV_MIN; else if (band == SMPS_ADVANCED_BAND_2) uV = vprog * SMPS_BAND2_UV_STEP + SMPS_BAND2_UV_MIN; else if (band == SMPS_ADVANCED_BAND_3) uV = vprog * SMPS_BAND3_UV_STEP + SMPS_BAND3_UV_MIN; else uV = vreg->save_uV; return uV; } static int pm8058_smps_get_voltage_legacy(struct pm8058_vreg *vreg) { u8 vlow, vref, vprog; int uV; vlow = vreg->test_reg[1] & SMPS_LEGACY_VLOW_SEL_MASK; vref = vreg->ctrl_reg & SMPS_LEGACY_VREF_SEL_MASK; vprog = vreg->ctrl_reg & SMPS_LEGACY_VPROG_MASK; if (vlow && vref) { /* mode 3 */ uV = vprog * SMPS_MODE3_UV_STEP + SMPS_MODE3_UV_MIN; } else if (vref) { /* mode 2 */ uV = vprog * SMPS_MODE2_UV_STEP + SMPS_MODE2_UV_MIN; } else { /* mode 1 */ uV = vprog * SMPS_MODE1_UV_STEP + SMPS_MODE1_UV_MIN; } return uV; } static int _pm8058_smps_get_voltage(struct pm8058_vreg *vreg) { if (SMPS_IN_ADVANCED_MODE(vreg)) return pm8058_smps_get_voltage_advanced(vreg); return pm8058_smps_get_voltage_legacy(vreg); } static int pm8058_smps_get_voltage(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); return _pm8058_smps_get_voltage(vreg); } static int pm8058_smps_set_voltage_advanced(struct pm8058_vreg *vreg, int uV, int force_on) { u8 vprog, band; int rc, new_uV; if (uV < SMPS_BAND1_UV_MAX + SMPS_BAND1_UV_STEP) { vprog = ((uV - SMPS_BAND1_UV_MIN) / SMPS_BAND1_UV_STEP); band = SMPS_ADVANCED_BAND_1; new_uV = SMPS_BAND1_UV_MIN + vprog * SMPS_BAND1_UV_STEP; } else if (uV < SMPS_BAND2_UV_MAX + SMPS_BAND2_UV_STEP) { vprog = ((uV - SMPS_BAND2_UV_MIN) / SMPS_BAND2_UV_STEP); band = SMPS_ADVANCED_BAND_2; new_uV = SMPS_BAND2_UV_MIN + vprog * SMPS_BAND2_UV_STEP; } else { vprog = ((uV - SMPS_BAND3_UV_MIN) / SMPS_BAND3_UV_STEP); band = SMPS_ADVANCED_BAND_3; new_uV = SMPS_BAND3_UV_MIN + vprog * SMPS_BAND3_UV_STEP; } /* Do not set band if regulator currently disabled. 
*/ if (!_pm8058_vreg_is_enabled(vreg) && !force_on) band = SMPS_ADVANCED_BAND_OFF; /* Set advanced mode bit to 1. */ rc = pm8058_vreg_write(vreg, vreg->test_addr, SMPS_ADVANCED_MODE | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(7), SMPS_ADVANCED_MODE_MASK | REGULATOR_BANK_MASK, &vreg->test_reg[7]); if (rc) goto bail; /* Set voltage and voltage band. */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, band | vprog, SMPS_ADVANCED_BAND_MASK | SMPS_ADVANCED_VPROG_MASK, &vreg->ctrl_reg); if (rc) goto bail; vreg->save_uV = new_uV; bail: return rc; } static int pm8058_smps_set_voltage_legacy(struct pm8058_vreg *vreg, int uV) { u8 vlow, vref, vprog, pd, en; int rc; if (uV < SMPS_MODE3_UV_MAX + SMPS_MODE3_UV_STEP) { vprog = ((uV - SMPS_MODE3_UV_MIN) / SMPS_MODE3_UV_STEP); vref = SMPS_LEGACY_VREF_SEL_MASK; vlow = SMPS_LEGACY_VLOW_SEL_MASK; } else if (uV < SMPS_MODE2_UV_MAX + SMPS_MODE2_UV_STEP) { vprog = ((uV - SMPS_MODE2_UV_MIN) / SMPS_MODE2_UV_STEP); vref = SMPS_LEGACY_VREF_SEL_MASK; vlow = 0; } else { vprog = ((uV - SMPS_MODE1_UV_MIN) / SMPS_MODE1_UV_STEP); vref = 0; vlow = 0; } /* set vlow bit for ultra low voltage mode */ rc = pm8058_vreg_write(vreg, vreg->test_addr, vlow | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(1), REGULATOR_BANK_MASK | SMPS_LEGACY_VLOW_SEL_MASK, &vreg->test_reg[1]); if (rc) goto bail; /* Set advanced mode bit to 0. */ rc = pm8058_vreg_write(vreg, vreg->test_addr, SMPS_LEGACY_MODE | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(7), SMPS_ADVANCED_MODE_MASK | REGULATOR_BANK_MASK, &vreg->test_reg[7]); if (rc) goto bail; en = (_pm8058_vreg_is_enabled(vreg) ? SMPS_LEGACY_ENABLE : 0); pd = (vreg->pdata->pull_down_enable ? SMPS_LEGACY_PULL_DOWN_ENABLE : 0); /* Set voltage (and the rest of the control register). 
*/ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, en | pd | vref | vprog, SMPS_LEGACY_ENABLE | SMPS_LEGACY_PULL_DOWN_ENABLE | SMPS_LEGACY_VREF_SEL_MASK | SMPS_LEGACY_VPROG_MASK, &vreg->ctrl_reg); vreg->save_uV = pm8058_smps_get_voltage_legacy(vreg); bail: return rc; } static int pm8058_smps_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned *selector) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); int rc = 0; if (min_uV < SMPS_UV_MIN || min_uV > SMPS_UV_MAX) return -EINVAL; if (SMPS_IN_ADVANCED_MODE(vreg)) rc = pm8058_smps_set_voltage_advanced(vreg, min_uV, 0); else rc = pm8058_smps_set_voltage_legacy(vreg, min_uV); if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_ncp_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned *selector) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); int rc; u8 val; if (min_uV < NCP_UV_MIN || min_uV > NCP_UV_MAX) return -EINVAL; val = (min_uV - NCP_UV_MIN) / NCP_UV_STEP; /* voltage setting */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, val, NCP_VPROG_MASK, &vreg->ctrl_reg); if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_ncp_get_voltage(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); u8 vprog = vreg->ctrl_reg & NCP_VPROG_MASK; return NCP_UV_MIN + vprog * NCP_UV_STEP; } static int pm8058_ldo_set_mode(struct pm8058_vreg *vreg, unsigned int mode) { int rc = 0; u8 mask, val; switch (mode) { case REGULATOR_MODE_FAST: /* HPM */ val = (_pm8058_vreg_is_enabled(vreg) ? LDO_ENABLE : 0) | LDO_CTRL_PM_HPM; mask = LDO_ENABLE_MASK | LDO_CTRL_PM_MASK; rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, val, mask, &vreg->ctrl_reg); if (rc) goto bail; if (pm8058_vreg_using_pin_ctrl(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 0); if (rc) goto bail; break; case REGULATOR_MODE_STANDBY: /* LPM */ val = (_pm8058_vreg_is_enabled(vreg) ? 
LDO_ENABLE : 0) | LDO_CTRL_PM_LPM; mask = LDO_ENABLE_MASK | LDO_CTRL_PM_MASK; rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, val, mask, &vreg->ctrl_reg); if (rc) goto bail; val = LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(0); mask = LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK; rc = pm8058_vreg_write(vreg, vreg->test_addr, val, mask, &vreg->test_reg[0]); if (rc) goto bail; if (pm8058_vreg_using_pin_ctrl(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 0); if (rc) goto bail; break; case REGULATOR_MODE_IDLE: /* Pin Control */ if (_pm8058_vreg_is_enabled(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 1); if (rc) goto bail; break; default: pr_err("%s: invalid mode: %u\n", __func__, mode); return -EINVAL; } bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_smps_set_mode(struct pm8058_vreg *vreg, unsigned int mode) { int rc = 0; u8 mask, val; switch (mode) { case REGULATOR_MODE_FAST: /* HPM */ val = SMPS_CLK_CTRL_PWM; mask = SMPS_CLK_CTRL_MASK; rc = pm8058_vreg_write(vreg, vreg->clk_ctrl_addr, val, mask, &vreg->clk_ctrl_reg); if (rc) goto bail; if (pm8058_vreg_using_pin_ctrl(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 0); if (rc) goto bail; break; case REGULATOR_MODE_STANDBY: /* LPM */ val = SMPS_CLK_CTRL_PFM; mask = SMPS_CLK_CTRL_MASK; rc = pm8058_vreg_write(vreg, vreg->clk_ctrl_addr, val, mask, &vreg->clk_ctrl_reg); if (rc) goto bail; if (pm8058_vreg_using_pin_ctrl(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 0); if (rc) goto bail; break; case REGULATOR_MODE_IDLE: /* Pin Control */ if (_pm8058_vreg_is_enabled(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 1); if (rc) goto bail; break; default: pr_err("%s: invalid mode: %u\n", __func__, mode); return -EINVAL; } bail: if (rc) print_write_error(vreg, rc, __func__); return rc; } static int pm8058_lvs_set_mode(struct pm8058_vreg *vreg, unsigned int mode) { int rc = 0; if (mode == REGULATOR_MODE_IDLE) { /* Use pin control. 
*/ if (_pm8058_vreg_is_enabled(vreg)) rc = pm8058_vreg_set_pin_ctrl(vreg, 1); } else { /* Turn off pin control. */ rc = pm8058_vreg_set_pin_ctrl(vreg, 0); } return rc; } /* * Optimum mode programming: * REGULATOR_MODE_FAST: Go to HPM (highest priority) * REGULATOR_MODE_STANDBY: Go to pin ctrl mode if there are any pin ctrl * votes, else go to LPM * * Pin ctrl mode voting via regulator set_mode: * REGULATOR_MODE_IDLE: Go to pin ctrl mode if the optimum mode is LPM, else * go to HPM * REGULATOR_MODE_NORMAL: Go to LPM if it is the optimum mode, else go to HPM */ static int pm8058_vreg_set_mode(struct regulator_dev *dev, unsigned int mode) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); unsigned prev_optimum = vreg->optimum; unsigned prev_pc_vote = vreg->pc_vote; unsigned prev_mode_initialized = vreg->mode_initialized; int new_mode = REGULATOR_MODE_FAST; int rc = 0; /* Determine new mode to go into. */ switch (mode) { case REGULATOR_MODE_FAST: new_mode = REGULATOR_MODE_FAST; vreg->optimum = mode; vreg->mode_initialized = 1; break; case REGULATOR_MODE_STANDBY: if (vreg->pc_vote) new_mode = REGULATOR_MODE_IDLE; else new_mode = REGULATOR_MODE_STANDBY; vreg->optimum = mode; vreg->mode_initialized = 1; break; case REGULATOR_MODE_IDLE: if (vreg->pc_vote++) goto done; /* already taken care of */ if (vreg->mode_initialized && vreg->optimum == REGULATOR_MODE_FAST) new_mode = REGULATOR_MODE_FAST; else new_mode = REGULATOR_MODE_IDLE; break; case REGULATOR_MODE_NORMAL: if (vreg->pc_vote && --(vreg->pc_vote)) goto done; /* already taken care of */ if (vreg->optimum == REGULATOR_MODE_STANDBY) new_mode = REGULATOR_MODE_STANDBY; else new_mode = REGULATOR_MODE_FAST; break; default: pr_err("%s: unknown mode, mode=%u\n", __func__, mode); return -EINVAL; } switch (vreg->type) { case REGULATOR_TYPE_LDO: rc = pm8058_ldo_set_mode(vreg, new_mode); break; case REGULATOR_TYPE_SMPS: rc = pm8058_smps_set_mode(vreg, new_mode); break; case REGULATOR_TYPE_LVS: rc = pm8058_lvs_set_mode(vreg, 
new_mode); break; } if (rc) { print_write_error(vreg, rc, __func__); vreg->mode_initialized = prev_mode_initialized; vreg->optimum = prev_optimum; vreg->pc_vote = prev_pc_vote; return rc; } done: return 0; } static unsigned int pm8058_vreg_get_mode(struct regulator_dev *dev) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); if (!vreg->mode_initialized && vreg->pc_vote) return REGULATOR_MODE_IDLE; /* Check physical pin control state. */ switch (vreg->type) { case REGULATOR_TYPE_LDO: if (!(vreg->ctrl_reg & LDO_ENABLE_MASK) && !pm8058_vreg_is_global_enabled(vreg) && (vreg->test_reg[5] & LDO_TEST_PIN_CTRL_MASK)) return REGULATOR_MODE_IDLE; else if (((vreg->ctrl_reg & LDO_ENABLE_MASK) || pm8058_vreg_is_global_enabled(vreg)) && (vreg->ctrl_reg & LDO_CTRL_PM_MASK) && (vreg->test_reg[6] & LDO_TEST_PIN_CTRL_LPM_MASK)) return REGULATOR_MODE_IDLE; break; case REGULATOR_TYPE_SMPS: if (!SMPS_IN_ADVANCED_MODE(vreg) && !(vreg->ctrl_reg & REGULATOR_EN_MASK) && !pm8058_vreg_is_global_enabled(vreg) && (vreg->sleep_ctrl_reg & SMPS_PIN_CTRL_MASK)) return REGULATOR_MODE_IDLE; else if (!SMPS_IN_ADVANCED_MODE(vreg) && ((vreg->ctrl_reg & REGULATOR_EN_MASK) || pm8058_vreg_is_global_enabled(vreg)) && ((vreg->clk_ctrl_reg & SMPS_CLK_CTRL_MASK) == SMPS_CLK_CTRL_PFM) && (vreg->sleep_ctrl_reg & SMPS_PIN_CTRL_LPM_MASK)) return REGULATOR_MODE_IDLE; break; case REGULATOR_TYPE_LVS: if (!(vreg->ctrl_reg & LVS_ENABLE_MASK) && !pm8058_vreg_is_global_enabled(vreg) && (vreg->ctrl_reg & LVS_PIN_CTRL_MASK)) return REGULATOR_MODE_IDLE; } if (vreg->optimum == REGULATOR_MODE_FAST) return REGULATOR_MODE_FAST; else if (vreg->pc_vote) return REGULATOR_MODE_IDLE; else if (vreg->optimum == REGULATOR_MODE_STANDBY) return REGULATOR_MODE_STANDBY; return REGULATOR_MODE_FAST; } unsigned int pm8058_vreg_get_optimum_mode(struct regulator_dev *dev, int input_uV, int output_uV, int load_uA) { struct pm8058_vreg *vreg = rdev_get_drvdata(dev); if (load_uA <= 0) { /* * pm8058_vreg_get_optimum_mode is being called before 
consumers * have specified their load currents via * regulator_set_optimum_mode. Return whatever the existing mode * is. */ return pm8058_vreg_get_mode(dev); } if (load_uA >= vreg->hpm_min_load) return REGULATOR_MODE_FAST; return REGULATOR_MODE_STANDBY; } static struct regulator_ops pm8058_ldo_ops = { .enable = pm8058_vreg_enable, .disable = pm8058_vreg_disable, .is_enabled = pm8058_vreg_is_enabled, .set_voltage = pm8058_ldo_set_voltage, .get_voltage = pm8058_ldo_get_voltage, .set_mode = pm8058_vreg_set_mode, .get_mode = pm8058_vreg_get_mode, .get_optimum_mode = pm8058_vreg_get_optimum_mode, }; static struct regulator_ops pm8058_smps_ops = { .enable = pm8058_vreg_enable, .disable = pm8058_vreg_disable, .is_enabled = pm8058_vreg_is_enabled, .set_voltage = pm8058_smps_set_voltage, .get_voltage = pm8058_smps_get_voltage, .set_mode = pm8058_vreg_set_mode, .get_mode = pm8058_vreg_get_mode, .get_optimum_mode = pm8058_vreg_get_optimum_mode, }; static struct regulator_ops pm8058_lvs_ops = { .enable = pm8058_vreg_enable, .disable = pm8058_vreg_disable, .is_enabled = pm8058_vreg_is_enabled, .set_mode = pm8058_vreg_set_mode, .get_mode = pm8058_vreg_get_mode, }; static struct regulator_ops pm8058_ncp_ops = { .enable = pm8058_vreg_enable, .disable = pm8058_vreg_disable, .is_enabled = pm8058_vreg_is_enabled, .set_voltage = pm8058_ncp_set_voltage, .get_voltage = pm8058_ncp_get_voltage, }; #define VREG_DESCRIP(_id, _name, _ops) \ [_id] = { \ .id = _id, \ .name = _name, \ .ops = _ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ } static struct regulator_desc pm8058_vreg_descrip[] = { VREG_DESCRIP(PM8058_VREG_ID_L0, "8058_l0", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L1, "8058_l1", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L2, "8058_l2", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L3, "8058_l3", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L4, "8058_l4", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L5, "8058_l5", &pm8058_ldo_ops), 
VREG_DESCRIP(PM8058_VREG_ID_L6, "8058_l6", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L7, "8058_l7", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L8, "8058_l8", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L9, "8058_l9", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L10, "8058_l10", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L11, "8058_l11", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L12, "8058_l12", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L13, "8058_l13", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L14, "8058_l14", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L15, "8058_l15", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L16, "8058_l16", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L17, "8058_l17", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L18, "8058_l18", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L19, "8058_l19", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L20, "8058_l20", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L21, "8058_l21", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L22, "8058_l22", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L23, "8058_l23", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L24, "8058_l24", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_L25, "8058_l25", &pm8058_ldo_ops), VREG_DESCRIP(PM8058_VREG_ID_S0, "8058_s0", &pm8058_smps_ops), VREG_DESCRIP(PM8058_VREG_ID_S1, "8058_s1", &pm8058_smps_ops), VREG_DESCRIP(PM8058_VREG_ID_S2, "8058_s2", &pm8058_smps_ops), VREG_DESCRIP(PM8058_VREG_ID_S3, "8058_s3", &pm8058_smps_ops), VREG_DESCRIP(PM8058_VREG_ID_S4, "8058_s4", &pm8058_smps_ops), VREG_DESCRIP(PM8058_VREG_ID_LVS0, "8058_lvs0", &pm8058_lvs_ops), VREG_DESCRIP(PM8058_VREG_ID_LVS1, "8058_lvs1", &pm8058_lvs_ops), VREG_DESCRIP(PM8058_VREG_ID_NCP, "8058_ncp", &pm8058_ncp_ops), }; static int pm8058_master_enable_init(struct pm8058_vreg *vreg) { int rc = 0, i; for (i = 0; i < MASTER_ENABLE_COUNT; i++) { rc = pm8xxx_readb(vreg->dev->parent, m_en[i].addr, &(m_en[i].reg)); if (rc) goto bail; } bail: if (rc) pr_err("%s: 
pm8xxx_read failed, rc=%d\n", __func__, rc); return rc; } static int pm8058_init_ldo(struct pm8058_vreg *vreg) { int rc = 0, i; u8 bank; /* Save the current test register state. */ for (i = 0; i < LDO_TEST_BANKS; i++) { bank = REGULATOR_BANK_SEL(i); rc = pm8xxx_writeb(vreg->dev->parent, vreg->test_addr, bank); if (rc) goto bail; rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr, &vreg->test_reg[i]); if (rc) goto bail; vreg->test_reg[i] |= REGULATOR_BANK_WRITE; } if ((vreg->ctrl_reg & LDO_CTRL_PM_MASK) == LDO_CTRL_PM_LPM) vreg->optimum = REGULATOR_MODE_STANDBY; else vreg->optimum = REGULATOR_MODE_FAST; /* Set pull down enable based on platform data. */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, (vreg->pdata->pull_down_enable ? LDO_PULL_DOWN_ENABLE : 0), LDO_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg); bail: return rc; } static int pm8058_init_smps(struct pm8058_vreg *vreg) { int rc = 0, i; u8 bank; /* Save the current test2 register state. */ for (i = 0; i < SMPS_TEST_BANKS; i++) { bank = REGULATOR_BANK_SEL(i); rc = pm8xxx_writeb(vreg->dev->parent, vreg->test_addr, bank); if (rc) goto bail; rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr, &vreg->test_reg[i]); if (rc) goto bail; vreg->test_reg[i] |= REGULATOR_BANK_WRITE; } /* Save the current clock control register state. */ rc = pm8xxx_readb(vreg->dev->parent, vreg->clk_ctrl_addr, &vreg->clk_ctrl_reg); if (rc) goto bail; /* Save the current sleep control register state. */ rc = pm8xxx_readb(vreg->dev->parent, vreg->sleep_ctrl_addr, &vreg->sleep_ctrl_reg); if (rc) goto bail; vreg->save_uV = 1; /* This is not a no-op. */ vreg->save_uV = _pm8058_smps_get_voltage(vreg); if ((vreg->clk_ctrl_reg & SMPS_CLK_CTRL_MASK) == SMPS_CLK_CTRL_PFM) vreg->optimum = REGULATOR_MODE_STANDBY; else vreg->optimum = REGULATOR_MODE_FAST; /* Set advanced mode pull down enable based on platform data. */ rc = pm8058_vreg_write(vreg, vreg->test_addr, (vreg->pdata->pull_down_enable ? 
SMPS_ADVANCED_PULL_DOWN_ENABLE : 0) | REGULATOR_BANK_SEL(6) | REGULATOR_BANK_WRITE, REGULATOR_BANK_MASK | SMPS_ADVANCED_PULL_DOWN_ENABLE, &vreg->test_reg[6]); if (rc) goto bail; if (!SMPS_IN_ADVANCED_MODE(vreg)) { /* Set legacy mode pull down enable based on platform data. */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, (vreg->pdata->pull_down_enable ? SMPS_LEGACY_PULL_DOWN_ENABLE : 0), SMPS_LEGACY_PULL_DOWN_ENABLE, &vreg->ctrl_reg); if (rc) goto bail; } bail: return rc; } static int pm8058_init_lvs(struct pm8058_vreg *vreg) { int rc = 0; vreg->optimum = REGULATOR_MODE_FAST; /* Set pull down enable based on platform data. */ rc = pm8058_vreg_write(vreg, vreg->ctrl_addr, (vreg->pdata->pull_down_enable ? LVS_PULL_DOWN_ENABLE : LVS_PULL_DOWN_DISABLE), LVS_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg); return rc; } static int pm8058_init_ncp(struct pm8058_vreg *vreg) { int rc = 0; /* Save the current test1 register state. */ rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr, &vreg->test_reg[0]); if (rc) goto bail; vreg->optimum = REGULATOR_MODE_FAST; bail: return rc; } static int pm8058_init_regulator(struct pm8058_vreg *vreg) { static int master_enable_inited; int rc = 0; vreg->mode_initialized = 0; if (!master_enable_inited) { rc = pm8058_master_enable_init(vreg); if (!rc) master_enable_inited = 1; } /* save the current control register state */ rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg); if (rc) goto bail; switch (vreg->type) { case REGULATOR_TYPE_LDO: rc = pm8058_init_ldo(vreg); break; case REGULATOR_TYPE_SMPS: rc = pm8058_init_smps(vreg); break; case REGULATOR_TYPE_LVS: rc = pm8058_init_lvs(vreg); break; case REGULATOR_TYPE_NCP: rc = pm8058_init_ncp(vreg); break; } bail: if (rc) pr_err("%s: pm8058_read/write failed; initial register states " "unknown, rc=%d\n", __func__, rc); return rc; } static int __devinit pm8058_vreg_probe(struct platform_device *pdev) { struct regulator_desc *rdesc; struct pm8058_vreg *vreg; const char *reg_name = 
NULL; int rc = 0; if (pdev == NULL) return -EINVAL; if (pdev->id >= 0 && pdev->id < PM8058_VREG_MAX) { rdesc = &pm8058_vreg_descrip[pdev->id]; vreg = &pm8058_vreg[pdev->id]; vreg->pdata = pdev->dev.platform_data; reg_name = pm8058_vreg_descrip[pdev->id].name; vreg->dev = &pdev->dev; rc = pm8058_init_regulator(vreg); if (rc) goto bail; /* Disallow idle and normal modes if pin control isn't set. */ if (vreg->pdata->pin_ctrl == 0) vreg->pdata->init_data.constraints.valid_modes_mask &= ~(REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE); vreg->rdev = regulator_register(rdesc, &pdev->dev, &vreg->pdata->init_data, vreg, NULL); if (IS_ERR(vreg->rdev)) { rc = PTR_ERR(vreg->rdev); pr_err("%s: regulator_register failed for %s, rc=%d\n", __func__, reg_name, rc); } } else { rc = -ENODEV; } bail: if (rc) pr_err("%s: error for %s, rc=%d\n", __func__, reg_name, rc); return rc; } static int __devexit pm8058_vreg_remove(struct platform_device *pdev) { regulator_unregister(pm8058_vreg[pdev->id].rdev); return 0; } static struct platform_driver pm8058_vreg_driver = { .probe = pm8058_vreg_probe, .remove = __devexit_p(pm8058_vreg_remove), .driver = { .name = "pm8058-regulator", .owner = THIS_MODULE, }, }; static int __init pm8058_vreg_init(void) { return platform_driver_register(&pm8058_vreg_driver); } static void __exit pm8058_vreg_exit(void) { platform_driver_unregister(&pm8058_vreg_driver); } static void print_write_error(struct pm8058_vreg *vreg, int rc, const char *func) { const char *reg_name = NULL; ptrdiff_t id = vreg - pm8058_vreg; if (id >= 0 && id < PM8058_VREG_MAX) reg_name = pm8058_vreg_descrip[id].name; pr_err("%s: pm8058_vreg_write failed for %s, rc=%d\n", func, reg_name, rc); } subsys_initcall(pm8058_vreg_init); module_exit(pm8058_vreg_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC8058 regulator driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pm8058-regulator");
gpl-2.0
LokiWuh/android_kernel_asus_grouper
arch/arm/mach-s3c64xx/dev-uart.c
3920
3019
/* linux/arch/arm/plat-s3c64xx/dev-uart.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * Base S3C64XX UART resource and device definitions * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/platform_device.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/map.h> #include <plat/devs.h> /* Serial port registrations */ /* 64xx uarts are closer together */ static struct resource s3c64xx_uart0_resource[] = { [0] = { .start = S3C_PA_UART0, .end = S3C_PA_UART0 + 0x100, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_S3CUART_RX0, .end = IRQ_S3CUART_RX0, .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_S3CUART_TX0, .end = IRQ_S3CUART_TX0, .flags = IORESOURCE_IRQ, }, [3] = { .start = IRQ_S3CUART_ERR0, .end = IRQ_S3CUART_ERR0, .flags = IORESOURCE_IRQ, } }; static struct resource s3c64xx_uart1_resource[] = { [0] = { .start = S3C_PA_UART1, .end = S3C_PA_UART1 + 0x100, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_S3CUART_RX1, .end = IRQ_S3CUART_RX1, .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_S3CUART_TX1, .end = IRQ_S3CUART_TX1, .flags = IORESOURCE_IRQ, }, [3] = { .start = IRQ_S3CUART_ERR1, .end = IRQ_S3CUART_ERR1, .flags = IORESOURCE_IRQ, }, }; static struct resource s3c6xx_uart2_resource[] = { [0] = { .start = S3C_PA_UART2, .end = S3C_PA_UART2 + 0x100, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_S3CUART_RX2, .end = IRQ_S3CUART_RX2, .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_S3CUART_TX2, .end = IRQ_S3CUART_TX2, .flags = IORESOURCE_IRQ, }, [3] = { .start = IRQ_S3CUART_ERR2, .end = IRQ_S3CUART_ERR2, .flags = IORESOURCE_IRQ, }, }; static struct resource 
s3c64xx_uart3_resource[] = { [0] = { .start = S3C_PA_UART3, .end = S3C_PA_UART3 + 0x100, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_S3CUART_RX3, .end = IRQ_S3CUART_RX3, .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_S3CUART_TX3, .end = IRQ_S3CUART_TX3, .flags = IORESOURCE_IRQ, }, [3] = { .start = IRQ_S3CUART_ERR3, .end = IRQ_S3CUART_ERR3, .flags = IORESOURCE_IRQ, }, }; struct s3c24xx_uart_resources s3c64xx_uart_resources[] __initdata = { [0] = { .resources = s3c64xx_uart0_resource, .nr_resources = ARRAY_SIZE(s3c64xx_uart0_resource), }, [1] = { .resources = s3c64xx_uart1_resource, .nr_resources = ARRAY_SIZE(s3c64xx_uart1_resource), }, [2] = { .resources = s3c6xx_uart2_resource, .nr_resources = ARRAY_SIZE(s3c6xx_uart2_resource), }, [3] = { .resources = s3c64xx_uart3_resource, .nr_resources = ARRAY_SIZE(s3c64xx_uart3_resource), }, };
gpl-2.0
elektroschmock/android_kernel_google_msm
kernel/irq/irqdomain.c
4432
21862
#include <linux/debugfs.h> #include <linux/hardirq.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdesc.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/fs.h> #define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs. * ie. legacy 8259, gets irqs 1..15 */ #define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */ #define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */ #define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */ static LIST_HEAD(irq_domain_list); static DEFINE_MUTEX(irq_domain_mutex); static DEFINE_MUTEX(revmap_trees_mutex); static struct irq_domain *irq_default_domain; /** * irq_domain_alloc() - Allocate a new irq_domain data structure * @of_node: optional device-tree node of the interrupt controller * @revmap_type: type of reverse mapping to use * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * * Allocates and initialize and irq_domain structure. Caller is expected to * register allocated irq_domain with irq_domain_register(). Returns pointer * to IRQ domain, or NULL on failure. 
*/ static struct irq_domain *irq_domain_alloc(struct device_node *of_node, unsigned int revmap_type, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain; domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (WARN_ON(!domain)) return NULL; /* Fill structure */ domain->revmap_type = revmap_type; domain->ops = ops; domain->host_data = host_data; domain->of_node = of_node_get(of_node); return domain; } static void irq_domain_add(struct irq_domain *domain) { mutex_lock(&irq_domain_mutex); list_add(&domain->link, &irq_domain_list); mutex_unlock(&irq_domain_mutex); pr_debug("irq: Allocated domain of type %d @0x%p\n", domain->revmap_type, domain); } static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq; int size = domain->revmap_data.legacy.size; if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size)) return 0; return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq; } /** * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: total number of irqs in legacy mapping * @first_irq: first number of irq block assigned to the domain * @first_hwirq: first hwirq number to use for the translation. Should normally * be '0', but a positive integer can be used if the effective * hwirqs numbering does not begin at zero. * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * * Note: the map() callback will be called before this function returns * for all legacy interrupts except 0 (which is always the invalid irq for * a legacy controller). 
*/ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain; unsigned int i; domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data); if (!domain) return NULL; domain->revmap_data.legacy.first_irq = first_irq; domain->revmap_data.legacy.first_hwirq = first_hwirq; domain->revmap_data.legacy.size = size; mutex_lock(&irq_domain_mutex); /* Verify that all the irqs are available */ for (i = 0; i < size; i++) { int irq = first_irq + i; struct irq_data *irq_data = irq_get_irq_data(irq); if (WARN_ON(!irq_data || irq_data->domain)) { mutex_unlock(&irq_domain_mutex); of_node_put(domain->of_node); kfree(domain); return NULL; } } /* Claim all of the irqs before registering a legacy domain */ for (i = 0; i < size; i++) { struct irq_data *irq_data = irq_get_irq_data(first_irq + i); irq_data->hwirq = first_hwirq + i; irq_data->domain = domain; } mutex_unlock(&irq_domain_mutex); for (i = 0; i < size; i++) { int irq = first_irq + i; int hwirq = first_hwirq + i; /* IRQ0 gets ignored */ if (!irq) continue; /* Legacy flags are left to default at this point, * one can then use irq_create_mapping() to * explicitly change them */ ops->map(domain, irq, hwirq); /* Clear norequest flags */ irq_clear_status_flags(irq, IRQ_NOREQUEST); } irq_domain_add(domain); return domain; } /** * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. 
* @ops: map/unmap domain callbacks * @host_data: Controller private data pointer */ struct irq_domain *irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain; unsigned int *revmap; revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL); if (WARN_ON(!revmap)) return NULL; domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data); if (!domain) { kfree(revmap); return NULL; } domain->revmap_data.linear.size = size; domain->revmap_data.linear.revmap = revmap; irq_domain_add(domain); return domain; } struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_NOMAP, ops, host_data); if (domain) { domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0; irq_domain_add(domain); } return domain; } /** * irq_domain_add_tree() * @of_node: pointer to interrupt controller's device tree node. * @ops: map/unmap domain callbacks * * Note: The radix tree will be allocated later during boot automatically * (the reverse mapping will use the slow path until that happens). */ struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain *domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_TREE, ops, host_data); if (domain) { INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL); irq_domain_add(domain); } return domain; } /** * irq_find_host() - Locates a domain for a given device node * @node: device-tree node of the interrupt controller */ struct irq_domain *irq_find_host(struct device_node *node) { struct irq_domain *h, *found = NULL; int rc; /* We might want to match the legacy controller last since * it might potentially be set to match all interrupts in * the absence of a device node. This isn't a problem so far * yet though... 
*/ mutex_lock(&irq_domain_mutex); list_for_each_entry(h, &irq_domain_list, link) { if (h->ops->match) rc = h->ops->match(h, node); else rc = (h->of_node != NULL) && (h->of_node == node); if (rc) { found = h; break; } } mutex_unlock(&irq_domain_mutex); return found; } EXPORT_SYMBOL_GPL(irq_find_host); /** * irq_set_default_host() - Set a "default" irq domain * @domain: default domain pointer * * For convenience, it's possible to set a "default" domain that will be used * whenever NULL is passed to irq_create_mapping(). It makes life easier for * platforms that want to manipulate a few hard coded interrupt numbers that * aren't properly represented in the device-tree. */ void irq_set_default_host(struct irq_domain *domain) { pr_debug("irq: Default domain set to @0x%p\n", domain); irq_default_domain = domain; } static int irq_setup_virq(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { struct irq_data *irq_data = irq_get_irq_data(virq); irq_data->hwirq = hwirq; irq_data->domain = domain; if (domain->ops->map(domain, virq, hwirq)) { pr_debug("irq: -> mapping failed, freeing\n"); irq_data->domain = NULL; irq_data->hwirq = 0; return -1; } irq_clear_status_flags(virq, IRQ_NOREQUEST); return 0; } /** * irq_create_direct_mapping() - Allocate an irq for direct mapping * @domain: domain to allocate the irq for or NULL for default domain * * This routine is used for irq controllers which can choose the hardware * interrupt numbers they generate. In such a case it's simplest to use * the linux irq as the hardware interrupt number. 
*/ unsigned int irq_create_direct_mapping(struct irq_domain *domain) { unsigned int virq; if (domain == NULL) domain = irq_default_domain; BUG_ON(domain == NULL); WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP); virq = irq_alloc_desc_from(1, 0); if (!virq) { pr_debug("irq: create_direct virq allocation failed\n"); return 0; } if (virq >= domain->revmap_data.nomap.max_irq) { pr_err("ERROR: no free irqs available below %i maximum\n", domain->revmap_data.nomap.max_irq); irq_free_desc(virq); return 0; } pr_debug("irq: create_direct obtained virq %d\n", virq); if (irq_setup_virq(domain, virq, virq)) { irq_free_desc(virq); return 0; } return virq; } /** * irq_create_mapping() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain * @hwirq: hardware irq number in that domain space * * Only one mapping per hardware interrupt is permitted. Returns a linux * irq number. * If the sense/trigger is to be specified, set_irq_type() should be called * on the number returned from that call. 
*/ unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { unsigned int hint; int virq; pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); /* Look for default domain if nececssary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) { printk(KERN_WARNING "irq_create_mapping called for" " NULL domain, hwirq=%lx\n", hwirq); WARN_ON(1); return 0; } pr_debug("irq: -> using domain @%p\n", domain); /* Check if mapping already exists */ virq = irq_find_mapping(domain, hwirq); if (virq) { pr_debug("irq: -> existing mapping on virq %d\n", virq); return virq; } /* Get a virtual interrupt number */ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) return irq_domain_legacy_revmap(domain, hwirq); /* Allocate a virtual interrupt number */ hint = hwirq % nr_irqs; if (hint == 0) hint++; virq = irq_alloc_desc_from(hint, 0); if (virq <= 0) virq = irq_alloc_desc_from(1, 0); if (virq <= 0) { pr_debug("irq: -> virq allocation failed\n"); return 0; } if (irq_setup_virq(domain, virq, hwirq)) { if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY) irq_free_desc(virq); return 0; } pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n", hwirq, domain->of_node ? domain->of_node->full_name : "null", virq); return virq; } EXPORT_SYMBOL_GPL(irq_create_mapping); unsigned int irq_create_of_mapping(struct device_node *controller, const u32 *intspec, unsigned int intsize) { struct irq_domain *domain; irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; unsigned int virq; domain = controller ? irq_find_host(controller) : irq_default_domain; if (!domain) { #ifdef CONFIG_MIPS /* * Workaround to avoid breaking interrupt controller drivers * that don't yet register an irq_domain. This is temporary * code. ~~~gcl, Feb 24, 2012 * * Scheduled for removal in Linux v3.6. That should be enough * time. 
*/ if (intsize > 0) return intspec[0]; #endif printk(KERN_WARNING "irq: no irq domain found for %s !\n", controller->full_name); return 0; } /* If domain has no translation, then we assume interrupt line */ if (domain->ops->xlate == NULL) hwirq = intspec[0]; else { if (domain->ops->xlate(domain, controller, intspec, intsize, &hwirq, &type)) return 0; } /* Create mapping */ virq = irq_create_mapping(domain, hwirq); if (!virq) return virq; /* Set type if specified and different than the current one */ if (type != IRQ_TYPE_NONE && type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) irq_set_irq_type(virq, type); return virq; } EXPORT_SYMBOL_GPL(irq_create_of_mapping); /** * irq_dispose_mapping() - Unmap an interrupt * @virq: linux irq number of the interrupt to unmap */ void irq_dispose_mapping(unsigned int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_domain *domain; irq_hw_number_t hwirq; if (!virq || !irq_data) return; domain = irq_data->domain; if (WARN_ON(domain == NULL)) return; /* Never unmap legacy interrupts */ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) return; irq_set_status_flags(virq, IRQ_NOREQUEST); /* remove chip and handler */ irq_set_chip_and_handler(virq, NULL, NULL); /* Make sure it's completed */ synchronize_irq(virq); /* Tell the PIC about it */ if (domain->ops->unmap) domain->ops->unmap(domain, virq); smp_mb(); /* Clear reverse map */ hwirq = irq_data->hwirq; switch(domain->revmap_type) { case IRQ_DOMAIN_MAP_LINEAR: if (hwirq < domain->revmap_data.linear.size) domain->revmap_data.linear.revmap[hwirq] = 0; break; case IRQ_DOMAIN_MAP_TREE: mutex_lock(&revmap_trees_mutex); radix_tree_delete(&domain->revmap_data.tree, hwirq); mutex_unlock(&revmap_trees_mutex); break; } irq_free_desc(virq); } EXPORT_SYMBOL_GPL(irq_dispose_mapping); /** * irq_find_mapping() - Find a linux irq from an hw irq number. 
* @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * * This is a slow path, for use by generic code. It's expected that an * irq controller implementation directly calls the appropriate low level * mapping function. */ unsigned int irq_find_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { unsigned int i; unsigned int hint = hwirq % nr_irqs; /* Look for default domain if nececssary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) return 0; /* legacy -> bail early */ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) return irq_domain_legacy_revmap(domain, hwirq); /* Slow path does a linear search of the map */ if (hint == 0) hint = 1; i = hint; do { struct irq_data *data = irq_get_irq_data(i); if (data && (data->domain == domain) && (data->hwirq == hwirq)) return i; i++; if (i >= nr_irqs) i = 1; } while(i != hint); return 0; } EXPORT_SYMBOL_GPL(irq_find_mapping); /** * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * * This is a fast path, for use by irq controller code that uses radix tree * revmaps */ unsigned int irq_radix_revmap_lookup(struct irq_domain *domain, irq_hw_number_t hwirq) { struct irq_data *irq_data; if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE)) return irq_find_mapping(domain, hwirq); /* * Freeing an irq can delete nodes along the path to * do the lookup via call_rcu. */ rcu_read_lock(); irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq); rcu_read_unlock(); /* * If found in radix tree, then fine. * Else fallback to linear lookup - this should not happen in practice * as it means that we failed to insert the node in the radix tree. */ return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq); } /** * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping. 
* @domain: domain owning this hardware interrupt * @virq: linux irq number * @hwirq: hardware irq number in that domain space * * This is for use by irq controllers that use a radix tree reverse * mapping for fast lookup. */ void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { struct irq_data *irq_data = irq_get_irq_data(virq); if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE)) return; if (virq) { mutex_lock(&revmap_trees_mutex); radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data); mutex_unlock(&revmap_trees_mutex); } } /** * irq_linear_revmap() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * * This is a fast path, for use by irq controller code that uses linear * revmaps. It does fallback to the slow path if the revmap doesn't exist * yet and will create the revmap entry with appropriate locking */ unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { unsigned int *revmap; if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR)) return irq_find_mapping(domain, hwirq); /* Check revmap bounds */ if (unlikely(hwirq >= domain->revmap_data.linear.size)) return irq_find_mapping(domain, hwirq); /* Check if revmap was allocated */ revmap = domain->revmap_data.linear.revmap; if (unlikely(revmap == NULL)) return irq_find_mapping(domain, hwirq); /* Fill up revmap with slow path if no mapping found */ if (unlikely(!revmap[hwirq])) revmap[hwirq] = irq_find_mapping(domain, hwirq); return revmap[hwirq]; } #ifdef CONFIG_IRQ_DOMAIN_DEBUG static int virq_debug_show(struct seq_file *m, void *private) { unsigned long flags; struct irq_desc *desc; const char *p; static const char none[] = "none"; void *data; int i; seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq", "chip name", (int)(2 * sizeof(void *) + 2), "chip data", "domain name"); for (i = 1; i < nr_irqs; i++) { desc = 
irq_to_desc(i); if (!desc) continue; raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && desc->action->handler) { struct irq_chip *chip; seq_printf(m, "%5d ", i); seq_printf(m, "0x%05lx ", desc->irq_data.hwirq); chip = irq_desc_get_chip(desc); if (chip && chip->name) p = chip->name; else p = none; seq_printf(m, "%-15s ", p); data = irq_desc_get_chip_data(desc); seq_printf(m, data ? "0x%p " : " %p ", data); if (desc->irq_data.domain && desc->irq_data.domain->of_node) p = desc->irq_data.domain->of_node->full_name; else p = none; seq_printf(m, "%s\n", p); } raw_spin_unlock_irqrestore(&desc->lock, flags); } return 0; } static int virq_debug_open(struct inode *inode, struct file *file) { return single_open(file, virq_debug_show, inode->i_private); } static const struct file_operations virq_debug_fops = { .open = virq_debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init irq_debugfs_init(void) { if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL, NULL, &virq_debug_fops) == NULL) return -ENOMEM; return 0; } __initcall(irq_debugfs_init); #endif /* CONFIG_IRQ_DOMAIN_DEBUG */ int irq_domain_simple_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { return 0; } /** * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings * * Device Tree IRQ specifier translation function which works with one cell * bindings where the cell value maps directly to the hwirq number. 
*/ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell); /** * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings * * Device Tree IRQ specifier translation function which works with two cell * bindings where the cell values map directly to the hwirq number * and linux irq flags. */ int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 2)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); /** * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings * * Device Tree IRQ specifier translation function which works with either one * or two cell bindings where the cell values map directly to the hwirq number * and linux irq flags. * * Note: don't use this function unless your interrupt controller explicitly * supports both one and two cell bindings. For the majority of controllers * the _onecell() or _twocell() variants above should be used. */ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = (intsize > 1) ? 
intspec[1] : IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); const struct irq_domain_ops irq_domain_simple_ops = { .map = irq_domain_simple_map, .xlate = irq_domain_xlate_onetwocell, }; EXPORT_SYMBOL_GPL(irq_domain_simple_ops); #ifdef CONFIG_OF_IRQ void irq_domain_generate_simple(const struct of_device_id *match, u64 phys_base, unsigned int irq_start) { struct device_node *node; pr_debug("looking for phys_base=%llx, irq_start=%i\n", (unsigned long long) phys_base, (int) irq_start); node = of_find_matching_node_by_address(NULL, match, phys_base); if (node) irq_domain_add_legacy(node, 32, irq_start, 0, &irq_domain_simple_ops, NULL); } EXPORT_SYMBOL_GPL(irq_domain_generate_simple); #endif
gpl-2.0
robcore/machinex_kernelv2
drivers/xen/xenbus/xenbus_xs.c
4944
20764
/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
 */
#include <linux/unistd.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include "xenbus_comms.h"

/*
 * A message received from xenstored and queued for later consumption:
 * either a reply to an outstanding request or an asynchronous watch event.
 */
struct xs_stored_msg {
	struct list_head list;
	struct xsd_sockmsg hdr;
	union {
		/* Queued replies. */
		struct {
			char *body;
		} reply;
		/* Queued watch events. */
		struct {
			struct xenbus_watch *handle;
			char **vec;
			unsigned int vec_size;
		} watch;
	} u;
};

/* Global connection state towards xenstored. */
struct xs_handle {
	/* A list of replies. Currently only one will ever be outstanding. */
	struct list_head reply_list;
	spinlock_t reply_lock;
	wait_queue_head_t reply_waitq;

	/*
	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
	 * response_mutex is never taken simultaneously with the other three.
	 *
	 * transaction_mutex must be held before incrementing
	 * transaction_count. The mutex is held when a suspend is in
	 * progress to prevent new transactions starting.
	 *
	 * When decrementing transaction_count to zero the wait queue
	 * should be woken up, the suspend code waits for count to
	 * reach zero.
	 */

	/* One request at a time. */
	struct mutex request_mutex;

	/* Protect xenbus reader thread against save/restore. */
	struct mutex response_mutex;

	/* Protect transactions against save/restore. */
	struct mutex transaction_mutex;
	atomic_t transaction_count;
	wait_queue_head_t transaction_wq;

	/* Protect watch (de)register against save/restore. */
	struct rw_semaphore watch_mutex;
};

static struct xs_handle xs_state;

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on watch_events list). When it
 * wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);

/*
 * Map an error string returned by xenstored onto a positive errno value
 * by linear search of the xsd_errors table; unknown strings map to EINVAL.
 */
static int get_error(const char *errorstring)
{
	unsigned int i;

	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}

/*
 * Dequeue the next reply from xenstored, sleeping until one arrives.
 * Returns the kmalloc'ed reply body (caller frees); *type and optionally
 * *len are filled in from the message header.
 */
static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
{
	struct xs_stored_msg *msg;
	char *body;

	spin_lock(&xs_state.reply_lock);

	while (list_empty(&xs_state.reply_list)) {
		spin_unlock(&xs_state.reply_lock);
		/* XXX FIXME: Avoid synchronous wait for response here. */
		wait_event(xs_state.reply_waitq,
			   !list_empty(&xs_state.reply_list));
		spin_lock(&xs_state.reply_lock);
	}

	msg = list_entry(xs_state.reply_list.next,
			 struct xs_stored_msg, list);
	list_del(&msg->list);

	spin_unlock(&xs_state.reply_lock);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->u.reply.body;

	kfree(msg);

	return body;
}

/*
 * Account a transaction start. transaction_mutex is taken briefly so a
 * suspend in progress (which holds it) blocks new transactions.
 */
static void transaction_start(void)
{
	mutex_lock(&xs_state.transaction_mutex);
	atomic_inc(&xs_state.transaction_count);
	mutex_unlock(&xs_state.transaction_mutex);
}

/* Account a transaction end; wake the suspend path when none remain. */
static void transaction_end(void)
{
	if (atomic_dec_and_test(&xs_state.transaction_count))
		wake_up(&xs_state.transaction_wq);
}

/*
 * Block new transactions and wait for outstanding ones to drain.
 * Leaves transaction_mutex held; released by transaction_resume().
 */
static void transaction_suspend(void)
{
	mutex_lock(&xs_state.transaction_mutex);
	wait_event(xs_state.transaction_wq,
		   atomic_read(&xs_state.transaction_count) == 0);
}

static void transaction_resume(void)
{
	mutex_unlock(&xs_state.transaction_mutex);
}

/*
 * Pass a raw, pre-built xsd_sockmsg (header + payload) to xenstored on
 * behalf of a userspace client and return the reply body (or ERR_PTR).
 * Transaction accounting mirrors what xenstored will have done: a failed
 * XS_TRANSACTION_START or any XS_TRANSACTION_END ends the accounting.
 */
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
	void *ret;
	struct xsd_sockmsg req_msg = *msg;
	int err;

	if (req_msg.type == XS_TRANSACTION_START)
		transaction_start();

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(msg, sizeof(*msg) + msg->len);
	if (err) {
		msg->type = XS_ERROR;
		ret = ERR_PTR(err);
	} else
		ret = read_reply(&msg->type, &msg->len);

	mutex_unlock(&xs_state.request_mutex);

	if ((msg->type == XS_TRANSACTION_END) ||
	    ((req_msg.type == XS_TRANSACTION_START) &&
	     (msg->type == XS_ERROR)))
		transaction_end();

	return ret;
}
EXPORT_SYMBOL(xenbus_dev_request_and_reply);

/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	/* Total payload length is the sum of all iovec segments. */
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(&msg, sizeof(msg));
	if (err) {
		mutex_unlock(&xs_state.request_mutex);
		return ERR_PTR(err);
	}

	for (i = 0; i < num_vecs; i++) {
		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
		if (err) {
			mutex_unlock(&xs_state.request_mutex);
			return ERR_PTR(err);
		}
	}

	ret = read_reply(&msg.type, len);

	mutex_unlock(&xs_state.request_mutex);

	if (IS_ERR(ret))
		return ret;

	/* xenstored reports failures as an XS_ERROR reply carrying a string. */
	if (msg.type == XS_ERROR) {
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		if (printk_ratelimit())
			printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says. */
static int xs_error(char *reply)
{
	if (IS_ERR(reply))
		return PTR_ERR(reply);
	kfree(reply);
	return 0;
}

/* Count the NUL-terminated strings packed into a buffer of 'len' bytes. */
static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;

	return num;
}

/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
static char *join(const char *dir, const char *name)
{
	char *buffer;

	if (strlen(name) == 0)
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
	else
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s",
				   dir, name);
	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}

/*
 * Turn a packed NUL-separated string buffer into a single allocation
 * holding a char*[num] vector followed by the string data, then free the
 * original buffer. Caller frees the returned vector with one kfree().
 */
static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;

	return ret;
}

/* List the entries of a directory node; *num receives the entry count. */
char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(struct xenbus_transaction t,
		  const char *dir, const char *node)
{
	char **d;
	int dir_n;

	d = xenbus_directory(t, dir, node, &dir_n);
	if (IS_ERR(d))
		return 0;
	kfree(d);
	return 1;
}
EXPORT_SYMBOL_GPL(xenbus_exists);

/* Get the value of a single file.
 * Returns a kmalloced value: call free() on it after use.
 * len indicates length in bytes.
 */
void *xenbus_read(struct xenbus_transaction t,
		  const char *dir, const char *node, unsigned int *len)
{
	char *path;
	void *ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return (void *)path;

	ret = xs_single(t, XS_READ, path, len);
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_read);

/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *string)
{
	const char *path;
	struct kvec iovec[2];
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Wire format: NUL-terminated path, then value (no terminator). */
	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;
	iovec[1].iov_base = (void *)string;
	iovec[1].iov_len = strlen(string);

	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(struct xenbus_transaction t,
		 const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_RM, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 */
int xenbus_transaction_start(struct xenbus_transaction *t)
{
	char *id_str;

	transaction_start();

	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
	if (IS_ERR(id_str)) {
		transaction_end();
		return PTR_ERR(id_str);
	}

	t->id = simple_strtoul(id_str, NULL, 0);
	kfree(id_str);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_start);

/* End a transaction.
 * If abort is non-zero, the transaction is discarded instead of committed.
 */
int xenbus_transaction_end(struct xenbus_transaction t, int abort)
{
	char abortstr[2];
	int err;

	/* Protocol quirk: "F" means abort (Fail), "T" means commit. */
	if (abort)
		strcpy(abortstr, "F");
	else
		strcpy(abortstr, "T");

	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));

	transaction_end();

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);

/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *val;

	val = xenbus_read(t, dir, node, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	va_start(ap, fmt);
	ret = vsscanf(val, fmt, ap);
	va_end(ap);
	kfree(val);
	/* Distinctive errno. */
	if (ret == 0)
		return -ERANGE;
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_scanf);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *buf;

	va_start(ap, fmt);
	buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
	va_end(ap);

	if (!buf)
		return -ENOMEM;

	ret = xenbus_write(t, dir, node, buf);

	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
{
	va_list ap;
	const char *name;
	int ret = 0;

	va_start(ap, dir);
	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
		const char *fmt = va_arg(ap, char *);
		void *result = va_arg(ap, void *);
		char *p;

		p = xenbus_read(t, dir, name, NULL);
		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			break;
		}
		/* NULL fmt means "hand the raw string to the caller". */
		if (fmt) {
			if (sscanf(p, fmt, result) == 0)
				ret = -EINVAL;
			kfree(p);
		} else
			*(char **)result = p;
	}
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_gather);

/* Ask xenstored to fire 'token' whenever 'path' (or a child) changes. */
static int xs_watch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static int xs_unwatch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (char *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (char *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

/*
 * Look up a registered watch by its token, which is the watch pointer
 * printed in hex (see register_xenbus_watch). Caller holds watches_lock.
 */
static struct xenbus_watch *find_watch(const char *token)
{
	struct xenbus_watch *i, *cmp;

	cmp = (void *)simple_strtoul(token, NULL, 16);

	list_for_each_entry(i, &watches, list)
		if (i == cmp)
			return i;

	return NULL;
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(find_watch(token));
	list_add(&watch->list, &watches);
	spin_unlock(&watches_lock);

	err = xs_watch(watch->node, token);

	/* Roll back the local registration if xenstored refused it. */
	if (err) {
		spin_lock(&watches_lock);
		list_del(&watch->list);
		spin_unlock(&watches_lock);
	}

	up_read(&xs_state.watch_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(register_xenbus_watch);

/* Unregister a watch and discard any of its events still queued. */
void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_stored_msg *msg, *tmp;
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err);

	up_read(&xs_state.watch_mutex);

	/* Make sure there are no callbacks running currently (unless its us) */
	if (current->pid != xenwatch_pid)
		mutex_lock(&xenwatch_mutex);

	/* Cancel pending watch events. */
	spin_lock(&watch_events_lock);
	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
		if (msg->u.watch.handle != watch)
			continue;
		list_del(&msg->list);
		kfree(msg->u.watch.vec);
		kfree(msg);
	}
	spin_unlock(&watch_events_lock);

	if (current->pid != xenwatch_pid)
		mutex_unlock(&xenwatch_mutex);
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);

/*
 * Quiesce all xenstore traffic for save/restore. Lock acquisition order
 * here must match the ordering comment in struct xs_handle.
 */
void xs_suspend(void)
{
	transaction_suspend();
	down_write(&xs_state.watch_mutex);
	mutex_lock(&xs_state.request_mutex);
	mutex_lock(&xs_state.response_mutex);
}

/* Reconnect to (a possibly new) xenstored and re-register all watches. */
void xs_resume(void)
{
	struct xenbus_watch *watch;
	char token[sizeof(watch) * 2 + 1];

	xb_init_comms();

	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	transaction_resume();

	/* No need for watches_lock: the watch_mutex is sufficient. */
	list_for_each_entry(watch, &watches, list) {
		sprintf(token, "%lX", (long)watch);
		xs_watch(watch->node, token);
	}

	up_write(&xs_state.watch_mutex);
}

/* Undo xs_suspend() when the suspend was aborted before resuming. */
void xs_suspend_cancel(void)
{
	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.watch_mutex);
	mutex_unlock(&xs_state.transaction_mutex);
}

/*
 * Kernel thread that dequeues watch events one at a time and invokes the
 * owning watch's callback under xenwatch_mutex.
 */
static int xenwatch_thread(void *unused)
{
	struct list_head *ent;
	struct xs_stored_msg *msg;

	for (;;) {
		wait_event_interruptible(watch_events_waitq,
					 !list_empty(&watch_events));

		if (kthread_should_stop())
			break;

		mutex_lock(&xenwatch_mutex);

		spin_lock(&watch_events_lock);
		ent = watch_events.next;
		if (ent != &watch_events)
			list_del(ent);
		spin_unlock(&watch_events_lock);

		if (ent != &watch_events) {
			msg = list_entry(ent, struct xs_stored_msg, list);
			msg->u.watch.handle->callback(
				msg->u.watch.handle,
				(const char **)msg->u.watch.vec,
				msg->u.watch.vec_size);
			kfree(msg->u.watch.vec);
			kfree(msg);
		}

		mutex_unlock(&xenwatch_mutex);
	}

	return 0;
}

/*
 * Read one message from xenstored and dispatch it: watch events go to the
 * watch_events queue (if the watch is still registered), replies to the
 * reply_list. Returns 0 or a negative errno.
 */
static int process_msg(void)
{
	struct xs_stored_msg *msg;
	char *body;
	int err;

	/*
	 * We must disallow save/restore while reading a xenstore message.
	 * A partial read across s/r leaves us out of sync with xenstored.
	 */
	for (;;) {
		err = xb_wait_for_data_to_read();
		if (err)
			return err;
		mutex_lock(&xs_state.response_mutex);
		if (xb_data_to_read())
			break;
		/* We raced with save/restore: pending data 'disappeared'. */
		mutex_unlock(&xs_state.response_mutex);
	}

	msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
	if (msg == NULL) {
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(&msg->hdr, sizeof(msg->hdr));
	if (err) {
		kfree(msg);
		goto out;
	}

	/* Reject oversized payloads before trusting hdr.len for kmalloc. */
	if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
		kfree(msg);
		err = -EINVAL;
		goto out;
	}

	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
	if (body == NULL) {
		kfree(msg);
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(body, msg->hdr.len);
	if (err) {
		kfree(body);
		kfree(msg);
		goto out;
	}
	body[msg->hdr.len] = '\0';

	if (msg->hdr.type == XS_WATCH_EVENT) {
		/* split() consumes 'body' whatever the outcome. */
		msg->u.watch.vec = split(body, msg->hdr.len,
					 &msg->u.watch.vec_size);
		if (IS_ERR(msg->u.watch.vec)) {
			err = PTR_ERR(msg->u.watch.vec);
			kfree(msg);
			goto out;
		}

		spin_lock(&watches_lock);
		msg->u.watch.handle = find_watch(
			msg->u.watch.vec[XS_WATCH_TOKEN]);
		if (msg->u.watch.handle != NULL) {
			spin_lock(&watch_events_lock);
			list_add_tail(&msg->list, &watch_events);
			wake_up(&watch_events_waitq);
			spin_unlock(&watch_events_lock);
		} else {
			/* Watch was unregistered meanwhile: drop the event. */
			kfree(msg->u.watch.vec);
			kfree(msg);
		}
		spin_unlock(&watches_lock);
	} else {
		msg->u.reply.body = body;
		spin_lock(&xs_state.reply_lock);
		list_add_tail(&msg->list, &xs_state.reply_list);
		spin_unlock(&xs_state.reply_lock);
		wake_up(&xs_state.reply_waitq);
	}

out:
	mutex_unlock(&xs_state.response_mutex);
	return err;
}

/* Kernel thread that drains messages from xenstored forever. */
static int xenbus_thread(void *unused)
{
	int err;

	for (;;) {
		err = process_msg();
		if (err)
			printk(KERN_WARNING "XENBUS error %d while reading "
			       "message\n", err);
		if (kthread_should_stop())
			break;
	}

	return 0;
}

/*
 * Initialise xenstore state, the comms rings and the two service threads.
 * Returns 0 on success or a negative errno.
 */
int xs_init(void)
{
	int err;
	struct task_struct *task;

	INIT_LIST_HEAD(&xs_state.reply_list);
	spin_lock_init(&xs_state.reply_lock);
	init_waitqueue_head(&xs_state.reply_waitq);
	mutex_init(&xs_state.request_mutex);
	mutex_init(&xs_state.response_mutex);
	mutex_init(&xs_state.transaction_mutex);
	init_rwsem(&xs_state.watch_mutex);
	atomic_set(&xs_state.transaction_count, 0);
	init_waitqueue_head(&xs_state.transaction_wq);

	/* Initialize the shared memory rings to talk to xenstored */
	err = xb_init_comms();
	if (err)
		return err;

	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
	if (IS_ERR(task))
		return PTR_ERR(task);
	xenwatch_pid = task->pid;

	task = kthread_run(xenbus_thread, NULL, "xenbus");
	if (IS_ERR(task))
		return PTR_ERR(task);

	return 0;
}
gpl-2.0
civato/kernel_p900
drivers/leds/leds-bd2802.c
4944
21273
/*
 * leds-bd2802.c - RGB LED Driver
 *
 * Copyright (C) 2009 Samsung Electronics
 * Kim Kyuwon <q1.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
 *
 */

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/leds.h>
#include <linux/leds-bd2802.h>
#include <linux/slab.h>
#include <linux/pm.h>

/* RGB LED Control register value: enable bits for the two LED terminals. */
#define LED_CTL(rgb2en, rgb1en) ((rgb2en) << 4 | ((rgb1en) << 0))

/* Per-LED and per-colour strides within the chip's register map. */
#define BD2802_LED_OFFSET		0xa
#define BD2802_COLOR_OFFSET		0x3

#define BD2802_REG_CLKSETUP		0x00
#define BD2802_REG_CONTROL		0x01
#define BD2802_REG_HOURSETUP		0x02
#define BD2802_REG_CURRENT1SETUP	0x03
#define BD2802_REG_CURRENT2SETUP	0x04
#define BD2802_REG_WAVEPATTERN		0x05

#define BD2802_CURRENT_032		0x10 /* 3.2mA */
#define BD2802_CURRENT_000		0x00 /* 0.0mA */

#define BD2802_PATTERN_FULL		0x07
#define BD2802_PATTERN_HALF		0x03

enum led_ids {
	LED1,
	LED2,
	LED_NUM,
};

enum led_colors {
	RED,
	GREEN,
	BLUE,
};

enum led_bits {
	BD2802_OFF,
	BD2802_BLINK,
	BD2802_ON,
};

/*
 * State '0' : 'off'
 * State '1' : 'blink'
 * State '2' : 'on'.
 */
struct led_state {
	unsigned r:2;
	unsigned g:2;
	unsigned b:2;
};

/* Driver-private state, one per BD2802GU chip. */
struct bd2802_led {
	struct bd2802_led_platform_data	*pdata;
	struct i2c_client		*client;
	struct rw_semaphore		rwsem;	/* serialises sysfs access */
	struct work_struct		work;	/* deferred register writes */
	struct led_state		led[2];	/* cached per-LED r/g/b state */

	/*
	 * Making led_classdev as array is not recommended, because array
	 * members prevent using 'container_of' macro. So repetitive works
	 * are needed.
	 */
	struct led_classdev		cdev_led1r;
	struct led_classdev		cdev_led1g;
	struct led_classdev		cdev_led1b;
	struct led_classdev		cdev_led2r;
	struct led_classdev		cdev_led2g;
	struct led_classdev		cdev_led2b;

	/*
	 * Advanced Configuration Function(ADF) mode:
	 * In ADF mode, user can set registers of BD2802GU directly,
	 * therefore BD2802GU doesn't enter reset state.
	 */
	int				adf_on;

	/* Target of the next deferred work item (set by the class callbacks). */
	enum led_ids			led_id;
	enum led_colors			color;
	enum led_bits			state;

	/* General attributes of RGB LEDs */
	int				wave_pattern;
	int				rgb_current;
};

/*--------------------------------------------------------------*/
/*	BD2802GU helper functions					*/
/*--------------------------------------------------------------*/

/* 1 if the given colour channel of LED 'id' is off, else 0 (or -EINVAL). */
static inline int bd2802_is_rgb_off(struct bd2802_led *led, enum led_ids id,
							enum led_colors color)
{
	switch (color) {
	case RED:
		return !led->led[id].r;
	case GREEN:
		return !led->led[id].g;
	case BLUE:
		return !led->led[id].b;
	default:
		dev_err(&led->client->dev, "%s: Invalid color\n", __func__);
		return -EINVAL;
	}
}

/* 1 if all three channels of LED 'id' are off. */
static inline int bd2802_is_led_off(struct bd2802_led *led, enum led_ids id)
{
	if (led->led[id].r || led->led[id].g || led->led[id].b)
		return 0;

	return 1;
}

/* 1 if every channel of every LED is off. */
static inline int bd2802_is_all_off(struct bd2802_led *led)
{
	int i;

	for (i = 0; i < LED_NUM; i++)
		if (!bd2802_is_led_off(led, i))
			return 0;

	return 1;
}

static inline u8 bd2802_get_base_offset(enum led_ids id, enum led_colors color)
{
	return id * BD2802_LED_OFFSET + color * BD2802_COLOR_OFFSET;
}

/* Register address for (id, color) channel at the given per-channel offset. */
static inline u8 bd2802_get_reg_addr(enum led_ids id, enum led_colors color,
								u8 reg_offset)
{
	return reg_offset + bd2802_get_base_offset(id, color);
}


/*--------------------------------------------------------------*/
/*	BD2802GU core functions					*/
/*--------------------------------------------------------------*/

/* SMBus byte write with error logging; returns 0 on success or -err. */
static int bd2802_write_byte(struct i2c_client *client, u8 reg, u8 val)
{
	int ret = i2c_smbus_write_byte_data(client, reg, val);
	if (ret >= 0)
		return 0;

	dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
						__func__, reg, val, ret);

	return ret;
}

/*
 * Record the new channel state in led->led[] and, when a channel was just
 * turned off, either assert the chip reset (everything off, not in ADF
 * mode) or disable only this LED terminal in the control register.
 */
static void bd2802_update_state(struct bd2802_led *led, enum led_ids id,
				enum led_colors color, enum led_bits led_bit)
{
	int i;
	u8 value;

	for (i = 0; i < LED_NUM; i++) {
		if (i == id) {
			switch (color) {
			case RED:
				led->led[i].r = led_bit;
				break;
			case GREEN:
				led->led[i].g = led_bit;
				break;
			case BLUE:
				led->led[i].b = led_bit;
				break;
			default:
				dev_err(&led->client->dev,
					"%s: Invalid color\n", __func__);
				return;
			}
		}
	}

	if (led_bit == BD2802_BLINK || led_bit == BD2802_ON)
		return;

	if (!bd2802_is_led_off(led, id))
		return;

	if (bd2802_is_all_off(led) && !led->adf_on) {
		gpio_set_value(led->pdata->reset_gpio, 0);
		return;
	}

	/*
	 * In this case, other led is turned on, and current led is turned
	 * off. So set RGB LED Control register to stop the current RGB LED
	 */
	value = (id == LED1) ? LED_CTL(1, 0) : LED_CTL(0, 1);
	bd2802_write_byte(led->client, BD2802_REG_CONTROL, value);
}

/* Program the platform-provided blink timing into both LED banks. */
static void bd2802_configure(struct bd2802_led *led)
{
	struct bd2802_led_platform_data *pdata = led->pdata;
	u8 reg;

	reg = bd2802_get_reg_addr(LED1, RED, BD2802_REG_HOURSETUP);
	bd2802_write_byte(led->client, reg, pdata->rgb_time);

	reg = bd2802_get_reg_addr(LED2, RED, BD2802_REG_HOURSETUP);
	bd2802_write_byte(led->client, reg, pdata->rgb_time);
}

/* Release the chip from reset and re-apply the base configuration. */
static void bd2802_reset_cancel(struct bd2802_led *led)
{
	gpio_set_value(led->pdata->reset_gpio, 1);
	udelay(100);
	bd2802_configure(led);
}

/* Enable LED terminal 'id' while preserving the other terminal's state. */
static void bd2802_enable(struct bd2802_led *led, enum led_ids id)
{
	enum led_ids other_led = (id == LED1) ? LED2 : LED1;
	u8 value, other_led_on;

	other_led_on = !bd2802_is_led_off(led, other_led);
	if (id == LED1)
		value = LED_CTL(other_led_on, 1);
	else
		value = LED_CTL(1, other_led_on);

	bd2802_write_byte(led->client, BD2802_REG_CONTROL, value);
}

/* Drive one channel steadily on (CURRENT1 = rgb_current, full wave). */
static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
							enum led_colors color)
{
	u8 reg;

	if (bd2802_is_all_off(led) && !led->adf_on)
		bd2802_reset_cancel(led);

	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
	bd2802_write_byte(led->client, reg, led->rgb_current);
	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
	bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
	reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
	bd2802_write_byte(led->client, reg, BD2802_PATTERN_FULL);

	bd2802_enable(led, id);
	bd2802_update_state(led, id, color, BD2802_ON);
}

/* Drive one channel blinking (CURRENT2 = rgb_current, user wave pattern). */
static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
							enum led_colors color)
{
	u8 reg;

	if (bd2802_is_all_off(led) && !led->adf_on)
		bd2802_reset_cancel(led);

	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
	bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
	bd2802_write_byte(led->client, reg, led->rgb_current);
	reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
	bd2802_write_byte(led->client, reg, led->wave_pattern);

	bd2802_enable(led, id);
	bd2802_update_state(led, id, color, BD2802_BLINK);
}

/* Dispatch an 'on' or 'blink' request ('off' is rejected here). */
static void bd2802_turn_on(struct bd2802_led *led, enum led_ids id,
				enum led_colors color, enum led_bits led_bit)
{
	if (led_bit == BD2802_OFF) {
		dev_err(&led->client->dev,
					"Only 'blink' and 'on' are allowed\n");
		return;
	}

	if (led_bit == BD2802_BLINK)
		bd2802_set_blink(led, id, color);
	else
		bd2802_set_on(led, id, color);
}

/* Turn one channel fully off (both current registers to zero). */
static void bd2802_turn_off(struct bd2802_led *led, enum led_ids id,
							enum led_colors color)
{
	u8 reg;

	if (bd2802_is_rgb_off(led, id, color))
		return;

	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
	bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
	reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
	bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);

	bd2802_update_state(led, id, color, BD2802_OFF);
}

/*
 * Generate a write-only sysfs attribute that pokes a raw chip register
 * (ADF mode). Input is parsed as hex.
 */
#define BD2802_SET_REGISTER(reg_addr, reg_name)				\
static ssize_t bd2802_store_reg##reg_addr(struct device *dev,		\
	struct device_attribute *attr, const char *buf, size_t count)	\
{									\
	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
	unsigned long val;						\
	int ret;							\
	if (!count)							\
		return -EINVAL;						\
	ret = strict_strtoul(buf, 16, &val);				\
	if (ret)							\
		return ret;						\
	down_write(&led->rwsem);					\
	bd2802_write_byte(led->client, reg_addr, (u8) val);		\
	up_write(&led->rwsem);						\
	return count;							\
}									\
static struct device_attribute bd2802_reg##reg_addr##_attr = {		\
	.attr = {.name = reg_name, .mode = 0644},			\
	.store = bd2802_store_reg##reg_addr,				\
};

BD2802_SET_REGISTER(0x00, "0x00");
BD2802_SET_REGISTER(0x01, "0x01");
BD2802_SET_REGISTER(0x02, "0x02");
BD2802_SET_REGISTER(0x03, "0x03");
BD2802_SET_REGISTER(0x04, "0x04");
BD2802_SET_REGISTER(0x05, "0x05");
BD2802_SET_REGISTER(0x06, "0x06");
BD2802_SET_REGISTER(0x07, "0x07");
BD2802_SET_REGISTER(0x08, "0x08");
BD2802_SET_REGISTER(0x09, "0x09");
BD2802_SET_REGISTER(0x0a, "0x0a");
BD2802_SET_REGISTER(0x0b, "0x0b");
BD2802_SET_REGISTER(0x0c, "0x0c");
BD2802_SET_REGISTER(0x0d, "0x0d");
BD2802_SET_REGISTER(0x0e, "0x0e");
BD2802_SET_REGISTER(0x0f, "0x0f");
BD2802_SET_REGISTER(0x10, "0x10");
BD2802_SET_REGISTER(0x11, "0x11");
BD2802_SET_REGISTER(0x12, "0x12");
BD2802_SET_REGISTER(0x13, "0x13");
BD2802_SET_REGISTER(0x14, "0x14");
BD2802_SET_REGISTER(0x15, "0x15");

static struct device_attribute *bd2802_addr_attributes[] = {
	&bd2802_reg0x00_attr,
	&bd2802_reg0x01_attr,
	&bd2802_reg0x02_attr,
	&bd2802_reg0x03_attr,
	&bd2802_reg0x04_attr,
	&bd2802_reg0x05_attr,
	&bd2802_reg0x06_attr,
	&bd2802_reg0x07_attr,
	&bd2802_reg0x08_attr,
	&bd2802_reg0x09_attr,
	&bd2802_reg0x0a_attr,
	&bd2802_reg0x0b_attr,
	&bd2802_reg0x0c_attr,
	&bd2802_reg0x0d_attr,
	&bd2802_reg0x0e_attr,
	&bd2802_reg0x0f_attr,
	&bd2802_reg0x10_attr,
	&bd2802_reg0x11_attr,
	&bd2802_reg0x12_attr,
	&bd2802_reg0x13_attr,
	&bd2802_reg0x14_attr,
	&bd2802_reg0x15_attr,
};

/* Enter ADF mode: expose raw register files and keep the chip out of reset. */
static void bd2802_enable_adv_conf(struct bd2802_led *led)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(bd2802_addr_attributes); i++) {
		ret = device_create_file(&led->client->dev,
						bd2802_addr_attributes[i]);
		if (ret) {
			dev_err(&led->client->dev, "failed: sysfs file %s\n",
					bd2802_addr_attributes[i]->attr.name);
			goto failed_remove_files;
		}
	}

	if (bd2802_is_all_off(led))
		bd2802_reset_cancel(led);

	led->adf_on = 1;

	return;

failed_remove_files:
	for (i--; i >= 0; i--)
		device_remove_file(&led->client->dev,
						bd2802_addr_attributes[i]);
}

/* Leave ADF mode: remove the raw register files, reset if nothing is lit. */
static void bd2802_disable_adv_conf(struct bd2802_led *led)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bd2802_addr_attributes); i++)
		device_remove_file(&led->client->dev,
						bd2802_addr_attributes[i]);

	if (bd2802_is_all_off(led))
		gpio_set_value(led->pdata->reset_gpio, 0);

	led->adf_on = 0;
}

static ssize_t bd2802_show_adv_conf(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));
	ssize_t ret;

	down_read(&led->rwsem);
	if (led->adf_on)
		ret = sprintf(buf, "on\n");
	else
		ret = sprintf(buf, "off\n");
	up_read(&led->rwsem);

	return ret;
}

static ssize_t bd2802_store_adv_conf(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));

	if (!count)
		return -EINVAL;

	down_write(&led->rwsem);
	if (!led->adf_on && !strncmp(buf, "on", 2))
		bd2802_enable_adv_conf(led);
	else if (led->adf_on && !strncmp(buf, "off", 3))
		bd2802_disable_adv_conf(led);
	up_write(&led->rwsem);

	return count;
}

static struct device_attribute bd2802_adv_conf_attr = {
	.attr = {
		.name = "advanced_configuration",
		.mode = 0644,
	},
	.show = bd2802_show_adv_conf,
	.store = bd2802_store_adv_conf,
};

/* Generate show/store for an int field of struct bd2802_led (hex input). */
#define BD2802_CONTROL_ATTR(attr_name, name_str)			\
static ssize_t bd2802_show_##attr_name(struct device *dev,		\
	struct device_attribute *attr, char *buf)			\
{									\
	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
	ssize_t ret;							\
	down_read(&led->rwsem);						\
	ret = sprintf(buf, "0x%02x\n", led->attr_name);			\
	up_read(&led->rwsem);						\
	return ret;							\
}									\
static ssize_t bd2802_store_##attr_name(struct device *dev,		\
	struct device_attribute *attr, const char *buf, size_t count)	\
{									\
	struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
	unsigned long val;						\
	int ret;							\
	if (!count)							\
		return -EINVAL;						\
	ret = strict_strtoul(buf, 16, &val);				\
	if (ret)							\
		return ret;						\
	down_write(&led->rwsem);					\
	led->attr_name = val;						\
	up_write(&led->rwsem);						\
	return count;							\
}									\
static struct device_attribute bd2802_##attr_name##_attr = {		\
	.attr = {							\
		.name = name_str,					\
		.mode = 0644,						\
	},								\
	.show = bd2802_show_##attr_name,				\
	.store = bd2802_store_##attr_name,				\
};

BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
BD2802_CONTROL_ATTR(rgb_current, "rgb_current");

static struct device_attribute *bd2802_attributes[] = {
	&bd2802_adv_conf_attr,
	&bd2802_wave_pattern_attr,
	&bd2802_rgb_current_attr,
};

/*
 * Deferred worker: applies the most recently requested (led_id, color,
 * state) change outside the led-class callback context.
 */
static void bd2802_led_work(struct work_struct *work)
{
	struct bd2802_led *led = container_of(work, struct bd2802_led, work);

	if (led->state)
		bd2802_turn_on(led, led->led_id, led->color, led->state);
	else
		bd2802_turn_off(led, led->led_id, led->color);
}

/*
 * Generate brightness_set/blink_set callbacks for one (LED, colour)
 * channel; both just record the request and schedule bd2802_led_work.
 */
#define BD2802_CONTROL_RGBS(name, id, clr)				\
static void bd2802_set_##name##_brightness(struct led_classdev *led_cdev,\
					enum led_brightness value)	\
{									\
	struct bd2802_led *led =					\
		container_of(led_cdev, struct bd2802_led, cdev_##name);	\
	led->led_id = id;						\
	led->color = clr;						\
	if (value == LED_OFF)						\
		led->state = BD2802_OFF;				\
	else								\
		led->state = BD2802_ON;					\
	schedule_work(&led->work);					\
}									\
static int bd2802_set_##name##_blink(struct led_classdev *led_cdev,	\
	unsigned long *delay_on, unsigned long *delay_off)		\
{									\
	struct bd2802_led *led =					\
		container_of(led_cdev, struct bd2802_led, cdev_##name);	\
	if (*delay_on == 0 || *delay_off == 0)				\
		return -EINVAL;						\
	led->led_id = id;						\
	led->color = clr;						\
	led->state = BD2802_BLINK;					\
	schedule_work(&led->work);					\
	return 0;							\
}

BD2802_CONTROL_RGBS(led1r, LED1, RED);
BD2802_CONTROL_RGBS(led1g, LED1, GREEN);
BD2802_CONTROL_RGBS(led1b, LED1, BLUE);
BD2802_CONTROL_RGBS(led2r, LED2, RED);
BD2802_CONTROL_RGBS(led2g, LED2, GREEN);
BD2802_CONTROL_RGBS(led2b, LED2, BLUE);

/*
 * Register the six led_classdev channels, unwinding on failure.
 * NOTE(review): only cdev_led2b sets LED_CORE_SUSPENDRESUME — looks
 * inconsistent with the other five channels; confirm intent.
 */
static int bd2802_register_led_classdev(struct bd2802_led *led)
{
	int ret;

	INIT_WORK(&led->work, bd2802_led_work);

	led->cdev_led1r.name = "led1_R";
	led->cdev_led1r.brightness = LED_OFF;
	led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
	led->cdev_led1r.blink_set = bd2802_set_led1r_blink;

	ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
	if (ret < 0) {
		dev_err(&led->client->dev, "couldn't register LED %s\n",
							led->cdev_led1r.name);
		goto failed_unregister_led1_R;
	}

	led->cdev_led1g.name = "led1_G";
	led->cdev_led1g.brightness = LED_OFF;
	led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
	led->cdev_led1g.blink_set = bd2802_set_led1g_blink;

	ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
	if (ret < 0) {
		dev_err(&led->client->dev, "couldn't register LED %s\n",
							led->cdev_led1g.name);
		goto failed_unregister_led1_G;
	}

	led->cdev_led1b.name = "led1_B";
	led->cdev_led1b.brightness = LED_OFF;
	led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
	led->cdev_led1b.blink_set = bd2802_set_led1b_blink;

	ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
	if (ret < 0) {
		dev_err(&led->client->dev, "couldn't register LED %s\n",
							led->cdev_led1b.name);
		goto failed_unregister_led1_B;
	}

	led->cdev_led2r.name = "led2_R";
	led->cdev_led2r.brightness = LED_OFF;
	led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
	led->cdev_led2r.blink_set = bd2802_set_led2r_blink;

	ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
	if (ret < 0) {
		dev_err(&led->client->dev, "couldn't register LED %s\n",
							led->cdev_led2r.name);
		goto failed_unregister_led2_R;
	}

	led->cdev_led2g.name = "led2_G";
	led->cdev_led2g.brightness = LED_OFF;
	led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
	led->cdev_led2g.blink_set = bd2802_set_led2g_blink;

	ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
	if (ret < 0) {
		dev_err(&led->client->dev, "couldn't register LED %s\n",
							led->cdev_led2g.name);
		goto failed_unregister_led2_G;
	}

	led->cdev_led2b.name = "led2_B";
	led->cdev_led2b.brightness = LED_OFF;
	led->cdev_led2b.brightness_set = bd2802_set_led2b_brightness;
	led->cdev_led2b.blink_set = bd2802_set_led2b_blink;
	led->cdev_led2b.flags |= LED_CORE_SUSPENDRESUME;

	ret = led_classdev_register(&led->client->dev, &led->cdev_led2b);
	if (ret < 0) {
		dev_err(&led->client->dev, "couldn't register LED %s\n",
							led->cdev_led2b.name);
		goto failed_unregister_led2_B;
	}

	return 0;

failed_unregister_led2_B:
	led_classdev_unregister(&led->cdev_led2g);
failed_unregister_led2_G:
	led_classdev_unregister(&led->cdev_led2r);
failed_unregister_led2_R:
	led_classdev_unregister(&led->cdev_led1b);
failed_unregister_led1_B:
	led_classdev_unregister(&led->cdev_led1g);
failed_unregister_led1_G:
	led_classdev_unregister(&led->cdev_led1r);
failed_unregister_led1_R:

	return ret;
}

/* Flush pending work, then unregister all six channels. */
static void bd2802_unregister_led_classdev(struct bd2802_led *led)
{
	cancel_work_sync(&led->work);
	led_classdev_unregister(&led->cdev_led2b);
	led_classdev_unregister(&led->cdev_led2g);
	led_classdev_unregister(&led->cdev_led2r);
	led_classdev_unregister(&led->cdev_led1b);
	led_classdev_unregister(&led->cdev_led1g);
	led_classdev_unregister(&led->cdev_led1r);
}

static int __devinit bd2802_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct bd2802_led *led;
	struct bd2802_led_platform_data *pdata;
	int ret, i;

	led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
	if (!led) {
		dev_err(&client->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	led->client = client;
	pdata = led->pdata = client->dev.platform_data;
	i2c_set_clientdata(client, led);

	/*
	 * Configure RESET GPIO (L: RESET, H: RESET cancel)
	 * NOTE(review): gpio_request_one() return value is ignored here;
	 * a failed request would go unnoticed — consider checking it.
	 */
	gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");

	/* Tacss = min 0.1ms */
	udelay(100);

	/* Detect BD2802GU */
	ret = bd2802_write_byte(client, BD2802_REG_CLKSETUP, 0x00);
	if (ret < 0) {
		dev_err(&client->dev, "failed to detect device\n");
		goto failed_free;
	} else
		/* NOTE(review): bd2802_write_byte() returns 0 on success,
		 * so this always logs "return 0x00". */
		dev_info(&client->dev, "return 0x%02x\n", ret);

	/* To save the power, reset BD2802 after detecting */
	gpio_set_value(led->pdata->reset_gpio, 0);

	/* Default attributes */
	led->wave_pattern = BD2802_PATTERN_HALF;
	led->rgb_current = BD2802_CURRENT_032;

	init_rwsem(&led->rwsem);

	for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
		ret = device_create_file(&led->client->dev,
						bd2802_attributes[i]);
		if (ret) {
			dev_err(&led->client->dev, "failed: sysfs file %s\n",
					bd2802_attributes[i]->attr.name);
			goto failed_unregister_dev_file;
		}
	}

	ret = bd2802_register_led_classdev(led);
	if (ret < 0)
		goto failed_unregister_dev_file;

	return 0;

failed_unregister_dev_file:
	for (i--; i >= 0; i--)
		device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
	kfree(led);

	return ret;
}

static int __exit bd2802_remove(struct i2c_client *client)
{
	struct bd2802_led *led = i2c_get_clientdata(client);
	int i;

	gpio_set_value(led->pdata->reset_gpio, 0);
	bd2802_unregister_led_classdev(led);
	if (led->adf_on)
		bd2802_disable_adv_conf(led);
	for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
		device_remove_file(&led->client->dev, bd2802_attributes[i]);
	kfree(led);

	return 0;
}

#ifdef CONFIG_PM
/* Re-program every channel that was on/blinking before suspend. */
static void bd2802_restore_state(struct bd2802_led *led)
{
	int i;

	for (i = 0; i < LED_NUM; i++) {
		if (led->led[i].r)
			bd2802_turn_on(led, i, RED, led->led[i].r);
		if (led->led[i].g)
			bd2802_turn_on(led, i, GREEN, led->led[i].g);
		if (led->led[i].b)
			bd2802_turn_on(led, i, BLUE, led->led[i].b);
	}
}

/* Hold the chip in reset across suspend. */
static int bd2802_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bd2802_led *led = i2c_get_clientdata(client);

	gpio_set_value(led->pdata->reset_gpio, 0);

	return 0;
}

static int bd2802_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bd2802_led *led = i2c_get_clientdata(client);

	if (!bd2802_is_all_off(led) || led->adf_on) {
		bd2802_reset_cancel(led);
		bd2802_restore_state(led);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume);
#define BD2802_PM (&bd2802_pm)
#else		/* CONFIG_PM */
#define BD2802_PM NULL
#endif

static const struct i2c_device_id bd2802_id[] = {
	{ "BD2802", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bd2802_id);

static struct i2c_driver bd2802_i2c_driver = {
	.driver	= {
		.name	= "BD2802",
		.pm	= BD2802_PM,
	},
	.probe		= bd2802_probe,
	.remove		= __exit_p(bd2802_remove),
	.id_table	= bd2802_id,
};

module_i2c_driver(bd2802_i2c_driver);

MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
PyYoshi/android_kernel_kyocera_l03
arch/powerpc/platforms/cell/spu_callbacks.c
4944
2181
/* * System call callback functions for SPUs */ #undef DEBUG #include <linux/kallsyms.h> #include <linux/export.h> #include <linux/syscalls.h> #include <asm/spu.h> #include <asm/syscalls.h> #include <asm/unistd.h> /* * This table defines the system calls that an SPU can call. * It is currently a subset of the 64 bit powerpc system calls, * with the exact semantics. * * The reasons for disabling some of the system calls are: * 1. They interact with the way SPU syscalls are handled * and we can't let them execute ever: * restart_syscall, exit, for, execve, ptrace, ... * 2. They are deprecated and replaced by other means: * uselib, pciconfig_*, sysfs, ... * 3. They are somewhat interacting with the system in a way * we don't want an SPU to: * reboot, init_module, mount, kexec_load * 4. They are optional and we can't rely on them being * linked into the kernel. Unfortunately, the cond_syscall * helper does not work here as it does not add the necessary * opd symbols: * mbind, mq_open, ipc, ... 
*/ static void *spu_syscall_table[] = { #define SYSCALL(func) sys_ni_syscall, #define COMPAT_SYS(func) sys_ni_syscall, #define PPC_SYS(func) sys_ni_syscall, #define OLDSYS(func) sys_ni_syscall, #define SYS32ONLY(func) sys_ni_syscall, #define SYSX(f, f3264, f32) sys_ni_syscall, #define SYSCALL_SPU(func) sys_##func, #define COMPAT_SYS_SPU(func) sys_##func, #define PPC_SYS_SPU(func) ppc_##func, #define SYSX_SPU(f, f3264, f32) f, #include <asm/systbl.h> }; long spu_sys_callback(struct spu_syscall_block *s) { long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); return -ENOSYS; } syscall = spu_syscall_table[s->nr_ret]; #ifdef DEBUG print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall); printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n", s->nr_ret, s->parm[0], s->parm[1], s->parm[2], s->parm[3], s->parm[4], s->parm[5]); #endif return syscall(s->parm[0], s->parm[1], s->parm[2], s->parm[3], s->parm[4], s->parm[5]); } EXPORT_SYMBOL_GPL(spu_sys_callback);
gpl-2.0
SebastianFM/SebastianFM-kernel
sound/soc/codecs/wm8974.c
4944
19153
/* * wm8974.c -- WM8974 ALSA Soc Audio driver * * Copyright 2006-2009 Wolfson Microelectronics PLC. * * Author: Liam Girdwood <Liam.Girdwood@wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8974.h" static const u16 wm8974_reg[WM8974_CACHEREGNUM] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0050, 0x0000, 0x0140, 0x0000, 0x0000, 0x0000, 0x0000, 0x00ff, 0x0000, 0x0000, 0x0100, 0x00ff, 0x0000, 0x0000, 0x012c, 0x002c, 0x002c, 0x002c, 0x002c, 0x0000, 0x0032, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0038, 0x000b, 0x0032, 0x0000, 0x0008, 0x000c, 0x0093, 0x00e9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0003, 0x0010, 0x0000, 0x0000, 0x0000, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0039, 0x0000, 0x0000, }; #define WM8974_POWER1_BIASEN 0x08 #define WM8974_POWER1_BUFIOEN 0x04 #define wm8974_reset(c) snd_soc_write(c, WM8974_RESET, 0) static const char *wm8974_companding[] = {"Off", "NC", "u-law", "A-law" }; static const char *wm8974_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz" }; static const char *wm8974_eqmode[] = {"Capture", "Playback" }; static const char *wm8974_bw[] = {"Narrow", "Wide" }; static const char *wm8974_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz" }; static const char *wm8974_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz" }; static const char *wm8974_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz" }; static const char *wm8974_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz" }; static const char *wm8974_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz" }; 
static const char *wm8974_alc[] = {"ALC", "Limiter" }; static const struct soc_enum wm8974_enum[] = { SOC_ENUM_SINGLE(WM8974_COMP, 1, 4, wm8974_companding), /* adc */ SOC_ENUM_SINGLE(WM8974_COMP, 3, 4, wm8974_companding), /* dac */ SOC_ENUM_SINGLE(WM8974_DAC, 4, 4, wm8974_deemp), SOC_ENUM_SINGLE(WM8974_EQ1, 8, 2, wm8974_eqmode), SOC_ENUM_SINGLE(WM8974_EQ1, 5, 4, wm8974_eq1), SOC_ENUM_SINGLE(WM8974_EQ2, 8, 2, wm8974_bw), SOC_ENUM_SINGLE(WM8974_EQ2, 5, 4, wm8974_eq2), SOC_ENUM_SINGLE(WM8974_EQ3, 8, 2, wm8974_bw), SOC_ENUM_SINGLE(WM8974_EQ3, 5, 4, wm8974_eq3), SOC_ENUM_SINGLE(WM8974_EQ4, 8, 2, wm8974_bw), SOC_ENUM_SINGLE(WM8974_EQ4, 5, 4, wm8974_eq4), SOC_ENUM_SINGLE(WM8974_EQ5, 8, 2, wm8974_bw), SOC_ENUM_SINGLE(WM8974_EQ5, 5, 4, wm8974_eq5), SOC_ENUM_SINGLE(WM8974_ALC3, 8, 2, wm8974_alc), }; static const char *wm8974_auxmode_text[] = { "Buffer", "Mixer" }; static const struct soc_enum wm8974_auxmode = SOC_ENUM_SINGLE(WM8974_INPUT, 3, 2, wm8974_auxmode_text); static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0); static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0); static const struct snd_kcontrol_new wm8974_snd_controls[] = { SOC_SINGLE("Digital Loopback Switch", WM8974_COMP, 0, 1, 0), SOC_ENUM("DAC Companding", wm8974_enum[1]), SOC_ENUM("ADC Companding", wm8974_enum[0]), SOC_ENUM("Playback De-emphasis", wm8974_enum[2]), SOC_SINGLE("DAC Inversion Switch", WM8974_DAC, 0, 1, 0), SOC_SINGLE_TLV("PCM Volume", WM8974_DACVOL, 0, 255, 0, digital_tlv), SOC_SINGLE("High Pass Filter Switch", WM8974_ADC, 8, 1, 0), SOC_SINGLE("High Pass Cut Off", WM8974_ADC, 4, 7, 0), SOC_SINGLE("ADC Inversion Switch", WM8974_ADC, 0, 1, 0), SOC_SINGLE_TLV("Capture Volume", WM8974_ADCVOL, 0, 255, 0, digital_tlv), SOC_ENUM("Equaliser Function", wm8974_enum[3]), SOC_ENUM("EQ1 Cut Off", wm8974_enum[4]), SOC_SINGLE_TLV("EQ1 Volume", WM8974_EQ1, 0, 24, 1, eq_tlv), 
SOC_ENUM("Equaliser EQ2 Bandwith", wm8974_enum[5]), SOC_ENUM("EQ2 Cut Off", wm8974_enum[6]), SOC_SINGLE_TLV("EQ2 Volume", WM8974_EQ2, 0, 24, 1, eq_tlv), SOC_ENUM("Equaliser EQ3 Bandwith", wm8974_enum[7]), SOC_ENUM("EQ3 Cut Off", wm8974_enum[8]), SOC_SINGLE_TLV("EQ3 Volume", WM8974_EQ3, 0, 24, 1, eq_tlv), SOC_ENUM("Equaliser EQ4 Bandwith", wm8974_enum[9]), SOC_ENUM("EQ4 Cut Off", wm8974_enum[10]), SOC_SINGLE_TLV("EQ4 Volume", WM8974_EQ4, 0, 24, 1, eq_tlv), SOC_ENUM("Equaliser EQ5 Bandwith", wm8974_enum[11]), SOC_ENUM("EQ5 Cut Off", wm8974_enum[12]), SOC_SINGLE_TLV("EQ5 Volume", WM8974_EQ5, 0, 24, 1, eq_tlv), SOC_SINGLE("DAC Playback Limiter Switch", WM8974_DACLIM1, 8, 1, 0), SOC_SINGLE("DAC Playback Limiter Decay", WM8974_DACLIM1, 4, 15, 0), SOC_SINGLE("DAC Playback Limiter Attack", WM8974_DACLIM1, 0, 15, 0), SOC_SINGLE("DAC Playback Limiter Threshold", WM8974_DACLIM2, 4, 7, 0), SOC_SINGLE("DAC Playback Limiter Boost", WM8974_DACLIM2, 0, 15, 0), SOC_SINGLE("ALC Enable Switch", WM8974_ALC1, 8, 1, 0), SOC_SINGLE("ALC Capture Max Gain", WM8974_ALC1, 3, 7, 0), SOC_SINGLE("ALC Capture Min Gain", WM8974_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture ZC Switch", WM8974_ALC2, 8, 1, 0), SOC_SINGLE("ALC Capture Hold", WM8974_ALC2, 4, 7, 0), SOC_SINGLE("ALC Capture Target", WM8974_ALC2, 0, 15, 0), SOC_ENUM("ALC Capture Mode", wm8974_enum[13]), SOC_SINGLE("ALC Capture Decay", WM8974_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack", WM8974_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture Noise Gate Switch", WM8974_NGATE, 3, 1, 0), SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8974_NGATE, 0, 7, 0), SOC_SINGLE("Capture PGA ZC Switch", WM8974_INPPGA, 7, 1, 0), SOC_SINGLE_TLV("Capture PGA Volume", WM8974_INPPGA, 0, 63, 0, inpga_tlv), SOC_SINGLE("Speaker Playback ZC Switch", WM8974_SPKVOL, 7, 1, 0), SOC_SINGLE("Speaker Playback Switch", WM8974_SPKVOL, 6, 1, 1), SOC_SINGLE_TLV("Speaker Playback Volume", WM8974_SPKVOL, 0, 63, 0, spk_tlv), SOC_ENUM("Aux Mode", wm8974_auxmode), SOC_SINGLE("Capture 
Boost(+20dB)", WM8974_ADCBOOST, 8, 1, 0), SOC_SINGLE("Mono Playback Switch", WM8974_MONOMIX, 6, 1, 1), /* DAC / ADC oversampling */ SOC_SINGLE("DAC 128x Oversampling Switch", WM8974_DAC, 8, 1, 0), SOC_SINGLE("ADC 128x Oversampling Switch", WM8974_ADC, 8, 1, 0), }; /* Speaker Output Mixer */ static const struct snd_kcontrol_new wm8974_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_SPKMIX, 1, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_SPKMIX, 5, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 0), }; /* Mono Output Mixer */ static const struct snd_kcontrol_new wm8974_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_MONOMIX, 1, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_MONOMIX, 2, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0), }; /* Boost mixer */ static const struct snd_kcontrol_new wm8974_boost_mixer[] = { SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0), }; /* Input PGA */ static const struct snd_kcontrol_new wm8974_inpga[] = { SOC_DAPM_SINGLE("Aux Switch", WM8974_INPUT, 2, 1, 0), SOC_DAPM_SINGLE("MicN Switch", WM8974_INPUT, 1, 1, 0), SOC_DAPM_SINGLE("MicP Switch", WM8974_INPUT, 0, 1, 0), }; /* AUX Input boost vol */ static const struct snd_kcontrol_new wm8974_aux_boost_controls = SOC_DAPM_SINGLE("Aux Volume", WM8974_ADCBOOST, 0, 7, 0); /* Mic Input boost vol */ static const struct snd_kcontrol_new wm8974_mic_boost_controls = SOC_DAPM_SINGLE("Mic Volume", WM8974_ADCBOOST, 4, 7, 0); static const struct snd_soc_dapm_widget wm8974_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Speaker Mixer", WM8974_POWER3, 2, 0, &wm8974_speaker_mixer_controls[0], ARRAY_SIZE(wm8974_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", WM8974_POWER3, 3, 0, &wm8974_mono_mixer_controls[0], ARRAY_SIZE(wm8974_mono_mixer_controls)), SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8974_POWER3, 0, 0), SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8974_POWER2, 0, 0), SND_SOC_DAPM_PGA("Aux 
Input", WM8974_POWER1, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("SpkN Out", WM8974_POWER3, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("SpkP Out", WM8974_POWER3, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out", WM8974_POWER3, 7, 0, NULL, 0), SND_SOC_DAPM_MIXER("Input PGA", WM8974_POWER2, 2, 0, wm8974_inpga, ARRAY_SIZE(wm8974_inpga)), SND_SOC_DAPM_MIXER("Boost Mixer", WM8974_POWER2, 4, 0, wm8974_boost_mixer, ARRAY_SIZE(wm8974_boost_mixer)), SND_SOC_DAPM_SUPPLY("Mic Bias", WM8974_POWER1, 4, 0, NULL, 0), SND_SOC_DAPM_INPUT("MICN"), SND_SOC_DAPM_INPUT("MICP"), SND_SOC_DAPM_INPUT("AUX"), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_OUTPUT("SPKOUTP"), SND_SOC_DAPM_OUTPUT("SPKOUTN"), }; static const struct snd_soc_dapm_route wm8974_dapm_routes[] = { /* Mono output mixer */ {"Mono Mixer", "PCM Playback Switch", "DAC"}, {"Mono Mixer", "Aux Playback Switch", "Aux Input"}, {"Mono Mixer", "Line Bypass Switch", "Boost Mixer"}, /* Speaker output mixer */ {"Speaker Mixer", "PCM Playback Switch", "DAC"}, {"Speaker Mixer", "Aux Playback Switch", "Aux Input"}, {"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"}, /* Outputs */ {"Mono Out", NULL, "Mono Mixer"}, {"MONOOUT", NULL, "Mono Out"}, {"SpkN Out", NULL, "Speaker Mixer"}, {"SpkP Out", NULL, "Speaker Mixer"}, {"SPKOUTN", NULL, "SpkN Out"}, {"SPKOUTP", NULL, "SpkP Out"}, /* Boost Mixer */ {"ADC", NULL, "Boost Mixer"}, {"Boost Mixer", "Aux Switch", "Aux Input"}, {"Boost Mixer", NULL, "Input PGA"}, {"Boost Mixer", NULL, "MICP"}, /* Input PGA */ {"Input PGA", "Aux Switch", "Aux Input"}, {"Input PGA", "MicN Switch", "MICN"}, {"Input PGA", "MicP Switch", "MICP"}, /* Inputs */ {"Aux Input", NULL, "AUX"}, }; struct pll_ { unsigned int pre_div:1; unsigned int n:4; unsigned int k; }; /* The size in bits of the pll divide multiplied by 10 * to allow rounding later */ #define FIXED_PLL_SIZE ((1 << 24) * 10) static void pll_factors(struct pll_ *pll_div, unsigned int target, unsigned int source) { unsigned long long Kpart; unsigned int K, Ndiv, Nmod; /* There is 
a fixed divide by 4 in the output path */ target *= 4; Ndiv = target / source; if (Ndiv < 6) { source /= 2; pll_div->pre_div = 1; Ndiv = target / source; } else pll_div->pre_div = 0; if ((Ndiv < 6) || (Ndiv > 12)) printk(KERN_WARNING "WM8974 N value %u outwith recommended range!\n", Ndiv); pll_div->n = Ndiv; Nmod = target % source; Kpart = FIXED_PLL_SIZE * (long long)Nmod; do_div(Kpart, source); K = Kpart & 0xFFFFFFFF; /* Check if we need to round */ if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ K /= 10; pll_div->k = K; } static int wm8974_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec = codec_dai->codec; struct pll_ pll_div; u16 reg; if (freq_in == 0 || freq_out == 0) { /* Clock CODEC directly from MCLK */ reg = snd_soc_read(codec, WM8974_CLOCK); snd_soc_write(codec, WM8974_CLOCK, reg & 0x0ff); /* Turn off PLL */ reg = snd_soc_read(codec, WM8974_POWER1); snd_soc_write(codec, WM8974_POWER1, reg & 0x1df); return 0; } pll_factors(&pll_div, freq_out, freq_in); snd_soc_write(codec, WM8974_PLLN, (pll_div.pre_div << 4) | pll_div.n); snd_soc_write(codec, WM8974_PLLK1, pll_div.k >> 18); snd_soc_write(codec, WM8974_PLLK2, (pll_div.k >> 9) & 0x1ff); snd_soc_write(codec, WM8974_PLLK3, pll_div.k & 0x1ff); reg = snd_soc_read(codec, WM8974_POWER1); snd_soc_write(codec, WM8974_POWER1, reg | 0x020); /* Run CODEC from PLL instead of MCLK */ reg = snd_soc_read(codec, WM8974_CLOCK); snd_soc_write(codec, WM8974_CLOCK, reg | 0x100); return 0; } /* * Configure WM8974 clock dividers. 
*/ static int wm8974_set_dai_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; switch (div_id) { case WM8974_OPCLKDIV: reg = snd_soc_read(codec, WM8974_GPIO) & 0x1cf; snd_soc_write(codec, WM8974_GPIO, reg | div); break; case WM8974_MCLKDIV: reg = snd_soc_read(codec, WM8974_CLOCK) & 0x11f; snd_soc_write(codec, WM8974_CLOCK, reg | div); break; case WM8974_BCLKDIV: reg = snd_soc_read(codec, WM8974_CLOCK) & 0x1e3; snd_soc_write(codec, WM8974_CLOCK, reg | div); break; default: return -EINVAL; } return 0; } static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; u16 clk = snd_soc_read(codec, WM8974_CLOCK) & 0x1fe; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: clk |= 0x0001; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x0010; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0008; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x00018; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0180; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0100; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0080; break; default: return -EINVAL; } snd_soc_write(codec, WM8974_IFACE, iface); snd_soc_write(codec, WM8974_CLOCK, clk); return 0; } static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; u16 iface = snd_soc_read(codec, WM8974_IFACE) & 0x19f; u16 adn = snd_soc_read(codec, WM8974_ADD) & 0x1f1; /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case 
SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0020; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0040; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x0060; break; } /* filter coefficient */ switch (params_rate(params)) { case 8000: adn |= 0x5 << 1; break; case 11025: adn |= 0x4 << 1; break; case 16000: adn |= 0x3 << 1; break; case 22050: adn |= 0x2 << 1; break; case 32000: adn |= 0x1 << 1; break; case 44100: case 48000: break; } snd_soc_write(codec, WM8974_IFACE, iface); snd_soc_write(codec, WM8974_ADD, adn); return 0; } static int wm8974_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf; if (mute) snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40); else snd_soc_write(codec, WM8974_DAC, mute_reg); return 0; } /* liam need to make this lower power with dapm */ static int wm8974_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 power1 = snd_soc_read(codec, WM8974_POWER1) & ~0x3; switch (level) { case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: power1 |= 0x1; /* VMID 50k */ snd_soc_write(codec, WM8974_POWER1, power1); break; case SND_SOC_BIAS_STANDBY: power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN; if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { snd_soc_cache_sync(codec); /* Initial cap charge at VMID 5k */ snd_soc_write(codec, WM8974_POWER1, power1 | 0x3); mdelay(100); } power1 |= 0x2; /* VMID 500k */ snd_soc_write(codec, WM8974_POWER1, power1); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8974_POWER1, 0); snd_soc_write(codec, WM8974_POWER2, 0); snd_soc_write(codec, WM8974_POWER3, 0); break; } codec->dapm.bias_level = level; return 0; } #define WM8974_RATES (SNDRV_PCM_RATE_8000_48000) #define WM8974_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8974_ops = { .hw_params = wm8974_pcm_hw_params, .digital_mute = wm8974_mute, .set_fmt = wm8974_set_dai_fmt, 
.set_clkdiv = wm8974_set_dai_clkdiv, .set_pll = wm8974_set_dai_pll, }; static struct snd_soc_dai_driver wm8974_dai = { .name = "wm8974-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, /* Only 1 channel of data */ .rates = WM8974_RATES, .formats = WM8974_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, /* Only 1 channel of data */ .rates = WM8974_RATES, .formats = WM8974_FORMATS,}, .ops = &wm8974_ops, .symmetric_rates = 1, }; static int wm8974_suspend(struct snd_soc_codec *codec) { wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8974_resume(struct snd_soc_codec *codec) { wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8974_probe(struct snd_soc_codec *codec) { int ret = 0; ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } ret = wm8974_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); return ret; } wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return ret; } /* power down chip */ static int wm8974_remove(struct snd_soc_codec *codec) { wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8974 = { .probe = wm8974_probe, .remove = wm8974_remove, .suspend = wm8974_suspend, .resume = wm8974_resume, .set_bias_level = wm8974_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm8974_reg), .reg_word_size = sizeof(u16), .reg_cache_default = wm8974_reg, .controls = wm8974_snd_controls, .num_controls = ARRAY_SIZE(wm8974_snd_controls), .dapm_widgets = wm8974_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8974_dapm_widgets), .dapm_routes = wm8974_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes), }; static __devinit int wm8974_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { int ret; ret = snd_soc_register_codec(&i2c->dev, 
&soc_codec_dev_wm8974, &wm8974_dai, 1); return ret; } static __devexit int wm8974_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8974_i2c_id[] = { { "wm8974", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8974_i2c_id); static struct i2c_driver wm8974_i2c_driver = { .driver = { .name = "wm8974", .owner = THIS_MODULE, }, .probe = wm8974_i2c_probe, .remove = __devexit_p(wm8974_i2c_remove), .id_table = wm8974_i2c_id, }; static int __init wm8974_modinit(void) { int ret = 0; ret = i2c_add_driver(&wm8974_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register wm8974 I2C driver: %d\n", ret); } return ret; } module_init(wm8974_modinit); static void __exit wm8974_exit(void) { i2c_del_driver(&wm8974_i2c_driver); } module_exit(wm8974_exit); MODULE_DESCRIPTION("ASoC WM8974 driver"); MODULE_AUTHOR("Liam Girdwood"); MODULE_LICENSE("GPL");
gpl-2.0
kinsamanka/linux
arch/powerpc/platforms/ps3/htab.c
7760
5397
/*
 * PS3 pagetable management routines.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006, 2007 Sony Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/ps3fb.h>

#include "platform.h"

/**
 * enum ps3_lpar_vas_id - id of LPAR virtual address space.
 * @PS3_LPAR_VAS_ID_CURRENT: Current selected virtual address space
 *
 * Identify the target LPAR address space.
 */

enum ps3_lpar_vas_id {
	PS3_LPAR_VAS_ID_CURRENT = 0,
};

/* Serializes the lv1 hypercall sequences that touch the hash table. */
static DEFINE_SPINLOCK(ps3_htab_lock);

/*
 * ps3_hpte_insert - insert an HPTE through the lv1 hypervisor.
 *
 * Encodes the (va, pa) pair into hpte_v/hpte_r and asks lv1 to insert it,
 * letting lv1 evict a non-bolted victim if the group is full.  Because
 * lv1 chooses the actual slot, a second hypercall re-reads the PTE group
 * to learn whether the entry landed in the secondary group; the returned
 * slot cookie is (index within group) | (1 << 3 if secondary), matching
 * what the generic hash MMU code expects.  BUGs if every candidate entry
 * is bolted or the read-back fails.
 */
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
	unsigned long pa, unsigned long rflags, unsigned long vflags,
	int psize, int ssize)
{
	int result;
	u64 hpte_v, hpte_r;
	u64 inserted_index;
	u64 evicted_v, evicted_r;	/* filled by lv1; not used further */
	u64 hpte_v_array[4], hpte_rs;
	unsigned long flags;
	long ret = -1;

	/*
	 * lv1_insert_htab_entry() will search for victim
	 * entry in both primary and secondary pte group
	 */
	vflags &= ~HPTE_V_SECONDARY;

	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;

	spin_lock_irqsave(&ps3_htab_lock, flags);

	/* talk hvc to replace entries BOLTED == 0 */
	result = lv1_insert_htab_entry(PS3_LPAR_VAS_ID_CURRENT, hpte_group,
				       hpte_v, hpte_r,
				       HPTE_V_BOLTED, 0,
				       &inserted_index,
				       &evicted_v, &evicted_r);

	if (result) {
		/* all entries bolted !*/
		pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
			__func__, result, va, pa, hpte_group, hpte_v, hpte_r);
		BUG();
	}

	/*
	 * see if the entry is inserted into secondary pteg
	 */
	result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT,
				       inserted_index & ~0x3UL,
				       &hpte_v_array[0], &hpte_v_array[1],
				       &hpte_v_array[2], &hpte_v_array[3],
				       &hpte_rs);
	BUG_ON(result);

	if (hpte_v_array[inserted_index % 4] & HPTE_V_SECONDARY)
		ret = (inserted_index & 7) | (1 << 3);
	else
		ret = inserted_index & 7;

	spin_unlock_irqrestore(&ps3_htab_lock, flags);
	return ret;
}

/* Never called on PS3: lv1 picks the victim itself in ps3_hpte_insert(). */
static long ps3_hpte_remove(unsigned long hpte_group)
{
	panic("ps3_hpte_remove() not implemented");
	return 0;
}

/*
 * ps3_hpte_updatepp - "update" the protection bits of an HPTE.
 *
 * Reads back the 4-entry PTE group containing @slot and compares against
 * the encoded @va.  Note that this implementation never writes @newpp:
 * whether or not the entry is found it returns -1, after invalidating a
 * matching entry, so the caller re-inserts via ps3_hpte_insert() (see the
 * block comment below for why).
 */
static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
	unsigned long va, int psize, int ssize, int local)
{
	int result;
	u64 hpte_v, want_v, hpte_rs;
	u64 hpte_v_array[4];
	unsigned long flags;
	long ret;

	want_v = hpte_encode_v(va, psize, ssize);

	spin_lock_irqsave(&ps3_htab_lock, flags);

	/* lv1 reads a whole 4-entry group; mask slot down to its base. */
	result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT, slot & ~0x3UL,
				       &hpte_v_array[0], &hpte_v_array[1],
				       &hpte_v_array[2], &hpte_v_array[3],
				       &hpte_rs);

	if (result) {
		pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n",
			__func__, result, va, slot, psize);
		BUG();
	}

	hpte_v = hpte_v_array[slot % 4];

	/*
	 * As lv1_read_htab_entries() does not give us the RPN, we can
	 * not synthesize the new hpte_r value here, and therefore can
	 * not update the hpte with lv1_insert_htab_entry(), so we
	 * instead invalidate it and ask the caller to update it via
	 * ps3_hpte_insert() by returning a -1 value.
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		/* not found */
		ret = -1;
	} else {
		/* entry found, just invalidate it */
		/*
		 * NOTE(review): result of this hypercall is not checked,
		 * unlike the read above and ps3_hpte_invalidate() which
		 * BUG() on failure -- confirm whether that is intentional.
		 */
		result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT,
					      slot, 0, 0);
		ret = -1;
	}

	spin_unlock_irqrestore(&ps3_htab_lock, flags);
	return ret;
}

/* Never called on PS3: bolted mappings are not re-protected at runtime. */
static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
	int psize, int ssize)
{
	panic("ps3_hpte_updateboltedpp() not implemented");
}

/*
 * ps3_hpte_invalidate - clear one HPTE slot by writing zeroes through lv1.
 * BUGs if the hypercall fails.
 */
static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
	int psize, int ssize, int local)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&ps3_htab_lock, flags);

	result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);

	if (result) {
		pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n",
			__func__, result, va, slot, psize);
		BUG();
	}

	spin_unlock_irqrestore(&ps3_htab_lock, flags);
}

/*
 * ps3_hpte_clear - wipe the whole hash table and tear down the VAS.
 *
 * The table holds (1 << ppc64_pft_size) bytes; >> 4 converts bytes to
 * entries (each HPTE is presumably 16 bytes -- TODO confirm).  Runs
 * lockless and ignores per-entry errors: this is shutdown-path code
 * that ends by destroying the address space.
 */
static void ps3_hpte_clear(void)
{
	unsigned long hpte_count = (1UL << ppc64_pft_size) >> 4;
	u64 i;

	for (i = 0; i < hpte_count; i++)
		lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, i, 0, 0);

	ps3_mm_shutdown();
	ps3_mm_vas_destroy();
}

/*
 * ps3_hpte_init - install the PS3 hash-MMU callbacks into ppc_md and
 * record the hash table size (as a power of two) in ppc64_pft_size.
 */
void __init ps3_hpte_init(unsigned long htab_size)
{
	ppc_md.hpte_invalidate = ps3_hpte_invalidate;
	ppc_md.hpte_updatepp = ps3_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
	ppc_md.hpte_insert = ps3_hpte_insert;
	ppc_md.hpte_remove = ps3_hpte_remove;
	ppc_md.hpte_clear_all = ps3_hpte_clear;

	ppc64_pft_size = __ilog2(htab_size);
}
gpl-2.0
subingangadharan/rpmsg
scripts/mod/sumversion.c
10320
12227
#include <netinet/in.h> #ifdef __sun__ #include <inttypes.h> #else #include <stdint.h> #endif #include <ctype.h> #include <errno.h> #include <string.h> #include <limits.h> #include "modpost.h" /* * Stolen form Cryptographic API. * * MD4 Message Digest Algorithm (RFC1320). * * Implementation derived from Andrew Tridgell and Steve French's * CIFS MD4 implementation, and the cryptoapi implementation * originally based on the public domain implementation written * by Colin Plumb in 1993. * * Copyright (c) Andrew Tridgell 1997-1998. * Modified by Steve French (sfrench@us.ibm.com) 2002 * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #define MD4_DIGEST_SIZE 16 #define MD4_HMAC_BLOCK_SIZE 64 #define MD4_BLOCK_WORDS 16 #define MD4_HASH_WORDS 4 struct md4_ctx { uint32_t hash[MD4_HASH_WORDS]; uint32_t block[MD4_BLOCK_WORDS]; uint64_t byte_count; }; static inline uint32_t lshift(uint32_t x, unsigned int s) { x &= 0xFFFFFFFF; return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s)); } static inline uint32_t F(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | ((~x) & z); } static inline uint32_t G(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | (x & z) | (y & z); } static inline uint32_t H(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; } #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (uint32_t)0x5A827999,s)) #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (uint32_t)0x6ED9EBA1,s)) /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(uint32_t *buf, unsigned int words) { while (words--) { *buf = ntohl(*buf); 
buf++; } } static inline void cpu_to_le32_array(uint32_t *buf, unsigned int words) { while (words--) { *buf = htonl(*buf); buf++; } } static void md4_transform(uint32_t *hash, uint32_t const *in) { uint32_t a, b, c, d; a = hash[0]; b = hash[1]; c = hash[2]; d = hash[3]; ROUND1(a, b, c, d, in[0], 3); ROUND1(d, a, b, c, in[1], 7); ROUND1(c, d, a, b, in[2], 11); ROUND1(b, c, d, a, in[3], 19); ROUND1(a, b, c, d, in[4], 3); ROUND1(d, a, b, c, in[5], 7); ROUND1(c, d, a, b, in[6], 11); ROUND1(b, c, d, a, in[7], 19); ROUND1(a, b, c, d, in[8], 3); ROUND1(d, a, b, c, in[9], 7); ROUND1(c, d, a, b, in[10], 11); ROUND1(b, c, d, a, in[11], 19); ROUND1(a, b, c, d, in[12], 3); ROUND1(d, a, b, c, in[13], 7); ROUND1(c, d, a, b, in[14], 11); ROUND1(b, c, d, a, in[15], 19); ROUND2(a, b, c, d,in[ 0], 3); ROUND2(d, a, b, c, in[4], 5); ROUND2(c, d, a, b, in[8], 9); ROUND2(b, c, d, a, in[12], 13); ROUND2(a, b, c, d, in[1], 3); ROUND2(d, a, b, c, in[5], 5); ROUND2(c, d, a, b, in[9], 9); ROUND2(b, c, d, a, in[13], 13); ROUND2(a, b, c, d, in[2], 3); ROUND2(d, a, b, c, in[6], 5); ROUND2(c, d, a, b, in[10], 9); ROUND2(b, c, d, a, in[14], 13); ROUND2(a, b, c, d, in[3], 3); ROUND2(d, a, b, c, in[7], 5); ROUND2(c, d, a, b, in[11], 9); ROUND2(b, c, d, a, in[15], 13); ROUND3(a, b, c, d,in[ 0], 3); ROUND3(d, a, b, c, in[8], 9); ROUND3(c, d, a, b, in[4], 11); ROUND3(b, c, d, a, in[12], 15); ROUND3(a, b, c, d, in[2], 3); ROUND3(d, a, b, c, in[10], 9); ROUND3(c, d, a, b, in[6], 11); ROUND3(b, c, d, a, in[14], 15); ROUND3(a, b, c, d, in[1], 3); ROUND3(d, a, b, c, in[9], 9); ROUND3(c, d, a, b, in[5], 11); ROUND3(b, c, d, a, in[13], 15); ROUND3(a, b, c, d, in[3], 3); ROUND3(d, a, b, c, in[11], 9); ROUND3(c, d, a, b, in[7], 11); ROUND3(b, c, d, a, in[15], 15); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } static inline void md4_transform_helper(struct md4_ctx *ctx) { le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(uint32_t)); md4_transform(ctx->hash, ctx->block); } static void 
md4_init(struct md4_ctx *mctx) { mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; mctx->hash[3] = 0x10325476; mctx->byte_count = 0; } static void md4_update(struct md4_ctx *mctx, const unsigned char *data, unsigned int len) { const uint32_t avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); md4_transform_helper(mctx); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { memcpy(mctx->block, data, sizeof(mctx->block)); md4_transform_helper(mctx); data += sizeof(mctx->block); len -= sizeof(mctx->block); } memcpy(mctx->block, data, len); } static void md4_final_ascii(struct md4_ctx *mctx, char *out, unsigned int len) { const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (uint64_t)); md4_transform_helper(mctx); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = mctx->byte_count << 3; mctx->block[15] = mctx->byte_count >> 29; le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(uint64_t)) / sizeof(uint32_t)); md4_transform(mctx->hash, mctx->block); cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(uint32_t)); snprintf(out, len, "%08X%08X%08X%08X", mctx->hash[0], mctx->hash[1], mctx->hash[2], mctx->hash[3]); } static inline void add_char(unsigned char c, struct md4_ctx *md) { md4_update(md, &c, 1); } static int parse_string(const char *file, unsigned long len, struct md4_ctx *md) { unsigned long i; add_char(file[0], md); for (i = 1; i < len; i++) { add_char(file[i], md); if (file[i] == '"' && file[i-1] != '\\') break; } return i; } static int parse_comment(const char *file, unsigned long len) { unsigned long i; for (i = 2; i < len; i++) { 
if (file[i-1] == '*' && file[i] == '/') break; } return i; } /* FIXME: Handle .s files differently (eg. # starts comments) --RR */ static int parse_file(const char *fname, struct md4_ctx *md) { char *file; unsigned long i, len; file = grab_file(fname, &len); if (!file) return 0; for (i = 0; i < len; i++) { /* Collapse and ignore \ and CR. */ if (file[i] == '\\' && (i+1 < len) && file[i+1] == '\n') { i++; continue; } /* Ignore whitespace */ if (isspace(file[i])) continue; /* Handle strings as whole units */ if (file[i] == '"') { i += parse_string(file+i, len - i, md); continue; } /* Comments: ignore */ if (file[i] == '/' && file[i+1] == '*') { i += parse_comment(file+i, len - i); continue; } add_char(file[i], md); } release_file(file, len); return 1; } /* Check whether the file is a static library or not */ static int is_static_library(const char *objfile) { int len = strlen(objfile); if (objfile[len - 2] == '.' && objfile[len - 1] == 'a') return 1; else return 0; } /* We have dir/file.o. Open dir/.file.o.cmd, look for source_ and deps_ line * to figure out source files. */ static int parse_source_files(const char *objfile, struct md4_ctx *md) { char *cmd, *file, *line, *dir; const char *base; unsigned long flen, pos = 0; int dirlen, ret = 0, check_files = 0; cmd = NOFAIL(malloc(strlen(objfile) + sizeof("..cmd"))); base = strrchr(objfile, '/'); if (base) { base++; dirlen = base - objfile; sprintf(cmd, "%.*s.%s.cmd", dirlen, objfile, base); } else { dirlen = 0; sprintf(cmd, ".%s.cmd", objfile); } dir = NOFAIL(malloc(dirlen + 1)); strncpy(dir, objfile, dirlen); dir[dirlen] = '\0'; file = grab_file(cmd, &flen); if (!file) { warn("could not find %s for %s\n", cmd, objfile); goto out; } /* There will be a line like so: deps_drivers/net/dummy.o := \ drivers/net/dummy.c \ $(wildcard include/config/net/fastroute.h) \ include/linux/module.h \ Sum all files in the same dir or subdirs. 
*/ while ((line = get_next_line(&pos, file, flen)) != NULL) { char* p = line; if (strncmp(line, "source_", sizeof("source_")-1) == 0) { p = strrchr(line, ' '); if (!p) { warn("malformed line: %s\n", line); goto out_file; } p++; if (!parse_file(p, md)) { warn("could not open %s: %s\n", p, strerror(errno)); goto out_file; } continue; } if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) { check_files = 1; continue; } if (!check_files) continue; /* Continue until line does not end with '\' */ if ( *(p + strlen(p)-1) != '\\') break; /* Terminate line at first space, to get rid of final ' \' */ while (*p) { if (isspace(*p)) { *p = '\0'; break; } p++; } /* Check if this file is in same dir as objfile */ if ((strstr(line, dir)+strlen(dir)-1) == strrchr(line, '/')) { if (!parse_file(line, md)) { warn("could not open %s: %s\n", line, strerror(errno)); goto out_file; } } } /* Everyone parsed OK */ ret = 1; out_file: release_file(file, flen); out: free(dir); free(cmd); return ret; } /* Calc and record src checksum. */ void get_src_version(const char *modname, char sum[], unsigned sumlen) { void *file; unsigned long len; struct md4_ctx md; char *sources, *end, *fname; const char *basename; char filelist[PATH_MAX + 1]; char *modverdir = getenv("MODVERDIR"); if (!modverdir) modverdir = "."; /* Source files for module are in .tmp_versions/modname.mod, after the first line. 
*/ if (strrchr(modname, '/')) basename = strrchr(modname, '/') + 1; else basename = modname; sprintf(filelist, "%s/%.*s.mod", modverdir, (int) strlen(basename) - 2, basename); file = grab_file(filelist, &len); if (!file) /* not a module or .mod file missing - ignore */ return; sources = strchr(file, '\n'); if (!sources) { warn("malformed versions file for %s\n", modname); goto release; } sources++; end = strchr(sources, '\n'); if (!end) { warn("bad ending versions file for %s\n", modname); goto release; } *end = '\0'; md4_init(&md); while ((fname = strsep(&sources, " ")) != NULL) { if (!*fname) continue; if (!(is_static_library(fname)) && !parse_source_files(fname, &md)) goto release; } md4_final_ascii(&md, sum, sumlen); release: release_file(file, len); } static void write_version(const char *filename, const char *sum, unsigned long offset) { int fd; fd = open(filename, O_RDWR); if (fd < 0) { warn("changing sum in %s failed: %s\n", filename, strerror(errno)); return; } if (lseek(fd, offset, SEEK_SET) == (off_t)-1) { warn("changing sum in %s:%lu failed: %s\n", filename, offset, strerror(errno)); goto out; } if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { warn("writing sum in %s failed: %s\n", filename, strerror(errno)); goto out; } out: close(fd); } static int strip_rcs_crap(char *version) { unsigned int len, full_len; if (strncmp(version, "$Revision", strlen("$Revision")) != 0) return 0; /* Space for version string follows. */ full_len = strlen(version) + strlen(version + strlen(version) + 1) + 2; /* Move string to start with version number: prefix will be * $Revision$ or $Revision: */ len = strlen("$Revision"); if (version[len] == ':' || version[len] == '$') len++; while (isspace(version[len])) len++; memmove(version, version+len, full_len-len); full_len -= len; /* Preserve up to next whitespace. 
*/ len = 0; while (version[len] && !isspace(version[len])) len++; memmove(version + len, version + strlen(version), full_len - strlen(version)); return 1; } /* Clean up RCS-style version numbers. */ void maybe_frob_rcs_version(const char *modfilename, char *version, void *modinfo, unsigned long version_offset) { if (strip_rcs_crap(version)) write_version(modfilename, version, version_offset); }
gpl-2.0
skullface1/android_kernel_samsung_i9105
drivers/watchdog/kona_wdt.c
81
5159
/* * Watchdog driver for the KONA architecture * * Copyright (C) 2011 Broadcom Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <mach/hardware.h> #include <linux/io.h> #include <mach/rdb/brcm_rdb_secwatchdog.h> static int nowayout = WATCHDOG_NOWAYOUT; static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */ static unsigned long wdt_status; static spinlock_t wdt_lock; #define WDT_IN_USE 0 #define WDT_OK_TO_CLOSE 1 #define WDT_CLK_RESOLUTION 4 /* 62.5ms */ #define WDT_TICK_RATE 16 /* 16 ticks per sec */ static unsigned long base = KONA_SECWD_VA; static void kona_wdt_enable(void) { unsigned long val; spin_lock(&wdt_lock); /* Sequence to enable the watchdog */ val = ( (readl(base + SECWATCHDOG_SDOGCR_OFFSET) | SECWATCHDOG_SDOGCR_EN_MASK) & (~SECWATCHDOG_SDOGCR_WD_LOAD_FLAG_MASK) ) | ( (WDT_CLK_RESOLUTION << SECWATCHDOG_SDOGCR_CLKS_SHIFT) & SECWATCHDOG_SDOGCR_CLKS_MASK); writel(val, base + SECWATCHDOG_SDOGCR_OFFSET); while (readl(base + SECWATCHDOG_SDOGCR_OFFSET) & SECWATCHDOG_SDOGCR_WD_LOAD_FLAG_MASK); spin_unlock(&wdt_lock); } static void kona_wdt_disable(void) { unsigned long val; spin_lock(&wdt_lock); /* Sequence to disable the watchdog */ val = readl(base + SECWATCHDOG_SDOGCR_OFFSET) & (~SECWATCHDOG_SDOGCR_EN_MASK); writel(val, base + SECWATCHDOG_SDOGCR_OFFSET); while (readl(base + SECWATCHDOG_SDOGCR_OFFSET) & SECWATCHDOG_SDOGCR_WD_LOAD_FLAG_MASK); spin_unlock(&wdt_lock); } static void kona_wdt_keepalive(void) { unsigned long val; spin_lock(&wdt_lock); val = (readl(base + SECWATCHDOG_SDOGCR_OFFSET) & 
(~SECWATCHDOG_SDOGCR_LD_MASK) ) | (((heartbeat * WDT_TICK_RATE) << SECWATCHDOG_SDOGCR_LD_SHIFT) & SECWATCHDOG_SDOGCR_LD_MASK); writel(val, base + SECWATCHDOG_SDOGCR_OFFSET); spin_unlock(&wdt_lock); } static int kona_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); kona_wdt_enable(); return nonseekable_open(inode, file); } static ssize_t kona_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') set_bit(WDT_OK_TO_CLOSE, &wdt_status); } } kona_wdt_keepalive(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "KONA Watchdog", .firmware_version = 0, }; static long kona_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; int time; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)) ? 
-EFAULT : 0; break; case WDIOC_GETSTATUS: ret = put_user(0, (int *)arg); break; case WDIOC_GETBOOTSTATUS: ret = put_user(0, (int *)arg); break; case WDIOC_KEEPALIVE: kona_wdt_enable(); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(time, (int *)arg); if (ret) break; if (time <= 0 || time > 60) { ret = -EINVAL; break; } heartbeat = time; kona_wdt_keepalive(); /* Fall through */ case WDIOC_GETTIMEOUT: ret = put_user(heartbeat, (int *)arg); break; } return ret; } static int kona_wdt_release(struct inode *inode, struct file *file) { if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) kona_wdt_disable(); else printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " "timer will not stop\n"); clear_bit(WDT_IN_USE, &wdt_status); clear_bit(WDT_OK_TO_CLOSE, &wdt_status); return 0; } static const struct file_operations kona_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = kona_wdt_write, .unlocked_ioctl = kona_wdt_ioctl, .open = kona_wdt_open, .release = kona_wdt_release, }; static struct miscdevice kona_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &kona_wdt_fops, }; static int __init kona_wdt_init(void) { spin_lock_init(&wdt_lock); return misc_register(&kona_wdt_miscdev); } static void __exit kona_wdt_exit(void) { misc_deregister(&kona_wdt_miscdev); } module_init(kona_wdt_init); module_exit(kona_wdt_exit); MODULE_AUTHOR("Broadcom Inc."); MODULE_DESCRIPTION("KONA Architecture Watchdog"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)"); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
Kiritoalex/galileo-linux-stable
drivers/spi/spi-ti-qspi.c
81
13868
/* * TI QSPI driver * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com * Author: Sourav Poddar <sourav.poddar@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GPLv2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR /PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/omap-dma.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/spi/spi.h> struct ti_qspi_regs { u32 clkctrl; }; struct ti_qspi { struct completion transfer_complete; /* list synchronization */ struct mutex list_lock; struct spi_master *master; void __iomem *base; void __iomem *ctrl_base; void __iomem *mmap_base; struct clk *fclk; struct device *dev; struct ti_qspi_regs ctx_reg; u32 spi_max_frequency; u32 cmd; u32 dc; bool ctrl_mod; }; #define QSPI_PID (0x0) #define QSPI_SYSCONFIG (0x10) #define QSPI_INTR_STATUS_RAW_SET (0x20) #define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24) #define QSPI_INTR_ENABLE_SET_REG (0x28) #define QSPI_INTR_ENABLE_CLEAR_REG (0x2c) #define QSPI_SPI_CLOCK_CNTRL_REG (0x40) #define QSPI_SPI_DC_REG (0x44) #define QSPI_SPI_CMD_REG (0x48) #define QSPI_SPI_STATUS_REG (0x4c) #define QSPI_SPI_DATA_REG (0x50) #define QSPI_SPI_SETUP0_REG (0x54) #define QSPI_SPI_SWITCH_REG (0x64) #define QSPI_SPI_SETUP1_REG (0x58) #define QSPI_SPI_SETUP2_REG (0x5c) #define QSPI_SPI_SETUP3_REG (0x60) #define QSPI_SPI_DATA_REG_1 (0x68) #define 
QSPI_SPI_DATA_REG_2 (0x6c) #define QSPI_SPI_DATA_REG_3 (0x70) #define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000) #define QSPI_FCLK 192000000 /* Clock Control */ #define QSPI_CLK_EN (1 << 31) #define QSPI_CLK_DIV_MAX 0xffff /* Command */ #define QSPI_EN_CS(n) (n << 28) #define QSPI_WLEN(n) ((n - 1) << 19) #define QSPI_3_PIN (1 << 18) #define QSPI_RD_SNGL (1 << 16) #define QSPI_WR_SNGL (2 << 16) #define QSPI_RD_DUAL (3 << 16) #define QSPI_RD_QUAD (7 << 16) #define QSPI_INVAL (4 << 16) #define QSPI_WC_CMD_INT_EN (1 << 14) #define QSPI_FLEN(n) ((n - 1) << 0) /* STATUS REGISTER */ #define WC 0x02 /* INTERRUPT REGISTER */ #define QSPI_WC_INT_EN (1 << 1) #define QSPI_WC_INT_DISABLE (1 << 1) /* Device Control */ #define QSPI_DD(m, n) (m << (3 + n * 8)) #define QSPI_CKPHA(n) (1 << (2 + n * 8)) #define QSPI_CSPOL(n) (1 << (1 + n * 8)) #define QSPI_CKPOL(n) (1 << (n * 8)) #define QSPI_FRAME 4096 #define QSPI_AUTOSUSPEND_TIMEOUT 2000 static inline unsigned long ti_qspi_read(struct ti_qspi *qspi, unsigned long reg) { return readl(qspi->base + reg); } static inline void ti_qspi_write(struct ti_qspi *qspi, unsigned long val, unsigned long reg) { writel(val, qspi->base + reg); } static int ti_qspi_setup(struct spi_device *spi) { struct ti_qspi *qspi = spi_master_get_devdata(spi->master); struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; int clk_div = 0, ret; u32 clk_ctrl_reg, clk_rate, clk_mask; if (spi->master->busy) { dev_dbg(qspi->dev, "master busy doing other trasnfers\n"); return -EBUSY; } if (!qspi->spi_max_frequency) { dev_err(qspi->dev, "spi max frequency not defined\n"); return -EINVAL; } clk_rate = clk_get_rate(qspi->fclk); clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1; if (clk_div < 0) { dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n"); return -EINVAL; } if (clk_div > QSPI_CLK_DIV_MAX) { dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n", QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1); return -EINVAL; } dev_dbg(qspi->dev, "hz: %d, clock 
divider %d\n", qspi->spi_max_frequency, clk_div); ret = pm_runtime_get_sync(qspi->dev); if (ret < 0) { dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); return ret; } clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG); clk_ctrl_reg &= ~QSPI_CLK_EN; /* disable SCLK */ ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG); /* enable SCLK */ clk_mask = QSPI_CLK_EN | clk_div; ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG); ctx_reg->clkctrl = clk_mask; pm_runtime_mark_last_busy(qspi->dev); ret = pm_runtime_put_autosuspend(qspi->dev); if (ret < 0) { dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n"); return ret; } return 0; } static void ti_qspi_restore_ctx(struct ti_qspi *qspi) { struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); } static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) { int wlen, count, ret; unsigned int cmd; const u8 *txbuf; txbuf = t->tx_buf; cmd = qspi->cmd | QSPI_WR_SNGL; count = t->len; wlen = t->bits_per_word >> 3; /* in bytes */ while (count) { switch (wlen) { case 1: dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", cmd, qspi->dc, *txbuf); writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG); break; case 2: dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n", cmd, qspi->dc, *txbuf); writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG); break; case 4: dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n", cmd, qspi->dc, *txbuf); writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG); break; } ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); ret = wait_for_completion_timeout(&qspi->transfer_complete, QSPI_COMPLETION_TIMEOUT); if (ret == 0) { dev_err(qspi->dev, "write timed out\n"); return -ETIMEDOUT; } txbuf += wlen; count -= wlen; } return 0; } static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) { int wlen, count, ret; unsigned int cmd; u8 *rxbuf; rxbuf = t->rx_buf; cmd = qspi->cmd; switch (t->rx_nbits) { case 
SPI_NBITS_DUAL: cmd |= QSPI_RD_DUAL; break; case SPI_NBITS_QUAD: cmd |= QSPI_RD_QUAD; break; default: cmd |= QSPI_RD_SNGL; break; } count = t->len; wlen = t->bits_per_word >> 3; /* in bytes */ while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); ret = wait_for_completion_timeout(&qspi->transfer_complete, QSPI_COMPLETION_TIMEOUT); if (ret == 0) { dev_err(qspi->dev, "read timed out\n"); return -ETIMEDOUT; } switch (wlen) { case 1: *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG); break; case 2: *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG); break; case 4: *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG); break; } rxbuf += wlen; count -= wlen; } return 0; } static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t) { int ret; if (t->tx_buf) { ret = qspi_write_msg(qspi, t); if (ret) { dev_dbg(qspi->dev, "Error while writing\n"); return ret; } } if (t->rx_buf) { ret = qspi_read_msg(qspi, t); if (ret) { dev_dbg(qspi->dev, "Error while reading\n"); return ret; } } return 0; } static int ti_qspi_start_transfer_one(struct spi_master *master, struct spi_message *m) { struct ti_qspi *qspi = spi_master_get_devdata(master); struct spi_device *spi = m->spi; struct spi_transfer *t; int status = 0, ret; int frame_length; /* setup device control reg */ qspi->dc = 0; if (spi->mode & SPI_CPHA) qspi->dc |= QSPI_CKPHA(spi->chip_select); if (spi->mode & SPI_CPOL) qspi->dc |= QSPI_CKPOL(spi->chip_select); if (spi->mode & SPI_CS_HIGH) qspi->dc |= QSPI_CSPOL(spi->chip_select); frame_length = (m->frame_length << 3) / spi->bits_per_word; frame_length = clamp(frame_length, 0, QSPI_FRAME); /* setup command reg */ qspi->cmd = 0; qspi->cmd |= QSPI_EN_CS(spi->chip_select); qspi->cmd |= QSPI_FLEN(frame_length); qspi->cmd |= QSPI_WC_CMD_INT_EN; ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG); ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG); mutex_lock(&qspi->list_lock); 
list_for_each_entry(t, &m->transfers, transfer_list) { qspi->cmd |= QSPI_WLEN(t->bits_per_word); ret = qspi_transfer_msg(qspi, t); if (ret) { dev_dbg(qspi->dev, "transfer message failed\n"); mutex_unlock(&qspi->list_lock); return -EINVAL; } m->actual_length += t->len; } mutex_unlock(&qspi->list_lock); m->status = status; spi_finalize_current_message(master); ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG); return status; } static irqreturn_t ti_qspi_isr(int irq, void *dev_id) { struct ti_qspi *qspi = dev_id; u16 int_stat; u32 stat; irqreturn_t ret = IRQ_HANDLED; int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR); stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); if (!int_stat) { dev_dbg(qspi->dev, "No IRQ triggered\n"); ret = IRQ_NONE; goto out; } ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_STATUS_ENABLED_CLEAR); if (stat & WC) complete(&qspi->transfer_complete); out: return ret; } static int ti_qspi_runtime_resume(struct device *dev) { struct ti_qspi *qspi; qspi = dev_get_drvdata(dev); ti_qspi_restore_ctx(qspi); return 0; } static const struct of_device_id ti_qspi_match[] = { {.compatible = "ti,dra7xxx-qspi" }, {.compatible = "ti,am4372-qspi" }, {}, }; MODULE_DEVICE_TABLE(of, ti_qspi_match); static int ti_qspi_probe(struct platform_device *pdev) { struct ti_qspi *qspi; struct spi_master *master; struct resource *r, *res_ctrl, *res_mmap; struct device_node *np = pdev->dev.of_node; u32 max_freq; int ret = 0, num_cs, irq; master = spi_alloc_master(&pdev->dev, sizeof(*qspi)); if (!master) return -ENOMEM; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD; master->flags = SPI_MASTER_HALF_DUPLEX; master->setup = ti_qspi_setup; master->auto_runtime_pm = true; master->transfer_one_message = ti_qspi_start_transfer_one; master->dev.of_node = pdev->dev.of_node; master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); if (!of_property_read_u32(np, "num-cs", &num_cs)) master->num_chipselect = num_cs; 
qspi = spi_master_get_devdata(master); qspi->master = master; qspi->dev = &pdev->dev; platform_set_drvdata(pdev, qspi); r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base"); if (r == NULL) { r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { dev_err(&pdev->dev, "missing platform data\n"); return -ENODEV; } } res_mmap = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap"); if (res_mmap == NULL) { res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res_mmap == NULL) { dev_err(&pdev->dev, "memory mapped resource not required\n"); } } res_ctrl = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_ctrlmod"); if (res_ctrl == NULL) { res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (res_ctrl == NULL) { dev_dbg(&pdev->dev, "control module resources not required\n"); } } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq resource?\n"); return irq; } mutex_init(&qspi->list_lock); qspi->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(qspi->base)) { ret = PTR_ERR(qspi->base); goto free_master; } if (res_ctrl) { qspi->ctrl_mod = true; qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl); if (IS_ERR(qspi->ctrl_base)) { ret = PTR_ERR(qspi->ctrl_base); goto free_master; } } if (res_mmap) { qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap); if (IS_ERR(qspi->mmap_base)) { ret = PTR_ERR(qspi->mmap_base); goto free_master; } } ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0, dev_name(&pdev->dev), qspi); if (ret < 0) { dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", irq); goto free_master; } qspi->fclk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(qspi->fclk)) { ret = PTR_ERR(qspi->fclk); dev_err(&pdev->dev, "could not get clk: %d\n", ret); } init_completion(&qspi->transfer_complete); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT); pm_runtime_enable(&pdev->dev); if 
(!of_property_read_u32(np, "spi-max-frequency", &max_freq)) qspi->spi_max_frequency = max_freq; ret = devm_spi_register_master(&pdev->dev, master); if (ret) goto free_master; return 0; free_master: spi_master_put(master); return ret; } static int ti_qspi_remove(struct platform_device *pdev) { struct ti_qspi *qspi = platform_get_drvdata(pdev); int ret; ret = pm_runtime_get_sync(qspi->dev); if (ret < 0) { dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); return ret; } ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); pm_runtime_put(qspi->dev); pm_runtime_disable(&pdev->dev); return 0; } static const struct dev_pm_ops ti_qspi_pm_ops = { .runtime_resume = ti_qspi_runtime_resume, }; static struct platform_driver ti_qspi_driver = { .probe = ti_qspi_probe, .remove = ti_qspi_remove, .driver = { .name = "ti-qspi", .pm = &ti_qspi_pm_ops, .of_match_table = ti_qspi_match, } }; module_platform_driver(ti_qspi_driver); MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("TI QSPI controller driver"); MODULE_ALIAS("platform:ti-qspi");
gpl-2.0
nimengyu2/ti-a8-linux-04.06.00.07
fs/fuse/inode.c
81
28932
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/parser.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/exportfs.h> MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Filesystem in Userspace"); MODULE_LICENSE("GPL"); static struct kmem_cache *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); static int set_global_limit(const char *val, struct kernel_param *kp); unsigned max_user_bgreq; module_param_call(max_user_bgreq, set_global_limit, param_get_uint, &max_user_bgreq, 0644); __MODULE_PARM_TYPE(max_user_bgreq, "uint"); MODULE_PARM_DESC(max_user_bgreq, "Global limit for the maximum number of backgrounded requests an " "unprivileged user can set"); unsigned max_user_congthresh; module_param_call(max_user_congthresh, set_global_limit, param_get_uint, &max_user_congthresh, 0644); __MODULE_PARM_TYPE(max_user_congthresh, "uint"); MODULE_PARM_DESC(max_user_congthresh, "Global limit for the maximum congestion threshold an " "unprivileged user can set"); #define FUSE_SUPER_MAGIC 0x65735546 #define FUSE_DEFAULT_BLKSIZE 512 /** Maximum number of outstanding background requests */ #define FUSE_DEFAULT_MAX_BACKGROUND 12 /** Congestion starts at 75% of maximum */ #define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4) struct fuse_mount_data { int fd; unsigned rootmode; unsigned user_id; unsigned group_id; unsigned fd_present:1; unsigned rootmode_present:1; unsigned user_id_present:1; unsigned group_id_present:1; unsigned flags; unsigned max_read; unsigned blksize; }; struct fuse_forget_link 
*fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); } static struct inode *fuse_alloc_inode(struct super_block *sb) { struct inode *inode; struct fuse_inode *fi; inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL); if (!inode) return NULL; fi = get_fuse_inode(inode); fi->i_time = 0; fi->nodeid = 0; fi->nlookup = 0; fi->attr_version = 0; fi->writectr = 0; INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); INIT_LIST_HEAD(&fi->writepages); init_waitqueue_head(&fi->page_waitq); fi->forget = fuse_alloc_forget(); if (!fi->forget) { kmem_cache_free(fuse_inode_cachep, inode); return NULL; } return inode; } static void fuse_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(fuse_inode_cachep, inode); } static void fuse_destroy_inode(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(!list_empty(&fi->write_files)); BUG_ON(!list_empty(&fi->queued_writes)); kfree(fi->forget); call_rcu(&inode->i_rcu, fuse_i_callback); } static void fuse_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (inode->i_sb->s_flags & MS_ACTIVE) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup); fi->forget = NULL; } } static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) { if (*flags & MS_MANDLOCK) return -EINVAL; return 0; } void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, u64 attr_valid) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); fi->attr_version = ++fc->attr_version; fi->i_time = attr_valid; inode->i_ino = attr->ino; inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); set_nlink(inode, attr->nlink); inode->i_uid = attr->uid; inode->i_gid = attr->gid; 
inode->i_blocks = attr->blocks; inode->i_atime.tv_sec = attr->atime; inode->i_atime.tv_nsec = attr->atimensec; inode->i_mtime.tv_sec = attr->mtime; inode->i_mtime.tv_nsec = attr->mtimensec; inode->i_ctime.tv_sec = attr->ctime; inode->i_ctime.tv_nsec = attr->ctimensec; if (attr->blksize != 0) inode->i_blkbits = ilog2(attr->blksize); else inode->i_blkbits = inode->i_sb->s_blocksize_bits; /* * Don't set the sticky bit in i_mode, unless we want the VFS * to check permissions. This prevents failures due to the * check in may_delete(). */ fi->orig_i_mode = inode->i_mode; if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) inode->i_mode &= ~S_ISVTX; } void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); loff_t oldsize; spin_lock(&fc->lock); if (attr_version != 0 && fi->attr_version > attr_version) { spin_unlock(&fc->lock); return; } fuse_change_attributes_common(inode, attr, attr_valid); oldsize = inode->i_size; i_size_write(inode, attr->size); spin_unlock(&fc->lock); if (S_ISREG(inode->i_mode) && oldsize != attr->size) { truncate_pagecache(inode, oldsize, attr->size); invalidate_inode_pages2(inode->i_mapping); } } static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) { inode->i_mode = attr->mode & S_IFMT; inode->i_size = attr->size; if (S_ISREG(inode->i_mode)) { fuse_init_common(inode); fuse_init_file_inode(inode); } else if (S_ISDIR(inode->i_mode)) fuse_init_dir(inode); else if (S_ISLNK(inode->i_mode)) fuse_init_symlink(inode); else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { fuse_init_common(inode); init_special_inode(inode, inode->i_mode, new_decode_dev(attr->rdev)); } else BUG(); } int fuse_inode_eq(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; if (get_node_id(inode) == nodeid) return 1; else return 0; } static int 
fuse_inode_set(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; get_fuse_inode(inode)->nodeid = nodeid; return 0; } struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int generation, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct inode *inode; struct fuse_inode *fi; struct fuse_conn *fc = get_fuse_conn_super(sb); retry: inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid); if (!inode) return NULL; if ((inode->i_state & I_NEW)) { inode->i_flags |= S_NOATIME|S_NOCMTIME; inode->i_generation = generation; inode->i_data.backing_dev_info = &fc->bdi; fuse_init_inode(inode, attr); unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { /* Inode has changed type, any I/O on the old should fail */ make_bad_inode(inode); iput(inode); goto retry; } fi = get_fuse_inode(inode); spin_lock(&fc->lock); fi->nlookup++; spin_unlock(&fc->lock); fuse_change_attributes(inode, attr, attr_valid, attr_version); return inode; } int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid); if (!inode) return -ENOENT; fuse_invalidate_attr(inode); if (offset >= 0) { pg_start = offset >> PAGE_CACHE_SHIFT; if (len <= 0) pg_end = -1; else pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } iput(inode); return 0; } static void fuse_umount_begin(struct super_block *sb) { fuse_abort_conn(get_fuse_conn_super(sb)); } static void fuse_send_destroy(struct fuse_conn *fc) { struct fuse_req *req = fc->destroy_req; if (req && fc->conn_init) { fc->destroy_req = NULL; req->in.h.opcode = FUSE_DESTROY; req->force = 1; fuse_request_send(fc, req); fuse_put_request(fc, req); } } static void fuse_bdi_destroy(struct fuse_conn *fc) { if (fc->bdi_initialized) bdi_destroy(&fc->bdi); } void fuse_conn_kill(struct fuse_conn *fc) 
{ spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; spin_unlock(&fc->lock); /* Flush all readers on this fs */ kill_fasync(&fc->fasync, SIGIO, POLL_IN); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); wake_up_all(&fc->reserved_req_waitq); mutex_lock(&fuse_mutex); list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); fuse_bdi_destroy(fc); } EXPORT_SYMBOL_GPL(fuse_conn_kill); static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); fuse_send_destroy(fc); fuse_conn_kill(fc); fuse_conn_put(fc); } static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) { stbuf->f_type = FUSE_SUPER_MAGIC; stbuf->f_bsize = attr->bsize; stbuf->f_frsize = attr->frsize; stbuf->f_blocks = attr->blocks; stbuf->f_bfree = attr->bfree; stbuf->f_bavail = attr->bavail; stbuf->f_files = attr->files; stbuf->f_ffree = attr->ffree; stbuf->f_namelen = attr->namelen; /* fsid is left zero */ } static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); struct fuse_req *req; struct fuse_statfs_out outarg; int err; if (!fuse_allow_task(fc, current)) { buf->f_type = FUSE_SUPER_MAGIC; return 0; } req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); memset(&outarg, 0, sizeof(outarg)); req->in.numargs = 0; req->in.h.opcode = FUSE_STATFS; req->in.h.nodeid = get_node_id(dentry->d_inode); req->out.numargs = 1; req->out.args[0].size = fc->minor < 4 ? 
FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; if (!err) convert_fuse_statfs(buf, &outarg.st); fuse_put_request(fc, req); return err; } enum { OPT_FD, OPT_ROOTMODE, OPT_USER_ID, OPT_GROUP_ID, OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, OPT_BLKSIZE, OPT_ERR }; static const match_table_t tokens = { {OPT_FD, "fd=%u"}, {OPT_ROOTMODE, "rootmode=%o"}, {OPT_USER_ID, "user_id=%u"}, {OPT_GROUP_ID, "group_id=%u"}, {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, {OPT_ALLOW_OTHER, "allow_other"}, {OPT_MAX_READ, "max_read=%u"}, {OPT_BLKSIZE, "blksize=%u"}, {OPT_ERR, NULL} }; static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) { char *p; memset(d, 0, sizeof(struct fuse_mount_data)); d->max_read = ~0; d->blksize = FUSE_DEFAULT_BLKSIZE; while ((p = strsep(&opt, ",")) != NULL) { int token; int value; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case OPT_FD: if (match_int(&args[0], &value)) return 0; d->fd = value; d->fd_present = 1; break; case OPT_ROOTMODE: if (match_octal(&args[0], &value)) return 0; if (!fuse_valid_type(value)) return 0; d->rootmode = value; d->rootmode_present = 1; break; case OPT_USER_ID: if (match_int(&args[0], &value)) return 0; d->user_id = value; d->user_id_present = 1; break; case OPT_GROUP_ID: if (match_int(&args[0], &value)) return 0; d->group_id = value; d->group_id_present = 1; break; case OPT_DEFAULT_PERMISSIONS: d->flags |= FUSE_DEFAULT_PERMISSIONS; break; case OPT_ALLOW_OTHER: d->flags |= FUSE_ALLOW_OTHER; break; case OPT_MAX_READ: if (match_int(&args[0], &value)) return 0; d->max_read = value; break; case OPT_BLKSIZE: if (!is_bdev || match_int(&args[0], &value)) return 0; d->blksize = value; break; default: return 0; } } if (!d->fd_present || !d->rootmode_present || !d->user_id_present || !d->group_id_present) return 0; return 1; } static int fuse_show_options(struct 
seq_file *m, struct vfsmount *mnt) { struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb); seq_printf(m, ",user_id=%u", fc->user_id); seq_printf(m, ",group_id=%u", fc->group_id); if (fc->flags & FUSE_DEFAULT_PERMISSIONS) seq_puts(m, ",default_permissions"); if (fc->flags & FUSE_ALLOW_OTHER) seq_puts(m, ",allow_other"); if (fc->max_read != ~0) seq_printf(m, ",max_read=%u", fc->max_read); if (mnt->mnt_sb->s_bdev && mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize); return 0; } void fuse_conn_init(struct fuse_conn *fc) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); mutex_init(&fc->inst_mutex); init_rwsem(&fc->killsb); atomic_set(&fc->count, 1); init_waitqueue_head(&fc->waitq); init_waitqueue_head(&fc->blocked_waitq); init_waitqueue_head(&fc->reserved_req_waitq); INIT_LIST_HEAD(&fc->pending); INIT_LIST_HEAD(&fc->processing); INIT_LIST_HEAD(&fc->io); INIT_LIST_HEAD(&fc->interrupts); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); fc->forget_list_tail = &fc->forget_list_head; atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; fc->reqctr = 0; fc->blocked = 1; fc->attr_version = 1; get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); } EXPORT_SYMBOL_GPL(fuse_conn_init); void fuse_conn_put(struct fuse_conn *fc) { if (atomic_dec_and_test(&fc->count)) { if (fc->destroy_req) fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); fc->release(fc); } } EXPORT_SYMBOL_GPL(fuse_conn_put); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) { atomic_inc(&fc->count); return fc; } EXPORT_SYMBOL_GPL(fuse_conn_get); static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) { struct fuse_attr attr; memset(&attr, 0, sizeof(attr)); attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; return fuse_iget(sb, 1, 0, &attr, 0, 0); } 
struct fuse_inode_handle { u64 nodeid; u32 generation; }; static struct dentry *fuse_get_dentry(struct super_block *sb, struct fuse_inode_handle *handle) { struct fuse_conn *fc = get_fuse_conn_super(sb); struct inode *inode; struct dentry *entry; int err = -ESTALE; if (handle->nodeid == 0) goto out_err; inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid); if (!inode) { struct fuse_entry_out outarg; struct qstr name; if (!fc->export_support) goto out_err; name.len = 1; name.name = "."; err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg, &inode); if (err && err != -ENOENT) goto out_err; if (err || !inode) { err = -ESTALE; goto out_err; } err = -EIO; if (get_node_id(inode) != handle->nodeid) goto out_iput; } err = -ESTALE; if (inode->i_generation != handle->generation) goto out_iput; entry = d_obtain_alias(inode); if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(entry); return entry; out_iput: iput(inode); out_err: return ERR_PTR(err); } static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int connectable) { struct inode *inode = dentry->d_inode; bool encode_parent = connectable && !S_ISDIR(inode->i_mode); int len = encode_parent ? 6 : 3; u64 nodeid; u32 generation; if (*max_len < len) { *max_len = len; return 255; } nodeid = get_fuse_inode(inode)->nodeid; generation = inode->i_generation; fh[0] = (u32)(nodeid >> 32); fh[1] = (u32)(nodeid & 0xffffffff); fh[2] = generation; if (encode_parent) { struct inode *parent; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; nodeid = get_fuse_inode(parent)->nodeid; generation = parent->i_generation; spin_unlock(&dentry->d_lock); fh[3] = (u32)(nodeid >> 32); fh[4] = (u32)(nodeid & 0xffffffff); fh[5] = generation; } *max_len = len; return encode_parent ? 
0x82 : 0x81; } static struct dentry *fuse_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle handle; if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3) return NULL; handle.nodeid = (u64) fid->raw[0] << 32; handle.nodeid |= (u64) fid->raw[1]; handle.generation = fid->raw[2]; return fuse_get_dentry(sb, &handle); } static struct dentry *fuse_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle parent; if (fh_type != 0x82 || fh_len < 6) return NULL; parent.nodeid = (u64) fid->raw[3] << 32; parent.nodeid |= (u64) fid->raw[4]; parent.generation = fid->raw[5]; return fuse_get_dentry(sb, &parent); } static struct dentry *fuse_get_parent(struct dentry *child) { struct inode *child_inode = child->d_inode; struct fuse_conn *fc = get_fuse_conn(child_inode); struct inode *inode; struct dentry *parent; struct fuse_entry_out outarg; struct qstr name; int err; if (!fc->export_support) return ERR_PTR(-ESTALE); name.len = 2; name.name = ".."; err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode), &name, &outarg, &inode); if (err) { if (err == -ENOENT) return ERR_PTR(-ESTALE); return ERR_PTR(err); } parent = d_obtain_alias(inode); if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(parent); return parent; } static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, .encode_fh = fuse_encode_fh, .get_parent = fuse_get_parent, }; static const struct super_operations fuse_super_operations = { .alloc_inode = fuse_alloc_inode, .destroy_inode = fuse_destroy_inode, .evict_inode = fuse_evict_inode, .drop_inode = generic_delete_inode, .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, .show_options = fuse_show_options, }; static void sanitize_global_limit(unsigned *limit) { if (*limit == 0) 
*limit = ((num_physpages << PAGE_SHIFT) >> 13) / sizeof(struct fuse_req); if (*limit >= 1 << 16) *limit = (1 << 16) - 1; } static int set_global_limit(const char *val, struct kernel_param *kp) { int rv; rv = param_set_uint(val, kp); if (rv) return rv; sanitize_global_limit((unsigned *)kp->arg); return 0; } static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) { int cap_sys_admin = capable(CAP_SYS_ADMIN); if (arg->minor < 13) return; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); if (arg->max_background) { fc->max_background = arg->max_background; if (!cap_sys_admin && fc->max_background > max_user_bgreq) fc->max_background = max_user_bgreq; } if (arg->congestion_threshold) { fc->congestion_threshold = arg->congestion_threshold; if (!cap_sys_admin && fc->congestion_threshold > max_user_congthresh) fc->congestion_threshold = max_user_congthresh; } } static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_out *arg = &req->misc.init_out; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION) fc->conn_error = 1; else { unsigned long ra_pages; process_init_limits(fc, arg); if (arg->minor >= 6) { ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; if (arg->flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_lock = 1; if (arg->minor >= 17) { if (!(arg->flags & FUSE_FLOCK_LOCKS)) fc->no_flock = 1; } else { if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_flock = 1; } if (arg->flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; if (arg->minor >= 9) { /* LOOKUP has dependency on proto version */ if (arg->flags & FUSE_EXPORT_SUPPORT) fc->export_support = 1; } if (arg->flags & FUSE_BIG_WRITES) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; fc->no_flock = 1; } fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = 
arg->minor < 5 ? 4096 : arg->max_write; fc->max_write = max_t(unsigned, 4096, fc->max_write); fc->conn_init = 1; } fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_in *arg = &req->misc.init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_FLOCK_LOCKS; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); req->in.args[0].value = arg; req->out.numargs = 1; /* Variable length argument used for backward compatibility with interface version < 7.5. Rest of init_out is zeroed by do_get_request(), so a short reply is not a problem */ req->out.argvar = 1; req->out.args[0].size = sizeof(struct fuse_init_out); req->out.args[0].value = &req->misc.init_out; req->end = process_init_reply; fuse_request_send_background(fc, req); } static void fuse_free_conn(struct fuse_conn *fc) { kfree(fc); } static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) { int err; fc->bdi.name = "fuse"; fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; /* fuse does it's own writeback accounting */ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; err = bdi_init(&fc->bdi); if (err) return err; fc->bdi_initialized = 1; if (sb->s_bdev) { err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", MAJOR(fc->dev), MINOR(fc->dev)); } else { err = bdi_register_dev(&fc->bdi, fc->dev); } if (err) return err; /* * For a single fuse filesystem use max 1% of dirty + * writeback threshold. * * This gives about 1M of write buffer for memory maps on a * machine with 1G and 10% dirty_ratio, which should be more * than enough. 
* * Privileged users can raise it by writing to * * /sys/class/bdi/<bdi>/max_ratio */ bdi_set_max_ratio(&fc->bdi, 1); return 0; } static int fuse_fill_super(struct super_block *sb, void *data, int silent) { struct fuse_conn *fc; struct inode *root; struct fuse_mount_data d; struct file *file; struct dentry *root_dentry; struct fuse_req *init_req; int err; int is_bdev = sb->s_bdev != NULL; err = -EINVAL; if (sb->s_flags & MS_MANDLOCK) goto err; sb->s_flags &= ~MS_NOSEC; if (!parse_fuse_opt((char *) data, &d, is_bdev)) goto err; if (is_bdev) { #ifdef CONFIG_BLOCK err = -EINVAL; if (!sb_set_blocksize(sb, d.blksize)) goto err; #endif } else { sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_export_op = &fuse_export_operations; file = fget(d.fd); err = -EINVAL; if (!file) goto err; if (file->f_op != &fuse_dev_operations) goto err_fput; fc = kmalloc(sizeof(*fc), GFP_KERNEL); err = -ENOMEM; if (!fc) goto err_fput; fuse_conn_init(fc); fc->dev = sb->s_dev; fc->sb = sb; err = fuse_bdi_init(fc, sb); if (err) goto err_put_conn; sb->s_bdi = &fc->bdi; /* Handle umasking inside the fuse code */ if (sb->s_flags & MS_POSIXACL) fc->dont_mask = 1; sb->s_flags |= MS_POSIXACL; fc->release = fuse_free_conn; fc->flags = d.flags; fc->user_id = d.user_id; fc->group_id = d.group_id; fc->max_read = max_t(unsigned, 4096, d.max_read); /* Used by get_root_inode() */ sb->s_fs_info = fc; err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); if (!root) goto err_put_conn; root_dentry = d_alloc_root(root); if (!root_dentry) { iput(root); goto err_put_conn; } /* only now - we want root dentry with NULL ->d_op */ sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(); if (!init_req) goto err_put_root; if (is_bdev) { fc->destroy_req = fuse_request_alloc(); if (!fc->destroy_req) goto err_free_init_req; } mutex_lock(&fuse_mutex); err = -EINVAL; if 
(file->private_data) goto err_unlock; err = fuse_ctl_add_conn(fc); if (err) goto err_unlock; list_add_tail(&fc->entry, &fuse_conn_list); sb->s_root = root_dentry; fc->connected = 1; file->private_data = fuse_conn_get(fc); mutex_unlock(&fuse_mutex); /* * atomic_dec_and_test() in fput() provides the necessary * memory barrier for file->private_data to be visible on all * CPUs after this */ fput(file); fuse_send_init(fc, init_req); return 0; err_unlock: mutex_unlock(&fuse_mutex); err_free_init_req: fuse_request_free(init_req); err_put_root: dput(root_dentry); err_put_conn: fuse_bdi_destroy(fc); fuse_conn_put(fc); err_fput: fput(file); err: return err; } static struct dentry *fuse_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); } static void fuse_kill_sb_anon(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_anon_super(sb); } static struct file_system_type fuse_fs_type = { .owner = THIS_MODULE, .name = "fuse", .fs_flags = FS_HAS_SUBTYPE, .mount = fuse_mount, .kill_sb = fuse_kill_sb_anon, }; #ifdef CONFIG_BLOCK static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super); } static void fuse_kill_sb_blk(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_block_super(sb); } static struct file_system_type fuseblk_fs_type = { .owner = THIS_MODULE, .name = "fuseblk", .mount = fuse_mount_blk, .kill_sb = fuse_kill_sb_blk, .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, }; static inline int register_fuseblk(void) { return register_filesystem(&fuseblk_fs_type); } static inline void unregister_fuseblk(void) { 
unregister_filesystem(&fuseblk_fs_type); } #else static inline int register_fuseblk(void) { return 0; } static inline void unregister_fuseblk(void) { } #endif static void fuse_inode_init_once(void *foo) { struct inode *inode = foo; inode_init_once(inode); } static int __init fuse_fs_init(void) { int err; fuse_inode_cachep = kmem_cache_create("fuse_inode", sizeof(struct fuse_inode), 0, SLAB_HWCACHE_ALIGN, fuse_inode_init_once); err = -ENOMEM; if (!fuse_inode_cachep) goto out; err = register_fuseblk(); if (err) goto out2; err = register_filesystem(&fuse_fs_type); if (err) goto out3; return 0; out3: unregister_fuseblk(); out2: kmem_cache_destroy(fuse_inode_cachep); out: return err; } static void fuse_fs_cleanup(void) { unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); kmem_cache_destroy(fuse_inode_cachep); } static struct kobject *fuse_kobj; static struct kobject *connections_kobj; static int fuse_sysfs_init(void) { int err; fuse_kobj = kobject_create_and_add("fuse", fs_kobj); if (!fuse_kobj) { err = -ENOMEM; goto out_err; } connections_kobj = kobject_create_and_add("connections", fuse_kobj); if (!connections_kobj) { err = -ENOMEM; goto out_fuse_unregister; } return 0; out_fuse_unregister: kobject_put(fuse_kobj); out_err: return err; } static void fuse_sysfs_cleanup(void) { kobject_put(connections_kobj); kobject_put(fuse_kobj); } static int __init fuse_init(void) { int res; printk(KERN_INFO "fuse init (API version %i.%i)\n", FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); INIT_LIST_HEAD(&fuse_conn_list); res = fuse_fs_init(); if (res) goto err; res = fuse_dev_init(); if (res) goto err_fs_cleanup; res = fuse_sysfs_init(); if (res) goto err_dev_cleanup; res = fuse_ctl_init(); if (res) goto err_sysfs_cleanup; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); return 0; err_sysfs_cleanup: fuse_sysfs_cleanup(); err_dev_cleanup: fuse_dev_cleanup(); err_fs_cleanup: fuse_fs_cleanup(); err: return res; } static void __exit 
fuse_exit(void) { printk(KERN_DEBUG "fuse exit\n"); fuse_ctl_cleanup(); fuse_sysfs_cleanup(); fuse_fs_cleanup(); fuse_dev_cleanup(); } module_init(fuse_init); module_exit(fuse_exit);
gpl-2.0
vyacht/carambola2
target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.c
593
111766
/******************************************************************************* Copyright (C) Marvell International Ltd. and its affiliates This software file (the "File") is owned and distributed by Marvell International Ltd. and/or its affiliates ("Marvell") under the following alternative licensing terms. Once you have made an election to distribute the File under one of the following license alternatives, please (i) delete this introductory statement regarding license alternatives, (ii) delete the two license alternatives that you have not elected to use and (iii) preserve the Marvell copyright notice above. ******************************************************************************** Marvell Commercial License Option If you received this File from Marvell and you have entered into a commercial license agreement (a "Commercial License") with Marvell, the File is licensed to you under the terms of the applicable Commercial License. ******************************************************************************** Marvell GPL License Option If you received this File from Marvell, you may opt to use, redistribute and/or modify this File in accordance with the terms and conditions of the General Public License Version 2, June 1991 (the "GPL License"), a copy of which is available along with the File in the license.txt file or by writing to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt. THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY DISCLAIMED. The GPL License provides additional details about this warranty disclaimer. 
******************************************************************************** Marvell BSD License Option If you received this File from Marvell, you may opt to use, redistribute and/or modify this File under the following licensing terms. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Marvell nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *******************************************************************************/ #include "cesa/mvCesa.h" #include "ctrlEnv/mvCtrlEnvLib.h" #undef CESA_DEBUG /********** Global variables **********/ /* If request size is more than MV_CESA_MAX_BUF_SIZE the * request is processed as fragmented request. 
*/ MV_CESA_STATS cesaStats; MV_BUF_INFO cesaSramSaBuf; short cesaLastSid = -1; MV_CESA_SA* pCesaSAD = NULL; MV_U16 cesaMaxSA = 0; MV_CESA_REQ* pCesaReqFirst = NULL; MV_CESA_REQ* pCesaReqLast = NULL; MV_CESA_REQ* pCesaReqEmpty = NULL; MV_CESA_REQ* pCesaReqProcess = NULL; int cesaQueueDepth = 0; int cesaReqResources = 0; MV_CESA_SRAM_MAP* cesaSramVirtPtr = NULL; MV_U32 cesaCryptEngBase = 0; void *cesaOsHandle = NULL; #if (MV_CESA_VERSION >= 3) MV_U32 cesaChainLength = 0; int chainReqNum = 0; MV_U32 chainIndex = 0; MV_CESA_REQ* pNextActiveChain = 0; MV_CESA_REQ* pEndCurrChain = 0; MV_BOOL isFirstReq = MV_TRUE; #endif static INLINE MV_U8* mvCesaSramAddrGet(void) { #ifdef MV_CESA_NO_SRAM return (MV_U8*)cesaSramVirtPtr; #else return (MV_U8*)cesaCryptEngBase; #endif /* MV_CESA_NO_SRAM */ } static INLINE MV_ULONG mvCesaSramVirtToPhys(void* pDev, MV_U8* pSramVirt) { #ifdef MV_CESA_NO_SRAM return (MV_ULONG)mvOsIoVirtToPhy(NULL, pSramVirt); #else return (MV_ULONG)pSramVirt; #endif /* MV_CESA_NO_SRAM */ } /* Internal Function prototypes */ static INLINE void mvCesaSramDescrBuild(MV_U32 config, int frag, int cryptoOffset, int ivOffset, int cryptoLength, int macOffset, int digestOffset, int macLength, int macTotalLen, MV_CESA_REQ *pCesaReq, MV_DMA_DESC* pDmaDesc); static INLINE void mvCesaSramSaUpdate(short sid, MV_DMA_DESC *pDmaDesc); static INLINE int mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf, MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf, int offset, int copySize, MV_BOOL skipFlush); static void mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength, unsigned char innerIV[], unsigned char outerIV[]); static MV_STATUS mvCesaFragAuthComplete(MV_CESA_REQ* pReq, MV_CESA_SA* pSA, int macDataSize); static MV_CESA_COMMAND* mvCesaCtrModeInit(void); static MV_STATUS mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd); static MV_STATUS mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd); static void 
mvCesaCtrModeFinish(MV_CESA_COMMAND *pCmd); static INLINE MV_STATUS mvCesaReqProcess(MV_CESA_REQ* pReq); static MV_STATUS mvCesaFragReqProcess(MV_CESA_REQ* pReq, MV_U8 frag); static INLINE MV_STATUS mvCesaParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset); static INLINE MV_STATUS mvCesaFragParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd); static INLINE void mvCesaFragSizeFind(MV_CESA_SA* pSA, MV_CESA_REQ* pReq, int cryptoOffset, int macOffset, int* pCopySize, int* pCryptoDataSize, int* pMacDataSize); static MV_STATUS mvCesaMbufCacheUnmap(MV_CESA_MBUF* pMbuf, int offset, int size); /* Go to the next request in the request queue */ static INLINE MV_CESA_REQ* MV_CESA_REQ_NEXT_PTR(MV_CESA_REQ* pReq) { if(pReq == pCesaReqLast) return pCesaReqFirst; return pReq+1; } #if (MV_CESA_VERSION >= 3) /* Go to the previous request in the request queue */ static INLINE MV_CESA_REQ* MV_CESA_REQ_PREV_PTR(MV_CESA_REQ* pReq) { if(pReq == pCesaReqFirst) return pCesaReqLast; return pReq-1; } #endif static INLINE void mvCesaReqProcessStart(MV_CESA_REQ* pReq) { int frag; #if (MV_CESA_VERSION >= 3) pReq->state = MV_CESA_CHAIN; #else pReq->state = MV_CESA_PROCESS; #endif cesaStats.startCount++; if(pReq->fragMode == MV_CESA_FRAG_NONE) { frag = 0; } else { frag = pReq->frags.nextFrag; pReq->frags.nextFrag++; } #if (MV_CESA_VERSION >= 2) /* Enable TDMA engine */ MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG, 0); MV_REG_WRITE(MV_CESA_TDMA_NEXT_DESC_PTR_REG, (MV_U32)mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst)); #else /* Enable IDMA engine */ MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(0), 0); MV_REG_WRITE(IDMA_NEXT_DESC_PTR_REG(0), (MV_U32)mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst)); #endif /* MV_CESA_VERSION >= 2 */ #if defined(MV_BRIDGE_SYNC_REORDER) mvOsBridgeReorderWA(); #endif /* Start Accelerator */ MV_REG_WRITE(MV_CESA_CMD_REG, MV_CESA_CMD_CHAN_ENABLE_MASK); } /******************************************************************************* 
* mvCesaHalInit - Initialize the CESA driver
*
* DESCRIPTION:
*       This function initialize the CESA driver.
*       1) Session database
*       2) Request queue
*       4) DMA descriptor lists - one list per request. Each list
*          has MV_CESA_MAX_DMA_DESC descriptors.
*
* INPUT:
*       numOfSession - maximum number of supported sessions
*       queueDepth   - number of elements in the request queue.
*       pSramBase    - virtual address of Sram
*       cryptEngBase - base address of the crypto engine registers
*                      (stored in cesaCryptEngBase)
*       osHandle     - A handle used by the OS to allocate memory for the
*                      module (Passed to the OS Services layer)
*
* RETURN:
*       MV_OK           - Success
*       MV_NO_RESOURCE  - Fail, can't allocate resources:
*                         Session database, request queue,
*                         DMA descriptors list, LRU cache database.
*       MV_NOT_ALIGNED  - Sram base address is not 8 byte aligned.
*
*******************************************************************************/
MV_STATUS mvCesaHalInit (int numOfSession, int queueDepth, char* pSramBase, MV_U32 cryptEngBase, void *osHandle)
{
    int i, req;
    MV_U32 descOffsetReg, configReg;
    MV_CESA_SRAM_SA *pSramSA;

    mvOsPrintf("mvCesaInit: sessions=%d, queue=%d, pSram=%p\n", numOfSession, queueDepth, pSramBase);

    cesaOsHandle = osHandle;

    /* Create Session database */
    pCesaSAD = mvOsMalloc(sizeof(MV_CESA_SA)*numOfSession);
    if(pCesaSAD == NULL)
    {
        mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d SAs\n",
                   sizeof(MV_CESA_SA)*numOfSession, numOfSession);
        mvCesaFinish();
        return MV_NO_RESOURCE;
    }
    memset(pCesaSAD, 0, sizeof(MV_CESA_SA)*numOfSession);
    cesaMaxSA = numOfSession;

    /* Allocate image of sramSA in the DRAM (over-allocated by one cache line
     * so the used area can be cache-line aligned below) */
    cesaSramSaBuf.bufSize = sizeof(MV_CESA_SRAM_SA)*numOfSession + CPU_D_CACHE_LINE_SIZE;

    cesaSramSaBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle, cesaSramSaBuf.bufSize,
                                                  &cesaSramSaBuf.bufPhysAddr,
                                                  &cesaSramSaBuf.memHandle);
    if(cesaSramSaBuf.bufVirtPtr == NULL)
    {
        mvOsPrintf("mvCesaInit: Can't allocate %d bytes for sramSA structures\n",
                   cesaSramSaBuf.bufSize);
        mvCesaFinish();
        return MV_NO_RESOURCE;
    }
    memset(cesaSramSaBuf.bufVirtPtr, 0, cesaSramSaBuf.bufSize);

    /* Cache-line align the DRAM shadow of the SRAM SA area and point each
     * session entry at its slot */
    pSramSA = (MV_CESA_SRAM_SA*)MV_ALIGN_UP((MV_ULONG)cesaSramSaBuf.bufVirtPtr, CPU_D_CACHE_LINE_SIZE);
    for(i=0; i<numOfSession; i++)
    {
        pCesaSAD[i].pSramSA = &pSramSA[i];
    }

    /* Create request queue */
    pCesaReqFirst = mvOsMalloc(sizeof(MV_CESA_REQ)*queueDepth);
    if(pCesaReqFirst == NULL)
    {
        mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d requests\n",
                   sizeof(MV_CESA_REQ)*queueDepth, queueDepth);
        mvCesaFinish();
        return MV_NO_RESOURCE;
    }
    memset(pCesaReqFirst, 0, sizeof(MV_CESA_REQ)*queueDepth);
    pCesaReqEmpty = pCesaReqFirst;
    pCesaReqLast = pCesaReqFirst + (queueDepth-1);
    pCesaReqProcess = pCesaReqEmpty;
    cesaQueueDepth = queueDepth;
    cesaReqResources = queueDepth;
#if (MV_CESA_VERSION >= 3)
    cesaChainLength = MAX_CESA_CHAIN_LENGTH;
#endif

    /* pSramBase must be 8 byte aligned */
    if( MV_IS_NOT_ALIGN((MV_ULONG)pSramBase, 8) )
    {
        mvOsPrintf("mvCesaInit: pSramBase (%p) must be 8 byte aligned\n", pSramBase);
        mvCesaFinish();
        return MV_NOT_ALIGNED;
    }
    cesaSramVirtPtr = (MV_CESA_SRAM_MAP*)pSramBase;
    cesaCryptEngBase = cryptEngBase;

    /*memset(cesaSramVirtPtr, 0, sizeof(MV_CESA_SRAM_MAP));*/

    /* Clear registers */
    MV_REG_WRITE( MV_CESA_CFG_REG, 0);
    MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
    MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);

    /* Initialize DMA descriptor lists for all requests in Request queue */
    descOffsetReg = configReg = 0;
    for(req=0; req<queueDepth; req++)
    {
        int frag;
        MV_CESA_REQ* pReq;
        MV_DMA_DESC* pDmaDesc;

        pReq = &pCesaReqFirst[req];

        /* Per-request CESA descriptor buffer (one descriptor per fragment) */
        pReq->cesaDescBuf.bufSize = sizeof(MV_CESA_DESC)*MV_CESA_MAX_REQ_FRAGS + CPU_D_CACHE_LINE_SIZE;
        pReq->cesaDescBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle, pReq->cesaDescBuf.bufSize,
                                                          &pReq->cesaDescBuf.bufPhysAddr,
                                                          &pReq->cesaDescBuf.memHandle);
        if(pReq->cesaDescBuf.bufVirtPtr == NULL)
        {
            mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for CESA descriptors\n",
                       req, pReq->cesaDescBuf.bufSize);
            mvCesaFinish();
            return MV_NO_RESOURCE;
        }
        memset(pReq->cesaDescBuf.bufVirtPtr, 0, pReq->cesaDescBuf.bufSize);
        pReq->pCesaDesc = (MV_CESA_DESC*)MV_ALIGN_UP((MV_ULONG)pReq->cesaDescBuf.bufVirtPtr,
                                                     CPU_D_CACHE_LINE_SIZE);

        /* Per-request DMA descriptor lists (MV_CESA_MAX_DMA_DESC per fragment) */
        pReq->dmaDescBuf.bufSize = sizeof(MV_DMA_DESC)*MV_CESA_MAX_DMA_DESC*MV_CESA_MAX_REQ_FRAGS + CPU_D_CACHE_LINE_SIZE;
        pReq->dmaDescBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle, pReq->dmaDescBuf.bufSize,
                                                         &pReq->dmaDescBuf.bufPhysAddr,
                                                         &pReq->dmaDescBuf.memHandle);
        if(pReq->dmaDescBuf.bufVirtPtr == NULL)
        {
            mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for DMA descriptor list\n",
                       req, pReq->dmaDescBuf.bufSize);
            mvCesaFinish();
            return MV_NO_RESOURCE;
        }
        memset(pReq->dmaDescBuf.bufVirtPtr, 0, pReq->dmaDescBuf.bufSize);
        pDmaDesc = (MV_DMA_DESC*)MV_ALIGN_UP((MV_ULONG)pReq->dmaDescBuf.bufVirtPtr,
                                             CPU_D_CACHE_LINE_SIZE);

        for(frag=0; frag<MV_CESA_MAX_REQ_FRAGS; frag++)
        {
            MV_CESA_DMA* pDma = &pReq->dma[frag];

            pDma->pDmaFirst = pDmaDesc;
            pDma->pDmaLast = NULL;

            for(i=0; i<MV_CESA_MAX_DMA_DESC-1; i++)
            {
                /* link all DMA descriptors together */
                pDma->pDmaFirst[i].phyNextDescPtr =
                    MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pDmaDesc[i+1]));
            }
            pDma->pDmaFirst[i].phyNextDescPtr = 0;
            /* Flush the list so the DMA engine observes the links */
            mvOsCacheFlush(NULL, &pDma->pDmaFirst[0], MV_CESA_MAX_DMA_DESC*sizeof(MV_DMA_DESC));

            pDmaDesc += MV_CESA_MAX_DMA_DESC;
        }
    }
    /*mvCesaCryptoIvSet(NULL, MV_CESA_MAX_IV_LENGTH);*/

    /* Tell the HW where the CESA descriptor lives inside the SRAM window */
    descOffsetReg = (MV_U16)((MV_U8*)&cesaSramVirtPtr->desc - mvCesaSramAddrGet());
    MV_REG_WRITE(MV_CESA_CHAN_DESC_OFFSET_REG, descOffsetReg);

    configReg |= (MV_CESA_CFG_WAIT_DMA_MASK | MV_CESA_CFG_ACT_DMA_MASK);
#if (MV_CESA_VERSION >= 3)
    configReg |= MV_CESA_CFG_CHAIN_MODE_MASK;
#endif

#if (MV_CESA_VERSION >= 2)
    /* Initialize TDMA engine */
    MV_REG_WRITE(MV_CESA_TDMA_CTRL_REG, MV_CESA_TDMA_CTRL_VALUE);
    MV_REG_WRITE(MV_CESA_TDMA_BYTE_COUNT_REG, 0);
    MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG, 0);
#else
    /* Initialize IDMA #0 engine */
    MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), 0);
    MV_REG_WRITE(IDMA_BYTE_COUNT_REG(0), 0);
    MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(0), 0);
    MV_REG_WRITE(IDMA_CTRL_HIGH_REG(0), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
                                        | ICCHR_DESC_BYTE_SWAP_EN
#endif
                );
    /* Clear Cause Byte of IDMA channel to be used */
    MV_REG_WRITE( IDMA_CAUSE_REG, ~ICICR_CAUSE_MASK_ALL(0));
    MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), MV_CESA_IDMA_CTRL_LOW_VALUE);
#endif /* (MV_CESA_VERSION >= 2) */

    /* Set CESA configuration registers */
    MV_REG_WRITE( MV_CESA_CFG_REG, configReg);
    mvCesaDebugStatsClear();

    return MV_OK;
}

/*******************************************************************************
* mvCesaFinish - Shutdown the CESA driver
*
* DESCRIPTION:
*       This function shutdown the CESA driver and free all allocated resources.
*
* INPUT:    None
*
* RETURN:
*       MV_OK   - Success
*       Other   - Fail
*
*******************************************************************************/
MV_STATUS mvCesaFinish (void)
{
    int req;
    MV_CESA_REQ* pReq;

    mvOsPrintf("mvCesaFinish: \n");

    cesaSramVirtPtr = NULL;

    /* Free all resources: DMA list, etc.
     * NOTE: safe to call on a partially-initialized driver - every free is
     * guarded by a NULL check, which is why mvCesaHalInit() calls this on
     * every error path. */
    for(req=0; req<cesaQueueDepth; req++)
    {
        pReq = &pCesaReqFirst[req];
        if(pReq->dmaDescBuf.bufVirtPtr != NULL)
        {
            mvOsIoCachedFree(cesaOsHandle, pReq->dmaDescBuf.bufSize,
                             pReq->dmaDescBuf.bufPhysAddr,
                             pReq->dmaDescBuf.bufVirtPtr,
                             pReq->dmaDescBuf.memHandle);
        }
        if(pReq->cesaDescBuf.bufVirtPtr != NULL)
        {
            mvOsIoCachedFree(cesaOsHandle, pReq->cesaDescBuf.bufSize,
                             pReq->cesaDescBuf.bufPhysAddr,
                             pReq->cesaDescBuf.bufVirtPtr,
                             pReq->cesaDescBuf.memHandle);
        }
    }
#if (MV_CESA_VERSION < 2)
    MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), 0);
#endif /* (MV_CESA_VERSION < 2) */

    /* Free request queue */
    if(pCesaReqFirst != NULL)
    {
        mvOsFree(pCesaReqFirst);
        pCesaReqFirst = pCesaReqLast = NULL;
        pCesaReqEmpty = pCesaReqProcess = NULL;
        cesaQueueDepth = cesaReqResources = 0;
    }
    /* Free SA database */
    if(pCesaSAD != NULL)
    {
        mvOsFree(pCesaSAD);
        pCesaSAD = NULL;
        cesaMaxSA = 0;
    }
    MV_REG_WRITE( MV_CESA_CFG_REG, 0);
    MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
    MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);

    return MV_OK;
}

/*******************************************************************************
* mvCesaCryptoIvSet - Set IV value for Crypto algorithm
working in CBC mode * * DESCRIPTION: * This function set IV value using by Crypto algorithms in CBC mode. * Each channel has its own IV value. * This function gets IV value from the caller. If no IV value passed from * the caller or only part of IV passed, the function will init the rest part * of IV value (or the whole IV) by random value. * * INPUT: * MV_U8* pIV - Pointer to IV value supplied by user. If pIV==NULL * the function will generate random IV value. * int ivSize - size (in bytes) of IV provided by user. If ivSize is * smaller than maximum IV size, the function will complete * IV by random value. * * RETURN: * MV_OK - Success * Other - Fail * *******************************************************************************/ MV_STATUS mvCesaCryptoIvSet(MV_U8* pIV, int ivSize) { MV_U8* pSramIV; #if defined(MV646xx) mvOsPrintf("mvCesaCryptoIvSet: ERR. shouldn't use this call on MV64660\n"); #endif pSramIV = cesaSramVirtPtr->cryptoIV; if(ivSize > MV_CESA_MAX_IV_LENGTH) { mvOsPrintf("mvCesaCryptoIvSet: ivSize (%d) is too large\n", ivSize); ivSize = MV_CESA_MAX_IV_LENGTH; } if(pIV != NULL) { memcpy(pSramIV, pIV, ivSize); ivSize = MV_CESA_MAX_IV_LENGTH - ivSize; pSramIV += ivSize; } while(ivSize > 0) { int size, mv_random = mvOsRand(); size = MV_MIN(ivSize, sizeof(mv_random)); memcpy(pSramIV, (void*)&mv_random, size); pSramIV += size; ivSize -= size; } /* mvOsCacheFlush(NULL, cesaSramVirtPtr->cryptoIV, MV_CESA_MAX_IV_LENGTH); mvOsCacheInvalidate(NULL, cesaSramVirtPtr->cryptoIV, MV_CESA_MAX_IV_LENGTH); */ return MV_OK; } /******************************************************************************* * mvCesaSessionOpen - Open new uni-directional crypto session * * DESCRIPTION: * This function open new session. * * INPUT: * MV_CESA_OPEN_SESSION *pSession - pointer to new session input parameters * * OUTPUT: * short *pSid - session ID, should be used for all future * requests over this session. * * RETURN: * MV_OK - Session opend successfully. 
*       MV_FULL      - All sessions are in use, no free place in
*                      SA database.
*       MV_BAD_PARAM - One of session input parameters is invalid.
*
*******************************************************************************/
MV_STATUS mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short* pSid)
{
    short sid;
    MV_U32 config = 0;
    int digestSize;

    cesaStats.openedCount++;

    /* Find free entry in SAD */
    for(sid=0; sid<cesaMaxSA; sid++)
    {
        if(pCesaSAD[sid].valid == 0)
        {
            break;
        }
    }
    if(sid == cesaMaxSA)
    {
        mvOsPrintf("mvCesaSessionOpen: SA Database is FULL\n");
        return MV_FULL;
    }

    /* Check Input parameters for Open session */
    if (pSession->operation >= MV_CESA_MAX_OPERATION)
    {
        mvOsPrintf("mvCesaSessionOpen: Unexpected operation %d\n", pSession->operation);
        return MV_BAD_PARAM;
    }
    /* "config" accumulates the HW session config word bit by bit below */
    config |= (pSession->operation << MV_CESA_OPERATION_OFFSET);

    if( (pSession->direction != MV_CESA_DIR_ENCODE) &&
        (pSession->direction != MV_CESA_DIR_DECODE) )
    {
        mvOsPrintf("mvCesaSessionOpen: Unexpected direction %d\n", pSession->direction);
        return MV_BAD_PARAM;
    }
    config |= (pSession->direction << MV_CESA_DIRECTION_BIT);

    /* Clear SA entry */
    /* memset(&pCesaSAD[sid], 0, sizeof(pCesaSAD[sid])); */

    /* Check AUTH parameters and update SA entry */
    if(pSession->operation != MV_CESA_CRYPTO_ONLY)
    {
        /* For HMAC (MD5 and SHA1) - Maximum Key size is 64 bytes */
        if( (pSession->macMode == MV_CESA_MAC_HMAC_MD5) ||
            (pSession->macMode == MV_CESA_MAC_HMAC_SHA1) )
        {
            if(pSession->macKeyLength > MV_CESA_MAX_MAC_KEY_LENGTH)
            {
                mvOsPrintf("mvCesaSessionOpen: macKeyLength %d is too large\n",
                           pSession->macKeyLength);
                return MV_BAD_PARAM;
            }
            /* Precompute inner/outer HMAC IVs from the key into the SA image */
            mvCesaHmacIvGet(pSession->macMode, pSession->macKey, pSession->macKeyLength,
                            pCesaSAD[sid].pSramSA->macInnerIV,
                            pCesaSAD[sid].pSramSA->macOuterIV);
            pCesaSAD[sid].macKeyLength = pSession->macKeyLength;
        }
        switch(pSession->macMode)
        {
            case MV_CESA_MAC_MD5:
            case MV_CESA_MAC_HMAC_MD5:
                digestSize = MV_CESA_MD5_DIGEST_SIZE;
                break;

            case MV_CESA_MAC_SHA1:
            case MV_CESA_MAC_HMAC_SHA1:
                digestSize = MV_CESA_SHA1_DIGEST_SIZE;
                break;

            default:
                mvOsPrintf("mvCesaSessionOpen: Unexpected macMode %d\n", pSession->macMode);
                return MV_BAD_PARAM;
        }
        config |= (pSession->macMode << MV_CESA_MAC_MODE_OFFSET);

        /* Supported digest sizes: MD5 - 16 bytes (128 bits), */
        /* SHA1 - 20 bytes (160 bits) or 12 bytes (96 bits) for both */
        if( (pSession->digestSize != digestSize) && (pSession->digestSize != 12))
        {
            mvOsPrintf("mvCesaSessionOpen: Unexpected digest size %d\n", pSession->digestSize);
            mvOsPrintf("\t Valid values [bytes]: MD5-16, SHA1-20, Both-12\n");
            return MV_BAD_PARAM;
        }
        pCesaSAD[sid].digestSize = pSession->digestSize;

        if(pCesaSAD[sid].digestSize == 12)
        {
            /* Set MV_CESA_MAC_DIGEST_SIZE_BIT if digest size is 96 bits */
            config |= (MV_CESA_MAC_DIGEST_96B << MV_CESA_MAC_DIGEST_SIZE_BIT);
        }
    }

    /* Check CRYPTO parameters and update SA entry */
    if(pSession->operation != MV_CESA_MAC_ONLY)
    {
        switch(pSession->cryptoAlgorithm)
        {
            case MV_CESA_CRYPTO_DES:
                pCesaSAD[sid].cryptoKeyLength = MV_CESA_DES_KEY_LENGTH;
                pCesaSAD[sid].cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
                break;

            case MV_CESA_CRYPTO_3DES:
                pCesaSAD[sid].cryptoKeyLength = MV_CESA_3DES_KEY_LENGTH;
                pCesaSAD[sid].cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
                /* Only EDE mode is supported */
                config |= (MV_CESA_CRYPTO_3DES_EDE << MV_CESA_CRYPTO_3DES_MODE_BIT);
                break;

            case MV_CESA_CRYPTO_AES:
                switch(pSession->cryptoKeyLength)
                {
                    case 16:
                        pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_128_KEY_LENGTH;
                        config |= (MV_CESA_CRYPTO_AES_KEY_128 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
                        break;

                    case 24:
                        pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_192_KEY_LENGTH;
                        config |= (MV_CESA_CRYPTO_AES_KEY_192 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
                        break;

                    case 32:
                    default:
                        /* NOTE: any other length falls through to 256-bit here;
                         * the explicit length check below rejects the mismatch. */
                        pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_256_KEY_LENGTH;
                        config |= (MV_CESA_CRYPTO_AES_KEY_256 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
                        break;
                }
                pCesaSAD[sid].cryptoBlockSize = MV_CESA_AES_BLOCK_SIZE;
                break;

            default:
                mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoAlgorithm %d\n",
                           pSession->cryptoAlgorithm);
                return MV_BAD_PARAM;
        }
        config |= (pSession->cryptoAlgorithm << MV_CESA_CRYPTO_ALG_OFFSET);

        if(pSession->cryptoKeyLength != pCesaSAD[sid].cryptoKeyLength)
        {
            mvOsPrintf("cesaSessionOpen: Wrong CryptoKeySize %d != %d\n",
                       pSession->cryptoKeyLength, pCesaSAD[sid].cryptoKeyLength);
            return MV_BAD_PARAM;
        }

        /* Copy Crypto key */
        if( (pSession->cryptoAlgorithm == MV_CESA_CRYPTO_AES) &&
            (pSession->direction == MV_CESA_DIR_DECODE))
        {
            /* Crypto Key for AES decode is computed from original key material */
            /* and depend on cryptoKeyLength (128/192/256 bits) */
            aesMakeKey(pCesaSAD[sid].pSramSA->cryptoKey, pSession->cryptoKey,
                       pSession->cryptoKeyLength*8, MV_CESA_AES_BLOCK_SIZE*8);
        }
        else
        {
            /*panic("mvCesaSessionOpen2");*/
            memcpy(pCesaSAD[sid].pSramSA->cryptoKey, pSession->cryptoKey,
                   pCesaSAD[sid].cryptoKeyLength);
        }

        switch(pSession->cryptoMode)
        {
            case MV_CESA_CRYPTO_ECB:
                pCesaSAD[sid].cryptoIvSize = 0;
                break;

            case MV_CESA_CRYPTO_CBC:
                pCesaSAD[sid].cryptoIvSize = pCesaSAD[sid].cryptoBlockSize;
                break;

            case MV_CESA_CRYPTO_CTR:
                /* Supported only for AES algorithm */
                if(pSession->cryptoAlgorithm != MV_CESA_CRYPTO_AES)
                {
                    mvOsPrintf("mvCesaSessionOpen: CRYPTO CTR mode supported for AES only\n");
                    return MV_BAD_PARAM;
                }
                pCesaSAD[sid].cryptoIvSize = 0;
                pCesaSAD[sid].ctrMode = 1;
                /* Replace to ECB mode for HW - CTR is emulated by SW key stream
                 * preparation, see mvCesaAction()'s ctrMode path */
                pSession->cryptoMode = MV_CESA_CRYPTO_ECB;
                break;

            default:
                mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoMode %d\n", pSession->cryptoMode);
                return MV_BAD_PARAM;
        }
        config |= (pSession->cryptoMode << MV_CESA_CRYPTO_MODE_BIT);
    }

    pCesaSAD[sid].config = config;
    /* Push the DRAM shadow of the SA to memory so HW/DMA sees it */
    mvOsCacheFlush(NULL, pCesaSAD[sid].pSramSA, sizeof(MV_CESA_SRAM_SA));

    if(pSid != NULL)
        *pSid = sid;

    pCesaSAD[sid].valid = 1;

    return MV_OK;
}

/*******************************************************************************
* mvCesaSessionClose - Close active crypto session
*
* DESCRIPTION:
*       This function closes existing session
*
* INPUT:
*       short sid - Unique identifier of the session to be closed
*
*
RETURN:
*       MV_OK        - Session closed successfully.
*       MV_BAD_PARAM - Session identifier is out of valid range.
*       MV_NOT_FOUND - There is no active session with such ID.
*
*******************************************************************************/
MV_STATUS mvCesaSessionClose(short sid)
{
    cesaStats.closedCount++;

    /* Reject identifiers outside the SA database range.
     * BUGFIX: the original code checked only the upper bound, so a negative
     * sid would index pCesaSAD[] out of bounds. */
    if((sid < 0) || (sid >= cesaMaxSA))
    {
        mvOsPrintf("CESA Error: sid (%d) is too big\n", sid);
        return MV_BAD_PARAM;
    }
    if(pCesaSAD[sid].valid == 0)
    {
        mvOsPrintf("CESA Warning: Session (sid=%d) is invalid\n", sid);
        return MV_NOT_FOUND;
    }

    /* Forget the cached "last used" session if it is the one being closed */
    if(cesaLastSid == sid)
        cesaLastSid = -1;

    pCesaSAD[sid].valid = 0;

    return MV_OK;
}

/*******************************************************************************
* mvCesaAction - Perform crypto operation
*
* DESCRIPTION:
*       This function set new CESA request FIFO queue for further HW processing.
*       The function checks request parameters before set new request to the queue.
*       If one of the CESA channels is ready for processing the request will be
*       passed to HW. When request processing is finished the CESA interrupt will
*       be generated by HW. The caller should call mvCesaReadyGet() function to
*       complete request processing and get result.
*
* INPUT:
*       MV_CESA_COMMAND *pCmd - pointer to new CESA request.
*                       It includes pointers to Source and Destination
*                       buffers, session identifier get from
*                       mvCesaSessionOpen() function, pointer to caller
*                       private data and all needed crypto parameters.
*
* RETURN:
*       MV_OK             - request successfully added to request queue
*                           and will be processed.
*       MV_NO_MORE        - request successfully added to request queue and will
*                           be processed, but request queue became Full and next
*                           request will not be accepted.
*       MV_NO_RESOURCE    - request queue is FULL and the request can not
*                           be processed.
*       MV_OUT_OF_CPU_MEM - memory allocation needed for request processing is
*                           failed. Request can not be processed.
*       MV_NOT_ALLOWED    - This mixed request (CRYPTO+MAC) can not be processed
*                           as one request and should be splitted for two requests:
*                           CRYPTO_ONLY and MAC_ONLY.
*       MV_BAD_PARAM      - One of the request parameters is out of valid range.
*                           The request can not be processed.
*
*******************************************************************************/
MV_STATUS mvCesaAction (MV_CESA_COMMAND *pCmd)
{
    MV_STATUS status;
    MV_CESA_REQ* pReq = pCesaReqEmpty;
    int sid = pCmd->sessionId;
    MV_CESA_SA* pSA = &pCesaSAD[sid];
#if (MV_CESA_VERSION >= 3)
    MV_CESA_REQ* pFromReq;
    MV_CESA_REQ* pToReq;
#endif
    cesaStats.reqCount++;

    /* Check that the request queue is not FULL */
    if(cesaReqResources == 0)
        return MV_NO_RESOURCE;

    if( (sid >= cesaMaxSA) || (!pSA->valid) )
    {
        mvOsPrintf("CESA Action Error: Session sid=%d is INVALID\n", sid);
        return MV_BAD_PARAM;
    }
    pSA->count++;

    if(pSA->ctrMode)
    {
        /* AES in CTR mode can't be mixed with Authentication */
        if( (pSA->config & MV_CESA_OPERATION_MASK) !=
            (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
        {
            mvOsPrintf("mvCesaAction : CRYPTO CTR mode can't be mixed with AUTH\n");
            return MV_NOT_ALLOWED;
        }
        /* All other request parameters should not be checked because key stream */
        /* (not user data) processed by AES HW engine */
        pReq->pOrgCmd = pCmd;

        /* Allocate temporary pCmd structure for Key stream */
        pCmd = mvCesaCtrModeInit();
        if(pCmd == NULL)
            return MV_OUT_OF_CPU_MEM;

        /* Prepare Key stream */
        mvCesaCtrModePrepare(pCmd, pReq->pOrgCmd);
        pReq->fixOffset = 0;
    }
    else
    {
        /* Check request parameters and calculate fixOffset */
        status = mvCesaParamCheck(pSA, pCmd, &pReq->fixOffset);
        if(status != MV_OK)
        {
            return status;
        }
    }
    pReq->pCmd = pCmd;

    /* Check if the packet need fragmentation */
    if(pCmd->pSrc->mbufSize <= sizeof(cesaSramVirtPtr->buf) )
    {
        /* request size is smaller than single buffer size */
        pReq->fragMode = MV_CESA_FRAG_NONE;

        /* Prepare NOT fragmented packets */
        status = mvCesaReqProcess(pReq);
        if(status != MV_OK)
        {
            mvOsPrintf("CesaReady: ReqProcess error: pReq=%p, status=0x%x\n",
                       pReq, status);
        }
#if (MV_CESA_VERSION >= 3)
        pReq->frags.numFrag = 1;
#endif
    }
    else
    {
        MV_U8 frag = 0;

        /* request size is larger than buffer size - needs fragmentation */

        /* Check restrictions for processing fragmented packets */
        status = mvCesaFragParamCheck(pSA, pCmd);
        if(status != MV_OK)
            return status;

        pReq->fragMode = MV_CESA_FRAG_FIRST;
        pReq->frags.nextFrag = 0;

        /* Prepare Process Fragmented packets:
         * mvCesaFragReqProcess() advances fragMode to MV_CESA_FRAG_LAST when
         * the final fragment has been built. */
        while(pReq->fragMode != MV_CESA_FRAG_LAST)
        {
            if(frag >= MV_CESA_MAX_REQ_FRAGS)
            {
                mvOsPrintf("mvCesaAction Error: Too large request frag=%d\n", frag);
                return MV_OUT_OF_CPU_MEM;
            }
            status = mvCesaFragReqProcess(pReq, frag);
            if(status == MV_OK)
            {
#if (MV_CESA_VERSION >= 3)
                /* Link this fragment's DMA list after the previous one */
                if(frag)
                {
                    pReq->dma[frag-1].pDmaLast->phyNextDescPtr =
                        MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
                    mvOsCacheFlush(NULL, pReq->dma[frag-1].pDmaLast, sizeof(MV_DMA_DESC));
                }
#endif
                frag++;
            }
        }
        pReq->frags.numFrag = frag;
#if (MV_CESA_VERSION >= 3)
        if(chainReqNum)
        {
            chainReqNum += pReq->frags.numFrag;
            if(chainReqNum >= MAX_CESA_CHAIN_LENGTH)
                chainReqNum = MAX_CESA_CHAIN_LENGTH;
        }
#endif
    }

    pReq->state = MV_CESA_PENDING;

    pCesaReqEmpty = MV_CESA_REQ_NEXT_PTR(pReq);
    cesaReqResources -= 1;

/* #ifdef CESA_DEBUG */
    if( (cesaQueueDepth - cesaReqResources) > cesaStats.maxReqCount)
        cesaStats.maxReqCount = (cesaQueueDepth - cesaReqResources);
/* #endif CESA_DEBUG */

    cesaLastSid = sid;

#if (MV_CESA_VERSION >= 3)
    /* Are we within chain bounderies and follows the first request ? */
    if((chainReqNum > 0) && (chainReqNum < MAX_CESA_CHAIN_LENGTH))
    {
        if(chainIndex)
        {
            pFromReq = MV_CESA_REQ_PREV_PTR(pReq);
            pToReq = pReq;
            pReq->state = MV_CESA_CHAIN;

            /* assume concatenating is possible: link the previous request's
             * last DMA descriptor to this request's first one */
            pFromReq->dma[pFromReq->frags.numFrag-1].pDmaLast->phyNextDescPtr =
                MV_32BIT_LE(mvCesaVirtToPhys(&pToReq->dmaDescBuf, pToReq->dma[0].pDmaFirst));
            mvOsCacheFlush(NULL, pFromReq->dma[pFromReq->frags.numFrag-1].pDmaLast,
                           sizeof(MV_DMA_DESC));

            /* align active & next pointers */
            if(pNextActiveChain->state != MV_CESA_PENDING)
                pEndCurrChain = pNextActiveChain = MV_CESA_REQ_NEXT_PTR(pReq);
        }
        else
        {
            /* we have only one chain, start new one */
            chainReqNum = 0;
            chainIndex++;
            /* align active & next pointers */
            if(pNextActiveChain->state != MV_CESA_PENDING)
                pEndCurrChain = pNextActiveChain = pReq;
        }
    }
    else
    {
        /* In case we concatenate full chain */
        if(chainReqNum == MAX_CESA_CHAIN_LENGTH)
        {
            chainIndex++;
            if(pNextActiveChain->state != MV_CESA_PENDING)
                pEndCurrChain = pNextActiveChain = pReq;
            chainReqNum = 0;
        }

        pReq = pCesaReqProcess;
        if(pReq->state == MV_CESA_PENDING)
        {
            pNextActiveChain = pReq;
            pEndCurrChain = MV_CESA_REQ_NEXT_PTR(pReq);
            /* Start Process new request */
            mvCesaReqProcessStart(pReq);
        }
    }

    chainReqNum++;

    if((chainIndex < MAX_CESA_CHAIN_LENGTH) && (chainReqNum > cesaStats.maxChainUsage))
        cesaStats.maxChainUsage = chainReqNum;
#else

    /* Check status of CESA channels and process requests if possible */
    pReq = pCesaReqProcess;
    if(pReq->state == MV_CESA_PENDING)
    {
        /* Start Process new request */
        mvCesaReqProcessStart(pReq);
    }
#endif
    /* If request queue became FULL - return MV_NO_MORE */
    if(cesaReqResources == 0)
        return MV_NO_MORE;

    return MV_OK;
}

/*******************************************************************************
* mvCesaReadyGet - Get crypto request that processing is finished
*
* DESCRIPTION:
*       This function complete request processing and return ready request to
* caller.
To don't miss interrupts the caller must call this function
*       while MV_OK or MV_TERMINATE values returned.
*
* INPUT:
*       MV_U32          chanMap  - map of CESA channels finished thier job
*                                  accordingly with CESA Cause register.
*       MV_CESA_RESULT* pResult  - pointer to structure contains information
*                                  about ready request. It includes pointer to
*                                  user private structure "pReqPrv", session identifier
*                                  for this request "sessionId" and return code.
*                                  Return code set to MV_FAIL if calculated digest value
*                                  on decode direction is different than digest value
*                                  in the packet.
*
* RETURN:
*       MV_OK           - Success, ready request is returned.
*       MV_NOT_READY    - Next request is not ready yet. New interrupt will
*                         be generated for futher request processing.
*       MV_EMPTY        - There is no more request for processing.
*       MV_BUSY         - Fragmented request is not ready yet.
*       MV_TERMINATE    - Call this function once more to complete processing
*                         of fragmented request.
*
*******************************************************************************/
MV_STATUS mvCesaReadyGet(MV_CESA_RESULT* pResult)
{
    MV_STATUS status, readyStatus = MV_NOT_READY;
    MV_U32 statusReg;
    MV_CESA_REQ* pReq;
    MV_CESA_SA* pSA;

#if (MV_CESA_VERSION >= 3)
    if(isFirstReq == MV_TRUE)
    {
        if(chainIndex == 0)
            chainReqNum = 0;

        isFirstReq = MV_FALSE;

        if(pNextActiveChain->state == MV_CESA_PENDING)
        {
            /* Start request Process */
            mvCesaReqProcessStart(pNextActiveChain);
            pEndCurrChain = pNextActiveChain;
            if(chainIndex > 0)
                chainIndex--;
            /* Update pNextActiveChain to next chain head */
            while(pNextActiveChain->state == MV_CESA_CHAIN)
                pNextActiveChain = MV_CESA_REQ_NEXT_PTR(pNextActiveChain);
        }
    }

    /* Check if there are more processed requests - can we remove pEndCurrChain ???
     * NOTE: the "{" opened here (and in the #else branch) is closed AFTER the
     * #endif below - the brace nesting deliberately spans the preprocessor
     * branches; do not re-balance it. */
    if(pCesaReqProcess == pEndCurrChain)
    {
        isFirstReq = MV_TRUE;
        pEndCurrChain = pNextActiveChain;
#else
    if(pCesaReqProcess->state != MV_CESA_PROCESS)
    {
#endif
        return MV_EMPTY;
    }

#ifdef CESA_DEBUG
    statusReg = MV_REG_READ(MV_CESA_STATUS_REG);
    if( statusReg & MV_CESA_STATUS_ACTIVE_MASK )
    {
        mvOsPrintf("mvCesaReadyGet: Not Ready, Status = 0x%x\n", statusReg);
        cesaStats.notReadyCount++;
        return MV_NOT_READY;
    }
#endif /* CESA_DEBUG */

    cesaStats.readyCount++;

    pReq = pCesaReqProcess;
    pSA = &pCesaSAD[pReq->pCmd->sessionId];

    pResult->retCode = MV_OK;
    if(pReq->fragMode != MV_CESA_FRAG_NONE)
    {
        MV_U8* pNewDigest;
        int frag;
#if (MV_CESA_VERSION >= 3)
        pReq->frags.nextFrag = 1;
        while(pReq->frags.nextFrag <= pReq->frags.numFrag)
        {
#endif
            frag = (pReq->frags.nextFrag - 1);

            /* Restore DMA descriptor list (re-link the descriptor that was
             * terminated for this fragment) */
            pReq->dma[frag].pDmaLast->phyNextDescPtr =
                MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[frag].pDmaLast[1]));
            pReq->dma[frag].pDmaLast = NULL;

            /* Special processing for finished fragmented request */
            if(pReq->frags.nextFrag >= pReq->frags.numFrag)
            {
                mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);

                /* Fragmented packet is ready */
                if( (pSA->config & MV_CESA_OPERATION_MASK) !=
                    (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
                {
                    int macDataSize = pReq->pCmd->macLength - pReq->frags.macSize;

                    if(macDataSize != 0)
                    {
                        /* Calculate all other blocks by SW */
                        mvCesaFragAuthComplete(pReq, pSA, macDataSize);
                    }

                    /* Copy new digest from SRAM to the Destination buffer */
                    pNewDigest = cesaSramVirtPtr->buf + pReq->frags.newDigestOffset;
                    status = mvCesaCopyToMbuf(pNewDigest, pReq->pCmd->pDst,
                                              pReq->pCmd->digestOffset, pSA->digestSize);

                    /* For decryption: Compare new digest value with original one */
                    if((pSA->config & MV_CESA_DIRECTION_MASK) ==
                       (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
                    {
                        if( memcmp(pNewDigest, pReq->frags.orgDigest, pSA->digestSize) != 0)
                        {
/*
                            mvOsPrintf("Digest error: chan=%d, newDigest=%p, orgDigest=%p, status = 0x%x\n",
                                       chan, pNewDigest, pReq->frags.orgDigest, MV_REG_READ(MV_CESA_STATUS_REG));
*/
                            /* Signiture verification is failed */
                            pResult->retCode = MV_FAIL;
                        }
                    }
                }
                readyStatus = MV_OK;
            }
#if (MV_CESA_VERSION >= 3)
            pReq->frags.nextFrag++;
        }
#endif
    }
    else
    {
        mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);

        /* Restore DMA descriptor list */
        pReq->dma[0].pDmaLast->phyNextDescPtr =
            MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[0].pDmaLast[1]));
        pReq->dma[0].pDmaLast = NULL;
        if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
             (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) ) &&
            ((pSA->config & MV_CESA_DIRECTION_MASK) ==
             (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT)) )
        {
            /* For AUTH on decode : Check Digest result in Status register */
            statusReg = MV_REG_READ(MV_CESA_STATUS_REG);
            if(statusReg & MV_CESA_STATUS_DIGEST_ERR_MASK)
            {
/*
                mvOsPrintf("Digest error: chan=%d, status = 0x%x\n", chan, statusReg);
*/
                /* Signiture verification is failed */
                pResult->retCode = MV_FAIL;
            }
        }
        readyStatus = MV_OK;
    }

    if(readyStatus == MV_OK)
    {
        /* If Request is ready - Prepare pResult structure */
        pResult->pReqPrv = pReq->pCmd->pReqPrv;
        pResult->sessionId = pReq->pCmd->sessionId;

        pReq->state = MV_CESA_IDLE;
        pCesaReqProcess = MV_CESA_REQ_NEXT_PTR(pReq);
        cesaReqResources++;

        if(pSA->ctrMode)
        {
            /* For AES CTR mode - complete processing and free allocated resources */
            mvCesaCtrModeComplete(pReq->pOrgCmd, pReq->pCmd);
            mvCesaCtrModeFinish(pReq->pCmd);
            pReq->pOrgCmd = NULL;
        }
    }

#if (MV_CESA_VERSION < 3)
    if(pCesaReqProcess->state == MV_CESA_PROCESS)
    {
        /* Start request Process */
        mvCesaReqProcessStart(pCesaReqProcess);
        if(readyStatus == MV_NOT_READY)
            readyStatus = MV_BUSY;
    }
    else if(pCesaReqProcess != pCesaReqEmpty)
    {
        /* Start process new request from the queue */
        mvCesaReqProcessStart(pCesaReqProcess);
    }
#endif
    return readyStatus;
}

/***************** Functions to work with CESA_MBUF structure ******************/

/*******************************************************************************
*
mvCesaMbufOffset - Locate offset in the Mbuf structure
*
* DESCRIPTION:
*       This function locates an absolute offset inside a Multi-Buffer
*       structure: it returns the index of the fragment that contains the
*       offset and the position of the offset within that fragment.
*
*
* INPUT:
*       MV_CESA_MBUF* pMbuf  - Pointer to multi-buffer structure
*       int           offset - Offset from the beginning of the data presented by
*                              the Mbuf structure.
*
* OUTPUT:
*       int* pBufOffset      - Offset from the beginning of the fragment where
*                              the offset is located.
*
* RETURN:
*       int - Number of fragment, where the offset is located,
*             or MV_INVALID if offset is beyond the Mbuf data.
*
*******************************************************************************/
int mvCesaMbufOffset(MV_CESA_MBUF* pMbuf, int offset, int* pBufOffset)
{
    int fragIdx = 0;
    int remain = offset;

    /* Walk the fragment list, consuming whole fragments until the remaining
     * offset falls inside the current fragment */
    while(remain > 0)
    {
        if(fragIdx >= pMbuf->numFrags)
        {
            mvOsPrintf("mvCesaMbufOffset: Error: frag (%d) > numFrags (%d)\n",
                       fragIdx, pMbuf->numFrags);
            return MV_INVALID;
        }
        if(remain < pMbuf->pFrags[fragIdx].bufSize)
            break;

        remain -= pMbuf->pFrags[fragIdx].bufSize;
        fragIdx++;
    }
    if(pBufOffset != NULL)
        *pBufOffset = remain;

    return fragIdx;
}

/*******************************************************************************
* mvCesaCopyFromMbuf - Copy data from the Mbuf structure to continuous buffer
*
* DESCRIPTION:
*
*
* INPUT:
*       MV_U8*          pDstBuf  - Pointer to continuous buffer, where data is
*                                  copied to.
*       MV_CESA_MBUF*   pSrcMbuf - Pointer to multi-buffer structure where data is
*                                  copied from.
*       int             offset   - Offset in the Mbuf structure where located first
*                                  byte of data should be copied.
*       int             size     - Size of data should be copied
*
* RETURN:
*       MV_OK           - Success, all data is copied successfully.
*       MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
*                         No data is copied.
*       MV_EMPTY        - Multi-buffer structure has not enough data to copy
*                         Data from the offset to end of Mbuf data is copied.
*
*******************************************************************************/
MV_STATUS mvCesaCopyFromMbuf(MV_U8* pDstBuf, MV_CESA_MBUF* pSrcMbuf, int offset, int size)
{
    int     fragIdx, fragOffset, avail;
    MV_U8*  pSrc;

    /* Nothing to do for an empty copy */
    if(size == 0)
        return MV_OK;

    /* Translate the absolute offset into (fragment, offset-in-fragment) */
    fragIdx = mvCesaMbufOffset(pSrcMbuf, offset, &fragOffset);
    if(fragIdx == MV_INVALID)
    {
        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
        return MV_OUT_OF_RANGE;
    }

    pSrc  = pSrcMbuf->pFrags[fragIdx].bufVirtPtr + fragOffset;
    avail = pSrcMbuf->pFrags[fragIdx].bufSize - fragOffset;

    for(;;)
    {
        if(size <= avail)
        {
            /* The remaining bytes fit in the current fragment - done */
            memcpy(pDstBuf, pSrc, size);
            return MV_OK;
        }
        /* Consume the rest of the current fragment and move on */
        memcpy(pDstBuf, pSrc, avail);
        pDstBuf += avail;
        size    -= avail;

        fragIdx++;
        if(fragIdx >= pSrcMbuf->numFrags)
            break;

        pSrc  = pSrcMbuf->pFrags[fragIdx].bufVirtPtr;
        avail = pSrcMbuf->pFrags[fragIdx].bufSize;
    }
    /* Ran out of fragments before copying everything */
    mvOsPrintf("mvCesaCopyFromMbuf: Mbuf is EMPTY - %d bytes isn't copied\n", size);
    return MV_EMPTY;
}

/*******************************************************************************
* mvCesaCopyToMbuf - Copy data from continuous buffer to the Mbuf structure
*
* DESCRIPTION:
*
*
* INPUT:
*       MV_U8*          pSrcBuf  - Pointer to continuous buffer, where data is
*                                  copied from.
*       MV_CESA_MBUF*   pDstMbuf - Pointer to multi-buffer structure where data is
*                                  copied to.
*       int             offset   - Offset in the Mbuf structure where located first
*                                  byte of data should be copied.
*       int             size     - Size of data should be copied
*
* RETURN:
*       MV_OK           - Success, all data is copied successfully.
*       MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
*                         No data is copied.
*       MV_FULL         - Multi-buffer structure has not enough place to copy
*                         all data. Data from the offset to end of Mbuf data
*                         is copied.
*
*******************************************************************************/
MV_STATUS mvCesaCopyToMbuf(MV_U8* pSrcBuf, MV_CESA_MBUF* pDstMbuf, int offset, int size)
{
    int     fragIdx, fragOffset, room;
    MV_U8*  pDst;

    /* Nothing to do for an empty copy */
    if(size == 0)
        return MV_OK;

    /* Translate the absolute offset into (fragment, offset-in-fragment) */
    fragIdx = mvCesaMbufOffset(pDstMbuf, offset, &fragOffset);
    if(fragIdx == MV_INVALID)
    {
        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
        return MV_OUT_OF_RANGE;
    }

    pDst = pDstMbuf->pFrags[fragIdx].bufVirtPtr + fragOffset;
    room = pDstMbuf->pFrags[fragIdx].bufSize - fragOffset;

    for(;;)
    {
        if(size <= room)
        {
            /* The remaining bytes fit in the current fragment - done */
            memcpy(pDst, pSrcBuf, size);
            return MV_OK;
        }
        /* Fill the rest of the current fragment and move on */
        memcpy(pDst, pSrcBuf, room);
        pSrcBuf += room;
        size    -= room;

        fragIdx++;
        if(fragIdx >= pDstMbuf->numFrags)
            break;

        pDst = pDstMbuf->pFrags[fragIdx].bufVirtPtr;
        room = pDstMbuf->pFrags[fragIdx].bufSize;
    }
    /* Ran out of fragments before storing everything */
    mvOsPrintf("mvCesaCopyToMbuf: Mbuf is FULL - %d bytes isn't copied\n", size);
    return MV_FULL;
}

/*******************************************************************************
* mvCesaMbufCopy - Copy data from one Mbuf structure to the other Mbuf structure
*
* DESCRIPTION:
*
*
* INPUT:
*
*       MV_CESA_MBUF* pDstMbuf  - Pointer to multi-buffer structure where data is
*                                 copied to.
*       int dstMbufOffset       - Offset in the dstMbuf structure where first byte
*                                 of data should be copied to.
*       MV_CESA_MBUF* pSrcMbuf  - Pointer to multi-buffer structure where data is
*                                 copied from.
*       int srcMbufOffset       - Offset in the srcMbuf structure where first byte
*                                 of data should be copied from.
*       int size                - Size of data should be copied
*
* RETURN:
*       MV_OK           - Success, all data is copied successfully.
*       MV_OUT_OF_RANGE - Failed, srcMbufOffset or dstMbufOffset is out of
*                         srcMbuf or dstMbuf structure correspondently.
*                         No data is copied.
*       MV_BAD_SIZE     - srcMbuf or dstMbuf structure is too small to copy
*                         all data.
Partial data is copied
 *
 *******************************************************************************/
MV_STATUS mvCesaMbufCopy(MV_CESA_MBUF* pMbufDst, int dstMbufOffset,
                         MV_CESA_MBUF* pMbufSrc, int srcMbufOffset, int size)
{
    int srcFrag, dstFrag, srcSize, dstSize, srcOffset, dstOffset;
    int copySize;
    MV_U8 *pSrc, *pDst;

    if(size == 0)
        return MV_OK;

    /* Resolve the starting fragment/offset on the source side */
    srcFrag = mvCesaMbufOffset(pMbufSrc, srcMbufOffset, &srcOffset);
    if(srcFrag == MV_INVALID)
    {
        mvOsPrintf("CESA srcMbuf Error: offset (%d) out of range\n", srcMbufOffset);
        return MV_OUT_OF_RANGE;
    }
    pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr + srcOffset;
    srcSize = pMbufSrc->pFrags[srcFrag].bufSize - srcOffset;

    /* Resolve the starting fragment/offset on the destination side */
    dstFrag = mvCesaMbufOffset(pMbufDst, dstMbufOffset, &dstOffset);
    if(dstFrag == MV_INVALID)
    {
        mvOsPrintf("CESA dstMbuf Error: offset (%d) out of range\n", dstMbufOffset);
        return MV_OUT_OF_RANGE;
    }
    pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr + dstOffset;
    dstSize = pMbufDst->pFrags[dstFrag].bufSize - dstOffset;

    /* Walk both fragment lists in lock-step; each chunk is bounded by
     * whichever side has fewer contiguous bytes remaining. */
    while(size > 0)
    {
        copySize = MV_MIN(srcSize, dstSize);
        if(size <= copySize)
        {
            /* Remainder fits in both current fragments - done */
            memcpy(pDst, pSrc, size);
            return MV_OK;
        }
        memcpy(pDst, pSrc, copySize);
        size -= copySize;
        srcSize -= copySize;
        dstSize -= copySize;

        /* Advance whichever side(s) got exhausted by this chunk */
        if(srcSize == 0)
        {
            srcFrag++;
            if(srcFrag >= pMbufSrc->numFrags)
                break;
            pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr;
            srcSize = pMbufSrc->pFrags[srcFrag].bufSize;
        }
        if(dstSize == 0)
        {
            dstFrag++;
            if(dstFrag >= pMbufDst->numFrags)
                break;
            pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr;
            dstSize = pMbufDst->pFrags[dstFrag].bufSize;
        }
    }
    /* Reached the end of either Mbuf with 'size' bytes still pending */
    mvOsPrintf("mvCesaMbufCopy: BAD size - %d bytes isn't copied\n", size);
    return MV_BAD_SIZE;
}

/* Unmap (cache-wise) 'size' bytes of an Mbuf starting at 'offset',
 * walking fragments exactly like the copy routines above.
 * Returns MV_OK on success, MV_OUT_OF_RANGE for a bad offset,
 * MV_FULL if the Mbuf ended before 'size' bytes were unmapped. */
static MV_STATUS mvCesaMbufCacheUnmap(MV_CESA_MBUF* pMbuf, int offset, int size)
{
    int frag, fragOffset, bufSize;
    MV_U8* pBuf;

    if(size == 0)
        return MV_OK;

    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
    if(frag == MV_INVALID)
    {
        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
        return MV_OUT_OF_RANGE;
    }
    bufSize = pMbuf->pFrags[frag].bufSize - fragOffset;
    pBuf = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
    while(MV_TRUE)
    {
        if(size <= bufSize)
        {
            /* Remainder lies inside the current fragment */
            mvOsCacheUnmap(NULL, mvOsIoVirtToPhy(NULL, pBuf), size);
            return MV_OK;
        }
        mvOsCacheUnmap(NULL, mvOsIoVirtToPhy(NULL, pBuf), bufSize);
        size -= bufSize;
        frag++;
        if(frag >= pMbuf->numFrags)
            break;
        bufSize = pMbuf->pFrags[frag].bufSize;
        pBuf = pMbuf->pFrags[frag].bufVirtPtr;
    }
    mvOsPrintf("%s: Mbuf is FULL - %d bytes isn't Unmapped\n", __FUNCTION__, size);
    return MV_FULL;
}

/*************************************** Local Functions ******************************/

/*******************************************************************************
 * mvCesaFragReqProcess - Process fragmented request
 *
 * DESCRIPTION:
 *     This function processes a fragment of fragmented request (First, Middle or Last)
 *
 *
 * INPUT:
 *     MV_CESA_REQ* pReq - Pointer to the request in the request queue.
 *
 * RETURN:
 *     MV_OK        - The fragment is successfully passed to HW for processing.
 *     MV_TERMINATE - Means, that HW finished its work on this packet and no more
 *                    interrupts will be generated for this request.
 *                    Function mvCesaReadyGet() must be called to complete request
 *                    processing and get request result.
 *
 *******************************************************************************/
static MV_STATUS mvCesaFragReqProcess(MV_CESA_REQ* pReq, MV_U8 frag)
{
    int i, copySize, cryptoDataSize, macDataSize, sid;
    int cryptoIvOffset, digestOffset;
    MV_U32 config;
    MV_CESA_COMMAND* pCmd = pReq->pCmd;
    MV_CESA_SA* pSA;
    MV_CESA_MBUF* pMbuf;
    MV_DMA_DESC* pDmaDesc = pReq->dma[frag].pDmaFirst;
    MV_U8* pSramBuf = cesaSramVirtPtr->buf;
    int macTotalLen = 0;
    int fixOffset, cryptoOffset, macOffset;

    cesaStats.fragCount++;

    sid = pReq->pCmd->sessionId;
    pSA = &pCesaSAD[sid];

    cryptoIvOffset = digestOffset = 0;
    i = macDataSize = 0;
    cryptoDataSize = 0;

    /* First fragment processing */
    if(pReq->fragMode == MV_CESA_FRAG_FIRST)
    {
        /* pReq->frags monitors processing of fragmented request between fragments */
        pReq->frags.bufOffset = 0;
        pReq->frags.cryptoSize = 0;
        pReq->frags.macSize = 0;

        config = pSA->config | (MV_CESA_FRAG_FIRST << MV_CESA_FRAG_MODE_OFFSET);

        /* fixOffset can be not equal to zero only for FIRST fragment */
        fixOffset = pReq->fixOffset;

        /* For FIRST fragment crypto and mac offsets are taken from pCmd */
        cryptoOffset = pCmd->cryptoOffset;
        macOffset = pCmd->macOffset;

        copySize = sizeof(cesaSramVirtPtr->buf) - pReq->fixOffset;

        /* Find fragment size: Must meet all requirements for CRYPTO and MAC
         * cryptoDataSize - size of data will be encrypted/decrypted in this fragment
         * macDataSize    - size of data will be signed/verified in this fragment
         * copySize       - size of data will be copied from srcMbuf to SRAM and
         *                  back to dstMbuf for this fragment
         */
        mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
                           &copySize, &cryptoDataSize, &macDataSize);

        if( (pSA->config & MV_CESA_OPERATION_MASK) !=
            (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET))
        {
            /* CryptoIV special processing */
            if( (pSA->config & MV_CESA_CRYPTO_MODE_MASK) ==
                (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT) )
            {
                /* In CBC mode for encode direction when IV from user */
                if( (pCmd->ivFromUser) &&
                    ((pSA->config & MV_CESA_DIRECTION_MASK) ==
                     (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) )
                {
                    /* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
                     * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
                     * in the buffer to SRAM IVPointer
                     */
                    i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
                                              MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
                }

                /* Special processing when IV is not located in the first fragment */
                if(pCmd->ivOffset > (copySize - pSA->cryptoIvSize))
                {
                    /* Prepare dummy place for cryptoIV in SRAM */
                    cryptoIvOffset = cesaSramVirtPtr->tempCryptoIV - mvCesaSramAddrGet();

                    /* For Decryption: Copy IV value from pCmd->ivOffset to Special SRAM place */
                    if((pSA->config & MV_CESA_DIRECTION_MASK) ==
                       (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
                    {
                        i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->tempCryptoIV, &pDmaDesc[i],
                                                  MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
                    }
                    else
                    {
                        /* For Encryption when IV is NOT from User: */
                        /* Copy IV from SRAM to buffer (pCmd->ivOffset) */
                        if(pCmd->ivFromUser == 0)
                        {
                            /* copy IV value from cryptoIV to Buffer (pCmd->ivOffset) */
                            i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
                                                      MV_TRUE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
                        }
                    }
                }
                else
                {
                    /* IV is inside the first fragment - use it in place */
                    cryptoIvOffset = pCmd->ivOffset;
                }
            }
        }
        if( (pSA->config & MV_CESA_OPERATION_MASK) !=
            (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
        {
            /* MAC digest special processing on Decode direction */
            if((pSA->config & MV_CESA_DIRECTION_MASK) ==
               (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
            {
                /* Save digest from pCmd->digestOffset */
                mvCesaCopyFromMbuf(pReq->frags.orgDigest,
                                   pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);

                /* If pCmd->digestOffset is not located on the first */
                if(pCmd->digestOffset > (copySize - pSA->digestSize))
                {
                    MV_U8 digestZero[MV_CESA_MAX_DIGEST_SIZE];

                    /* Set zeros to pCmd->digestOffset (DRAM) */
                    memset(digestZero, 0, MV_CESA_MAX_DIGEST_SIZE);
                    mvCesaCopyToMbuf(digestZero, pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);

                    /* Prepare dummy place for digest in SRAM */
                    digestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
                }
                else
                {
                    digestOffset = pCmd->digestOffset;
                }
            }
        }
        /* Update SA in SRAM */
        if(cesaLastSid != sid)
        {
            mvCesaSramSaUpdate(sid, &pDmaDesc[i]);
            i++;
        }

        pReq->fragMode = MV_CESA_FRAG_MIDDLE;
    }
    else
    {
        /* Continue fragment (MIDDLE or LAST) */
        fixOffset = 0;
        cryptoOffset = 0;
        macOffset = 0;

        if( (pCmd->pSrc->mbufSize - pReq->frags.bufOffset) <= sizeof(cesaSramVirtPtr->buf))
        {
            /* Last fragment */
            config = pSA->config | (MV_CESA_FRAG_LAST << MV_CESA_FRAG_MODE_OFFSET);
            pReq->fragMode = MV_CESA_FRAG_LAST;
            copySize = pCmd->pSrc->mbufSize - pReq->frags.bufOffset;

            if( (pSA->config & MV_CESA_OPERATION_MASK) !=
                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
            {
                macDataSize = pCmd->macLength - pReq->frags.macSize;

                /* If pCmd->digestOffset is not located on last fragment */
                if(pCmd->digestOffset < pReq->frags.bufOffset)
                {
                    /* Prepare dummy place for digest in SRAM */
                    digestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
                }
                else
                {
                    digestOffset = pCmd->digestOffset - pReq->frags.bufOffset;
                }
                pReq->frags.newDigestOffset = digestOffset;
                macTotalLen = pCmd->macLength;

                /* HW can't calculate the Digest correctly for fragmented packets
                 * in the following cases:
                 *    - MV88F5182 ||
                 *    - MV88F5181L when total macLength more that 16 Kbytes ||
                 *    - total macLength more that 64 Kbytes
                 * In those cases return MV_TERMINATE so SW completes the digest. */
                if( (mvCtrlModelGet() == MV_5182_DEV_ID) ||
                    ( (mvCtrlModelGet() == MV_5181_DEV_ID) &&
                      (mvCtrlRevGet() >= MV_5181L_A0_REV)  &&
                      (pCmd->macLength >= (1 << 14)) ) )
                {
                    return MV_TERMINATE;
                }
            }
            if( (pSA->config & MV_CESA_OPERATION_MASK) !=
                (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
            {
                cryptoDataSize = pCmd->cryptoLength - pReq->frags.cryptoSize;
            }

            /* cryptoIvOffset - don't care */
        }
        else
        {
            /* WA for MV88F5182 SHA1 and MD5 fragmentation mode */
            if( (mvCtrlModelGet() == MV_5182_DEV_ID) &&
                (((pSA->config & MV_CESA_MAC_MODE_MASK) ==
                  (MV_CESA_MAC_MD5 << MV_CESA_MAC_MODE_OFFSET)) ||
                 ((pSA->config & MV_CESA_MAC_MODE_MASK) ==
                  (MV_CESA_MAC_SHA1 << MV_CESA_MAC_MODE_OFFSET))) )
            {
                pReq->frags.newDigestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
                pReq->fragMode = MV_CESA_FRAG_LAST;
                return MV_TERMINATE;
            }
            /* Middle fragment */
            config = pSA->config | (MV_CESA_FRAG_MIDDLE << MV_CESA_FRAG_MODE_OFFSET);
            copySize = sizeof(cesaSramVirtPtr->buf);
            /* digestOffset and cryptoIvOffset - don't care */

            /* Find fragment size */
            mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
                               &copySize, &cryptoDataSize, &macDataSize);
        }
    }
    /********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
    pMbuf = pCmd->pSrc;
    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
                              MV_FALSE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);

    /* Prepare CESA descriptor to copy from DRAM to SRAM by DMA */
    mvCesaSramDescrBuild(config, frag,
                         cryptoOffset + fixOffset, cryptoIvOffset + fixOffset, cryptoDataSize,
                         macOffset + fixOffset, digestOffset + fixOffset, macDataSize, macTotalLen,
                         pReq, &pDmaDesc[i]);
    i++;

    /* Add special descriptor Ownership for CPU */
    pDmaDesc[i].byteCnt = 0;
    pDmaDesc[i].phySrcAdd = 0;
    pDmaDesc[i].phyDestAdd = 0;
    i++;

    /********* Prepare DMA descriptors to copy from SRAM to pDst *********/
    pMbuf = pCmd->pDst;
    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
                              MV_TRUE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);

    /* Next field of Last DMA descriptor must be NULL */
    pDmaDesc[i-1].phyNextDescPtr = 0;
    pReq->dma[frag].pDmaLast = &pDmaDesc[i-1];
    mvOsCacheFlush(NULL, pReq->dma[frag].pDmaFirst, i*sizeof(MV_DMA_DESC));

    /*mvCesaDebugDescriptor(&cesaSramVirtPtr->desc[frag]);*/

    /* Advance inter-fragment bookkeeping for the next call */
    pReq->frags.bufOffset += copySize;
    pReq->frags.cryptoSize += cryptoDataSize;
    pReq->frags.macSize += macDataSize;

    return MV_OK;
}

/*******************************************************************************
 * mvCesaReqProcess - Process regular (Non-fragmented) request
 *
 * DESCRIPTION:
 *     This function processes the
whole (not fragmented) request
 *
 * INPUT:
 *     MV_CESA_REQ* pReq - Pointer to the request in the request queue.
 *
 * RETURN:
 *     MV_OK  - The request is successfully passed to HW for processing.
 *     Other  - Failure. The request will not be processed
 *
 *******************************************************************************/
static MV_STATUS mvCesaReqProcess(MV_CESA_REQ* pReq)
{
    MV_CESA_MBUF *pMbuf;
    MV_DMA_DESC *pDmaDesc;
    MV_U8 *pSramBuf;
    int sid, i, fixOffset;
    MV_CESA_SA *pSA;
    MV_CESA_COMMAND *pCmd = pReq->pCmd;

    cesaStats.procCount++;

    sid = pCmd->sessionId;
    pSA = &pCesaSAD[sid];
    pDmaDesc = pReq->dma[0].pDmaFirst;
    pSramBuf = cesaSramVirtPtr->buf;
    fixOffset = pReq->fixOffset;

/*
    mvOsPrintf("mvCesaReqProcess: sid=%d, pSA=%p, pDmaDesc=%p, pSramBuf=%p\n",
               sid, pSA, pDmaDesc, pSramBuf);
*/
    /* 'i' counts DMA descriptors consumed while building the chain */
    i = 0;

    /* Crypto IV Special processing in CBC mode for Encryption direction */
    if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) &&
        ((pSA->config & MV_CESA_CRYPTO_MODE_MASK) == (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT)) &&
        ((pSA->config & MV_CESA_DIRECTION_MASK) == (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) &&
        (pCmd->ivFromUser) )
    {
        /* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
         * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
         * in the buffer to SRAM IVPointer
         */
        i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
                                  MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
    }

    /* Update SA in SRAM (only when the session changed since the last request) */
    if(cesaLastSid != sid)
    {
        mvCesaSramSaUpdate(sid, &pDmaDesc[i]);
        i++;
    }

    /********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
    pMbuf = pCmd->pSrc;
    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
                              MV_FALSE, 0, pMbuf->mbufSize, pCmd->skipFlush);

    /* Prepare Security Accelerator descriptor to SRAM words 0 - 7 */
    mvCesaSramDescrBuild(pSA->config, 0,
                         pCmd->cryptoOffset + fixOffset, pCmd->ivOffset + fixOffset, pCmd->cryptoLength,
                         pCmd->macOffset + fixOffset, pCmd->digestOffset + fixOffset,
                         pCmd->macLength, pCmd->macLength,
                         pReq, &pDmaDesc[i]);
    i++;

    /* Add special descriptor Ownership for CPU */
    pDmaDesc[i].byteCnt = 0;
    pDmaDesc[i].phySrcAdd = 0;
    pDmaDesc[i].phyDestAdd = 0;
    i++;

    /********* Prepare DMA descriptors to copy from SRAM to pDst *********/
    pMbuf = pCmd->pDst;
    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
                              MV_TRUE, 0, pMbuf->mbufSize, pCmd->skipFlush);

    /* Next field of Last DMA descriptor must be NULL */
    pDmaDesc[i-1].phyNextDescPtr = 0;
    pReq->dma[0].pDmaLast = &pDmaDesc[i-1];
    mvOsCacheFlush(NULL, pReq->dma[0].pDmaFirst, i*sizeof(MV_DMA_DESC));

    return MV_OK;
}

/*******************************************************************************
 * mvCesaSramDescrBuild - Set CESA descriptor in SRAM
 *
 * DESCRIPTION:
 *     This function builds CESA descriptor in SRAM from all Command parameters
 *
 *
 * INPUT:
 *     int    chan         - CESA channel uses the descriptor
 *     MV_U32 config       - 32 bits of WORD_0 in CESA descriptor structure
 *     int    cryptoOffset - Offset from the beginning of SRAM buffer where
 *                           data for encryption/decription is started.
 *     int    ivOffset     - Offset of crypto IV from the SRAM base. Valid only
 *                           for first fragment.
* int cryptoLength - Size (in bytes) of data for encryption/descryption * operation on this fragment. * int macOffset - Offset from the beginning of SRAM buffer where * data for Authentication is started * int digestOffset - Offset from the beginning of SRAM buffer where * digest is located. Valid for first and last fragments. * int macLength - Size (in bytes) of data for Authentication * operation on this fragment. * int macTotalLen - Toatl size (in bytes) of data for Authentication * operation on the whole request (packet). Valid for * last fragment only. * * RETURN: None * *******************************************************************************/ static void mvCesaSramDescrBuild(MV_U32 config, int frag, int cryptoOffset, int ivOffset, int cryptoLength, int macOffset, int digestOffset, int macLength, int macTotalLen, MV_CESA_REQ* pReq, MV_DMA_DESC* pDmaDesc) { MV_CESA_DESC* pCesaDesc = &pReq->pCesaDesc[frag]; MV_CESA_DESC* pSramDesc = pSramDesc = &cesaSramVirtPtr->desc; MV_U16 sramBufOffset = (MV_U16)((MV_U8*)cesaSramVirtPtr->buf - mvCesaSramAddrGet()); pCesaDesc->config = MV_32BIT_LE(config); if( (config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) ) { /* word 1 */ pCesaDesc->cryptoSrcOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset); pCesaDesc->cryptoDstOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset); /* word 2 */ pCesaDesc->cryptoDataLen = MV_16BIT_LE(cryptoLength); /* word 3 */ pCesaDesc->cryptoKeyOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.cryptoKey - mvCesaSramAddrGet())); /* word 4 */ pCesaDesc->cryptoIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->cryptoIV - mvCesaSramAddrGet())); pCesaDesc->cryptoIvBufOffset = MV_16BIT_LE(sramBufOffset + ivOffset); } if( (config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) ) { /* word 5 */ pCesaDesc->macSrcOffset = MV_16BIT_LE(sramBufOffset + macOffset); pCesaDesc->macTotalLen = MV_16BIT_LE(macTotalLen); /* word 6 */ 
pCesaDesc->macDigestOffset = MV_16BIT_LE(sramBufOffset + digestOffset); pCesaDesc->macDataLen = MV_16BIT_LE(macLength); /* word 7 */ pCesaDesc->macInnerIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.macInnerIV - mvCesaSramAddrGet())); pCesaDesc->macOuterIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.macOuterIV - mvCesaSramAddrGet())); } /* Prepare DMA descriptor to CESA descriptor from DRAM to SRAM */ pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&pReq->cesaDescBuf, pCesaDesc)); pDmaDesc->phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (MV_U8*)pSramDesc)); pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_DESC) | BIT31); /* flush Source buffer */ mvOsCacheFlush(NULL, pCesaDesc, sizeof(MV_CESA_DESC)); } /******************************************************************************* * mvCesaSramSaUpdate - Move required SA information to SRAM if needed. * * DESCRIPTION: * Copy to SRAM values of the required SA. * * * INPUT: * short sid - Session ID needs SRAM Cache update * MV_DMA_DESC *pDmaDesc - Pointer to DMA descriptor used to * copy SA values from DRAM to SRAM. * * RETURN: * MV_OK - Cache entry for this SA copied to SRAM. 
 *     MV_NO_CHANGE - Cache entry for this SA already exist in SRAM
 *
 * NOTE(review): the RETURN section above is stale - the function is declared
 * void and returns nothing; callers just bump their descriptor index after
 * calling it. Confirm before relying on a return value.
 *******************************************************************************/
static INLINE void mvCesaSramSaUpdate(short sid, MV_DMA_DESC *pDmaDesc)
{
    MV_CESA_SA *pSA = &pCesaSAD[sid];

    /* Prepare DMA descriptor to Copy CACHE_SA from SA database in DRAM to SRAM */
    pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_SRAM_SA) | BIT31);
    pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&cesaSramSaBuf, pSA->pSramSA));
    pDmaDesc->phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (MV_U8*)&cesaSramVirtPtr->sramSA));

    /* Source buffer is already flushed during OpenSession*/
    /*mvOsCacheFlush(NULL, &pSA->sramSA, sizeof(MV_CESA_SRAM_SA));*/
}

/*******************************************************************************
 * mvCesaDmaCopyPrepare - prepare DMA descriptor list to copy data presented by
 *                        Mbuf structure from DRAM to SRAM
 *
 * DESCRIPTION:
 *
 *
 * INPUT:
 *     MV_CESA_MBUF* pMbuf    - pointer to Mbuf structure contains request
 *                              data in DRAM
 *     MV_U8*       pSramBuf  - pointer to buffer in SRAM where data should
 *                              be copied to.
 *     MV_DMA_DESC* pDmaDesc  - pointer to first DMA descriptor for this copy.
 *                              The function set number of DMA descriptors needed
 *                              to copy the copySize bytes from Mbuf.
 *     MV_BOOL      isToMbuf  - Copy direction.
 *                              MV_TRUE means copy from SRAM buffer to Mbuf in DRAM.
 *                              MV_FALSE means copy from Mbuf in DRAM to SRAM buffer.
 *     int          offset    - Offset in the Mbuf structure that copy should be
 *                              started from.
 *     int          copySize  - Size of data should be copied.
 *
 * RETURN:
 *     int - number of DMA descriptors used for the copy.
* *******************************************************************************/ #ifndef MV_NETBSD static INLINE int mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf, MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf, int offset, int copySize, MV_BOOL skipFlush) { int bufOffset, bufSize, size, frag, i; MV_U8* pBuf; i = 0; /* Calculate start place for copy: fragment number and offset in the fragment */ frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset); bufSize = pMbuf->pFrags[frag].bufSize - bufOffset; pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset; /* Size accumulate total copy size */ size = 0; /* Create DMA lists to copy mBuf from pSrc to SRAM */ while(size < copySize) { /* Find copy size for each DMA descriptor */ bufSize = MV_MIN(bufSize, (copySize - size)); pDmaDesc[i].byteCnt = MV_32BIT_LE(bufSize | BIT31); if(isToMbuf) { pDmaDesc[i].phyDestAdd = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf)); pDmaDesc[i].phySrcAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (pSramBuf + size))); /* invalidate the buffer */ if(skipFlush == MV_FALSE) mvOsCacheInvalidate(NULL, pBuf, bufSize); } else { pDmaDesc[i].phySrcAdd = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf)); pDmaDesc[i].phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (pSramBuf + size))); /* flush the buffer */ if(skipFlush == MV_FALSE) mvOsCacheFlush(NULL, pBuf, bufSize); } /* Count number of used DMA descriptors */ i++; size += bufSize; /* go to next fragment in the Mbuf */ frag++; pBuf = pMbuf->pFrags[frag].bufVirtPtr; bufSize = pMbuf->pFrags[frag].bufSize; } return i; } #else /* MV_NETBSD */ static int mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf, MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf, int offset, int copySize, MV_BOOL skipFlush) { int bufOffset, bufSize, thisSize, size, frag, i; MV_ULONG bufPhys, sramPhys; MV_U8* pBuf; /* * Calculate start place for copy: fragment number and offset in * the fragment */ frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset); /* * Get SRAM physical address only once. 
We can update it in-place * as we build the descriptor chain. */ sramPhys = mvCesaSramVirtToPhys(NULL, pSramBuf); /* * 'size' accumulates total copy size, 'i' counts desccriptors. */ size = i = 0; /* Create DMA lists to copy mBuf from pSrc to SRAM */ while (size < copySize) { /* * Calculate # of bytes to copy from the current fragment, * and the pointer to the start of data */ bufSize = pMbuf->pFrags[frag].bufSize - bufOffset; pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset; bufOffset = 0; /* First frag may be non-zero */ frag++; /* * As long as there is data in the current fragment... */ while (bufSize > 0) { /* * Ensure we don't cross an MMU page boundary. * XXX: This is NetBSD-specific, but it is a * quick and dirty way to fix the problem. * A true HAL would rely on the OS-specific * driver to do this... */ thisSize = PAGE_SIZE - (((MV_ULONG)pBuf) & (PAGE_SIZE - 1)); thisSize = MV_MIN(bufSize, thisSize); /* * Make sure we don't copy more than requested */ if (thisSize > (copySize - size)) { thisSize = copySize - size; bufSize = 0; } /* * Physicall address of this fragment */ bufPhys = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf)); /* * Set up the descriptor */ pDmaDesc[i].byteCnt = MV_32BIT_LE(thisSize | BIT31); if(isToMbuf) { pDmaDesc[i].phyDestAdd = bufPhys; pDmaDesc[i].phySrcAdd = MV_32BIT_LE(sramPhys); /* invalidate the buffer */ if(skipFlush == MV_FALSE) mvOsCacheInvalidate(NULL, pBuf, thisSize); } else { pDmaDesc[i].phySrcAdd = bufPhys; pDmaDesc[i].phyDestAdd = MV_32BIT_LE(sramPhys); /* flush the buffer */ if(skipFlush == MV_FALSE) mvOsCacheFlush(NULL, pBuf, thisSize); } pDmaDesc[i].phyNextDescPtr = MV_32BIT_LE(mvOsIoVirtToPhy(NULL,(&pDmaDesc[i+1]))); /* flush the DMA desc */ mvOsCacheFlush(NULL, &pDmaDesc[i], sizeof(MV_DMA_DESC)); /* Update state */ bufSize -= thisSize; sramPhys += thisSize; pBuf += thisSize; size += thisSize; i++; } } return i; } #endif /* MV_NETBSD */ /******************************************************************************* * 
mvCesaHmacIvGet - Calculate Inner and Outer values from HMAC key
 *
 * DESCRIPTION:
 *     This function calculate Inner and Outer values used for HMAC algorithm.
 *     This operation allows improve performance fro the whole HMAC processing.
 *
 * INPUT:
 *     MV_CESA_MAC_MODE macMode  - Authentication mode: HMAC_MD5 or HMAC_SHA1.
 *     unsigned char    key[]    - Pointer to HMAC key.
 *     int              keyLength - Size of HMAC key (maximum 64 bytes)
 *
 * OUTPUT:
 *     unsigned char innerIV[] - HASH(key^inner)
 *     unsigned char outerIV[] - HASH(key^outter)
 *
 * RETURN: None
 *
 *******************************************************************************/
static void mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
                            unsigned char innerIV[], unsigned char outerIV[])
{
    unsigned char inner[MV_CESA_MAX_MAC_KEY_LENGTH];
    unsigned char outer[MV_CESA_MAX_MAC_KEY_LENGTH];
    int i, digestSize = 0;
#if defined(MV_CPU_LE) || defined(MV_PPC)
    MV_U32 swapped32, val32, *pVal32;
#endif
    /* XOR the key with the standard HMAC ipad (0x36) / opad (0x5c) bytes
     * (RFC 2104), zero-key-padded to the full block length */
    for(i=0; i<keyLength; i++)
    {
        inner[i] = 0x36 ^ key[i];
        outer[i] = 0x5c ^ key[i];
    }
    for(i=keyLength; i<MV_CESA_MAX_MAC_KEY_LENGTH; i++)
    {
        inner[i] = 0x36;
        outer[i] = 0x5c;
    }
    if(macMode == MV_CESA_MAC_HMAC_MD5)
    {
        MV_MD5_CONTEXT ctx;

        /* Run one compression over the padded key and capture the
         * intermediate state as the precomputed inner/outer IVs */
        mvMD5Init(&ctx);
        mvMD5Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
        memcpy(innerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
        memset(&ctx, 0, sizeof(ctx));

        mvMD5Init(&ctx);
        mvMD5Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
        memcpy(outerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
        memset(&ctx, 0, sizeof(ctx));

        digestSize = MV_CESA_MD5_DIGEST_SIZE;
    }
    else if(macMode == MV_CESA_MAC_HMAC_SHA1)
    {
        MV_SHA1_CTX ctx;

        mvSHA1Init(&ctx);
        mvSHA1Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
        memcpy(innerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
        memset(&ctx, 0, sizeof(ctx));

        mvSHA1Init(&ctx);
        mvSHA1Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
        memcpy(outerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
        memset(&ctx, 0, sizeof(ctx));

        digestSize = MV_CESA_SHA1_DIGEST_SIZE;
    }
    else
    {
        mvOsPrintf("hmacGetIV: Unexpected macMode %d\n", macMode);
    }
#if defined(MV_CPU_LE) || defined(MV_PPC)
    /* 32 bits Swap of Inner and Outer values */
    pVal32 = (MV_U32*)innerIV;
    for(i=0; i<digestSize/4; i++)
    {
        val32 = *pVal32;
        swapped32 = MV_BYTE_SWAP_32BIT(val32);
        *pVal32 = swapped32;
        pVal32++;
    }
    pVal32 = (MV_U32*)outerIV;
    for(i=0; i<digestSize/4; i++)
    {
        val32 = *pVal32;
        swapped32 = MV_BYTE_SWAP_32BIT(val32);
        *pVal32 = swapped32;
        pVal32++;
    }
#endif /* defined(MV_CPU_LE) || defined(MV_PPC) */
}

/*******************************************************************************
 * mvCesaFragSha1Complete - Complete SHA1 authentication started by HW using SW
 *
 * DESCRIPTION:
 *
 *
 * INPUT:
 *     MV_CESA_MBUF* pMbuf      - Pointer to Mbuf structure where data
 *                                for SHA1 is placed.
 *     int           offset     - Offset in the Mbuf structure where
 *                                unprocessed data for SHA1 is started.
 *     MV_U8*        pOuterIV   - Pointer to OUTER for this session.
 *                                If pOuterIV==NULL - MAC mode is HASH_SHA1
 *                                If pOuterIV!=NULL - MAC mode is HMAC_SHA1
 *     int           macLeftSize - Size of unprocessed data for SHA1.
 *     int           macTotalSize - Total size of data for SHA1 in the
 *                                request (processed + unprocessed)
 *
 * OUTPUT:
 *     MV_U8* pDigest - Pointer to place where calculated Digest will
 *                      be stored.
 *
 * RETURN: None
 *
 *******************************************************************************/
static void mvCesaFragSha1Complete(MV_CESA_MBUF* pMbuf, int offset,
                                   MV_U8* pOuterIV, int macLeftSize,
                                   int macTotalSize, MV_U8* pDigest)
{
    MV_SHA1_CTX ctx;
    MV_U8 *pData;
    int i, frag, fragOffset, size;

    /* Read temporary Digest from HW */
    for(i=0; i<MV_CESA_SHA1_DIGEST_SIZE/4; i++)
    {
        ctx.state[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i));
    }
    /* Initialize MV_SHA1_CTX structure */
    memset(ctx.buffer, 0, 64);

    /* Set count[0] in bits. 32 bits is enough for 512 MBytes */
    /* so count[1] is always 0 */
    ctx.count[0] = ((macTotalSize - macLeftSize) * 8);
    ctx.count[1] = 0;

    /* If HMAC - add size of Inner block (64 bytes) ro count[0] */
    if(pOuterIV != NULL)
        ctx.count[0] += (64 * 8);

    /* Get place of unprocessed data in the Mbuf structure */
    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
    if(frag == MV_INVALID)
    {
        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
        return;
    }

    pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
    size = pMbuf->pFrags[frag].bufSize - fragOffset;

    /* Complete Inner part */
    while(macLeftSize > 0)
    {
        if(macLeftSize <= size)
        {
            mvSHA1Update(&ctx, pData, macLeftSize);
            break;
        }
        mvSHA1Update(&ctx, pData, size);
        macLeftSize -= size;
        frag++;
        pData = pMbuf->pFrags[frag].bufVirtPtr;
        size = pMbuf->pFrags[frag].bufSize;
    }
    mvSHA1Final(pDigest, &ctx);
/*
    mvOsPrintf("mvCesaFragSha1Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
               pOuterIV, macLeftSize, macTotalSize);
    mvDebugMemDump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
*/
    if(pOuterIV != NULL)
    {
        /* If HMAC - Complete Outer part: re-seed state from the precomputed
         * outer IV and hash the inner digest */
        for(i=0; i<MV_CESA_SHA1_DIGEST_SIZE/4; i++)
        {
#if defined(MV_CPU_LE) || defined(MV_ARM)
            ctx.state[i] = MV_BYTE_SWAP_32BIT(((MV_U32*)pOuterIV)[i]);
#else
            ctx.state[i] = ((MV_U32*)pOuterIV)[i];
#endif
        }
        memset(ctx.buffer, 0, 64);
        ctx.count[0] = 64*8;
        ctx.count[1] = 0;
        mvSHA1Update(&ctx, pDigest, MV_CESA_SHA1_DIGEST_SIZE);
        mvSHA1Final(pDigest, &ctx);
    }
}

/*******************************************************************************
 * mvCesaFragMd5Complete - Complete MD5 authentication started by HW using SW
 *
 * DESCRIPTION:
 *
 *
 * INPUT:
 *     MV_CESA_MBUF* pMbuf    - Pointer to Mbuf structure where data
 *                              for SHA1 is placed.
 *     int           offset   - Offset in the Mbuf structure where
 *                              unprocessed data for MD5 is started.
 *     MV_U8*        pOuterIV - Pointer to OUTER for this session.
 *                              If pOuterIV==NULL - MAC mode is HASH_MD5
 *                              If pOuterIV!=NULL - MAC mode is HMAC_MD5
 *     int macLeftSize  - Size of unprocessed data for MD5.
 *     int macTotalSize - Total size of data for MD5 in the
 *                        request (processed + unprocessed)
 *
 * OUTPUT:
 *     MV_U8* pDigest - Pointer to place where calculated Digest will
 *                      be stored.
 *
 * RETURN: None
 *
 *******************************************************************************/
static void mvCesaFragMd5Complete(MV_CESA_MBUF* pMbuf, int offset,
                                  MV_U8* pOuterIV, int macLeftSize,
                                  int macTotalSize, MV_U8* pDigest)
{
    MV_MD5_CONTEXT ctx;
    MV_U8 *pData;
    int i, frag, fragOffset, size;

    /* Read temporary Digest from HW */
    for(i=0; i<MV_CESA_MD5_DIGEST_SIZE/4; i++)
    {
        ctx.buf[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i));
    }
    memset(ctx.in, 0, 64);

    /* Set count[0] in bits. 32 bits is enough for 512 MBytes */
    /* so count[1] is always 0 */
    ctx.bits[0] = ((macTotalSize - macLeftSize) * 8);
    ctx.bits[1] = 0;

    /* If HMAC - add size of Inner block (64 bytes) ro count[0] */
    if(pOuterIV != NULL)
        ctx.bits[0] += (64 * 8);

    /* Get place of unprocessed data in the Mbuf structure */
    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
    if(frag == MV_INVALID)
    {
        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
        return;
    }

    pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
    size = pMbuf->pFrags[frag].bufSize - fragOffset;

    /* Complete Inner part */
    while(macLeftSize > 0)
    {
        if(macLeftSize <= size)
        {
            mvMD5Update(&ctx, pData, macLeftSize);
            break;
        }
        mvMD5Update(&ctx, pData, size);
        macLeftSize -= size;
        frag++;
        pData = pMbuf->pFrags[frag].bufVirtPtr;
        size = pMbuf->pFrags[frag].bufSize;
    }
    mvMD5Final(pDigest, &ctx);

/*
    mvOsPrintf("mvCesaFragMd5Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
               pOuterIV, macLeftSize, macTotalSize);
    mvDebugMemDump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
*/
    if(pOuterIV != NULL)
    {
        /* Complete Outer part: re-seed state from the precomputed outer IV
         * and hash the inner digest */
        for(i=0; i<MV_CESA_MD5_DIGEST_SIZE/4; i++)
        {
#if defined(MV_CPU_LE) || defined(MV_ARM)
            ctx.buf[i] = MV_BYTE_SWAP_32BIT(((MV_U32*)pOuterIV)[i]);
#else
            ctx.buf[i] = ((MV_U32*)pOuterIV)[i];
#endif
        }
        memset(ctx.in, 0, 64);
        ctx.bits[0] = 64*8;
        ctx.bits[1] = 0;
        mvMD5Update(&ctx, pDigest, MV_CESA_MD5_DIGEST_SIZE);
        mvMD5Final(pDigest, &ctx);
    }
}

/*******************************************************************************
 * mvCesaFragAuthComplete -
 *
 * DESCRIPTION:
 *     Finish (in SW) the MAC computation of a fragmented request that the HW
 *     could not complete, dispatching on the session's MAC mode.
 *
 * INPUT:
 *     MV_CESA_REQ* pReq,
 *     MV_CESA_SA*  pSA,
 *     int          macDataSize
 *
 * RETURN:
 *     MV_STATUS
 *
 *******************************************************************************/
static MV_STATUS mvCesaFragAuthComplete(MV_CESA_REQ* pReq, MV_CESA_SA* pSA,
                                        int macDataSize)
{
    MV_CESA_COMMAND* pCmd = pReq->pCmd;
    MV_U8* pDigest;
    MV_CESA_MAC_MODE macMode;
    MV_U8* pOuterIV = NULL;

    /* Copy data from Source fragment to Destination */
    if(pCmd->pSrc != pCmd->pDst)
    {
        mvCesaMbufCopy(pCmd->pDst, pReq->frags.bufOffset,
                       pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
    }

/*
    mvCesaCopyFromMbuf(cesaSramVirtPtr->buf[0], pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
    mvCesaCopyToMbuf(cesaSramVirtPtr->buf[0], pCmd->pDst, pReq->frags.bufOffset, macDataSize);
*/
    pDigest = (mvCesaSramAddrGet() + pReq->frags.newDigestOffset);

    macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
/*
    mvOsPrintf("macDataSize=%d, macLength=%d, digestOffset=%d, macMode=%d\n",
               macDataSize, pCmd->macLength, pCmd->digestOffset, macMode);
*/
    switch(macMode)
    {
        case MV_CESA_MAC_HMAC_MD5:
            pOuterIV = pSA->pSramSA->macOuterIV;
            /* fallthrough - HMAC shares the MD5 completion path */

        case MV_CESA_MAC_MD5:
            mvCesaFragMd5Complete(pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
                                  macDataSize, pCmd->macLength, pDigest);
            break;

        case MV_CESA_MAC_HMAC_SHA1:
            pOuterIV = pSA->pSramSA->macOuterIV;
            /* fallthrough - HMAC shares the SHA1 completion path */

        case MV_CESA_MAC_SHA1:
            mvCesaFragSha1Complete(pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
                                   macDataSize, pCmd->macLength, pDigest);
            break;

        default:
            mvOsPrintf("mvCesaFragAuthComplete: Unexpected macMode %d\n", macMode);
            return MV_BAD_PARAM;
    }
    return MV_OK;
}

/*******************************************************************************
 * mvCesaCtrModeInit -
 *
 * DESCRIPTION:
 *
 *
 *
INPUT: NONE
*
* RETURN:
*       MV_CESA_COMMAND* - shadow command used to build the CTR key stream,
*                          or NULL if memory allocation failed.
*
*******************************************************************************/
/* Allocate, in a single malloc, the shadow command + mbuf + fragment
 * descriptor used to run AES-CTR as ECB over a generated counter stream.
 * The caller owns the returned block and releases it via
 * mvCesaCtrModeFinish(). */
static MV_CESA_COMMAND* mvCesaCtrModeInit(void)
{
    MV_CESA_MBUF    *pMbuf;
    MV_U8           *pBuf;
    MV_CESA_COMMAND *pCmd;

    pBuf = mvOsMalloc(sizeof(MV_CESA_COMMAND) +
                      sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) + 100);
    if(pBuf == NULL)
    {
        /* FIX: message used to claim "mvCesaSessionOpen" - wrong function */
        mvOsPrintf("mvCesaCtrModeInit: Can't allocate %u bytes for CTR Mode\n",
                   sizeof(MV_CESA_COMMAND) + sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) );
        return NULL;
    }

    /* Carve the three structures out of the single allocation */
    pCmd = (MV_CESA_COMMAND*)pBuf;
    pBuf += sizeof(MV_CESA_COMMAND);

    pMbuf = (MV_CESA_MBUF*)pBuf;
    pBuf += sizeof(MV_CESA_MBUF);

    pMbuf->pFrags = (MV_BUF_INFO*)pBuf;
    pMbuf->numFrags = 1;

    /* Key stream is produced in place: src == dst */
    pCmd->pSrc = pMbuf;
    pCmd->pDst = pMbuf;

    return pCmd;
}

/*******************************************************************************
* mvCesaCtrModePrepare -
*
* DESCRIPTION:
*       Build the counter-block stream for an AES-CTR request: copy the
*       initial counter block from the user command's IV, then replicate it
*       with an incrementing big-endian 32-bit counter to cover the whole
*       (block-aligned) crypto length.
*
* INPUT:
*       MV_CESA_COMMAND *pCtrModeCmd - shadow command from mvCesaCtrModeInit()
*       MV_CESA_COMMAND *pCmd        - original user command
*
* RETURN:
*       MV_STATUS - MV_OK or MV_OUT_OF_CPU_MEM.
*
*******************************************************************************/
static MV_STATUS mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd)
{
    MV_CESA_MBUF    *pMbuf;
    MV_U8           *pBuf, *pIV;
    MV_U32          counter, *pCounter;
    int             cryptoSize = MV_ALIGN_UP(pCmd->cryptoLength, MV_CESA_AES_BLOCK_SIZE);

    pMbuf = pCtrModeCmd->pSrc;

    /* Allocate DMA-able buffer for the key stream */
    pBuf = mvOsIoCachedMalloc(cesaOsHandle, cryptoSize,
                              &pMbuf->pFrags[0].bufPhysAddr,
                              &pMbuf->pFrags[0].memHandle);
    if(pBuf == NULL)
    {
        mvOsPrintf("mvCesaCtrModePrepare: Can't allocate %d bytes\n", cryptoSize);
        return MV_OUT_OF_CPU_MEM;
    }
    memset(pBuf, 0, cryptoSize);
    mvOsCacheFlush(NULL, pBuf, cryptoSize);

    pMbuf->pFrags[0].bufVirtPtr = pBuf;
    pMbuf->mbufSize = cryptoSize;
    pMbuf->pFrags[0].bufSize = cryptoSize;

    pCtrModeCmd->pReqPrv = pCmd->pReqPrv;
    pCtrModeCmd->sessionId = pCmd->sessionId;

    /* ivFromUser and ivOffset are don't care */
    pCtrModeCmd->cryptoOffset = 0;
    pCtrModeCmd->cryptoLength = cryptoSize;

    /* digestOffset, macOffset and macLength are don't care */

    /* First counter block comes straight from the user command's IV */
    mvCesaCopyFromMbuf(pBuf, pCmd->pSrc, pCmd->ivOffset, MV_CESA_AES_BLOCK_SIZE);

    /* Counter occupies the last 4 bytes of the block, big-endian */
    pCounter = (MV_U32*)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
    counter = *pCounter;
    counter = MV_32BIT_BE(counter);
    pIV = pBuf;
    cryptoSize -= MV_CESA_AES_BLOCK_SIZE;

    /* fill key stream: copy nonce/IV prefix, bump and store the counter */
    while(cryptoSize > 0)
    {
        pBuf += MV_CESA_AES_BLOCK_SIZE;
        memcpy(pBuf, pIV, MV_CESA_AES_BLOCK_SIZE - sizeof(counter));
        pCounter = (MV_U32*)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
        counter++;
        *pCounter = MV_32BIT_BE(counter);
        cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
    }
    return MV_OK;
}

/*******************************************************************************
* mvCesaCtrModeComplete -
*
* DESCRIPTION:
*       Finalize AES-CTR: XOR the original source data with the encrypted
*       counter stream into the destination, copy any prefix/suffix bytes
*       outside the crypto region when src != dst, and free the key-stream
*       buffer.
*
* INPUT:
*       MV_CESA_COMMAND *pOrgCmd - original user command
*       MV_CESA_COMMAND *pCmd    - shadow command holding the key stream
*
* RETURN:
*       MV_STATUS - always MV_OK.
*
*******************************************************************************/
static MV_STATUS mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd)
{
    int         srcFrag, dstFrag, srcOffset, dstOffset, keyOffset, srcSize, dstSize;
    int         cryptoSize = pCmd->cryptoLength;
    MV_U8       *pSrc, *pDst, *pKey;
    MV_STATUS   status = MV_OK;

    /* XOR source data with key stream to destination data */
    pKey = pCmd->pDst->pFrags[0].bufVirtPtr;
    keyOffset = 0;

    if( (pOrgCmd->pSrc != pOrgCmd->pDst) && (pOrgCmd->cryptoOffset > 0) )
    {
        /* Copy Prefix from source buffer to destination buffer.
         * NOTE(review): copy status is captured but the function still
         * returns MV_OK unconditionally - preserved as-is; confirm whether
         * failures here should propagate. */
        status = mvCesaMbufCopy(pOrgCmd->pDst, 0,
                                pOrgCmd->pSrc, 0, pOrgCmd->cryptoOffset);
    }

    srcFrag = mvCesaMbufOffset(pOrgCmd->pSrc, pOrgCmd->cryptoOffset, &srcOffset);
    pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
    srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;

    dstFrag = mvCesaMbufOffset(pOrgCmd->pDst, pOrgCmd->cryptoOffset, &dstOffset);
    pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
    dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;

    /* Byte-wise XOR, advancing through src and dst fragment chains */
    while(cryptoSize > 0)
    {
        pDst[dstOffset] = (pSrc[srcOffset] ^ pKey[keyOffset]);

        cryptoSize--;
        dstOffset++;
        srcOffset++;
        keyOffset++;

        if(srcOffset >= srcSize)
        {
            srcFrag++;
            srcOffset = 0;
            pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
            srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
        }
        if(dstOffset >= dstSize)
        {
            dstFrag++;
            dstOffset = 0;
            pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
            dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
        }
    }

    if(pOrgCmd->pSrc != pOrgCmd->pDst)
    {
        /* Copy Suffix from source buffer to destination buffer */
        srcOffset = pOrgCmd->cryptoOffset + pOrgCmd->cryptoLength;

        if( (pOrgCmd->pDst->mbufSize - srcOffset) > 0)
        {
            status = mvCesaMbufCopy(pOrgCmd->pDst, srcOffset,
                                    pOrgCmd->pSrc, srcOffset,
                                    pOrgCmd->pDst->mbufSize - srcOffset);
        }
    }

    /* Free buffer used for Key stream */
    mvOsIoCachedFree(cesaOsHandle, pCmd->pDst->pFrags[0].bufSize,
                     pCmd->pDst->pFrags[0].bufPhysAddr,
                     pCmd->pDst->pFrags[0].bufVirtPtr,
                     pCmd->pDst->pFrags[0].memHandle);
    return MV_OK;
}

/*******************************************************************************
* mvCesaCtrModeFinish -
*
* DESCRIPTION:
*       Release the shadow command allocated by mvCesaCtrModeInit().
*
* INPUT:
*       MV_CESA_COMMAND* pCmd - pointer returned by mvCesaCtrModeInit().
*
* RETURN: None
*
*******************************************************************************/
static void mvCesaCtrModeFinish(MV_CESA_COMMAND* pCmd)
{
    mvOsFree(pCmd);
}
/******************************************************************************* * mvCesaParamCheck - * * DESCRIPTION: * * * INPUT: * MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset * * RETURN: * MV_STATUS * *******************************************************************************/ static MV_STATUS mvCesaParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset) { MV_U8 fixOffset = 0xFF; /* Check AUTH operation parameters */ if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) ) { /* MAC offset should be at least 4 byte aligned */ if( MV_IS_NOT_ALIGN(pCmd->macOffset, 4) ) { mvOsPrintf("mvCesaAction: macOffset %d must be 4 byte aligned\n", pCmd->macOffset); return MV_BAD_PARAM; } /* Digest offset must be 4 byte aligned */ if( MV_IS_NOT_ALIGN(pCmd->digestOffset, 4) ) { mvOsPrintf("mvCesaAction: digestOffset %d must be 4 byte aligned\n", pCmd->digestOffset); return MV_BAD_PARAM; } /* In addition all offsets should be the same alignment: 8 or 4 */ if(fixOffset == 0xFF) { fixOffset = (pCmd->macOffset % 8); } else { if( (pCmd->macOffset % 8) != fixOffset) { mvOsPrintf("mvCesaAction: macOffset %d mod 8 must be equal %d\n", pCmd->macOffset, fixOffset); return MV_BAD_PARAM; } } if( (pCmd->digestOffset % 8) != fixOffset) { mvOsPrintf("mvCesaAction: digestOffset %d mod 8 must be equal %d\n", pCmd->digestOffset, fixOffset); return MV_BAD_PARAM; } } /* Check CRYPTO operation parameters */ if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) ) { /* CryptoOffset should be at least 4 byte aligned */ if( MV_IS_NOT_ALIGN(pCmd->cryptoOffset, 4) ) { mvOsPrintf("CesaAction: cryptoOffset=%d must be 4 byte aligned\n", pCmd->cryptoOffset); return MV_BAD_PARAM; } /* cryptoLength should be the whole number of blocks */ if( MV_IS_NOT_ALIGN(pCmd->cryptoLength, pSA->cryptoBlockSize) ) { mvOsPrintf("mvCesaAction: cryptoLength=%d must be %d byte aligned\n", pCmd->cryptoLength, 
pSA->cryptoBlockSize); return MV_BAD_PARAM; } if(fixOffset == 0xFF) { fixOffset = (pCmd->cryptoOffset % 8); } else { /* In addition all offsets should be the same alignment: 8 or 4 */ if( (pCmd->cryptoOffset % 8) != fixOffset) { mvOsPrintf("mvCesaAction: cryptoOffset %d mod 8 must be equal %d \n", pCmd->cryptoOffset, fixOffset); return MV_BAD_PARAM; } } /* check for CBC mode */ if(pSA->cryptoIvSize > 0) { /* cryptoIV must not be part of CryptoLength */ if( ((pCmd->ivOffset + pSA->cryptoIvSize) > pCmd->cryptoOffset) && (pCmd->ivOffset < (pCmd->cryptoOffset + pCmd->cryptoLength)) ) { mvOsPrintf("mvCesaFragParamCheck: cryptoIvOffset (%d) is part of cryptoLength (%d+%d)\n", pCmd->ivOffset, pCmd->macOffset, pCmd->macLength); return MV_BAD_PARAM; } /* ivOffset must be 4 byte aligned */ if( MV_IS_NOT_ALIGN(pCmd->ivOffset, 4) ) { mvOsPrintf("CesaAction: ivOffset=%d must be 4 byte aligned\n", pCmd->ivOffset); return MV_BAD_PARAM; } /* In addition all offsets should be the same alignment: 8 or 4 */ if( (pCmd->ivOffset % 8) != fixOffset) { mvOsPrintf("mvCesaAction: ivOffset %d mod 8 must be %d\n", pCmd->ivOffset, fixOffset); return MV_BAD_PARAM; } } } return MV_OK; } /******************************************************************************* * mvCesaFragParamCheck - * * DESCRIPTION: * * * INPUT: * MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd * * RETURN: * MV_STATUS * *******************************************************************************/ static MV_STATUS mvCesaFragParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd) { int offset; if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) ) { /* macOffset must be less that SRAM buffer size */ if(pCmd->macOffset > (sizeof(cesaSramVirtPtr->buf) - MV_CESA_AUTH_BLOCK_SIZE)) { mvOsPrintf("mvCesaFragParamCheck: macOffset is too large (%d)\n", pCmd->macOffset); return MV_BAD_PARAM; } /* macOffset+macSize must be more than mbufSize - SRAM buffer size */ if( ((pCmd->macOffset + 
pCmd->macLength) > pCmd->pSrc->mbufSize) || ((pCmd->pSrc->mbufSize - (pCmd->macOffset + pCmd->macLength)) >= sizeof(cesaSramVirtPtr->buf)) ) { mvOsPrintf("mvCesaFragParamCheck: macLength is too large (%d), mbufSize=%d\n", pCmd->macLength, pCmd->pSrc->mbufSize); return MV_BAD_PARAM; } } if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) ) { /* cryptoOffset must be less that SRAM buffer size */ /* 4 for possible fixOffset */ if( (pCmd->cryptoOffset + 4) > (sizeof(cesaSramVirtPtr->buf) - pSA->cryptoBlockSize)) { mvOsPrintf("mvCesaFragParamCheck: cryptoOffset is too large (%d)\n", pCmd->cryptoOffset); return MV_BAD_PARAM; } /* cryptoOffset+cryptoSize must be more than mbufSize - SRAM buffer size */ if( ((pCmd->cryptoOffset + pCmd->cryptoLength) > pCmd->pSrc->mbufSize) || ((pCmd->pSrc->mbufSize - (pCmd->cryptoOffset + pCmd->cryptoLength)) >= (sizeof(cesaSramVirtPtr->buf) - pSA->cryptoBlockSize)) ) { mvOsPrintf("mvCesaFragParamCheck: cryptoLength is too large (%d), mbufSize=%d\n", pCmd->cryptoLength, pCmd->pSrc->mbufSize); return MV_BAD_PARAM; } } /* When MAC_THEN_CRYPTO or CRYPTO_THEN_MAC */ if( ((pSA->config & MV_CESA_OPERATION_MASK) == (MV_CESA_MAC_THEN_CRYPTO << MV_CESA_OPERATION_OFFSET)) || ((pSA->config & MV_CESA_OPERATION_MASK) == (MV_CESA_CRYPTO_THEN_MAC << MV_CESA_OPERATION_OFFSET)) ) { if( (mvCtrlModelGet() == MV_5182_DEV_ID) || ( (mvCtrlModelGet() == MV_5181_DEV_ID) && (mvCtrlRevGet() >= MV_5181L_A0_REV) && (pCmd->macLength >= (1 << 14)) ) ) { return MV_NOT_ALLOWED; } /* abs(cryptoOffset-macOffset) must be aligned cryptoBlockSize */ if(pCmd->cryptoOffset > pCmd->macOffset) { offset = pCmd->cryptoOffset - pCmd->macOffset; } else { offset = pCmd->macOffset - pCmd->cryptoOffset; } if( MV_IS_NOT_ALIGN(offset, pSA->cryptoBlockSize) ) { /* mvOsPrintf("mvCesaFragParamCheck: (cryptoOffset - macOffset) must be %d byte aligned\n", pSA->cryptoBlockSize); */ return MV_NOT_ALLOWED; } /* Digest must not be part of CryptoLength */ 
if( ((pCmd->digestOffset + pSA->digestSize) > pCmd->cryptoOffset) && (pCmd->digestOffset < (pCmd->cryptoOffset + pCmd->cryptoLength)) ) { /* mvOsPrintf("mvCesaFragParamCheck: digestOffset (%d) is part of cryptoLength (%d+%d)\n", pCmd->digestOffset, pCmd->cryptoOffset, pCmd->cryptoLength); */ return MV_NOT_ALLOWED; } } return MV_OK; } /******************************************************************************* * mvCesaFragSizeFind - * * DESCRIPTION: * * * INPUT: * MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, * int cryptoOffset, int macOffset, * * OUTPUT: * int* pCopySize, int* pCryptoDataSize, int* pMacDataSize * * RETURN: * MV_STATUS * *******************************************************************************/ static void mvCesaFragSizeFind(MV_CESA_SA* pSA, MV_CESA_REQ* pReq, int cryptoOffset, int macOffset, int* pCopySize, int* pCryptoDataSize, int* pMacDataSize) { MV_CESA_COMMAND *pCmd = pReq->pCmd; int cryptoDataSize, macDataSize, copySize; cryptoDataSize = macDataSize = 0; copySize = *pCopySize; if( (pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) ) { cryptoDataSize = MV_MIN( (copySize - cryptoOffset), (pCmd->cryptoLength - (pReq->frags.cryptoSize + 1)) ); /* cryptoSize for each fragment must be the whole number of blocksSize */ if( MV_IS_NOT_ALIGN(cryptoDataSize, pSA->cryptoBlockSize) ) { cryptoDataSize = MV_ALIGN_DOWN(cryptoDataSize, pSA->cryptoBlockSize); copySize = cryptoOffset + cryptoDataSize; } } if( (pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) ) { macDataSize = MV_MIN( (copySize - macOffset), (pCmd->macLength - (pReq->frags.macSize + 1))); /* macSize for each fragment (except last) must be the whole number of blocksSize */ if( MV_IS_NOT_ALIGN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE) ) { macDataSize = MV_ALIGN_DOWN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE); copySize = macOffset + macDataSize; } cryptoDataSize = copySize - cryptoOffset; } *pCopySize = copySize; 
if(pCryptoDataSize != NULL) *pCryptoDataSize = cryptoDataSize; if(pMacDataSize != NULL) *pMacDataSize = macDataSize; }
gpl-2.0
TSCLKS/linux
drivers/video/backlight/jornada720_lcd.c
1873
2938
/*
 *
 * LCD driver for HP Jornada 700 series (710/720/728)
 * Copyright (C) 2006-2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 or any later version as published by the Free Software Foundation.
 *
 */

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#include <mach/jornada720.h>
#include <mach/hardware.h>

#include <video/s1d13xxxfb.h>

#define LCD_MAX_CONTRAST	0xff
#define LCD_DEF_CONTRAST	0x80

/* LCD power is sensed via the PPC_LDD2 GPIO: high = panel powered */
static int jornada_lcd_get_power(struct lcd_device *ld)
{
	return PPSR & PPC_LDD2 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}

/*
 * Read the current contrast from the MCU over SSP.
 * Returns the contrast value, 0 if the panel is powered down,
 * or -ETIMEDOUT if the MCU did not acknowledge the command.
 */
static int jornada_lcd_get_contrast(struct lcd_device *ld)
{
	int ret;

	/* MCU cannot report contrast while the panel is off */
	if (jornada_lcd_get_power(ld) != FB_BLANK_UNBLANK)
		return 0;

	jornada_ssp_start();

	/* MCU echoes TXDUMMY to acknowledge the command, then the value */
	if (jornada_ssp_byte(GETCONTRAST) == TXDUMMY) {
		ret = jornada_ssp_byte(TXDUMMY);
		goto success;
	}

	/* FIX: this is the get path - message used to say "set contrast" */
	dev_err(&ld->dev, "failed to get contrast\n");
	ret = -ETIMEDOUT;

success:
	jornada_ssp_end();
	return ret;
}

/*
 * Push a new contrast value to the MCU over SSP.
 * Returns 0 on success or -ETIMEDOUT if the MCU did not acknowledge.
 */
static int jornada_lcd_set_contrast(struct lcd_device *ld, int value)
{
	int ret = 0;

	jornada_ssp_start();

	/* start by sending our set contrast cmd to mcu */
	if (jornada_ssp_byte(SETCONTRAST) == TXDUMMY) {
		/* if successful push the new value */
		if (jornada_ssp_byte(value) == TXDUMMY)
			goto success;
	}

	dev_err(&ld->dev, "failed to set contrast\n");
	ret = -ETIMEDOUT;

success:
	jornada_ssp_end();
	return ret;
}

/* Drive PPC_LDD2 to switch panel power; any non-UNBLANK level powers down */
static int jornada_lcd_set_power(struct lcd_device *ld, int power)
{
	if (power != FB_BLANK_UNBLANK) {
		PPSR &= ~PPC_LDD2;
		PPDR |= PPC_LDD2;	/* ensure the line is driven as output */
	} else {
		PPSR |= PPC_LDD2;
	}

	return 0;
}

static struct lcd_ops jornada_lcd_props = {
	.get_contrast = jornada_lcd_get_contrast,
	.set_contrast = jornada_lcd_set_contrast,
	.get_power = jornada_lcd_get_power,
	.set_power = jornada_lcd_set_power,
};

static int jornada_lcd_probe(struct platform_device *pdev)
{
	struct lcd_device *lcd_device;
	int ret;

	lcd_device = devm_lcd_device_register(&pdev->dev, S1D_DEVICENAME,
					&pdev->dev, NULL, &jornada_lcd_props);

	if (IS_ERR(lcd_device)) {
		ret = PTR_ERR(lcd_device);
		dev_err(&pdev->dev, "failed to register device\n");
		return ret;
	}

	platform_set_drvdata(pdev, lcd_device);

	/* lets set our default values */
	jornada_lcd_set_contrast(lcd_device, LCD_DEF_CONTRAST);
	jornada_lcd_set_power(lcd_device, FB_BLANK_UNBLANK);
	/* give it some time to startup */
	msleep(100);

	return 0;
}

static struct platform_driver jornada_lcd_driver = {
	.probe	= jornada_lcd_probe,
	.driver	= {
		.name	= "jornada_lcd",
	},
};

module_platform_driver(jornada_lcd_driver);

MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
MODULE_LICENSE("GPL");
gpl-2.0
mus1711/nitro_kernel
drivers/net/wireless/rtlwifi/rtl8192se/rf.c
2129
14875
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "reg.h" #include "def.h" #include "phy.h" #include "rf.h" #include "dm.h" static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel, u8 chnl, u32 *ofdmbase, u32 *mcsbase, u8 *p_final_pwridx) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 pwrbase0, pwrbase1; u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0; u8 i, pwrlevel[4]; for (i = 0; i < 2; i++) pwrlevel[i] = p_pwrlevel[i]; /* We only care about the path A for legacy. */ if (rtlefuse->eeprom_version < 2) { pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf); } else if (rtlefuse->eeprom_version >= 2) { legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff [RF90_PATH_A][chnl - 1]; /* For legacy OFDM, tx pwr always > HT OFDM pwr. 
* We do not care Path B * legacy OFDM pwr diff. NO BB register * to notify HW. */ pwrbase0 = pwrlevel[0] + legacy_pwrdiff; } pwrbase0 = (pwrbase0 << 24) | (pwrbase0 << 16) | (pwrbase0 << 8) | pwrbase0; *ofdmbase = pwrbase0; /* MCS rates */ if (rtlefuse->eeprom_version >= 2) { /* Check HT20 to HT40 diff */ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { for (i = 0; i < 2; i++) { /* rf-A, rf-B */ /* HT 20<->40 pwr diff */ ht20_pwrdiff = rtlefuse->txpwr_ht20diff [i][chnl - 1]; if (ht20_pwrdiff < 8) /* 0~+7 */ pwrlevel[i] += ht20_pwrdiff; else /* index8-15=-8~-1 */ pwrlevel[i] -= (16 - ht20_pwrdiff); } } } /* use index of rf-A */ pwrbase1 = pwrlevel[0]; pwrbase1 = (pwrbase1 << 24) | (pwrbase1 << 16) | (pwrbase1 << 8) | pwrbase1; *mcsbase = pwrbase1; /* The following is for Antenna * diff from Ant-B to Ant-A */ p_final_pwridx[0] = pwrlevel[0]; p_final_pwridx[1] = pwrlevel[1]; switch (rtlefuse->eeprom_regulatory) { case 3: /* The following is for calculation * of the power diff for Ant-B to Ant-A. 
*/ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { p_final_pwridx[0] += rtlefuse->pwrgroup_ht40 [RF90_PATH_A][ chnl - 1]; p_final_pwridx[1] += rtlefuse->pwrgroup_ht40 [RF90_PATH_B][ chnl - 1]; } else { p_final_pwridx[0] += rtlefuse->pwrgroup_ht20 [RF90_PATH_A][ chnl - 1]; p_final_pwridx[1] += rtlefuse->pwrgroup_ht20 [RF90_PATH_B][ chnl - 1]; } break; default: break; } if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "40MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n", p_final_pwridx[0], p_final_pwridx[1]); } else { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "20MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n", p_final_pwridx[0], p_final_pwridx[1]); } } static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw, u8 *p_final_pwridx) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); char ant_pwr_diff = 0; u32 u4reg_val = 0; if (rtlphy->rf_type == RF_2T2R) { ant_pwr_diff = p_final_pwridx[1] - p_final_pwridx[0]; /* range is from 7~-8, * index = 0x0~0xf */ if (ant_pwr_diff > 7) ant_pwr_diff = 7; if (ant_pwr_diff < -8) ant_pwr_diff = -8; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Antenna Diff from RF-B to RF-A = %d (0x%x)\n", ant_pwr_diff, ant_pwr_diff & 0xf); ant_pwr_diff &= 0xf; } /* Antenna TX power difference */ rtlefuse->antenna_txpwdiff[2] = 0;/* RF-D, don't care */ rtlefuse->antenna_txpwdiff[1] = 0;/* RF-C, don't care */ rtlefuse->antenna_txpwdiff[0] = (u8)(ant_pwr_diff); /* RF-B */ u4reg_val = rtlefuse->antenna_txpwdiff[2] << 8 | rtlefuse->antenna_txpwdiff[1] << 4 | rtlefuse->antenna_txpwdiff[0]; rtl_set_bbreg(hw, RFPGA0_TXGAINSTAGE, (BXBTXAGC | BXCTXAGC | BXDTXAGC), u4reg_val); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Write BCD-Diff(0x%x) = 0x%x\n", RFPGA0_TXGAINSTAGE, u4reg_val); } static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw, u8 chnl, u8 index, u32 pwrbase0, u32 pwrbase1, u32 *p_outwrite_val) { struct 
rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup, pwrdiff_limit[4]; u32 writeval, customer_limit; /* Index 0 & 1= legacy OFDM, 2-5=HT_MCS rate */ switch (rtlefuse->eeprom_regulatory) { case 0: /* Realtek better performance increase power diff * defined by Realtek for large power */ chnlgroup = 0; writeval = rtlphy->mcs_offset[chnlgroup][index] + ((index < 2) ? pwrbase0 : pwrbase1); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "RTK better performance, writeval = 0x%x\n", writeval); break; case 1: /* Realtek regulatory increase power diff defined * by Realtek for regulatory */ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { writeval = ((index < 2) ? pwrbase0 : pwrbase1); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Realtek regulatory, 40MHz, writeval = 0x%x\n", writeval); } else { if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; if (rtlphy->pwrgroup_cnt >= 3) { if (chnl <= 3) chnlgroup = 0; else if (chnl >= 4 && chnl <= 8) chnlgroup = 1; else if (chnl > 8) chnlgroup = 2; if (rtlphy->pwrgroup_cnt == 4) chnlgroup++; } writeval = rtlphy->mcs_offset[chnlgroup][index] + ((index < 2) ? pwrbase0 : pwrbase1); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Realtek regulatory, 20MHz, writeval = 0x%x\n", writeval); } break; case 2: /* Better regulatory don't increase any power diff */ writeval = ((index < 2) ? pwrbase0 : pwrbase1); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Better regulatory, writeval = 0x%x\n", writeval); break; case 3: /* Customer defined power diff. increase power diff defined by customer. 
*/ chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "customer's limit, 40MHz = 0x%x\n", rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]); } else { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "customer's limit, 20MHz = 0x%x\n", rtlefuse->pwrgroup_ht20 [RF90_PATH_A][chnl - 1]); } for (i = 0; i < 4; i++) { pwrdiff_limit[i] = (u8)((rtlphy->mcs_offset [chnlgroup][index] & (0x7f << (i * 8))) >> (i * 8)); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (pwrdiff_limit[i] > rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]) { pwrdiff_limit[i] = rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]; } } else { if (pwrdiff_limit[i] > rtlefuse->pwrgroup_ht20 [RF90_PATH_A][chnl - 1]) { pwrdiff_limit[i] = rtlefuse->pwrgroup_ht20 [RF90_PATH_A][chnl - 1]; } } } customer_limit = (pwrdiff_limit[3] << 24) | (pwrdiff_limit[2] << 16) | (pwrdiff_limit[1] << 8) | (pwrdiff_limit[0]); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Customer's limit = 0x%x\n", customer_limit); writeval = customer_limit + ((index < 2) ? pwrbase0 : pwrbase1); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Customer, writeval = 0x%x\n", writeval); break; default: chnlgroup = 0; writeval = rtlphy->mcs_offset[chnlgroup][index] + ((index < 2) ? 
pwrbase0 : pwrbase1); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "RTK better performance, writeval = 0x%x\n", writeval); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TX_HIGH_PWR_LEVEL_LEVEL1) writeval = 0x10101010; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TX_HIGH_PWR_LEVEL_LEVEL2) writeval = 0x0; *p_outwrite_val = writeval; } static void _rtl92s_write_ofdm_powerreg(struct ieee80211_hw *hw, u8 index, u32 val) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u16 regoffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c}; u8 i, rfa_pwr[4]; u8 rfa_lower_bound = 0, rfa_upper_bound = 0, rf_pwr_diff = 0; u32 writeval = val; /* If path A and Path B coexist, we must limit Path A tx power. * Protect Path B pwr over or under flow. We need to calculate * upper and lower bound of path A tx power. */ if (rtlphy->rf_type == RF_2T2R) { rf_pwr_diff = rtlefuse->antenna_txpwdiff[0]; /* Diff=-8~-1 */ if (rf_pwr_diff >= 8) { /* Prevent underflow!! */ rfa_lower_bound = 0x10 - rf_pwr_diff; /* if (rf_pwr_diff >= 0) Diff = 0-7 */ } else { rfa_upper_bound = RF6052_MAX_TX_PWR - rf_pwr_diff; } } for (i = 0; i < 4; i++) { rfa_pwr[i] = (u8)((writeval & (0x7f << (i * 8))) >> (i * 8)); if (rfa_pwr[i] > RF6052_MAX_TX_PWR) rfa_pwr[i] = RF6052_MAX_TX_PWR; /* If path A and Path B coexist, we must limit Path A tx power. * Protect Path B pwr over or under flow. We need to calculate * upper and lower bound of path A tx power. */ if (rtlphy->rf_type == RF_2T2R) { /* Diff=-8~-1 */ if (rf_pwr_diff >= 8) { /* Prevent underflow!! 
*/ if (rfa_pwr[i] < rfa_lower_bound) rfa_pwr[i] = rfa_lower_bound; /* Diff = 0-7 */ } else if (rf_pwr_diff >= 1) { /* Prevent overflow */ if (rfa_pwr[i] > rfa_upper_bound) rfa_pwr[i] = rfa_upper_bound; } } } writeval = (rfa_pwr[3] << 24) | (rfa_pwr[2] << 16) | (rfa_pwr[1] << 8) | rfa_pwr[0]; rtl_set_bbreg(hw, regoffset[index], 0x7f7f7f7f, writeval); } void rtl92s_phy_rf6052_set_ofdmtxpower(struct ieee80211_hw *hw, u8 *p_pwrlevel, u8 chnl) { u32 writeval, pwrbase0, pwrbase1; u8 index = 0; u8 finalpwr_idx[4]; _rtl92s_get_powerbase(hw, p_pwrlevel, chnl, &pwrbase0, &pwrbase1, &finalpwr_idx[0]); _rtl92s_set_antennadiff(hw, &finalpwr_idx[0]); for (index = 0; index < 6; index++) { _rtl92s_get_txpower_writeval_byregulatory(hw, chnl, index, pwrbase0, pwrbase1, &writeval); _rtl92s_write_ofdm_powerreg(hw, index, writeval); } } void rtl92s_phy_rf6052_set_ccktxpower(struct ieee80211_hw *hw, u8 pwrlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 txagc = 0; bool dont_inc_cck_or_turboscanoff = false; if (((rtlefuse->eeprom_version >= 2) && (rtlefuse->txpwr_safetyflag == 1)) || ((rtlefuse->eeprom_version >= 2) && (rtlefuse->eeprom_regulatory != 0))) dont_inc_cck_or_turboscanoff = true; if (mac->act_scanning) { txagc = 0x3f; if (dont_inc_cck_or_turboscanoff) txagc = pwrlevel; } else { txagc = pwrlevel; if (rtlpriv->dm.dynamic_txhighpower_lvl == TX_HIGH_PWR_LEVEL_LEVEL1) txagc = 0x10; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TX_HIGH_PWR_LEVEL_LEVEL2) txagc = 0x0; } if (txagc > RF6052_MAX_TX_PWR) txagc = RF6052_MAX_TX_PWR; rtl_set_bbreg(hw, RTXAGC_CCK_MCS32, BTX_AGCRATECCK, txagc); } bool rtl92s_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u32 u4reg_val = 0; u8 rfpath; bool rtstatus = true; struct bb_reg_def *pphyreg; /* Initialize RF */ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; 
rfpath++) { pphyreg = &rtlphy->phyreg_def[rfpath]; /* Store original RFENV control type */ switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: u4reg_val = rtl92s_phy_query_bb_reg(hw, pphyreg->rfintfs, BRFSI_RFENV); break; case RF90_PATH_B: case RF90_PATH_D: u4reg_val = rtl92s_phy_query_bb_reg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); break; } /* Set RF_ENV enable */ rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); /* Set RF_ENV output high */ rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); /* Set bit number of Address and Data for RF register */ rtl92s_phy_set_bb_reg(hw, pphyreg->rfhssi_para2, B3WIRE_ADDRESSLENGTH, 0x0); rtl92s_phy_set_bb_reg(hw, pphyreg->rfhssi_para2, B3WIRE_DATALENGTH, 0x0); /* Initialize RF fom connfiguration file */ switch (rfpath) { case RF90_PATH_A: rtstatus = rtl92s_phy_config_rf(hw, (enum radio_path)rfpath); break; case RF90_PATH_B: rtstatus = rtl92s_phy_config_rf(hw, (enum radio_path)rfpath); break; case RF90_PATH_C: break; case RF90_PATH_D: break; } /* Restore RFENV control type */ switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4reg_val); break; case RF90_PATH_B: case RF90_PATH_D: rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, u4reg_val); break; } if (!rtstatus) { pr_err("Radio[%d] Fail!!\n", rfpath); goto fail; } } return rtstatus; fail: return rtstatus; } void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); switch (bandwidth) { case HT_CHANNEL_WIDTH_20: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff) | 0x0400); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; case HT_CHANNEL_WIDTH_20_40: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff)); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; 
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "unknown bandwidth: %#X\n", bandwidth); break; } }
gpl-2.0
Dee-UK/D33_KK_RK3066
drivers/net/enic/vnic_dev.c
2385
20207
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/*
 * vNIC device support: discovers BAR-mapped hardware resources, issues
 * firmware "devcmd" commands through a shared MMIO mailbox, and manages
 * the DMA-coherent notify/stats/fw_info buffers shared with firmware.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"

/* How a devcmd reaches the firmware: directly, or proxied by bus/dev/fn. */
enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
};

/* One discovered hardware resource region (per-type slice of a BAR). */
struct vnic_res {
	void __iomem *vaddr;	/* kernel mapping of the region */
	dma_addr_t bus_addr;	/* bus address of the region */
	unsigned int count;	/* number of entries of this resource type */
};

/* Per-device state for the vNIC device-command layer. */
struct vnic_dev {
	void *priv;				/* opaque pointer for the upper driver */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* filled by vnic_dev_discover_res() */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* devcmd mailbox in BAR space */
	struct vnic_devcmd_notify *notify;	/* DMA buffer firmware updates */
	struct vnic_devcmd_notify notify_copy;	/* checksummed snapshot of *notify */
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* DMA buffer for CMD_STATS_DUMP */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached firmware info (DMA buffer) */
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;			/* bdf used when proxy == PROXY_BY_BDF */
	u64 args[VNIC_DEVCMD_NARGS];		/* staging area for devcmd arguments */
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
/* Queue/CQ/interrupt control blocks are laid out at this fixed stride. */
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

/*
 * Walk the resource table at the start of BAR0 (normal or mgmt vNIC
 * layout) and record the mapping/count of every recognized resource
 * type in vdev->res[].  Entries referencing unknown types, unmapped
 * BARs, or out-of-range BAR indices are skipped; bounds are validated
 * for the stride-based types.  Returns 0 or -EINVAL.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	/* Both header layouts start at the same BAR0 offset. */
	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
		(ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
			(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	/* Resource entries immediately follow whichever header matched. */
	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

/*
 * Return the MMIO address of entry 'index' for a resource type, or the
 * base address for types that are not stride-indexed.  NULL if the type
 * was never discovered.  'index' is not range-checked here.
 */
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

/*
 * Compute the (over-)allocation size for a descriptor ring and record
 * the aligned geometry in 'ring'.  Returns the unaligned byte size to
 * allocate; vnic_dev_alloc_desc_ring() later aligns the base address.
 */
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

/*
 * Allocate a DMA-coherent descriptor ring, align its base to
 * ring->base_align, zero it, and initialize the free-descriptor count
 * (one descriptor is reserved).  Returns 0 or -ENOMEM.
 */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	/* Keep the CPU pointer at the same offset as the aligned bus addr. */
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

/*
 * Issue one devcmd through the MMIO mailbox and busy-wait (wait * 100us)
 * for completion.  Arguments are taken from / returned in vdev->args[].
 * A status of all-ones means the PCIe device has disappeared.  The
 * wmb()/rmb() pairs order the args[] accesses against the cmd/status
 * register accesses.  Returns 0, a firmware error code, or -errno.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				/* "unknown cmd" during capability probing is expected */
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

/*
 * Wrap 'cmd' in a CMD_PROXY_BY_BDF devcmd: args[0] carries the target
 * bus/dev/fn, args[1] the inner command, args[2..3] its a0/a1.  On
 * success the inner command's results come back shifted by one slot.
 */
static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index; /* bdf */
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

/* Issue 'cmd' directly; a0/a1 are copied in and out of vdev->args[]. */
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = _vnic_dev_cmd(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

/*
 * Public devcmd entry point: dispatch to the direct or proxied path
 * depending on how the device was configured.
 */
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

/* Returns nonzero iff firmware reports 'cmd' as supported (a0 == 0, no error). */
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

/*
 * Fetch firmware info into a lazily allocated DMA buffer and cache it
 * for the lifetime of the device; subsequent calls return the cache.
 * Falls back to the older CMD_MCPU_FW_INFO_OLD if the new command is
 * unknown to firmware.
 */
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
		if (err == ERR_ECMDUNKNOWN) {
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				&a0, &a1, wait);
		}
	}

	*fw_info = vdev->fw_info;

	return err;
}

/*
 * Read a 1/2/4/8-byte device-specific config value at 'offset' via
 * CMD_DEV_SPEC and store it in *value.  BUG()s on any other size.
 */
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

/*
 * Ask firmware to DMA the current stats into a lazily allocated buffer
 * and hand the caller a pointer to it.
 */
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

/* Enable the vNIC; falls back to the legacy CMD_ENABLE if unknown. */
int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

	return err;
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

/* Poll for CMD_OPEN completion: *done is set iff firmware reports a0 == 0. */
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

/*
 * Hang-recovery reset; if firmware lacks CMD_HANG_RESET, emulate it
 * with a soft reset followed by re-initialization.
 */
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN) {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;

		return vnic_dev_init(vdev, 0);
	}

	return err;
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
	if (err) {
		/* Older firmware: fall back to the soft-reset status poll. */
		if (err == ERR_ECMDUNKNOWN)
			return vnic_dev_soft_reset_done(vdev, done);
		return err;
	}

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	/* NOTE(review): a0/a1 are passed to the devcmd uninitialized;
	 * presumably CMD_HANG_NOTIFY ignores its arguments, but reading
	 * indeterminate locals is technically UB -- confirm/zero-init. */
	u64 a0, a1;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

/*
 * Read the device's MAC address into mac_addr[ETH_ALEN].  The address
 * is returned by firmware packed into the low bytes of a0.
 */
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	/* NOTE(review): a0/a1 are uninitialized on input here as well. */
	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

/* Program the RX packet filter from the given boolean flags. */
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

/* Add a unicast/multicast filter address (packed into a0). */
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);

	return err;
}

/* Remove a previously added filter address. */
int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);

	return err;
}

/* Set ingress VLAN rewrite mode; treated as success on old firmware. */
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return 0;

	return err;
}

/*
 * Register the notify DMA buffer with firmware: a0 = buffer PA,
 * a1 = interrupt number (bits 32..47) + buffer size.  On success a1
 * returns the notify size firmware will actually write.
 */
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
	return r;
}

/* Allocate the notify buffer and register it with firmware. */
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		pr_err("notify block %p still allocated", vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
		sizeof(struct vnic_devcmd_notify),
		&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

/* Tell firmware to stop updating the notify buffer and clear our state. */
static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	/* NOTE(review): the buffer is freed before the unset devcmd is
	 * issued, leaving a window where firmware could still DMA into
	 * freed memory -- verify intended ordering. */
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

/*
 * Snapshot the firmware-updated notify buffer into notify_copy,
 * retrying until the embedded checksum (word 0 vs sum of the rest)
 * proves a consistent copy.  Returns 1 when a snapshot was taken,
 * 0 when no notify buffer is registered.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

/*
 * Initialize the vNIC.  On firmware without CMD_INIT, use the legacy
 * CMD_INIT_v1 and, if a default MAC is flagged, emulate the MAC setup
 * the new command would have done.
 */
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}

/* The following accessors read the last consistent notify snapshot. */

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

/* Free all DMA buffers owned by the device, then the device itself. */
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

/*
 * Create (or reuse) a vnic_dev, discover its BAR resources, and locate
 * the devcmd mailbox.  Returns NULL on failure, freeing the device.
 */
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

/*
 * Hand a provisioning blob to firmware: copy it into a temporary
 * DMA-coherent bounce buffer, run CMD_INIT_PROV_INFO2, then free it.
 */
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

/* Poll the completion status of a previously issued async devcmd. */
static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}
gpl-2.0
TOPEET-Develop/iTop4412_kernel_public
drivers/acpi/fan.c
2641
5794
/* * acpi_fan.c - ACPI Fan Driver ($Revision: 29 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <asm/uaccess.h> #include <linux/thermal.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #define PREFIX "ACPI: " #define ACPI_FAN_CLASS "fan" #define ACPI_FAN_FILE_STATE "state" #define _COMPONENT ACPI_FAN_COMPONENT ACPI_MODULE_NAME("fan"); MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Fan Driver"); MODULE_LICENSE("GPL"); static int acpi_fan_add(struct acpi_device *device); static int acpi_fan_remove(struct acpi_device *device, int type); static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state); static int acpi_fan_resume(struct acpi_device *device); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); static struct acpi_driver acpi_fan_driver = { .name = "fan", .class = ACPI_FAN_CLASS, .ids = 
fan_device_ids, .ops = { .add = acpi_fan_add, .remove = acpi_fan_remove, .suspend = acpi_fan_suspend, .resume = acpi_fan_resume, }, }; /* thermal cooling device callbacks */ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { /* ACPI fan device only support two states: ON/OFF */ *state = 1; return 0; } static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct acpi_device *device = cdev->devdata; int result; int acpi_state; if (!device) return -EINVAL; result = acpi_bus_update_power(device->handle, &acpi_state); if (result) return result; *state = (acpi_state == ACPI_STATE_D3 ? 0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1)); return 0; } static int fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct acpi_device *device = cdev->devdata; int result; if (!device || (state != 0 && state != 1)) return -EINVAL; result = acpi_bus_set_power(device->handle, state ? ACPI_STATE_D0 : ACPI_STATE_D3); return result; } static struct thermal_cooling_device_ops fan_cooling_ops = { .get_max_state = fan_get_max_state, .get_cur_state = fan_get_cur_state, .set_cur_state = fan_set_cur_state, }; /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static int acpi_fan_add(struct acpi_device *device) { int result = 0; struct thermal_cooling_device *cdev; if (!device) return -EINVAL; strcpy(acpi_device_name(device), "Fan"); strcpy(acpi_device_class(device), ACPI_FAN_CLASS); result = acpi_bus_update_power(device->handle, NULL); if (result) { printk(KERN_ERR PREFIX "Setting initial power state\n"); goto end; } cdev = thermal_cooling_device_register("Fan", device, &fan_cooling_ops); if (IS_ERR(cdev)) { result = PTR_ERR(cdev); goto end; } dev_dbg(&device->dev, "registered as cooling_device%d\n", cdev->id); device->driver_data = cdev; result = 
sysfs_create_link(&device->dev.kobj, &cdev->device.kobj, "thermal_cooling"); if (result) dev_err(&device->dev, "Failed to create sysfs link " "'thermal_cooling'\n"); result = sysfs_create_link(&cdev->device.kobj, &device->dev.kobj, "device"); if (result) dev_err(&device->dev, "Failed to create sysfs link " "'device'\n"); printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); end: return result; } static int acpi_fan_remove(struct acpi_device *device, int type) { struct thermal_cooling_device *cdev = acpi_driver_data(device); if (!device || !cdev) return -EINVAL; sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); sysfs_remove_link(&cdev->device.kobj, "device"); thermal_cooling_device_unregister(cdev); return 0; } static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state) { if (!device) return -EINVAL; acpi_bus_set_power(device->handle, ACPI_STATE_D0); return AE_OK; } static int acpi_fan_resume(struct acpi_device *device) { int result; if (!device) return -EINVAL; result = acpi_bus_update_power(device->handle, NULL); if (result) printk(KERN_ERR PREFIX "Error updating fan power state\n"); return result; } static int __init acpi_fan_init(void) { int result = 0; result = acpi_bus_register_driver(&acpi_fan_driver); if (result < 0) return -ENODEV; return 0; } static void __exit acpi_fan_exit(void) { acpi_bus_unregister_driver(&acpi_fan_driver); return; } module_init(acpi_fan_init); module_exit(acpi_fan_exit);
gpl-2.0
StelixROM/android_kernel_htc_msm8974
drivers/i2c/busses/i2c-puv3.c
4945
6478
/* * I2C driver for PKUnity-v3 SoC * Code specific to PKUnity SoC and UniCore ISA * * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> * Copyright (C) 2001-2010 Guan Xuetao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/io.h> #include <mach/hardware.h> /* * Poll the i2c status register until the specified bit is set. * Returns 0 if timed out (100 msec). */ static short poll_status(unsigned long bit) { int loop_cntr = 1000; if (bit & I2C_STATUS_TFNF) { do { udelay(10); } while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0)); } else { /* RXRDY handler */ do { if (readl(I2C_TAR) == I2C_TAR_EEPROM) msleep(20); else udelay(10); } while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0)); } return (loop_cntr > 0); } static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length) { int i2c_reg = *buf; /* Read data */ while (length--) { if (!poll_status(I2C_STATUS_TFNF)) { dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n"); return -ETIMEDOUT; } /* send addr */ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD); /* get ready to next write */ i2c_reg++; /* send read CMD */ writel(I2C_DATACMD_READ, I2C_DATACMD); /* wait until the Rx FIFO have available */ if (!poll_status(I2C_STATUS_RFNE)) { dev_dbg(&adap->dev, "RXRDY timeout\n"); return -ETIMEDOUT; } /* read the data to buf */ *buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK); buf++; } return 0; } static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length) { int i2c_reg = *buf; /* Do nothing but storing the reg_num to a static variable */ if (i2c_reg == -1) { printk(KERN_WARNING "Error i2c 
reg\n"); return -ETIMEDOUT; } if (length == 1) return 0; buf++; length--; while (length--) { /* send addr */ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD); /* send write CMD */ writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD); /* wait until the Rx FIFO have available */ msleep(20); /* read the data to buf */ i2c_reg++; buf++; } return 0; } /* * Generic i2c master transfer entrypoint. * */ static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num) { int i, ret; unsigned char swap; /* Disable i2c */ writel(I2C_ENABLE_DISABLE, I2C_ENABLE); /* Set the work mode and speed*/ writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON); writel(pmsg->addr, I2C_TAR); /* Enable i2c */ writel(I2C_ENABLE_ENABLE, I2C_ENABLE); dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num); for (i = 0; i < num; i++) { dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i, pmsg->flags & I2C_M_RD ? "read" : "writ", pmsg->len, pmsg->len > 1 ? "s" : "", pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr); if (pmsg->len && pmsg->buf) { /* sanity check */ if (pmsg->flags & I2C_M_RD) ret = xfer_read(adap, pmsg->buf, pmsg->len); else ret = xfer_write(adap, pmsg->buf, pmsg->len); if (ret) return ret; } dev_dbg(&adap->dev, "transfer complete\n"); pmsg++; /* next message */ } /* XXX: fixup be16_to_cpu in bq27x00_battery.c */ if (pmsg->addr == I2C_TAR_PWIC) { swap = pmsg->buf[0]; pmsg->buf[0] = pmsg->buf[1]; pmsg->buf[1] = swap; } return i; } /* * Return list of supported functionality. */ static u32 puv3_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm puv3_i2c_algorithm = { .master_xfer = puv3_i2c_xfer, .functionality = puv3_i2c_func, }; /* * Main initialization routine. 
*/ static int __devinit puv3_i2c_probe(struct platform_device *pdev) { struct i2c_adapter *adapter; struct resource *mem; int rc; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) return -ENODEV; if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c")) return -EBUSY; adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); if (adapter == NULL) { dev_err(&pdev->dev, "can't allocate inteface!\n"); rc = -ENOMEM; goto fail_nomem; } snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x", mem->start); adapter->algo = &puv3_i2c_algorithm; adapter->class = I2C_CLASS_HWMON; adapter->dev.parent = &pdev->dev; platform_set_drvdata(pdev, adapter); adapter->nr = pdev->id; rc = i2c_add_numbered_adapter(adapter); if (rc) { dev_err(&pdev->dev, "Adapter '%s' registration failed\n", adapter->name); goto fail_add_adapter; } dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n"); return 0; fail_add_adapter: platform_set_drvdata(pdev, NULL); kfree(adapter); fail_nomem: release_mem_region(mem->start, resource_size(mem)); return rc; } static int __devexit puv3_i2c_remove(struct platform_device *pdev) { struct i2c_adapter *adapter = platform_get_drvdata(pdev); struct resource *mem; int rc; rc = i2c_del_adapter(adapter); if (rc) { dev_err(&pdev->dev, "Adapter '%s' delete fail\n", adapter->name); return rc; } put_device(&pdev->dev); platform_set_drvdata(pdev, NULL); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); return rc; } #ifdef CONFIG_PM static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state) { int poll_count; /* Disable the IIC */ writel(I2C_ENABLE_DISABLE, I2C_ENABLE); for (poll_count = 0; poll_count < 50; poll_count++) { if (readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE) udelay(25); } return 0; } static int puv3_i2c_resume(struct platform_device *dev) { return 0 ; } #else #define puv3_i2c_suspend NULL #define puv3_i2c_resume NULL #endif static struct platform_driver 
puv3_i2c_driver = { .probe = puv3_i2c_probe, .remove = __devexit_p(puv3_i2c_remove), .suspend = puv3_i2c_suspend, .resume = puv3_i2c_resume, .driver = { .name = "PKUnity-v3-I2C", .owner = THIS_MODULE, } }; module_platform_driver(puv3_i2c_driver); MODULE_DESCRIPTION("PKUnity v3 I2C driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:puv3_i2c");
gpl-2.0
vaginessa/Googy-Max3-Kernel
drivers/media/video/ov6650.c
4945
27511
/* * V4L2 SoC Camera driver for OmniVision OV6650 Camera Sensor * * Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> * * Based on OmniVision OV96xx Camera Driver * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> * * Based on ov772x camera driver: * Copyright (C) 2008 Renesas Solutions Corp. * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on ov7670 and soc_camera_platform driver, * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net> * Copyright (C) 2008 Magnus Damm * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * Hardware specific bits initialy based on former work by Matt Callow * drivers/media/video/omap/sensor_ov6650.c * Copyright (C) 2006 Matt Callow * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <linux/module.h> #include <media/soc_camera.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> /* Register definitions */ #define REG_GAIN 0x00 /* range 00 - 3F */ #define REG_BLUE 0x01 #define REG_RED 0x02 #define REG_SAT 0x03 /* [7:4] saturation [0:3] reserved */ #define REG_HUE 0x04 /* [7:6] rsrvd [5] hue en [4:0] hue */ #define REG_BRT 0x06 #define REG_PIDH 0x0a #define REG_PIDL 0x0b #define REG_AECH 0x10 #define REG_CLKRC 0x11 /* Data Format and Internal Clock */ /* [7:6] Input system clock (MHz)*/ /* 00=8, 01=12, 10=16, 11=24 */ /* [5:0]: Internal Clock Pre-Scaler */ #define REG_COMA 0x12 /* [7] Reset */ #define REG_COMB 0x13 #define REG_COMC 0x14 #define REG_COMD 0x15 #define REG_COML 0x16 #define REG_HSTRT 0x17 #define REG_HSTOP 0x18 #define REG_VSTRT 0x19 #define REG_VSTOP 0x1a #define REG_PSHFT 0x1b #define REG_MIDH 0x1c #define REG_MIDL 0x1d #define REG_HSYNS 0x1e #define REG_HSYNE 0x1f #define REG_COME 0x20 
#define REG_YOFF 0x21 #define REG_UOFF 0x22 #define REG_VOFF 0x23 #define REG_AEW 0x24 #define REG_AEB 0x25 #define REG_COMF 0x26 #define REG_COMG 0x27 #define REG_COMH 0x28 #define REG_COMI 0x29 #define REG_FRARL 0x2b #define REG_COMJ 0x2c #define REG_COMK 0x2d #define REG_AVGY 0x2e #define REG_REF0 0x2f #define REG_REF1 0x30 #define REG_REF2 0x31 #define REG_FRAJH 0x32 #define REG_FRAJL 0x33 #define REG_FACT 0x34 #define REG_L1AEC 0x35 #define REG_AVGU 0x36 #define REG_AVGV 0x37 #define REG_SPCB 0x60 #define REG_SPCC 0x61 #define REG_GAM1 0x62 #define REG_GAM2 0x63 #define REG_GAM3 0x64 #define REG_SPCD 0x65 #define REG_SPCE 0x68 #define REG_ADCL 0x69 #define REG_RMCO 0x6c #define REG_GMCO 0x6d #define REG_BMCO 0x6e /* Register bits, values, etc. */ #define OV6650_PIDH 0x66 /* high byte of product ID number */ #define OV6650_PIDL 0x50 /* low byte of product ID number */ #define OV6650_MIDH 0x7F /* high byte of mfg ID */ #define OV6650_MIDL 0xA2 /* low byte of mfg ID */ #define DEF_GAIN 0x00 #define DEF_BLUE 0x80 #define DEF_RED 0x80 #define SAT_SHIFT 4 #define SAT_MASK (0xf << SAT_SHIFT) #define SET_SAT(x) (((x) << SAT_SHIFT) & SAT_MASK) #define HUE_EN BIT(5) #define HUE_MASK 0x1f #define DEF_HUE 0x10 #define SET_HUE(x) (HUE_EN | ((x) & HUE_MASK)) #define DEF_AECH 0x4D #define CLKRC_6MHz 0x00 #define CLKRC_12MHz 0x40 #define CLKRC_16MHz 0x80 #define CLKRC_24MHz 0xc0 #define CLKRC_DIV_MASK 0x3f #define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1) #define COMA_RESET BIT(7) #define COMA_QCIF BIT(5) #define COMA_RAW_RGB BIT(4) #define COMA_RGB BIT(3) #define COMA_BW BIT(2) #define COMA_WORD_SWAP BIT(1) #define COMA_BYTE_SWAP BIT(0) #define DEF_COMA 0x00 #define COMB_FLIP_V BIT(7) #define COMB_FLIP_H BIT(5) #define COMB_BAND_FILTER BIT(4) #define COMB_AWB BIT(2) #define COMB_AGC BIT(1) #define COMB_AEC BIT(0) #define DEF_COMB 0x5f #define COML_ONE_CHANNEL BIT(7) #define DEF_HSTRT 0x24 #define DEF_HSTOP 0xd4 #define DEF_VSTRT 0x04 #define DEF_VSTOP 0x94 #define 
COMF_HREF_LOW BIT(4) #define COMJ_PCLK_RISING BIT(4) #define COMJ_VSYNC_HIGH BIT(0) /* supported resolutions */ #define W_QCIF (DEF_HSTOP - DEF_HSTRT) #define W_CIF (W_QCIF << 1) #define H_QCIF (DEF_VSTOP - DEF_VSTRT) #define H_CIF (H_QCIF << 1) #define FRAME_RATE_MAX 30 struct ov6650_reg { u8 reg; u8 val; }; struct ov6650 { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct { /* exposure/autoexposure cluster */ struct v4l2_ctrl *autoexposure; struct v4l2_ctrl *exposure; }; struct { /* gain/autogain cluster */ struct v4l2_ctrl *autogain; struct v4l2_ctrl *gain; }; struct { /* blue/red/autowhitebalance cluster */ struct v4l2_ctrl *autowb; struct v4l2_ctrl *blue; struct v4l2_ctrl *red; }; bool half_scale; /* scale down output by 2 */ struct v4l2_rect rect; /* sensor cropping window */ unsigned long pclk_limit; /* from host */ unsigned long pclk_max; /* from resolution and format */ struct v4l2_fract tpf; /* as requested with s_parm */ enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; }; static enum v4l2_mbus_pixelcode ov6650_codes[] = { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_YVYU8_2X8, V4L2_MBUS_FMT_VYUY8_2X8, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_Y8_1X8, }; /* read a register */ static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val) { int ret; u8 data = reg; struct i2c_msg msg = { .addr = client->addr, .flags = 0, .len = 1, .buf = &data, }; ret = i2c_transfer(client->adapter, &msg, 1); if (ret < 0) goto err; msg.flags = I2C_M_RD; ret = i2c_transfer(client->adapter, &msg, 1); if (ret < 0) goto err; *val = data; return 0; err: dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg); return ret; } /* write a register */ static int ov6650_reg_write(struct i2c_client *client, u8 reg, u8 val) { int ret; unsigned char data[2] = { reg, val }; struct i2c_msg msg = { .addr = client->addr, .flags = 0, .len = 2, .buf = data, }; ret = i2c_transfer(client->adapter, &msg, 1); udelay(100); if (ret < 0) { 
dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg); return ret; } return 0; } /* Read a register, alter its bits, write it back */ static int ov6650_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 mask) { u8 val; int ret; ret = ov6650_reg_read(client, reg, &val); if (ret) { dev_err(&client->dev, "[Read]-Modify-Write of register 0x%02x failed!\n", reg); return ret; } val &= ~mask; val |= set; ret = ov6650_reg_write(client, reg, val); if (ret) dev_err(&client->dev, "Read-Modify-[Write] of register 0x%02x failed!\n", reg); return ret; } static struct ov6650 *to_ov6650(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct ov6650, subdev); } /* Start/Stop streaming from the device */ static int ov6650_s_stream(struct v4l2_subdev *sd, int enable) { return 0; } /* Get status of additional camera capabilities */ static int ov6550_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl); struct v4l2_subdev *sd = &priv->subdev; struct i2c_client *client = v4l2_get_subdevdata(sd); uint8_t reg, reg2; int ret; switch (ctrl->id) { case V4L2_CID_AUTOGAIN: ret = ov6650_reg_read(client, REG_GAIN, &reg); if (!ret) priv->gain->val = reg; return ret; case V4L2_CID_AUTO_WHITE_BALANCE: ret = ov6650_reg_read(client, REG_BLUE, &reg); if (!ret) ret = ov6650_reg_read(client, REG_RED, &reg2); if (!ret) { priv->blue->val = reg; priv->red->val = reg2; } return ret; case V4L2_CID_EXPOSURE_AUTO: ret = ov6650_reg_read(client, REG_AECH, &reg); if (!ret) priv->exposure->val = reg; return ret; } return -EINVAL; } /* Set status of additional camera capabilities */ static int ov6550_s_ctrl(struct v4l2_ctrl *ctrl) { struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl); struct v4l2_subdev *sd = &priv->subdev; struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; switch (ctrl->id) { case V4L2_CID_AUTOGAIN: ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ? 
COMB_AGC : 0, COMB_AGC); if (!ret && !ctrl->val) ret = ov6650_reg_write(client, REG_GAIN, priv->gain->val); return ret; case V4L2_CID_AUTO_WHITE_BALANCE: ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ? COMB_AWB : 0, COMB_AWB); if (!ret && !ctrl->val) { ret = ov6650_reg_write(client, REG_BLUE, priv->blue->val); if (!ret) ret = ov6650_reg_write(client, REG_RED, priv->red->val); } return ret; case V4L2_CID_SATURATION: return ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->val), SAT_MASK); case V4L2_CID_HUE: return ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->val), HUE_MASK); case V4L2_CID_BRIGHTNESS: return ov6650_reg_write(client, REG_BRT, ctrl->val); case V4L2_CID_EXPOSURE_AUTO: ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val == V4L2_EXPOSURE_AUTO ? COMB_AEC : 0, COMB_AEC); if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) ret = ov6650_reg_write(client, REG_AECH, priv->exposure->val); return ret; case V4L2_CID_GAMMA: return ov6650_reg_write(client, REG_GAM1, ctrl->val); case V4L2_CID_VFLIP: return ov6650_reg_rmw(client, REG_COMB, ctrl->val ? COMB_FLIP_V : 0, COMB_FLIP_V); case V4L2_CID_HFLIP: return ov6650_reg_rmw(client, REG_COMB, ctrl->val ? 
COMB_FLIP_H : 0, COMB_FLIP_H); } return -EINVAL; } /* Get chip identification */ static int ov6650_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { id->ident = V4L2_IDENT_OV6650; id->revision = 0; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int ov6650_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; u8 val; if (reg->reg & ~0xff) return -EINVAL; reg->size = 1; ret = ov6650_reg_read(client, reg->reg, &val); if (!ret) reg->val = (__u64)val; return ret; } static int ov6650_set_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->reg & ~0xff || reg->val & ~0xff) return -EINVAL; return ov6650_reg_write(client, reg->reg, reg->val); } #endif static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->c = priv->rect; return 0; } static int ov6650_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); struct v4l2_rect *rect = &a->c; int ret; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; rect->left = ALIGN(rect->left, 2); rect->width = ALIGN(rect->width, 2); rect->top = ALIGN(rect->top, 2); rect->height = ALIGN(rect->height, 2); soc_camera_limit_side(&rect->left, &rect->width, DEF_HSTRT << 1, 2, W_CIF); soc_camera_limit_side(&rect->top, &rect->height, DEF_VSTRT << 1, 2, H_CIF); ret = ov6650_reg_write(client, REG_HSTRT, rect->left >> 1); if (!ret) { priv->rect.left = rect->left; ret = ov6650_reg_write(client, REG_HSTOP, (rect->left + rect->width) >> 1); } if (!ret) { priv->rect.width = rect->width; ret = ov6650_reg_write(client, REG_VSTRT, rect->top >> 1); } if (!ret) { priv->rect.top = rect->top; ret = ov6650_reg_write(client, REG_VSTOP, 
(rect->top + rect->height) >> 1); } if (!ret) priv->rect.height = rect->height; return ret; } static int ov6650_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; a->bounds.left = DEF_HSTRT << 1; a->bounds.top = DEF_VSTRT << 1; a->bounds.width = W_CIF; a->bounds.height = H_CIF; a->defrect = a->bounds; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; return 0; } static int ov6650_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); mf->width = priv->rect.width >> priv->half_scale; mf->height = priv->rect.height >> priv->half_scale; mf->code = priv->code; mf->colorspace = priv->colorspace; mf->field = V4L2_FIELD_NONE; return 0; } static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) { return width > rect->width >> 1 || height > rect->height >> 1; } static u8 to_clkrc(struct v4l2_fract *timeperframe, unsigned long pclk_limit, unsigned long pclk_max) { unsigned long pclk; if (timeperframe->numerator && timeperframe->denominator) pclk = pclk_max * timeperframe->denominator / (FRAME_RATE_MAX * timeperframe->numerator); else pclk = pclk_max; if (pclk_limit && pclk_limit < pclk) pclk = pclk_limit; return (pclk_max - 1) / pclk; } /* set the format we will capture in */ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd); struct soc_camera_sense *sense = icd->sense; struct ov6650 *priv = to_ov6650(client); bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect); struct v4l2_crop a = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .c = { .left = priv->rect.left + (priv->rect.width >> 1) - (mf->width >> (1 - half_scale)), .top = priv->rect.top + (priv->rect.height >> 1) - (mf->height >> (1 - half_scale)), .width = mf->width << 
half_scale, .height = mf->height << half_scale, }, }; enum v4l2_mbus_pixelcode code = mf->code; unsigned long mclk, pclk; u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc; int ret; /* select color matrix configuration for given color encoding */ switch (code) { case V4L2_MBUS_FMT_Y8_1X8: dev_dbg(&client->dev, "pixel format GREY8_1X8\n"); coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP; coma_set |= COMA_BW; break; case V4L2_MBUS_FMT_YUYV8_2X8: dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n"); coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP; coma_set |= COMA_WORD_SWAP; break; case V4L2_MBUS_FMT_YVYU8_2X8: dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n"); coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP | COMA_BYTE_SWAP; break; case V4L2_MBUS_FMT_UYVY8_2X8: dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n"); if (half_scale) { coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP; coma_set |= COMA_BYTE_SWAP; } else { coma_mask |= COMA_RGB | COMA_BW; coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP; } break; case V4L2_MBUS_FMT_VYUY8_2X8: dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n"); if (half_scale) { coma_mask |= COMA_RGB | COMA_BW; coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP; } else { coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP; coma_set |= COMA_BYTE_SWAP; } break; case V4L2_MBUS_FMT_SBGGR8_1X8: dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n"); coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP; coma_set |= COMA_RAW_RGB | COMA_RGB; break; default: dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); return -EINVAL; } priv->code = code; if (code == V4L2_MBUS_FMT_Y8_1X8 || code == V4L2_MBUS_FMT_SBGGR8_1X8) { coml_mask = COML_ONE_CHANNEL; coml_set = 0; priv->pclk_max = 4000000; } else { coml_mask = 0; coml_set = COML_ONE_CHANNEL; priv->pclk_max = 8000000; } if (code == V4L2_MBUS_FMT_SBGGR8_1X8) priv->colorspace = V4L2_COLORSPACE_SRGB; else if (code != 0) priv->colorspace = 
V4L2_COLORSPACE_JPEG; if (half_scale) { dev_dbg(&client->dev, "max resolution: QCIF\n"); coma_set |= COMA_QCIF; priv->pclk_max /= 2; } else { dev_dbg(&client->dev, "max resolution: CIF\n"); coma_mask |= COMA_QCIF; } priv->half_scale = half_scale; if (sense) { if (sense->master_clock == 8000000) { dev_dbg(&client->dev, "8MHz input clock\n"); clkrc = CLKRC_6MHz; } else if (sense->master_clock == 12000000) { dev_dbg(&client->dev, "12MHz input clock\n"); clkrc = CLKRC_12MHz; } else if (sense->master_clock == 16000000) { dev_dbg(&client->dev, "16MHz input clock\n"); clkrc = CLKRC_16MHz; } else if (sense->master_clock == 24000000) { dev_dbg(&client->dev, "24MHz input clock\n"); clkrc = CLKRC_24MHz; } else { dev_err(&client->dev, "unsupported input clock, check platform data\n"); return -EINVAL; } mclk = sense->master_clock; priv->pclk_limit = sense->pixel_clock_max; } else { clkrc = CLKRC_24MHz; mclk = 24000000; priv->pclk_limit = 0; dev_dbg(&client->dev, "using default 24MHz input clock\n"); } clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max); pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc); dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n", mclk / pclk, 10 * mclk % pclk / pclk); ret = ov6650_s_crop(sd, &a); if (!ret) ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask); if (!ret) ret = ov6650_reg_write(client, REG_CLKRC, clkrc); if (!ret) ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask); if (!ret) { mf->colorspace = priv->colorspace; mf->width = priv->rect.width >> half_scale; mf->height = priv->rect.height >> half_scale; } return ret; } static int ov6650_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); if (is_unscaled_ok(mf->width, mf->height, &priv->rect)) v4l_bound_align_image(&mf->width, 2, W_CIF, 1, &mf->height, 2, H_CIF, 1, 0); mf->field = V4L2_FIELD_NONE; switch (mf->code) { case V4L2_MBUS_FMT_Y10_1X10: mf->code = 
V4L2_MBUS_FMT_Y8_1X8; case V4L2_MBUS_FMT_Y8_1X8: case V4L2_MBUS_FMT_YVYU8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_UYVY8_2X8: mf->colorspace = V4L2_COLORSPACE_JPEG; break; default: mf->code = V4L2_MBUS_FMT_SBGGR8_1X8; case V4L2_MBUS_FMT_SBGGR8_1X8: mf->colorspace = V4L2_COLORSPACE_SRGB; break; } return 0; } static int ov6650_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index >= ARRAY_SIZE(ov6650_codes)) return -EINVAL; *code = ov6650_codes[index]; return 0; } static int ov6650_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); struct v4l2_captureparm *cp = &parms->parm.capture; if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; memset(cp, 0, sizeof(*cp)); cp->capability = V4L2_CAP_TIMEPERFRAME; cp->timeperframe.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max)); cp->timeperframe.denominator = FRAME_RATE_MAX; dev_dbg(&client->dev, "Frame interval: %u/%u s\n", cp->timeperframe.numerator, cp->timeperframe.denominator); return 0; } static int ov6650_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); struct v4l2_captureparm *cp = &parms->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; int div, ret; u8 clkrc; if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (cp->extendedmode != 0) return -EINVAL; if (tpf->numerator == 0 || tpf->denominator == 0) div = 1; /* Reset to full rate */ else div = (tpf->numerator * FRAME_RATE_MAX) / tpf->denominator; if (div == 0) div = 1; else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK)) div = GET_CLKRC_DIV(CLKRC_DIV_MASK); /* * Keep result to be used as tpf limit * for subseqent clock divider calculations */ priv->tpf.numerator = div; priv->tpf.denominator = FRAME_RATE_MAX; clkrc 
= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max); ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK); if (!ret) { tpf->numerator = GET_CLKRC_DIV(clkrc); tpf->denominator = FRAME_RATE_MAX; } return ret; } /* Soft reset the camera. This has nothing to do with the RESET pin! */ static int ov6650_reset(struct i2c_client *client) { int ret; dev_dbg(&client->dev, "reset\n"); ret = ov6650_reg_rmw(client, REG_COMA, COMA_RESET, 0); if (ret) dev_err(&client->dev, "An error occurred while entering soft reset!\n"); return ret; } /* program default register values */ static int ov6650_prog_dflt(struct i2c_client *client) { int ret; dev_dbg(&client->dev, "initializing\n"); ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */ if (!ret) ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER); return ret; } static int ov6650_video_probe(struct i2c_client *client) { u8 pidh, pidl, midh, midl; int ret = 0; /* * check and show product ID and manufacturer ID */ ret = ov6650_reg_read(client, REG_PIDH, &pidh); if (!ret) ret = ov6650_reg_read(client, REG_PIDL, &pidl); if (!ret) ret = ov6650_reg_read(client, REG_MIDH, &midh); if (!ret) ret = ov6650_reg_read(client, REG_MIDL, &midl); if (ret) return ret; if ((pidh != OV6650_PIDH) || (pidl != OV6650_PIDL)) { dev_err(&client->dev, "Product ID error 0x%02x:0x%02x\n", pidh, pidl); return -ENODEV; } dev_info(&client->dev, "ov6650 Product ID 0x%02x:0x%02x Manufacturer ID 0x%02x:0x%02x\n", pidh, pidl, midh, midl); ret = ov6650_reset(client); if (!ret) ret = ov6650_prog_dflt(client); return ret; } static const struct v4l2_ctrl_ops ov6550_ctrl_ops = { .g_volatile_ctrl = ov6550_g_volatile_ctrl, .s_ctrl = ov6550_s_ctrl, }; static struct v4l2_subdev_core_ops ov6650_core_ops = { .g_chip_ident = ov6650_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = ov6650_get_register, .s_register = ov6650_set_register, #endif }; /* Request bus settings on camera side */ static int ov6650_g_mbus_config(struct v4l2_subdev *sd, 
struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; cfg->flags = soc_camera_apply_board_flags(icl, cfg); return 0; } /* Alter bus settings on camera side */ static int ov6650_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); unsigned long flags = soc_camera_apply_board_flags(icl, cfg); int ret; if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING) ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0); else ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING); if (ret) return ret; if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0); else ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW); if (ret) return ret; if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0); else ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH); return ret; } static struct v4l2_subdev_video_ops ov6650_video_ops = { .s_stream = ov6650_s_stream, .g_mbus_fmt = ov6650_g_fmt, .s_mbus_fmt = ov6650_s_fmt, .try_mbus_fmt = ov6650_try_fmt, .enum_mbus_fmt = ov6650_enum_fmt, .cropcap = ov6650_cropcap, .g_crop = ov6650_g_crop, .s_crop = ov6650_s_crop, .g_parm = ov6650_g_parm, .s_parm = ov6650_s_parm, .g_mbus_config = ov6650_g_mbus_config, .s_mbus_config = ov6650_s_mbus_config, }; static struct v4l2_subdev_ops ov6650_subdev_ops = { .core = &ov6650_core_ops, .video = &ov6650_video_ops, }; /* * i2c_driver function */ static int ov6650_probe(struct i2c_client *client, const struct i2c_device_id *did) { 
struct ov6650 *priv; struct soc_camera_link *icl = soc_camera_i2c_to_link(client); int ret; if (!icl) { dev_err(&client->dev, "Missing platform_data for driver\n"); return -EINVAL; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(&client->dev, "Failed to allocate memory for private data!\n"); return -ENOMEM; } v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops); v4l2_ctrl_handler_init(&priv->hdl, 13); v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); priv->gain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN); priv->autowb = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); priv->blue = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, DEF_BLUE); priv->red = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 0xff, 1, DEF_RED); v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_SATURATION, 0, 0xf, 1, 0x8); v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_HUE, 0, HUE_MASK, 1, DEF_HUE); v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 0xff, 1, 0x80); priv->autoexposure = v4l2_ctrl_new_std_menu(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO); priv->exposure = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_EXPOSURE, 0, 0xff, 1, DEF_AECH); v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops, V4L2_CID_GAMMA, 0, 0xff, 1, 0x12); priv->subdev.ctrl_handler = &priv->hdl; if (priv->hdl.error) { int err = priv->hdl.error; kfree(priv); return err; } v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true); v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true); v4l2_ctrl_auto_cluster(2, &priv->autoexposure, V4L2_EXPOSURE_MANUAL, true); 
priv->rect.left = DEF_HSTRT << 1; priv->rect.top = DEF_VSTRT << 1; priv->rect.width = W_CIF; priv->rect.height = H_CIF; priv->half_scale = false; priv->code = V4L2_MBUS_FMT_YUYV8_2X8; priv->colorspace = V4L2_COLORSPACE_JPEG; ret = ov6650_video_probe(client); if (!ret) ret = v4l2_ctrl_handler_setup(&priv->hdl); if (ret) { v4l2_ctrl_handler_free(&priv->hdl); kfree(priv); } return ret; } static int ov6650_remove(struct i2c_client *client) { struct ov6650 *priv = to_ov6650(client); v4l2_device_unregister_subdev(&priv->subdev); v4l2_ctrl_handler_free(&priv->hdl); kfree(priv); return 0; } static const struct i2c_device_id ov6650_id[] = { { "ov6650", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ov6650_id); static struct i2c_driver ov6650_i2c_driver = { .driver = { .name = "ov6650", }, .probe = ov6650_probe, .remove = ov6650_remove, .id_table = ov6650_id, }; module_i2c_driver(ov6650_i2c_driver); MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650"); MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>"); MODULE_LICENSE("GPL v2");
gpl-2.0
ptmr3/android_kernel_msm8974_2
drivers/scsi/mpt2sas/mpt2sas_config.c
4945
46868
/* * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c * Copyright (C) 2007-2010 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/slab.h> #include "mpt2sas_base.h" /* local definitions */ /* Timeout for config page request (in seconds) */ #define MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT 15 /* Common sgl flags for READING a config page. */ #define MPT2_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT) /* Common sgl flags for WRITING a config page. 
*/ #define MPT2_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \ << MPI2_SGE_FLAGS_SHIFT) /** * struct config_request - obtain dma memory via routine * @sz: size * @page: virt pointer * @page_dma: phys pointer * */ struct config_request{ u16 sz; void *page; dma_addr_t page_dma; }; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _config_display_some_debug - debug routine * @ioc: per adapter object * @smid: system request message index * @calling_function_name: string pass from calling function * @mpi_reply: reply message frame * Context: none. * * Function for displaying debug info helpful when debugging issues * in this module. */ static void _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid, char *calling_function_name, MPI2DefaultReply_t *mpi_reply) { Mpi2ConfigRequest_t *mpi_request; char *desc = NULL; if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) return; mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { case MPI2_CONFIG_PAGETYPE_IO_UNIT: desc = "io_unit"; break; case MPI2_CONFIG_PAGETYPE_IOC: desc = "ioc"; break; case MPI2_CONFIG_PAGETYPE_BIOS: desc = "bios"; break; case MPI2_CONFIG_PAGETYPE_RAID_VOLUME: desc = "raid_volume"; break; case MPI2_CONFIG_PAGETYPE_MANUFACTURING: desc = "manufaucturing"; break; case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK: desc = "physdisk"; break; case MPI2_CONFIG_PAGETYPE_EXTENDED: switch (mpi_request->ExtPageType) { case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: desc = "sas_io_unit"; break; case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER: desc = "sas_expander"; break; case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE: desc = "sas_device"; break; case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY: desc = "sas_phy"; break; case MPI2_CONFIG_EXTPAGETYPE_LOG: desc = "log"; break; case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE: desc = "enclosure"; break; case 
MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG: desc = "raid_config"; break; case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: desc = "driver_mapping"; break; } break; } if (!desc) return; printk(MPT2SAS_INFO_FMT "%s: %s(%d), action(%d), form(0x%08x), " "smid(%d)\n", ioc->name, calling_function_name, desc, mpi_request->Header.PageNumber, mpi_request->Action, le32_to_cpu(mpi_request->PageAddress), smid); if (!mpi_reply) return; if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) printk(MPT2SAS_INFO_FMT "\tiocstatus(0x%04x), loginfo(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo)); } #endif /** * _config_alloc_config_dma_memory - obtain physical memory * @ioc: per adapter object * @mem: struct config_request * * A wrapper for obtaining dma-able memory for config page request. * * Returns 0 for success, non-zero for failure. */ static int _config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, struct config_request *mem) { int r = 0; if (mem->sz > ioc->config_page_sz) { mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, &mem->page_dma, GFP_KERNEL); if (!mem->page) { printk(MPT2SAS_ERR_FMT "%s: dma_alloc_coherent" " failed asking for (%d) bytes!!\n", ioc->name, __func__, mem->sz); r = -ENOMEM; } } else { /* use tmp buffer if less than 512 bytes */ mem->page = ioc->config_page; mem->page_dma = ioc->config_page_dma; } return r; } /** * _config_free_config_dma_memory - wrapper to free the memory * @ioc: per adapter object * @mem: struct config_request * * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. * * Returns 0 for success, non-zero for failure. 
*/ static void _config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, struct config_request *mem) { if (mem->sz > ioc->config_page_sz) dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, mem->page_dma); } /** * mpt2sas_config_done - config page completion routine * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: none. * * The callback handler when using _config_request. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. */ u8 mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { MPI2DefaultReply_t *mpi_reply; if (ioc->config_cmds.status == MPT2_CMD_NOT_USED) return 1; if (ioc->config_cmds.smid != smid) return 1; ioc->config_cmds.status |= MPT2_CMD_COMPLETE; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (mpi_reply) { ioc->config_cmds.status |= MPT2_CMD_REPLY_VALID; memcpy(ioc->config_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); } ioc->config_cmds.status &= ~MPT2_CMD_PENDING; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _config_display_some_debug(ioc, smid, "config_done", mpi_reply); #endif ioc->config_cmds.smid = USHRT_MAX; complete(&ioc->config_cmds.done); return 1; } /** * _config_request - main routine for sending config page requests * @ioc: per adapter object * @mpi_request: request message frame * @mpi_reply: reply mf payload returned from firmware * @timeout: timeout in seconds * @config_page: contents of the config page * @config_page_sz: size of config page * Context: sleep * * A generic API for config page requests to firmware. * * The ioc->config_cmds.status flag should be MPT2_CMD_NOT_USED before calling * this API. * * The callback index is set inside `ioc->config_cb_idx. * * Returns 0 for success, non-zero for failure. 
 */
static int
_config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
    *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
    void *config_page, u16 config_page_sz)
{
	u16 smid;
	u32 ioc_state;
	unsigned long timeleft;
	Mpi2ConfigRequest_t *config_request;
	int r;
	u8 retry_count, issue_host_reset = 0;
	u16 wait_state_count;
	struct config_request mem;

	/* serialize: only one config request is outstanding at a time */
	mutex_lock(&ioc->config_cmds.mutex);
	if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
		    ioc->name, __func__);
		mutex_unlock(&ioc->config_cmds.mutex);
		return -EAGAIN;
	}

	retry_count = 0;
	memset(&mem, 0, sizeof(struct config_request));

	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	if (config_page) {
		/* second-stage (read/write) call: copy the header info the
		 * firmware returned from the earlier PAGE_HEADER action */
		mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
		mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
		mpi_request->Header.PageType = mpi_reply->Header.PageType;
		mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
		mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
		mpi_request->ExtPageType = mpi_reply->ExtPageType;
		/* PageLength is zero for extended pages; lengths are in
		 * 32-bit dwords */
		if (mpi_request->Header.PageLength)
			mem.sz = mpi_request->Header.PageLength * 4;
		else
			mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
		r = _config_alloc_config_dma_memory(ioc, &mem);
		if (r != 0)
			goto out;
		if (mpi_request->Action ==
		    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
		    mpi_request->Action ==
		    MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
			/* write: stage caller's data into the dma buffer */
			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
			    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
			    mem.page_dma);
			memcpy(mem.page, config_page, min_t(u16, mem.sz,
			    config_page_sz));
		} else {
			/* read: clear caller's buffer, firmware fills dma
			 * buffer which is copied back after completion */
			memset(config_page, 0, config_page_sz);
			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
			    MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz,
			    mem.page_dma);
		}
	}

 retry_config:
	if (retry_count) {
		if (retry_count > 2) { /* attempt only 2 retries */
			r = -EFAULT;
			goto free_mem;
		}
		printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
		    ioc->name, __func__, retry_count);
	}
	/* wait (up to the default timeout, polling once a second) for the
	 * ioc to reach operational state before submitting */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ioc->config_cmds.status = MPT2_CMD_NOT_USED;
			r = -EFAULT;
			goto free_mem;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->config_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		ioc->config_cmds.status = MPT2_CMD_NOT_USED;
		r = -EAGAIN;
		goto free_mem;
	}

	r = 0;
	memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
	ioc->config_cmds.status = MPT2_CMD_PENDING;
	config_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->config_cmds.smid = smid;
	memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_config_display_some_debug(ioc, smid, "config_request", NULL);
#endif
	init_completion(&ioc->config_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	/* completion is signalled from mpt2sas_config_done() */
	timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
	    timeout*HZ);
	if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2ConfigRequest_t)/4);
		retry_count++;
		/* config_done() sets smid to USHRT_MAX after freeing the
		 * frame, so only free it here if it is still ours */
		if (ioc->config_cmds.smid == smid)
			mpt2sas_base_free_smid(ioc, smid);
		if ((ioc->shost_recovery) || (ioc->config_cmds.status &
		    MPT2_CMD_RESET) || ioc->pci_error_recovery)
			goto retry_config;
		/* unexplained timeout: escalate to a host reset after the
		 * mutex is dropped below */
		issue_host_reset = 1;
		r = -EFAULT;
		goto free_mem;
	}

	if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->config_cmds.reply,
		    sizeof(Mpi2ConfigReply_t));
	if (retry_count)
		printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n",
		    ioc->name, __func__, retry_count);
	/* on reads, copy the dma buffer back to the caller's page */
	if (config_page && mpi_request->Action ==
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT)
		memcpy(config_page, mem.page, min_t(u16, mem.sz,
		    config_page_sz));
 free_mem:
	if (config_page)
		_config_free_config_dma_memory(ioc, &mem);
 out:
	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->config_cmds.mutex);

	if (issue_host_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
	return r;
}

/**
 * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	/* first request: PAGE_HEADER action fills mpi_reply with the
	 * page header needed by the READ_CURRENT call below */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* second request: read the current page contents */
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
*/ int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; mpi_request.Header.PageNumber = 10; mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_bios_pg2 - obtain bios page 2 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; mpi_request.Header.PageNumber = 2; mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_bios_pg3 - obtain bios page 3 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage3_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; mpi_request.Header.PageNumber = 3; mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_iounit_pg0 - obtain iounit page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_iounit_pg1 - obtain iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_set_iounit_pg1 - set iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; mpi_request.Header.PageNumber = 8; mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_sas_device_pg0 - obtain sas device page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: device handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; mpi_request.Header.PageNumber = 0; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_sas_device_pg1 - obtain sas device page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: device handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; mpi_request.Header.PageNumber = 1; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_number_hba_phys - obtain number of phys on the host * @ioc: per adapter object * @num_phys: pointer returned with the number of phys * Context: sleep. * * Returns 0 for success, non-zero for failure. 
 */
int
mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;
	u16 ioc_status;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t config_page;

	/* default to zero on any failure path */
	*num_phys = 0;
	/* first request: fetch the extended page header */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* second request: read sas io unit page 0 into a stack buffer;
	 * only the NumPhys field is consumed */
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
	    sizeof(Mpi2SasIOUnitPage0_t));
	if (!r) {
		/* also require firmware-level success before trusting
		 * the page contents */
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
			*num_phys = config_page.NumPhys;
	}
 out:
	return r;
}

/**
 * mpt2sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @sz: size of buffer passed in config_page
 * Context: sleep.
 *
 * Calling function should call config_get_number_hba_phys prior to
 * this function, so enough memory is allocated for config_page.
 *
 * Returns 0 for success, non-zero for failure.
*/ int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @sz: size of buffer passed in config_page * Context: sleep. * * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @sz: size of buffer passed in config_page * Context: sleep. * * Calling function should call config_get_number_hba_phys prior to * this function, so enough memory is allocated for config_page. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_get_expander_pg0 - obtain expander page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: expander handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_expander_pg1 - obtain expander page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @phy_number: phy number * @handle: expander handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number, u16 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_enclosure_pg0 - obtain enclosure page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: expander handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_phy_pg0 - obtain phy page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @phy_number: phy number * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_phy_pg1 - obtain phy page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @phy_number: phy number * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_raid_volume_pg1 - obtain raid volume page 1 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: volume handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
*/ int mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u32 handle) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; mpi_request.Header.PageNumber = 1; mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); out: return r; } /** * mpt2sas_config_get_number_pds - obtain number of phys disk assigned to volume * @ioc: per adapter object * @handle: volume handle * @num_pds: returns pds count * Context: sleep. * * Returns 0 for success, non-zero for failure. 
 */
int
mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
    u8 *num_pds)
{
	Mpi2ConfigRequest_t mpi_request;
	Mpi2RaidVolPage0_t config_page;
	Mpi2ConfigReply_t mpi_reply;
	int r;
	u16 ioc_status;

	/* first request: fetch the raid volume page 0 header */
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	/* default to zero on any failure path */
	*num_pds = 0;
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* second request: read the volume page into a stack buffer;
	 * only the NumPhysDisks field is consumed */
	mpi_request.PageAddress =
	    cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
	    sizeof(Mpi2RaidVolPage0_t));
	if (!r) {
		/* also require firmware-level success before trusting
		 * the page contents */
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
			*num_pds = config_page.NumPhysDisks;
	}
 out:
	return r;
}

/**
 * mpt2sas_config_get_raid_volume_pg0 - obtain raid volume page 0
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @form: GET_NEXT_HANDLE or HANDLE
 * @handle: volume handle
 * @sz: size of buffer passed in config_page
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
*/ int mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, u32 handle, u16 sz) { Mpi2ConfigRequest_t mpi_request; int r; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; mpi_request.Header.PageNumber = 0; mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; r = _config_request(ioc, &mpi_request, mpi_reply, MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } /** * mpt2sas_config_get_phys_disk_pg0 - obtain phys disk page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE * @form_specific: specific to the form * Context: sleep. * * Returns 0 for success, non-zero for failure. 
 */
int
mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
    u32 form, u32 form_specific)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	/* Phase 1: PAGE_HEADER action primes the firmware with the page
	 * type/number/version before the actual page read. */
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	/* Phase 2: read the phys disk page addressed by form/form_specific. */
	mpi_request.PageAddress = cpu_to_le32(form | form_specific);
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
	    sizeof(*config_page));
 out:
	return r;
}

/**
 * mpt2sas_config_get_volume_handle - returns volume handle for given hidden raid components
 * @ioc: per adapter object
 * @pd_handle: phys disk handle
 * @volume_handle: volume handle
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
    u16 *volume_handle)
{
	Mpi2RaidConfigurationPage0_t *config_page = NULL;
	Mpi2ConfigRequest_t mpi_request;
	Mpi2ConfigReply_t mpi_reply;
	int r, i, config_page_sz;
	u16 ioc_status;
	int config_num;
	u16 element_type;
	u16 phys_disk_dev_handle;

	*volume_handle = 0;
	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	/* Phase 1: PAGE_HEADER action; for extended pages the reply also
	 * carries ExtPageLength, used below to size the read buffer. */
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
	mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
	mpi_request.Header.PageNumber = 0;
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, &mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;

	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	/* ExtPageLength is in units of 4-byte dwords. */
	config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
	config_page = kmalloc(config_page_sz, GFP_KERNEL);
	if (!config_page) {
		r = -1;
		goto out;
	}
	/* Walk every RAID configuration page (GET_NEXT_CONFIGNUM starting
	 * from 0xff) looking for an element that references pd_handle. */
	config_num = 0xff;
	while (1) {
		mpi_request.PageAddress = cpu_to_le32(config_num +
		    MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
		r = _config_request(ioc, &mpi_request, &mpi_reply,
		    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		    config_page_sz);
		if (r)
			goto out;
		r = -1;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			goto out;
		for (i = 0; i < config_page->NumElements; i++) {
			element_type = le16_to_cpu(config_page->
			    ConfigElement[i].ElementFlags) &
			    MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
			if (element_type ==
			    MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
			    element_type ==
			    MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
				phys_disk_dev_handle =
				    le16_to_cpu(config_page->ConfigElement[i].
				    PhysDiskDevHandle);
				if (phys_disk_dev_handle == pd_handle) {
					*volume_handle =
					    le16_to_cpu(config_page->
					    ConfigElement[i].VolDevHandle);
					r = 0;
					goto out;
				}
			} else if (element_type ==
			    MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
				/* Hot spares belong to no volume. */
				*volume_handle = 0;
				r = 0;
				goto out;
			}
		}
		config_num = config_page->ConfigNum;
	}
 out:
	kfree(config_page);
	return r;
}

/**
 * mpt2sas_config_get_volume_wwid - returns wwid given the volume handle
 * @ioc: per adapter object
 * @volume_handle: volume handle
 * @wwid: volume wwid
 * Context: sleep.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
    u64 *wwid)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2RaidVolPage1_t raid_vol_pg1;

	*wwid = 0;
	if (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
	    volume_handle))) {
		*wwid = le64_to_cpu(raid_vol_pg1.WWID);
		return 0;
	} else
		return -1;
}
gpl-2.0
thicklizard/komodo-revisited
drivers/usb/host/imx21-hcd.c
4945
48636
/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The i.MX21 USB hardware contains
 * * 32 transfer descriptors (called ETDs)
 * * 4Kb of Data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Allocating a ETD
 * * Fill in ETD with appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activate the ETD
 * * Get interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and Data memory) situations are handled differently for
 * isochronous and non-isochronous transactions:
 *
 * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and Data memory during URB submission
 * (and fail if unavailable).
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>

#include "imx21-hcd.h"

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}


/* =========================================== */
/* Hardware access helpers */
/* =========================================== */

/* Read-modify-write: OR mask into the register at regs + offset. */
static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}

/* Read-modify-write: clear the mask bits in the register at regs + offset. */
static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}

/* For write-1-to-toggle registers: only write when the bit is currently set,
 * so the write clears it. */
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}

/* For write-1-to-toggle registers: only write when the bit is currently
 * clear, so the write sets it. */
static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}

/* ETDs live in device memory as four 32-bit dwords each. */
static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

/* Frame counter is 16 bits wide. */
static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}

/* The ETD DMA engine needs 32-bit aligned buffers. */
static inline bool unsuitable_for_dma(dma_addr_t addr)
{
	return (addr & 3) != 0;
}

#include "imx21-dbg.c"

static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);


/* =========================================== */
/* ETD management */
/* =========================================== */

/* Claim the first free ETD slot; returns its index or -1 if all 32 in use. */
static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}

/* Disable the ETD in hardware and clear its done/DMA state. */
static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

/* Deactivate and zero the hardware dwords plus the software bookkeeping. */
static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
	etd->bounce_buffer = NULL;
}

/* Return an ETD to the free pool; tolerates bad/free slot numbers. */
static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}

/* Fill ETD dword 0: device address, endpoint, direction, speed, format and
 * max packet size, all derived from the URB. */
static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb,  u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe)) <<  DW0_ADDRESS |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW)
			 ? 1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}

/**
 * Copy buffer to data controller data memory.
 * We cannot use memcpy_toio() because the hardware requires 32bit writes
 */
static void copy_to_dmem(
	struct imx21 *imx21, int dmem_offset, void *src, int count)
{
	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
	u32 word = 0;
	u8 *p = src;
	int byte = 0;
	int i;

	for (i = 0; i < count; i++) {
		byte = i % 4;
		word += (*p++ << (byte * 8));
		if (byte == 3) {
			writel(word, dmem);
			dmem += 4;
			word = 0;
		}
	}

	/* flush any partially filled trailing word */
	if (count && byte != 3)
		writel(word, dmem);
}

/* Arm an ETD: resolve PIO-vs-bounce-buffer for unaligned data, program the
 * fill/done bits and finally enable the ETD in hardware. */
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
		/* For non aligned isoc the condition below is always true */
		if (etd->len <= etd->dmem_size) {
			/* Fits into data memory, use PIO */
			if (dir != TD_DIR_IN) {
				copy_to_dmem(imx21,
						etd->dmem_offset,
						etd->cpu_buffer,
						etd->len);
			}
			etd->dma_handle = 0;

		} else {
			/* Too big for data memory, use bounce buffer */
			enum dma_data_direction dmadir;

			if (dir == TD_DIR_IN) {
				dmadir = DMA_FROM_DEVICE;
				etd->bounce_buffer = kmalloc(etd->len,
								GFP_ATOMIC);
			} else {
				dmadir = DMA_TO_DEVICE;
				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
								etd->len,
								GFP_ATOMIC);
			}
			if (!etd->bounce_buffer) {
				dev_err(imx21->dev, "failed bounce alloc\n");
				goto err_bounce_alloc;
			}

			etd->dma_handle =
				dma_map_single(imx21->dev,
						etd->bounce_buffer,
						etd->len,
						dmadir);
			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
				dev_err(imx21->dev, "failed bounce map\n");
				goto err_bounce_map;
			}
		}
	}

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (etd->dma_handle) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP and PIO */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;
		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
	return;

err_bounce_map:
	kfree(etd->bounce_buffer);

err_bounce_alloc:
	free_dmem(imx21, etd);
	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}

/* =========================================== */
/* Data memory management */
/* =========================================== */

/* First-fit allocator over the sorted dmem_list; returns the byte offset
 * into the 4Kb data memory, or a negative errno. */
static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	/* dword 1 was pre-loaded with maxpacket << 16 while queued */
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	etd->dmem_offset = dmem_offset;
	urb_priv->active = 1;
	activate_etd(imx21, etd_num, dir);
}

/* Release an ETD's data memory area, then retry any ETDs that were queued
 * waiting for DMEM. */
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
	struct imx21_dmem_area *area;
	struct etd_priv *tmp;
	int found = 0;
	int offset;

	if (!etd->dmem_size)
		return;
	etd->dmem_size = 0;

	offset = etd->dmem_offset;
	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found)  {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}

/* Drop any DMEM areas still owned by a now-disabled endpoint. */
static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}


/* =========================================== */
/* End handling */
/* =========================================== */

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		int etd_num = ep_priv->etd[i];
		struct etd_priv *etd;
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		ep_priv->etd[i] = -1;

		free_dmem(imx21, etd); /* for isoc */

		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		/* Hand the freed ETD straight to the next waiting endpoint. */
		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}

/* Give an URB back to the USB core; drops imx21->lock around the giveback
 * callback as required by the HCD API. */
static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}

/* Complete the ETD's URB and immediately schedule the endpoint's next URB,
 * if any. */
static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status)
{
	struct usb_host_endpoint *ep = etd->ep;

	urb_done(imx21->hcd, etd->urb, status);
	etd->urb = NULL;

	if (!list_empty(&ep->urb_list)) {
		struct urb *urb = list_first_entry(
					&ep->urb_list, struct urb, urb_list);

		dev_vdbg(imx21->dev, "next URB %p\n", urb);
		schedule_nonisoc_etd(imx21, urb);
	}
}


/* =========================================== */
/* ISOC Handling ...
*/ /* =========================================== */ static void schedule_isoc_etds(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct imx21 *imx21 = hcd_to_imx21(hcd); struct ep_priv *ep_priv = ep->hcpriv; struct etd_priv *etd; struct urb_priv *urb_priv; struct td *td; int etd_num; int i; int cur_frame; u8 dir; for (i = 0; i < NUM_ISO_ETDS; i++) { too_late: if (list_empty(&ep_priv->td_list)) break; etd_num = ep_priv->etd[i]; if (etd_num < 0) break; etd = &imx21->etd[etd_num]; if (etd->urb) continue; td = list_entry(ep_priv->td_list.next, struct td, list); list_del(&td->list); urb_priv = td->urb->hcpriv; cur_frame = imx21_hc_get_frame(hcd); if (frame_after(cur_frame, td->frame)) { dev_dbg(imx21->dev, "isoc too late frame %d > %d\n", cur_frame, td->frame); urb_priv->isoc_status = -EXDEV; td->urb->iso_frame_desc[ td->isoc_index].actual_length = 0; td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV; if (--urb_priv->isoc_remaining == 0) urb_done(hcd, td->urb, urb_priv->isoc_status); goto too_late; } urb_priv->active = 1; etd->td = td; etd->ep = td->ep; etd->urb = td->urb; etd->len = td->len; etd->dma_handle = td->dma_handle; etd->cpu_buffer = td->cpu_buffer; debug_isoc_submitted(imx21, cur_frame, td); dir = usb_pipeout(td->urb->pipe) ? 
TD_DIR_OUT : TD_DIR_IN; setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size); etd_writel(imx21, etd_num, 1, etd->dmem_offset); etd_writel(imx21, etd_num, 2, (TD_NOTACCESSED << DW2_COMPCODE) | ((td->frame & 0xFFFF) << DW2_STARTFRM)); etd_writel(imx21, etd_num, 3, (TD_NOTACCESSED << DW3_COMPCODE0) | (td->len << DW3_PKTLEN0)); activate_etd(imx21, etd_num, dir); } } static void isoc_etd_done(struct usb_hcd *hcd, int etd_num) { struct imx21 *imx21 = hcd_to_imx21(hcd); int etd_mask = 1 << etd_num; struct etd_priv *etd = imx21->etd + etd_num; struct urb *urb = etd->urb; struct urb_priv *urb_priv = urb->hcpriv; struct td *td = etd->td; struct usb_host_endpoint *ep = etd->ep; int isoc_index = td->isoc_index; unsigned int pipe = urb->pipe; int dir_in = usb_pipein(pipe); int cc; int bytes_xfrd; disactivate_etd(imx21, etd_num); cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf; bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff; /* Input doesn't always fill the buffer, don't generate an error * when this happens. 
*/ if (dir_in && (cc == TD_DATAUNDERRUN)) cc = TD_CC_NOERROR; if (cc == TD_NOTACCESSED) bytes_xfrd = 0; debug_isoc_completed(imx21, imx21_hc_get_frame(hcd), td, cc, bytes_xfrd); if (cc) { urb_priv->isoc_status = -EXDEV; dev_dbg(imx21->dev, "bad iso cc=0x%X frame=%d sched frame=%d " "cnt=%d len=%d urb=%p etd=%d index=%d\n", cc, imx21_hc_get_frame(hcd), td->frame, bytes_xfrd, td->len, urb, etd_num, isoc_index); } if (dir_in) { clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); if (!etd->dma_handle) memcpy_fromio(etd->cpu_buffer, imx21->regs + USBOTG_DMEM + etd->dmem_offset, bytes_xfrd); } urb->actual_length += bytes_xfrd; urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd; urb->iso_frame_desc[isoc_index].status = cc_to_error[cc]; etd->td = NULL; etd->urb = NULL; etd->ep = NULL; if (--urb_priv->isoc_remaining == 0) urb_done(hcd, urb, urb_priv->isoc_status); schedule_isoc_etds(hcd, ep); } static struct ep_priv *alloc_isoc_ep( struct imx21 *imx21, struct usb_host_endpoint *ep) { struct ep_priv *ep_priv; int i; ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); if (!ep_priv) return NULL; for (i = 0; i < NUM_ISO_ETDS; i++) ep_priv->etd[i] = -1; INIT_LIST_HEAD(&ep_priv->td_list); ep_priv->ep = ep; ep->hcpriv = ep_priv; return ep_priv; } static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv) { int i, j; int etd_num; /* Allocate the ETDs if required */ for (i = 0; i < NUM_ISO_ETDS; i++) { if (ep_priv->etd[i] < 0) { etd_num = alloc_etd(imx21); if (etd_num < 0) goto alloc_etd_failed; ep_priv->etd[i] = etd_num; imx21->etd[etd_num].ep = ep_priv->ep; } } return 0; alloc_etd_failed: dev_err(imx21->dev, "isoc: Couldn't allocate etd\n"); for (j = 0; j < i; j++) { free_etd(imx21, ep_priv->etd[j]); ep_priv->etd[j] = -1; } return -ENOMEM; } static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct urb *urb, gfp_t mem_flags) { struct imx21 *imx21 = hcd_to_imx21(hcd); struct urb_priv *urb_priv; unsigned long flags; struct 
ep_priv *ep_priv; struct td *td = NULL; int i; int ret; int cur_frame; u16 maxpacket; urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags); if (urb_priv == NULL) return -ENOMEM; urb_priv->isoc_td = kzalloc( sizeof(struct td) * urb->number_of_packets, mem_flags); if (urb_priv->isoc_td == NULL) { ret = -ENOMEM; goto alloc_td_failed; } spin_lock_irqsave(&imx21->lock, flags); if (ep->hcpriv == NULL) { ep_priv = alloc_isoc_ep(imx21, ep); if (ep_priv == NULL) { ret = -ENOMEM; goto alloc_ep_failed; } } else { ep_priv = ep->hcpriv; } ret = alloc_isoc_etds(imx21, ep_priv); if (ret) goto alloc_etd_failed; ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) goto link_failed; urb->status = -EINPROGRESS; urb->actual_length = 0; urb->error_count = 0; urb->hcpriv = urb_priv; urb_priv->ep = ep; /* allocate data memory for largest packets if not already done */ maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); for (i = 0; i < NUM_ISO_ETDS; i++) { struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]]; if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) { /* not sure if this can really occur.... 
*/ dev_err(imx21->dev, "increasing isoc buffer %d->%d\n", etd->dmem_size, maxpacket); ret = -EMSGSIZE; goto alloc_dmem_failed; } if (etd->dmem_size == 0) { etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep); if (etd->dmem_offset < 0) { dev_dbg(imx21->dev, "failed alloc isoc dmem\n"); ret = -EAGAIN; goto alloc_dmem_failed; } etd->dmem_size = maxpacket; } } /* calculate frame */ cur_frame = imx21_hc_get_frame(hcd); if (urb->transfer_flags & URB_ISO_ASAP) { if (list_empty(&ep_priv->td_list)) urb->start_frame = cur_frame + 5; else urb->start_frame = list_entry( ep_priv->td_list.prev, struct td, list)->frame + urb->interval; } urb->start_frame = wrap_frame(urb->start_frame); if (frame_after(cur_frame, urb->start_frame)) { dev_dbg(imx21->dev, "enqueue: adjusting iso start %d (cur=%d) asap=%d\n", urb->start_frame, cur_frame, (urb->transfer_flags & URB_ISO_ASAP) != 0); urb->start_frame = wrap_frame(cur_frame + 1); } /* set up transfers */ td = urb_priv->isoc_td; for (i = 0; i < urb->number_of_packets; i++, td++) { unsigned int offset = urb->iso_frame_desc[i].offset; td->ep = ep; td->urb = urb; td->len = urb->iso_frame_desc[i].length; td->isoc_index = i; td->frame = wrap_frame(urb->start_frame + urb->interval * i); td->dma_handle = urb->transfer_dma + offset; td->cpu_buffer = urb->transfer_buffer + offset; list_add_tail(&td->list, &ep_priv->td_list); } urb_priv->isoc_remaining = urb->number_of_packets; dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n", urb->number_of_packets, urb->start_frame, td->frame); debug_urb_submitted(imx21, urb); schedule_isoc_etds(hcd, ep); spin_unlock_irqrestore(&imx21->lock, flags); return 0; alloc_dmem_failed: usb_hcd_unlink_urb_from_ep(hcd, urb); link_failed: alloc_etd_failed: alloc_ep_failed: spin_unlock_irqrestore(&imx21->lock, flags); kfree(urb_priv->isoc_td); alloc_td_failed: kfree(urb_priv); return ret; } static void dequeue_isoc_urb(struct imx21 *imx21, struct urb *urb, struct ep_priv *ep_priv) { struct urb_priv *urb_priv = 
urb->hcpriv; struct td *td, *tmp; int i; if (urb_priv->active) { for (i = 0; i < NUM_ISO_ETDS; i++) { int etd_num = ep_priv->etd[i]; if (etd_num != -1 && imx21->etd[etd_num].urb == urb) { struct etd_priv *etd = imx21->etd + etd_num; reset_etd(imx21, etd_num); free_dmem(imx21, etd); } } } list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) { if (td->urb == urb) { dev_vdbg(imx21->dev, "removing td %p\n", td); list_del(&td->list); } } } /* =========================================== */ /* NON ISOC Handling ... */ /* =========================================== */ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb) { unsigned int pipe = urb->pipe; struct urb_priv *urb_priv = urb->hcpriv; struct ep_priv *ep_priv = urb_priv->ep->hcpriv; int state = urb_priv->state; int etd_num = ep_priv->etd[0]; struct etd_priv *etd; u32 count; u16 etd_buf_size; u16 maxpacket; u8 dir; u8 bufround; u8 datatoggle; u8 interval = 0; u8 relpolpos = 0; if (etd_num < 0) { dev_err(imx21->dev, "No valid ETD\n"); return; } if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num)) dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num); etd = &imx21->etd[etd_num]; maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe)); if (!maxpacket) maxpacket = 8; if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) { if (state == US_CTRL_SETUP) { dir = TD_DIR_SETUP; if (unsuitable_for_dma(urb->setup_dma)) usb_hcd_unmap_urb_setup_for_dma(imx21->hcd, urb); etd->dma_handle = urb->setup_dma; etd->cpu_buffer = urb->setup_packet; bufround = 0; count = 8; datatoggle = TD_TOGGLE_DATA0; } else { /* US_CTRL_ACK */ dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT; bufround = 0; count = 0; datatoggle = TD_TOGGLE_DATA1; } } else { dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN; bufround = (dir == TD_DIR_IN) ? 
1 : 0; if (unsuitable_for_dma(urb->transfer_dma)) usb_hcd_unmap_urb_for_dma(imx21->hcd, urb); etd->dma_handle = urb->transfer_dma; etd->cpu_buffer = urb->transfer_buffer; if (usb_pipebulk(pipe) && (state == US_BULK0)) count = 0; else count = urb->transfer_buffer_length; if (usb_pipecontrol(pipe)) { datatoggle = TD_TOGGLE_DATA1; } else { if (usb_gettoggle( urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe))) datatoggle = TD_TOGGLE_DATA1; else datatoggle = TD_TOGGLE_DATA0; } } etd->urb = urb; etd->ep = urb_priv->ep; etd->len = count; if (usb_pipeint(pipe)) { interval = urb->interval; relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff; } /* Write ETD to device memory */ setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket); etd_writel(imx21, etd_num, 2, (u32) interval << DW2_POLINTERV | ((u32) relpolpos << DW2_RELPOLPOS) | ((u32) dir << DW2_DIRPID) | ((u32) bufround << DW2_BUFROUND) | ((u32) datatoggle << DW2_DATATOG) | ((u32) TD_NOTACCESSED << DW2_COMPCODE)); /* DMA will always transfer buffer size even if TOBYCNT in DWORD3 is smaller. Make sure we don't overrun the buffer! */ if (count && count < maxpacket) etd_buf_size = count; else etd_buf_size = maxpacket; etd_writel(imx21, etd_num, 3, ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count); if (!count) etd->dma_handle = 0; /* allocate x and y buffer space at once */ etd->dmem_size = (count > maxpacket) ? 
maxpacket * 2 : maxpacket; etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); if (etd->dmem_offset < 0) { /* Setup everything we can in HW and update when we get DMEM */ etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16); dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num); debug_urb_queued_for_dmem(imx21, urb); list_add_tail(&etd->queue, &imx21->queue_for_dmem); return; } etd_writel(imx21, etd_num, 1, (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) | (u32) etd->dmem_offset); urb_priv->active = 1; /* enable the ETD to kick off transfer */ dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n", etd_num, count, dir != TD_DIR_IN ? "out" : "in"); activate_etd(imx21, etd_num, dir); } static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num) { struct imx21 *imx21 = hcd_to_imx21(hcd); struct etd_priv *etd = &imx21->etd[etd_num]; struct urb *urb = etd->urb; u32 etd_mask = 1 << etd_num; struct urb_priv *urb_priv = urb->hcpriv; int dir; int cc; u32 bytes_xfrd; int etd_done; disactivate_etd(imx21, etd_num); dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3; cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf; bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff); /* save toggle carry */ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1); if (dir == TD_DIR_IN) { clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); if (etd->bounce_buffer) { memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd); dma_unmap_single(imx21->dev, etd->dma_handle, etd->len, DMA_FROM_DEVICE); } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */ memcpy_fromio(etd->cpu_buffer, imx21->regs + USBOTG_DMEM + etd->dmem_offset, bytes_xfrd); } } kfree(etd->bounce_buffer); etd->bounce_buffer = NULL; free_dmem(imx21, etd); urb->error_count = 0; if (!(urb->transfer_flags & URB_SHORT_NOT_OK) && (cc == 
TD_DATAUNDERRUN)) cc = TD_CC_NOERROR; if (cc != 0) dev_vdbg(imx21->dev, "cc is 0x%x\n", cc); etd_done = (cc_to_error[cc] != 0); /* stop if error */ switch (usb_pipetype(urb->pipe)) { case PIPE_CONTROL: switch (urb_priv->state) { case US_CTRL_SETUP: if (urb->transfer_buffer_length > 0) urb_priv->state = US_CTRL_DATA; else urb_priv->state = US_CTRL_ACK; break; case US_CTRL_DATA: urb->actual_length += bytes_xfrd; urb_priv->state = US_CTRL_ACK; break; case US_CTRL_ACK: etd_done = 1; break; default: dev_err(imx21->dev, "Invalid pipe state %d\n", urb_priv->state); etd_done = 1; break; } break; case PIPE_BULK: urb->actual_length += bytes_xfrd; if ((urb_priv->state == US_BULK) && (urb->transfer_flags & URB_ZERO_PACKET) && urb->transfer_buffer_length > 0 && ((urb->transfer_buffer_length % usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) == 0)) { /* need a 0-packet */ urb_priv->state = US_BULK0; } else { etd_done = 1; } break; case PIPE_INTERRUPT: urb->actual_length += bytes_xfrd; etd_done = 1; break; } if (etd_done) nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]); else { dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state); schedule_nonisoc_etd(imx21, urb); } } static struct ep_priv *alloc_ep(void) { int i; struct ep_priv *ep_priv; ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); if (!ep_priv) return NULL; for (i = 0; i < NUM_ISO_ETDS; ++i) ep_priv->etd[i] = -1; return ep_priv; } static int imx21_hc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct imx21 *imx21 = hcd_to_imx21(hcd); struct usb_host_endpoint *ep = urb->ep; struct urb_priv *urb_priv; struct ep_priv *ep_priv; struct etd_priv *etd; int ret; unsigned long flags; dev_vdbg(imx21->dev, "enqueue urb=%p ep=%p len=%d " "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n", urb, ep, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma, urb->setup_packet, urb->setup_dma); if (usb_pipeisoc(urb->pipe)) return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, 
mem_flags); urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags); if (!urb_priv) return -ENOMEM; spin_lock_irqsave(&imx21->lock, flags); ep_priv = ep->hcpriv; if (ep_priv == NULL) { ep_priv = alloc_ep(); if (!ep_priv) { ret = -ENOMEM; goto failed_alloc_ep; } ep->hcpriv = ep_priv; ep_priv->ep = ep; } ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) goto failed_link; urb->status = -EINPROGRESS; urb->actual_length = 0; urb->error_count = 0; urb->hcpriv = urb_priv; urb_priv->ep = ep; switch (usb_pipetype(urb->pipe)) { case PIPE_CONTROL: urb_priv->state = US_CTRL_SETUP; break; case PIPE_BULK: urb_priv->state = US_BULK; break; } debug_urb_submitted(imx21, urb); if (ep_priv->etd[0] < 0) { if (ep_priv->waiting_etd) { dev_dbg(imx21->dev, "no ETD available already queued %p\n", ep_priv); debug_urb_queued_for_etd(imx21, urb); goto out; } ep_priv->etd[0] = alloc_etd(imx21); if (ep_priv->etd[0] < 0) { dev_dbg(imx21->dev, "no ETD available queueing %p\n", ep_priv); debug_urb_queued_for_etd(imx21, urb); list_add_tail(&ep_priv->queue, &imx21->queue_for_etd); ep_priv->waiting_etd = 1; goto out; } } /* Schedule if no URB already active for this endpoint */ etd = &imx21->etd[ep_priv->etd[0]]; if (etd->urb == NULL) { DEBUG_LOG_FRAME(imx21, etd, last_req); schedule_nonisoc_etd(imx21, urb); } out: spin_unlock_irqrestore(&imx21->lock, flags); return 0; failed_link: failed_alloc_ep: spin_unlock_irqrestore(&imx21->lock, flags); kfree(urb_priv); return ret; } static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct imx21 *imx21 = hcd_to_imx21(hcd); unsigned long flags; struct usb_host_endpoint *ep; struct ep_priv *ep_priv; struct urb_priv *urb_priv = urb->hcpriv; int ret = -EINVAL; dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n", urb, usb_pipeisoc(urb->pipe), status); spin_lock_irqsave(&imx21->lock, flags); ret = usb_hcd_check_unlink_urb(hcd, urb, status); if (ret) goto fail; ep = urb_priv->ep; ep_priv = ep->hcpriv; debug_urb_unlinked(imx21, 
urb); if (usb_pipeisoc(urb->pipe)) { dequeue_isoc_urb(imx21, urb, ep_priv); schedule_isoc_etds(hcd, ep); } else if (urb_priv->active) { int etd_num = ep_priv->etd[0]; if (etd_num != -1) { struct etd_priv *etd = &imx21->etd[etd_num]; disactivate_etd(imx21, etd_num); free_dmem(imx21, etd); etd->urb = NULL; kfree(etd->bounce_buffer); etd->bounce_buffer = NULL; } } urb_done(hcd, urb, status); spin_unlock_irqrestore(&imx21->lock, flags); return 0; fail: spin_unlock_irqrestore(&imx21->lock, flags); return ret; } /* =========================================== */ /* Interrupt dispatch */ /* =========================================== */ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof) { int etd_num; int enable_sof_int = 0; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) { u32 etd_mask = 1 << etd_num; u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask; u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask; struct etd_priv *etd = &imx21->etd[etd_num]; if (done) { DEBUG_LOG_FRAME(imx21, etd, last_int); } else { /* * Kludge warning! * * When multiple transfers are using the bus we sometimes get into a state * where the transfer has completed (the CC field of the ETD is != 0x0F), * the ETD has self disabled but the ETDDONESTAT flag is not set * (and hence no interrupt occurs). * This causes the transfer in question to hang. * The kludge below checks for this condition at each SOF and processes any * blocked ETDs (after an arbitrary 10 frame wait) * * With a single active transfer the usbtest test suite will run for days * without the kludge. * With other bus activity (eg mass storage) even just test1 will hang without * the kludge. */ u32 dword0; int cc; if (etd->active_count && !enabled) /* suspicious... 
*/ enable_sof_int = 1; if (!sof || enabled || !etd->active_count) continue; cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE; if (cc == TD_NOTACCESSED) continue; if (++etd->active_count < 10) continue; dword0 = etd_readl(imx21, etd_num, 0); dev_dbg(imx21->dev, "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n", etd_num, dword0 & 0x7F, (dword0 >> DW0_ENDPNT) & 0x0F, cc); #ifdef DEBUG dev_dbg(imx21->dev, "frame: act=%d disact=%d" " int=%d req=%d cur=%d\n", etd->activated_frame, etd->disactivated_frame, etd->last_int_frame, etd->last_req_frame, readl(imx21->regs + USBH_FRMNUB)); imx21->debug_unblocks++; #endif etd->active_count = 0; /* End of kludge */ } if (etd->ep == NULL || etd->urb == NULL) { dev_dbg(imx21->dev, "Interrupt for unexpected etd %d" " ep=%p urb=%p\n", etd_num, etd->ep, etd->urb); disactivate_etd(imx21, etd_num); continue; } if (usb_pipeisoc(etd->urb->pipe)) isoc_etd_done(hcd, etd_num); else nonisoc_etd_done(hcd, etd_num); } /* only enable SOF interrupt if it may be needed for the kludge */ if (enable_sof_int) set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT); else clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT); spin_unlock_irqrestore(&imx21->lock, flags); } static irqreturn_t imx21_irq(struct usb_hcd *hcd) { struct imx21 *imx21 = hcd_to_imx21(hcd); u32 ints = readl(imx21->regs + USBH_SYSISR); if (ints & USBH_SYSIEN_HERRINT) dev_dbg(imx21->dev, "Scheduling error\n"); if (ints & USBH_SYSIEN_SORINT) dev_dbg(imx21->dev, "Scheduling overrun\n"); if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT)) process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT); writel(ints, imx21->regs + USBH_SYSISR); return IRQ_HANDLED; } static void imx21_hc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct imx21 *imx21 = hcd_to_imx21(hcd); unsigned long flags; struct ep_priv *ep_priv; int i; if (ep == NULL) return; spin_lock_irqsave(&imx21->lock, flags); ep_priv = ep->hcpriv; dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, 
ep_priv); if (!list_empty(&ep->urb_list)) dev_dbg(imx21->dev, "ep's URB list is not empty\n"); if (ep_priv != NULL) { for (i = 0; i < NUM_ISO_ETDS; i++) { if (ep_priv->etd[i] > -1) dev_dbg(imx21->dev, "free etd %d for disable\n", ep_priv->etd[i]); free_etd(imx21, ep_priv->etd[i]); } kfree(ep_priv); ep->hcpriv = NULL; } for (i = 0; i < USB_NUM_ETD; i++) { if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) { dev_err(imx21->dev, "Active etd %d for disabled ep=%p!\n", i, ep); free_etd(imx21, i); } } free_epdmem(imx21, ep); spin_unlock_irqrestore(&imx21->lock, flags); } /* =========================================== */ /* Hub handling */ /* =========================================== */ static int get_hub_descriptor(struct usb_hcd *hcd, struct usb_hub_descriptor *desc) { struct imx21 *imx21 = hcd_to_imx21(hcd); desc->bDescriptorType = 0x29; /* HUB descriptor */ desc->bHubContrCurrent = 0; desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA) & USBH_ROOTHUBA_NDNSTMPRT_MASK; desc->bDescLength = 9; desc->bPwrOn2PwrGood = 0; desc->wHubCharacteristics = (__force __u16) cpu_to_le16( 0x0002 | /* No power switching */ 0x0010 | /* No over current protection */ 0); desc->u.hs.DeviceRemovable[0] = 1 << 1; desc->u.hs.DeviceRemovable[1] = ~0; return 0; } static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf) { struct imx21 *imx21 = hcd_to_imx21(hcd); int ports; int changed = 0; int i; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); ports = readl(imx21->regs + USBH_ROOTHUBA) & USBH_ROOTHUBA_NDNSTMPRT_MASK; if (ports > 7) { ports = 7; dev_err(imx21->dev, "ports %d > 7\n", ports); } for (i = 0; i < ports; i++) { if (readl(imx21->regs + USBH_PORTSTAT(i)) & (USBH_PORTSTAT_CONNECTSC | USBH_PORTSTAT_PRTENBLSC | USBH_PORTSTAT_PRTSTATSC | USBH_PORTSTAT_OVRCURIC | USBH_PORTSTAT_PRTRSTSC)) { changed = 1; buf[0] |= 1 << (i + 1); } } spin_unlock_irqrestore(&imx21->lock, flags); if (changed) dev_info(imx21->dev, "Hub status changed\n"); return changed; } static int 
imx21_hc_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct imx21 *imx21 = hcd_to_imx21(hcd); int rc = 0; u32 status_write = 0; switch (typeReq) { case ClearHubFeature: dev_dbg(imx21->dev, "ClearHubFeature\n"); switch (wValue) { case C_HUB_OVER_CURRENT: dev_dbg(imx21->dev, " OVER_CURRENT\n"); break; case C_HUB_LOCAL_POWER: dev_dbg(imx21->dev, " LOCAL_POWER\n"); break; default: dev_dbg(imx21->dev, " unknown\n"); rc = -EINVAL; break; } break; case ClearPortFeature: dev_dbg(imx21->dev, "ClearPortFeature\n"); switch (wValue) { case USB_PORT_FEAT_ENABLE: dev_dbg(imx21->dev, " ENABLE\n"); status_write = USBH_PORTSTAT_CURCONST; break; case USB_PORT_FEAT_SUSPEND: dev_dbg(imx21->dev, " SUSPEND\n"); status_write = USBH_PORTSTAT_PRTOVRCURI; break; case USB_PORT_FEAT_POWER: dev_dbg(imx21->dev, " POWER\n"); status_write = USBH_PORTSTAT_LSDEVCON; break; case USB_PORT_FEAT_C_ENABLE: dev_dbg(imx21->dev, " C_ENABLE\n"); status_write = USBH_PORTSTAT_PRTENBLSC; break; case USB_PORT_FEAT_C_SUSPEND: dev_dbg(imx21->dev, " C_SUSPEND\n"); status_write = USBH_PORTSTAT_PRTSTATSC; break; case USB_PORT_FEAT_C_CONNECTION: dev_dbg(imx21->dev, " C_CONNECTION\n"); status_write = USBH_PORTSTAT_CONNECTSC; break; case USB_PORT_FEAT_C_OVER_CURRENT: dev_dbg(imx21->dev, " C_OVER_CURRENT\n"); status_write = USBH_PORTSTAT_OVRCURIC; break; case USB_PORT_FEAT_C_RESET: dev_dbg(imx21->dev, " C_RESET\n"); status_write = USBH_PORTSTAT_PRTRSTSC; break; default: dev_dbg(imx21->dev, " unknown\n"); rc = -EINVAL; break; } break; case GetHubDescriptor: dev_dbg(imx21->dev, "GetHubDescriptor\n"); rc = get_hub_descriptor(hcd, (void *)buf); break; case GetHubStatus: dev_dbg(imx21->dev, " GetHubStatus\n"); *(__le32 *) buf = 0; break; case GetPortStatus: dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n", wIndex, USBH_PORTSTAT(wIndex - 1)); *(__le32 *) buf = readl(imx21->regs + USBH_PORTSTAT(wIndex - 1)); break; case SetHubFeature: dev_dbg(imx21->dev, 
"SetHubFeature\n"); switch (wValue) { case C_HUB_OVER_CURRENT: dev_dbg(imx21->dev, " OVER_CURRENT\n"); break; case C_HUB_LOCAL_POWER: dev_dbg(imx21->dev, " LOCAL_POWER\n"); break; default: dev_dbg(imx21->dev, " unknown\n"); rc = -EINVAL; break; } break; case SetPortFeature: dev_dbg(imx21->dev, "SetPortFeature\n"); switch (wValue) { case USB_PORT_FEAT_SUSPEND: dev_dbg(imx21->dev, " SUSPEND\n"); status_write = USBH_PORTSTAT_PRTSUSPST; break; case USB_PORT_FEAT_POWER: dev_dbg(imx21->dev, " POWER\n"); status_write = USBH_PORTSTAT_PRTPWRST; break; case USB_PORT_FEAT_RESET: dev_dbg(imx21->dev, " RESET\n"); status_write = USBH_PORTSTAT_PRTRSTST; break; default: dev_dbg(imx21->dev, " unknown\n"); rc = -EINVAL; break; } break; default: dev_dbg(imx21->dev, " unknown\n"); rc = -EINVAL; break; } if (status_write) writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1)); return rc; } /* =========================================== */ /* Host controller management */ /* =========================================== */ static int imx21_hc_reset(struct usb_hcd *hcd) { struct imx21 *imx21 = hcd_to_imx21(hcd); unsigned long timeout; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); /* Reset the Host controller modules */ writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH | USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC, imx21->regs + USBOTG_RST_CTRL); /* Wait for reset to finish */ timeout = jiffies + HZ; while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) { if (time_after(jiffies, timeout)) { spin_unlock_irqrestore(&imx21->lock, flags); dev_err(imx21->dev, "timeout waiting for reset\n"); return -ETIMEDOUT; } spin_unlock_irq(&imx21->lock); schedule_timeout_uninterruptible(1); spin_lock_irq(&imx21->lock); } spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static int __devinit imx21_hc_start(struct usb_hcd *hcd) { struct imx21 *imx21 = hcd_to_imx21(hcd); unsigned long flags; int i, j; u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST; u32 usb_control = 0; hw_mode |= 
((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) & USBOTG_HWMODE_HOSTXCVR_MASK); hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) & USBOTG_HWMODE_OTGXCVR_MASK); if (imx21->pdata->host1_txenoe) usb_control |= USBCTRL_HOST1_TXEN_OE; if (!imx21->pdata->host1_xcverless) usb_control |= USBCTRL_HOST1_BYP_TLL; if (imx21->pdata->otg_ext_xcvr) usb_control |= USBCTRL_OTC_RCV_RXDP; spin_lock_irqsave(&imx21->lock, flags); writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN), imx21->regs + USBOTG_CLK_CTRL); writel(hw_mode, imx21->regs + USBOTG_HWMODE); writel(usb_control, imx21->regs + USBCTRL); writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE, imx21->regs + USB_MISCCONTROL); /* Clear the ETDs */ for (i = 0; i < USB_NUM_ETD; i++) for (j = 0; j < 4; j++) etd_writel(imx21, i, j, 0); /* Take the HC out of reset */ writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1, imx21->regs + USBH_HOST_CTRL); /* Enable ports */ if (imx21->pdata->enable_otg_host) writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, imx21->regs + USBH_PORTSTAT(0)); if (imx21->pdata->enable_host1) writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, imx21->regs + USBH_PORTSTAT(1)); if (imx21->pdata->enable_host2) writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, imx21->regs + USBH_PORTSTAT(2)); hcd->state = HC_STATE_RUNNING; /* Enable host controller interrupts */ set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_HERRINT | USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT); set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static void imx21_hc_stop(struct usb_hcd *hcd) { struct imx21 *imx21 = hcd_to_imx21(hcd); unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); writel(0, imx21->regs + USBH_SYSIEN); clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN, USBOTG_CLK_CTRL); 
spin_unlock_irqrestore(&imx21->lock, flags); } /* =========================================== */ /* Driver glue */ /* =========================================== */ static struct hc_driver imx21_hc_driver = { .description = hcd_name, .product_desc = "IMX21 USB Host Controller", .hcd_priv_size = sizeof(struct imx21), .flags = HCD_USB11, .irq = imx21_irq, .reset = imx21_hc_reset, .start = imx21_hc_start, .stop = imx21_hc_stop, /* I/O requests */ .urb_enqueue = imx21_hc_urb_enqueue, .urb_dequeue = imx21_hc_urb_dequeue, .endpoint_disable = imx21_hc_endpoint_disable, /* scheduling support */ .get_frame_number = imx21_hc_get_frame, /* Root hub support */ .hub_status_data = imx21_hc_hub_status_data, .hub_control = imx21_hc_hub_control, }; static struct mx21_usbh_platform_data default_pdata = { .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF, .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF, .enable_host1 = 1, .enable_host2 = 1, .enable_otg_host = 1, }; static int imx21_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct imx21 *imx21 = hcd_to_imx21(hcd); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); remove_debug_files(imx21); usb_remove_hcd(hcd); if (res != NULL) { clk_disable(imx21->clk); clk_put(imx21->clk); iounmap(imx21->regs); release_mem_region(res->start, resource_size(res)); } kfree(hcd); return 0; } static int imx21_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct imx21 *imx21; struct resource *res; int ret; int irq; printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENXIO; hcd = usb_create_hcd(&imx21_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (hcd == NULL) { dev_err(&pdev->dev, "Cannot create hcd (%s)\n", dev_name(&pdev->dev)); return -ENOMEM; } imx21 = hcd_to_imx21(hcd); imx21->hcd = hcd; imx21->dev = &pdev->dev; imx21->pdata = pdev->dev.platform_data; if 
(!imx21->pdata) imx21->pdata = &default_pdata; spin_lock_init(&imx21->lock); INIT_LIST_HEAD(&imx21->dmem_list); INIT_LIST_HEAD(&imx21->queue_for_etd); INIT_LIST_HEAD(&imx21->queue_for_dmem); create_debug_files(imx21); res = request_mem_region(res->start, resource_size(res), hcd_name); if (!res) { ret = -EBUSY; goto failed_request_mem; } imx21->regs = ioremap(res->start, resource_size(res)); if (imx21->regs == NULL) { dev_err(imx21->dev, "Cannot map registers\n"); ret = -ENOMEM; goto failed_ioremap; } /* Enable clocks source */ imx21->clk = clk_get(imx21->dev, NULL); if (IS_ERR(imx21->clk)) { dev_err(imx21->dev, "no clock found\n"); ret = PTR_ERR(imx21->clk); goto failed_clock_get; } ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000)); if (ret) goto failed_clock_set; ret = clk_enable(imx21->clk); if (ret) goto failed_clock_enable; dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n", (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF); ret = usb_add_hcd(hcd, irq, 0); if (ret != 0) { dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret); goto failed_add_hcd; } return 0; failed_add_hcd: clk_disable(imx21->clk); failed_clock_enable: failed_clock_set: clk_put(imx21->clk); failed_clock_get: iounmap(imx21->regs); failed_ioremap: release_mem_region(res->start, resource_size(res)); failed_request_mem: remove_debug_files(imx21); usb_put_hcd(hcd); return ret; } static struct platform_driver imx21_hcd_driver = { .driver = { .name = (char *)hcd_name, }, .probe = imx21_probe, .remove = imx21_remove, .suspend = NULL, .resume = NULL, }; module_platform_driver(imx21_hcd_driver); MODULE_DESCRIPTION("i.MX21 USB Host controller"); MODULE_AUTHOR("Martin Fuzzey"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:imx21-hcd");
gpl-2.0
MingoAllenII/draconis_msm8226
drivers/staging/comedi/drivers/adl_pci9118.c
5457
72820
/* * comedi/drivers/adl_pci9118.c * * hardware driver for ADLink cards: * card: PCI-9118DG, PCI-9118HG, PCI-9118HR * driver: pci9118dg, pci9118hg, pci9118hr * * Author: Michal Dobes <dobes@tesnet.cz> * */ /* Driver: adl_pci9118 Description: Adlink PCI-9118DG, PCI-9118HG, PCI-9118HR Author: Michal Dobes <dobes@tesnet.cz> Devices: [ADLink] PCI-9118DG (pci9118dg), PCI-9118HG (pci9118hg), PCI-9118HR (pci9118hr) Status: works This driver supports AI, AO, DI and DO subdevices. AI subdevice supports cmd and insn interface, other subdevices support only insn interface. For AI: - If cmd->scan_begin_src=TRIG_EXT then trigger input is TGIN (pin 46). - If cmd->convert_src=TRIG_EXT then trigger input is EXTTRG (pin 44). - If cmd->start_src/stop_src=TRIG_EXT then trigger input is TGIN (pin 46). - It is not necessary to have cmd.scan_end_arg=cmd.chanlist_len but cmd.scan_end_arg modulo cmd.chanlist_len must by 0. - If return value of cmdtest is 5 then you've bad channel list (it isn't possible mixture S.E. and DIFF inputs or bipolar and unipolar ranges). There are some hardware limitations: a) You cann't use mixture of unipolar/bipoar ranges or differencial/single ended inputs. b) DMA transfers must have the length aligned to two samples (32 bit), so there is some problems if cmd->chanlist_len is odd. This driver tries bypass this with adding one sample to the end of the every scan and discard it on output but this cann't be used if cmd->scan_begin_src=TRIG_FOLLOW and is used flag TRIG_WAKE_EOS, then driver switch to interrupt driven mode with interrupt after every sample. c) If isn't used DMA then you can use only mode where cmd->scan_begin_src=TRIG_FOLLOW. Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, then first available PCI card will be used. 
[2] - 0= standard 8 DIFF/16 SE channels configuration n = external multiplexer connected, 1 <= n <= 256 [3] - 0=autoselect DMA or EOC interrupts operation 1 = disable DMA mode 3 = disable DMA and INT, only insn interface will work [4] - sample&hold signal - card can generate signal for external S&H board 0 = use SSHO(pin 45) signal is generated in onboard hardware S&H logic 0 != use ADCHN7(pin 23) signal is generated from driver, number say how long delay is requested in ns and sign polarity of the hold (in this case external multiplexor can serve only 128 channels) [5] - 0=stop measure on all hardware errors 2 | = ignore ADOR - A/D Overrun status 8|=ignore Bover - A/D Burst Mode Overrun status 256|=ignore nFull - A/D FIFO Full status */ #include "../comedidev.h" #include <linux/delay.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/io.h> #include "amcc_s5933.h" #include "8253.h" #include "comedi_pci.h" #include "comedi_fc.h" #define PCI_VENDOR_ID_AMCC 0x10e8 /* paranoid checks are broken */ #undef PCI9118_PARANOIDCHECK /* * if defined, then is used code which control * correct channel number on every 12 bit sample */ #undef PCI9118_EXTDEBUG /* * if defined then driver prints * a lot of messages */ #undef DPRINTK #ifdef PCI9118_EXTDEBUG #define DPRINTK(fmt, args...) printk(fmt, ## args) #else #define DPRINTK(fmt, args...) 
#endif #define IORANGE_9118 64 /* I hope */ #define PCI9118_CHANLEN 255 /* * len of chanlist, some source say 256, * but reality looks like 255 :-( */ #define PCI9118_CNT0 0x00 /* R/W: 8254 counter 0 */ #define PCI9118_CNT1 0x04 /* R/W: 8254 counter 0 */ #define PCI9118_CNT2 0x08 /* R/W: 8254 counter 0 */ #define PCI9118_CNTCTRL 0x0c /* W: 8254 counter control */ #define PCI9118_AD_DATA 0x10 /* R: A/D data */ #define PCI9118_DA1 0x10 /* W: D/A registers */ #define PCI9118_DA2 0x14 #define PCI9118_ADSTAT 0x18 /* R: A/D status register */ #define PCI9118_ADCNTRL 0x18 /* W: A/D control register */ #define PCI9118_DI 0x1c /* R: digi input register */ #define PCI9118_DO 0x1c /* W: digi output register */ #define PCI9118_SOFTTRG 0x20 /* W: soft trigger for A/D */ #define PCI9118_GAIN 0x24 /* W: A/D gain/channel register */ #define PCI9118_BURST 0x28 /* W: A/D burst number register */ #define PCI9118_SCANMOD 0x2c /* W: A/D auto scan mode */ #define PCI9118_ADFUNC 0x30 /* W: A/D function register */ #define PCI9118_DELFIFO 0x34 /* W: A/D data FIFO reset */ #define PCI9118_INTSRC 0x38 /* R: interrupt reason register */ #define PCI9118_INTCTRL 0x38 /* W: interrupt control register */ /* bits from A/D control register (PCI9118_ADCNTRL) */ #define AdControl_UniP 0x80 /* 1=bipolar, 0=unipolar */ #define AdControl_Diff 0x40 /* 1=differential, 0= single end inputs */ #define AdControl_SoftG 0x20 /* 1=8254 counter works, 0=counter stops */ #define AdControl_ExtG 0x10 /* * 1=8254 countrol controlled by TGIN(pin 46), * 0=controlled by SoftG */ #define AdControl_ExtM 0x08 /* * 1=external hardware trigger (pin 44), * 0=internal trigger */ #define AdControl_TmrTr 0x04 /* * 1=8254 is iternal trigger source, * 0=software trigger is source * (register PCI9118_SOFTTRG) */ #define AdControl_Int 0x02 /* 1=enable INT, 0=disable */ #define AdControl_Dma 0x01 /* 1=enable DMA, 0=disable */ /* bits from A/D function register (PCI9118_ADFUNC) */ #define AdFunction_PDTrg 0x80 /* * 1=positive, * 
0=negative digital trigger * (only positive is correct) */ #define AdFunction_PETrg 0x40 /* * 1=positive, * 0=negative external trigger * (only positive is correct) */ #define AdFunction_BSSH 0x20 /* 1=with sample&hold, 0=without */ #define AdFunction_BM 0x10 /* 1=burst mode, 0=normal mode */ #define AdFunction_BS 0x08 /* * 1=burst mode start, * 0=burst mode stop */ #define AdFunction_PM 0x04 /* * 1=post trigger mode, * 0=not post trigger */ #define AdFunction_AM 0x02 /* * 1=about trigger mode, * 0=not about trigger */ #define AdFunction_Start 0x01 /* 1=trigger start, 0=trigger stop */ /* bits from A/D status register (PCI9118_ADSTAT) */ #define AdStatus_nFull 0x100 /* 0=FIFO full (fatal), 1=not full */ #define AdStatus_nHfull 0x080 /* 0=FIFO half full, 1=FIFO not half full */ #define AdStatus_nEpty 0x040 /* 0=FIFO empty, 1=FIFO not empty */ #define AdStatus_Acmp 0x020 /* */ #define AdStatus_DTH 0x010 /* 1=external digital trigger */ #define AdStatus_Bover 0x008 /* 1=burst mode overrun (fatal) */ #define AdStatus_ADOS 0x004 /* 1=A/D over speed (warning) */ #define AdStatus_ADOR 0x002 /* 1=A/D overrun (fatal) */ #define AdStatus_ADrdy 0x001 /* 1=A/D already ready, 0=not ready */ /* bits for interrupt reason and control (PCI9118_INTSRC, PCI9118_INTCTRL) */ /* 1=interrupt occur, enable source, 0=interrupt not occur, disable source */ #define Int_Timer 0x08 /* timer interrupt */ #define Int_About 0x04 /* about trigger complete */ #define Int_Hfull 0x02 /* A/D FIFO hlaf full */ #define Int_DTrg 0x01 /* external digital trigger */ #define START_AI_EXT 0x01 /* start measure on external trigger */ #define STOP_AI_EXT 0x02 /* stop measure on external trigger */ #define START_AI_INT 0x04 /* start measure on internal trigger */ #define STOP_AI_INT 0x08 /* stop measure on internal trigger */ #define EXTTRG_AI 0 /* ext trg is used by AI */ static const struct comedi_lrange range_pci9118dg_hr = { 8, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), 
UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; static const struct comedi_lrange range_pci9118hg = { 8, { BIP_RANGE(5), BIP_RANGE(0.5), BIP_RANGE(0.05), BIP_RANGE(0.005), UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01) } }; #define PCI9118_BIPOLAR_RANGES 4 /* * used for test on mixture * of BIP/UNI ranges */ static int pci9118_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int pci9118_detach(struct comedi_device *dev); struct boardtype { const char *name; /* board name */ int vendor_id; /* PCI vendor a device ID of card */ int device_id; int iorange_amcc; /* iorange for own S5933 region */ int iorange_9118; /* pass thru card region size */ int n_aichan; /* num of A/D chans */ int n_aichand; /* num of A/D chans in diff mode */ int mux_aichan; /* * num of A/D chans with * external multiplexor */ int n_aichanlist; /* len of chanlist */ int n_aochan; /* num of D/A chans */ int ai_maxdata; /* resolution of A/D */ int ao_maxdata; /* resolution of D/A */ const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */ const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */ unsigned int ai_ns_min; /* max sample speed of card v ns */ unsigned int ai_pacer_min; /* * minimal pacer value * (c1*c2 or c1 in burst) */ int half_fifo_size; /* size of FIFO/2 */ }; static DEFINE_PCI_DEVICE_TABLE(pci9118_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80d9) }, { 0 } }; MODULE_DEVICE_TABLE(pci, pci9118_pci_table); static const struct boardtype boardtypes[] = { {"pci9118dg", PCI_VENDOR_ID_AMCC, 0x80d9, AMCC_OP_REG_SIZE, IORANGE_9118, 16, 8, 256, PCI9118_CHANLEN, 2, 0x0fff, 0x0fff, &range_pci9118dg_hr, &range_bipolar10, 3000, 12, 512}, {"pci9118hg", PCI_VENDOR_ID_AMCC, 0x80d9, AMCC_OP_REG_SIZE, IORANGE_9118, 16, 8, 256, PCI9118_CHANLEN, 2, 0x0fff, 0x0fff, &range_pci9118hg, &range_bipolar10, 3000, 12, 512}, {"pci9118hr", PCI_VENDOR_ID_AMCC, 0x80d9, AMCC_OP_REG_SIZE, IORANGE_9118, 16, 8, 256, PCI9118_CHANLEN, 2, 0xffff, 
0x0fff, &range_pci9118dg_hr, &range_bipolar10, 10000, 40, 512}, }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct boardtype)) static struct comedi_driver driver_pci9118 = { .driver_name = "adl_pci9118", .module = THIS_MODULE, .attach = pci9118_attach, .detach = pci9118_detach, .num_names = n_boardtypes, .board_name = &boardtypes[0].name, .offset = sizeof(struct boardtype), }; static int __devinit driver_pci9118_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_pci9118.driver_name); } static void __devexit driver_pci9118_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_pci9118_pci_driver = { .id_table = pci9118_pci_table, .probe = &driver_pci9118_pci_probe, .remove = __devexit_p(&driver_pci9118_pci_remove) }; static int __init driver_pci9118_init_module(void) { int retval; retval = comedi_driver_register(&driver_pci9118); if (retval < 0) return retval; driver_pci9118_pci_driver.name = (char *)driver_pci9118.driver_name; return pci_register_driver(&driver_pci9118_pci_driver); } static void __exit driver_pci9118_cleanup_module(void) { pci_unregister_driver(&driver_pci9118_pci_driver); comedi_driver_unregister(&driver_pci9118); } module_init(driver_pci9118_init_module); module_exit(driver_pci9118_cleanup_module); struct pci9118_private { unsigned long iobase_a; /* base+size for AMCC chip */ unsigned int master; /* master capable */ struct pci_dev *pcidev; /* ptr to actual pcidev */ unsigned int usemux; /* we want to use external multiplexor! 
*/ #ifdef PCI9118_PARANOIDCHECK unsigned short chanlist[PCI9118_CHANLEN + 1]; /* * list of * scanned channel */ unsigned char chanlistlen; /* number of scanlist */ #endif unsigned char AdControlReg; /* A/D control register */ unsigned char IntControlReg; /* Interrupt control register */ unsigned char AdFunctionReg; /* A/D function register */ char valid; /* driver is ok */ char ai_neverending; /* we do unlimited AI */ unsigned int i8254_osc_base; /* frequence of onboard oscilator */ unsigned int ai_do; /* what do AI? 0=nothing, 1 to 4 mode */ unsigned int ai_act_scan; /* how many scans we finished */ unsigned int ai_buf_ptr; /* data buffer ptr in samples */ unsigned int ai_n_chan; /* how many channels is measured */ unsigned int ai_n_scanlen; /* len of actual scanlist */ unsigned int ai_n_realscanlen; /* * what we must transfer for one * outgoing scan include front/back adds */ unsigned int ai_act_dmapos; /* position in actual real stream */ unsigned int ai_add_front; /* * how many channels we must add * before scan to satisfy S&H? */ unsigned int ai_add_back; /* * how many channels we must add * before scan to satisfy DMA? 
 */				/* (tail of struct pci9118_private) */
	unsigned int *ai_chanlist;	/* actual chanlist */
	unsigned int ai_timer1;
	unsigned int ai_timer2;
	unsigned int ai_flags;
	char ai12_startstop;		/* measure can start/stop
					 * on external trigger */
	unsigned int ai_divisor1, ai_divisor2;	/* divisors for start of measure
						 * on external start */
	unsigned int ai_data_len;
	short *ai_data;
	short ao_data[2];		/* data output buffer */
	unsigned int ai_scans;		/* number of scans to do */
	char dma_doublebuf;		/* we can use double buffring */
	unsigned int dma_actbuf;	/* which buffer is used now */
	short *dmabuf_virt[2];		/* pointers to begin of DMA buffer */
	unsigned long dmabuf_hw[2];	/* hw address of DMA buff */
	unsigned int dmabuf_size[2];	/* size of dma buffer in bytes */
	unsigned int dmabuf_use_size[2];	/* which size we may now use
						 * for transfer */
	unsigned int dmabuf_used_size[2];	/* which size was truly used */
	unsigned int dmabuf_panic_size[2];
	unsigned int dmabuf_samples[2];	/* size in samples */
	int dmabuf_pages[2];		/* number of pages in buffer */
	unsigned char cnt0_users;	/* bit field of 8254 CNT0 users
					 * (0-unused, 1-AO, 2-DI, 3-DO) */
	unsigned char exttrg_users;	/* bit field of external trigger
					 * users (0-AI, 1-AO, 2-DI, 3-DO) */
	unsigned int cnt0_divisor;	/* actual CNT0 divisor */
	void (*int_ai_func) (struct comedi_device *, struct comedi_subdevice *,
		unsigned short, unsigned int, unsigned short);
					/* ptr to actual interrupt AI function */
	unsigned char ai16bits;		/* =1 16 bit card */
	unsigned char usedma;		/* =1 use DMA transfer and not INT */
	unsigned char useeoshandle;	/* =1 change WAKE_EOS DMA transfer
					 * to fit on every second */
	unsigned char usessh;		/* =1 turn on S&H support */
	int softsshdelay;		/* >0 use software S&H,
					 * numer is requested delay in ns */
	unsigned char softsshsample;	/* polarity of S&H signal
					 * in sample state */
	unsigned char softsshhold;	/* polarity of S&H signal
					 * in hold state */
	unsigned int ai_maskerr;	/* which warning was printed */
	unsigned int ai_maskharderr;	/* on which error bits stops */
	unsigned int ai_inttrig_start;	/* TRIG_INT for start */
};

#define devpriv ((struct pci9118_private *)dev->private)
#define this_board ((struct boardtype *)dev->board_ptr)

/*
==============================================================================
*/

static int check_channel_list(struct comedi_device *dev,
			      struct comedi_subdevice *s, int n_chan,
			      unsigned int *chanlist, int frontadd,
			      int backadd);
static int setup_channel_list(struct comedi_device *dev,
			      struct comedi_subdevice *s, int n_chan,
			      unsigned int *chanlist, int rot, int frontadd,
			      int backadd, int usedma, char eoshandle);
static void start_pacer(struct comedi_device *dev, int mode,
			unsigned int divisor1, unsigned int divisor2);
static int pci9118_reset(struct comedi_device *dev);
static int pci9118_exttrg_add(struct comedi_device *dev, unsigned char source);
static int pci9118_exttrg_del(struct comedi_device *dev, unsigned char source);
static int pci9118_ai_cancel(struct comedi_device *dev,
			     struct comedi_subdevice *s);
static void pci9118_calc_divisors(char mode, struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  unsigned int *tim1, unsigned int *tim2,
				  unsigned int flags, int chans,
				  unsigned int *div1, unsigned int *div2,
				  char usessh, unsigned int chnsshfront);

/*
==============================================================================
*/

/*
 * Single-shot AI read: software-triggered conversion, polled for completion.
 * Returns the number of samples read, -EINVAL on a bad chanlist or
 * -ETIME if a conversion never completes.
 */
static int pci9118_insn_read_ai(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int n, timeout;

	devpriv->AdControlReg = AdControl_Int & 0xff;
	devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
	outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
	/*
	 * positive triggers, no S&H, no burst, burst stop,
	 * no post trigger, no about trigger, trigger stop
	 */

	if (!setup_channel_list(dev, s, 1, &insn->chanspec, 0, 0, 0, 0, 0))
		return -EINVAL;

	outl(0, dev->iobase + PCI9118_DELFIFO);	/* flush FIFO */

	for (n = 0; n < insn->n; n++) {
		outw(0, dev->iobase + PCI9118_SOFTTRG);	/* start conversion */
		udelay(2);
		timeout = 100;
		while (timeout--) {
			if (inl(dev->iobase + PCI9118_ADSTAT) & AdStatus_ADrdy)
				goto conv_finish;
			udelay(1);
		}
		comedi_error(dev, "A/D insn timeout");
		data[n] = 0;
		outl(0, dev->iobase + PCI9118_DELFIFO);	/* flush FIFO */
		return -ETIME;

conv_finish:
		/* 16-bit boards: flip sign bit; 12-bit boards: data in bits 4..15 */
		if (devpriv->ai16bits) {
			data[n] =
			    (inl(dev->iobase + PCI9118_AD_DATA) & 0xffff) ^
			    0x8000;
		} else {
			data[n] =
			    (inw(dev->iobase + PCI9118_AD_DATA) >> 4) & 0xfff;
		}
	}

	outl(0, dev->iobase + PCI9118_DELFIFO);	/* flush FIFO */

	return n;
}

/*
==============================================================================
*/

/* AO write: latch each value into DA1/DA2 and mirror it for readback. */
static int pci9118_insn_write_ao(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	int n, chanreg, ch;

	ch = CR_CHAN(insn->chanspec);
	if (ch)
		chanreg = PCI9118_DA2;
	else
		chanreg = PCI9118_DA1;

	for (n = 0; n < insn->n; n++) {
		outl(data[n], dev->iobase + chanreg);
		devpriv->ao_data[ch] = data[n];
	}

	return n;
}

/*
==============================================================================
*/

/* AO readback from the software mirror (hardware is write-only). */
static int pci9118_insn_read_ao(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int n, chan;

	chan = CR_CHAN(insn->chanspec);
	for (n = 0; n < insn->n; n++)
		data[n] = devpriv->ao_data[chan];

	return n;
}

/*
==============================================================================
*/

/* Read the 4 digital input lines. */
static int pci9118_insn_bits_di(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	data[1] = inl(dev->iobase + PCI9118_DI) & 0xf;

	return 2;
}

/*
==============================================================================
*/

/* Update the 4 digital output lines per mask (data[0]) and bits (data[1]). */
static int pci9118_insn_bits_do(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	if (data[0]) {
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
		outl(s->state & 0x0f, dev->iobase + PCI9118_DO);
	}
	data[1] = s->state;

	return 2;
}

/*
============================================================================== */ static void interrupt_pci9118_ai_mode4_switch(struct comedi_device *dev) { devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg | AdFunction_AM; outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); outl(0x30, dev->iobase + PCI9118_CNTCTRL); outl((devpriv->dmabuf_hw[1 - devpriv->dma_actbuf] >> 1) & 0xff, dev->iobase + PCI9118_CNT0); outl((devpriv->dmabuf_hw[1 - devpriv->dma_actbuf] >> 9) & 0xff, dev->iobase + PCI9118_CNT0); devpriv->AdFunctionReg |= AdFunction_Start; outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); } static unsigned int defragment_dma_buffer(struct comedi_device *dev, struct comedi_subdevice *s, short *dma_buffer, unsigned int num_samples) { unsigned int i = 0, j = 0; unsigned int start_pos = devpriv->ai_add_front, stop_pos = devpriv->ai_add_front + devpriv->ai_n_chan; unsigned int raw_scanlen = devpriv->ai_add_front + devpriv->ai_n_chan + devpriv->ai_add_back; for (i = 0; i < num_samples; i++) { if (devpriv->ai_act_dmapos >= start_pos && devpriv->ai_act_dmapos < stop_pos) { dma_buffer[j++] = dma_buffer[i]; } devpriv->ai_act_dmapos++; devpriv->ai_act_dmapos %= raw_scanlen; } return j; } /* ============================================================================== */ static int move_block_from_dma(struct comedi_device *dev, struct comedi_subdevice *s, short *dma_buffer, unsigned int num_samples) { unsigned int num_bytes; num_samples = defragment_dma_buffer(dev, s, dma_buffer, num_samples); devpriv->ai_act_scan += (s->async->cur_chan + num_samples) / devpriv->ai_n_scanlen; s->async->cur_chan += num_samples; s->async->cur_chan %= devpriv->ai_n_scanlen; num_bytes = cfc_write_array_to_buffer(s, dma_buffer, num_samples * sizeof(short)); if (num_bytes < num_samples * sizeof(short)) return -1; return 0; } /* ============================================================================== */ static char pci9118_decode_error_status(struct 
comedi_device *dev, struct comedi_subdevice *s, unsigned char m) { if (m & 0x100) { comedi_error(dev, "A/D FIFO Full status (Fatal Error!)"); devpriv->ai_maskerr &= ~0x100L; } if (m & 0x008) { comedi_error(dev, "A/D Burst Mode Overrun Status (Fatal Error!)"); devpriv->ai_maskerr &= ~0x008L; } if (m & 0x004) { comedi_error(dev, "A/D Over Speed Status (Warning!)"); devpriv->ai_maskerr &= ~0x004L; } if (m & 0x002) { comedi_error(dev, "A/D Overrun Status (Fatal Error!)"); devpriv->ai_maskerr &= ~0x002L; } if (m & devpriv->ai_maskharderr) { s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; pci9118_ai_cancel(dev, s); comedi_event(dev, s); return 1; } return 0; } static void pci9118_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *data, unsigned int num_bytes, unsigned int start_chan_index) { unsigned int i, num_samples = num_bytes / sizeof(short); short *array = data; for (i = 0; i < num_samples; i++) { if (devpriv->usedma) array[i] = be16_to_cpu(array[i]); if (devpriv->ai16bits) array[i] ^= 0x8000; else array[i] = (array[i] >> 4) & 0x0fff; } } /* ============================================================================== */ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev, struct comedi_subdevice *s, unsigned short int_adstat, unsigned int int_amcc, unsigned short int_daq) { register short sampl; s->async->events = 0; if (int_adstat & devpriv->ai_maskerr) if (pci9118_decode_error_status(dev, s, int_adstat)) return; sampl = inw(dev->iobase + PCI9118_AD_DATA); #ifdef PCI9118_PARANOIDCHECK if (devpriv->ai16bits == 0) { if ((sampl & 0x000f) != devpriv->chanlist[s->async->cur_chan]) { /* data dropout! 
 */
			printk
			    ("comedi: A/D SAMPL - data dropout: "
			     "received channel %d, expected %d!\n",
			     sampl & 0x000f,
			     devpriv->chanlist[s->async->cur_chan]);
			s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
			pci9118_ai_cancel(dev, s);
			comedi_event(dev, s);
			return;
		}
	}
#endif
	cfc_write_to_buffer(s, sampl);
	s->async->cur_chan++;
	if (s->async->cur_chan >= devpriv->ai_n_scanlen) {
		/* one scan done */
		s->async->cur_chan %= devpriv->ai_n_scanlen;
		devpriv->ai_act_scan++;
		if (!(devpriv->ai_neverending))
			if (devpriv->ai_act_scan >= devpriv->ai_scans) {
				/* all data sampled */
				pci9118_ai_cancel(dev, s);
				s->async->events |= COMEDI_CB_EOA;
			}
	}

	if (s->async->events)
		comedi_event(dev, s);
}

/*
==============================================================================
*/

/*
 * DMA AI transfer: one interrupt per filled DMA buffer.  Handles AMCC bus
 * aborts, A/D status errors, double-buffer switching and DMA restart.
 */
static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     unsigned short int_adstat,
				     unsigned int int_amcc,
				     unsigned short int_daq)
{
	unsigned int next_dma_buf, samplesinbuf, sampls, m;

	if (int_amcc & MASTER_ABORT_INT) {
		comedi_error(dev, "AMCC IRQ - MASTER DMA ABORT!");
		s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
		pci9118_ai_cancel(dev, s);
		comedi_event(dev, s);
		return;
	}

	if (int_amcc & TARGET_ABORT_INT) {
		comedi_error(dev, "AMCC IRQ - TARGET DMA ABORT!");
		s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
		pci9118_ai_cancel(dev, s);
		comedi_event(dev, s);
		return;
	}

	if (int_adstat & devpriv->ai_maskerr)
		/* if (int_adstat & 0x106) */
		if (pci9118_decode_error_status(dev, s, int_adstat))
			return;

	samplesinbuf = devpriv->dmabuf_use_size[devpriv->dma_actbuf] >> 1;
					/* number of received real samples */

	if (devpriv->dma_doublebuf) {
		/* switch DMA buffers if is used double buffering */
		next_dma_buf = 1 - devpriv->dma_actbuf;
		outl(devpriv->dmabuf_hw[next_dma_buf],
		     devpriv->iobase_a + AMCC_OP_REG_MWAR);
		outl(devpriv->dmabuf_use_size[next_dma_buf],
		     devpriv->iobase_a + AMCC_OP_REG_MWTC);
		devpriv->dmabuf_used_size[next_dma_buf] =
		    devpriv->dmabuf_use_size[next_dma_buf];
		if (devpriv->ai_do == 4)
			interrupt_pci9118_ai_mode4_switch(dev);
	}

	if (samplesinbuf) {
		m = devpriv->ai_data_len >> 1;
				/* how many samples is to end of buffer */
		sampls = m;
		move_block_from_dma(dev, s,
				    devpriv->dmabuf_virt[devpriv->dma_actbuf],
				    samplesinbuf);
		m = m - sampls;	/* m= how many samples was transferred */
		/*
		 * NOTE(review): m is always 0 here (sampls was just set to m
		 * and neither changes in between); 'm' is effectively unused.
		 */
	}

	if (!devpriv->ai_neverending)
		if (devpriv->ai_act_scan >= devpriv->ai_scans) {
			/* all data sampled */
			pci9118_ai_cancel(dev, s);
			s->async->events |= COMEDI_CB_EOA;
		}

	if (devpriv->dma_doublebuf) {
		/* switch dma buffers */
		devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
	} else {
		/* restart DMA if is not used double buffering */
		outl(devpriv->dmabuf_hw[0],
		     devpriv->iobase_a + AMCC_OP_REG_MWAR);
		outl(devpriv->dmabuf_use_size[0],
		     devpriv->iobase_a + AMCC_OP_REG_MWTC);
		if (devpriv->ai_do == 4)
			interrupt_pci9118_ai_mode4_switch(dev);
	}

	comedi_event(dev, s);
}

/*
==============================================================================
*/

/*
 * Top-level interrupt handler: identifies the IRQ source (card and/or AMCC
 * S5933 bridge), handles external start/stop triggering of the measure and
 * dispatches to the active AI transfer routine (int_ai_func).
 */
static irqreturn_t interrupt_pci9118(int irq, void *d)
{
	struct comedi_device *dev = d;
	unsigned int int_daq = 0, int_amcc, int_adstat;

	if (!dev->attached)
		return IRQ_NONE;	/* not fully initialized */

	int_daq = inl(dev->iobase + PCI9118_INTSRC) & 0xf;
					/* get IRQ reasons from card */
	int_amcc = inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR);
					/* get INT register from AMCC chip */

	if ((!int_daq) && (!(int_amcc & ANY_S593X_INT)))
		return IRQ_NONE;	/* interrupt from other source */

	outl(int_amcc | 0x00ff0000, devpriv->iobase_a + AMCC_OP_REG_INTCSR);
					/* shutdown IRQ reasons in AMCC */

	int_adstat = inw(dev->iobase + PCI9118_ADSTAT) & 0x1ff;
					/* get STATUS register */

	if (devpriv->ai_do) {
		if (devpriv->ai12_startstop)
			if ((int_adstat & AdStatus_DTH) &&
			    (int_daq & Int_DTrg)) {
				/* start stop of measure */
				if (devpriv->ai12_startstop & START_AI_EXT) {
					devpriv->ai12_startstop &=
					    ~START_AI_EXT;
					if (!(devpriv->ai12_startstop &
					      STOP_AI_EXT))
						pci9118_exttrg_del
						    (dev, EXTTRG_AI);
						/* deactivate EXT trigger */
					start_pacer(dev, devpriv->ai_do,
						    devpriv->ai_divisor1,
						    devpriv->ai_divisor2);
						/* start pacer */
					outl(devpriv->AdControlReg,
					     dev->iobase + PCI9118_ADCNTRL);
				} else {
					if (devpriv->ai12_startstop &
					    STOP_AI_EXT) {
						devpriv->ai12_startstop &=
						    ~STOP_AI_EXT;
						pci9118_exttrg_del
						    (dev, EXTTRG_AI);
						/* deactivate EXT trigger */
						devpriv->ai_neverending = 0;
						/*
						 * well, on next interrupt from
						 * DMA/EOC measure will stop
						 */
					}
				}
			}

		(devpriv->int_ai_func) (dev, dev->subdevices + 0, int_adstat,
					int_amcc, int_daq);
	}

	return IRQ_HANDLED;
}

/*
==============================================================================
*/

/*
 * TRIG_INT start handler: on the matching trigger number, arm interrupts,
 * start the pacer (modes other than 3) and fire the soft gate.
 */
static int pci9118_ai_inttrig(struct comedi_device *dev,
			      struct comedi_subdevice *s, unsigned int trignum)
{
	if (trignum != devpriv->ai_inttrig_start)
		return -EINVAL;

	devpriv->ai12_startstop &= ~START_AI_INT;
	s->async->inttrig = NULL;

	outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
	outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
	if (devpriv->ai_do != 3) {
		start_pacer(dev, devpriv->ai_do, devpriv->ai_divisor1,
			    devpriv->ai_divisor2);
		devpriv->AdControlReg |= AdControl_SoftG;
	}
	outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);

	return 1;
}

/*
==============================================================================
*/

/* Standard comedi 5-step command validation for the AI subdevice. */
static int pci9118_ai_cmdtest(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	unsigned int divisor1 = 0, divisor2 = 0;

	/* step 1: make sure trigger sources are trivially valid */

	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_EXT | TRIG_INT;
	if
 (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	if (devpriv->master)
		cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW;
	else
		cmd->scan_begin_src &= TRIG_FOLLOW;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	if (devpriv->master)
		cmd->convert_src &= TRIG_TIMER | TRIG_EXT | TRIG_NOW;
	else
		cmd->convert_src &= TRIG_TIMER | TRIG_EXT;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE | TRIG_EXT;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/*
	 * step 2: make sure trigger sources are
	 * unique and mutually compatible
	 */

	if (cmd->start_src != TRIG_NOW &&
	    cmd->start_src != TRIG_INT && cmd->start_src != TRIG_EXT) {
		cmd->start_src = TRIG_NOW;
		err++;
	}
	if (cmd->scan_begin_src != TRIG_TIMER &&
	    cmd->scan_begin_src != TRIG_EXT &&
	    cmd->scan_begin_src != TRIG_INT &&
	    cmd->scan_begin_src != TRIG_FOLLOW) {
		cmd->scan_begin_src = TRIG_FOLLOW;
		err++;
	}
	if (cmd->convert_src != TRIG_TIMER &&
	    cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW) {
		cmd->convert_src = TRIG_TIMER;
		err++;
	}
	if (cmd->scan_end_src != TRIG_COUNT) {
		cmd->scan_end_src = TRIG_COUNT;
		err++;
	}
	if (cmd->stop_src != TRIG_NONE &&
	    cmd->stop_src != TRIG_COUNT &&
	    cmd->stop_src != TRIG_INT && cmd->stop_src != TRIG_EXT) {
		cmd->stop_src = TRIG_COUNT;
		err++;
	}
	if (cmd->start_src == TRIG_EXT && cmd->scan_begin_src == TRIG_EXT) {
		cmd->start_src = TRIG_NOW;
		err++;
	}
	if (cmd->start_src == TRIG_INT && cmd->scan_begin_src == TRIG_INT) {
		cmd->start_src = TRIG_NOW;
		err++;
	}
	if ((cmd->scan_begin_src & (TRIG_TIMER | TRIG_EXT)) &&
	    (!(cmd->convert_src & (TRIG_TIMER | TRIG_NOW)))) {
		cmd->convert_src = TRIG_TIMER;
		err++;
	}
	if ((cmd->scan_begin_src == TRIG_FOLLOW) &&
	    (!(cmd->convert_src & (TRIG_TIMER | TRIG_EXT)))) {
		cmd->convert_src = TRIG_TIMER;
		err++;
	}
	if (cmd->stop_src == TRIG_EXT && cmd->scan_begin_src == TRIG_EXT) {
		cmd->stop_src = TRIG_COUNT;
		err++;
	}

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */

	if (cmd->start_src & (TRIG_NOW | TRIG_EXT))
		if (cmd->start_arg != 0) {
			cmd->start_arg = 0;
			err++;
		}

	if (cmd->scan_begin_src & (TRIG_FOLLOW | TRIG_EXT))
		if (cmd->scan_begin_arg != 0) {
			cmd->scan_begin_arg = 0;
			err++;
		}

	if ((cmd->scan_begin_src == TRIG_TIMER) &&
	    (cmd->convert_src == TRIG_TIMER) && (cmd->scan_end_arg == 1)) {
		cmd->scan_begin_src = TRIG_FOLLOW;
		cmd->convert_arg = cmd->scan_begin_arg;
		cmd->scan_begin_arg = 0;
	}

	if (cmd->scan_begin_src == TRIG_TIMER)
		if (cmd->scan_begin_arg < this_board->ai_ns_min) {
			cmd->scan_begin_arg = this_board->ai_ns_min;
			err++;
		}

	if (cmd->scan_begin_src == TRIG_EXT)
		if (cmd->scan_begin_arg) {
			cmd->scan_begin_arg = 0;
			err++;
			/*
			 * NOTE(review): this scan_end_arg clamp is nested
			 * inside the scan_begin_arg!=0 branch, so it only
			 * runs when scan_begin_arg was non-zero; possibly
			 * a misplaced brace - confirm against hardware docs.
			 */
			if (cmd->scan_end_arg > 65535) {
				cmd->scan_end_arg = 65535;
				err++;
			}
		}

	if (cmd->convert_src & (TRIG_TIMER | TRIG_NOW))
		if (cmd->convert_arg < this_board->ai_ns_min) {
			cmd->convert_arg = this_board->ai_ns_min;
			err++;
		}

	if (cmd->convert_src == TRIG_EXT)
		if (cmd->convert_arg) {
			cmd->convert_arg = 0;
			err++;
		}

	if (cmd->stop_src == TRIG_COUNT) {
		if (!cmd->stop_arg) {
			cmd->stop_arg = 1;
			err++;
		}
	} else {	/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (!cmd->chanlist_len) {
		cmd->chanlist_len = 1;
		err++;
	}

	if (cmd->chanlist_len > this_board->n_aichanlist) {
		cmd->chanlist_len = this_board->n_aichanlist;
		err++;
	}

	if (cmd->scan_end_arg < cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}

	if ((cmd->scan_end_arg % cmd->chanlist_len)) {
		cmd->scan_end_arg =
		    cmd->chanlist_len * (cmd->scan_end_arg /
					 cmd->chanlist_len);
		err++;
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	if (cmd->scan_begin_src == TRIG_TIMER) {
		tmp = cmd->scan_begin_arg;
		i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, &divisor1,
					  &divisor2, &cmd->scan_begin_arg,
					  cmd->flags & TRIG_ROUND_MASK);
		if (cmd->scan_begin_arg < this_board->ai_ns_min)
			cmd->scan_begin_arg = this_board->ai_ns_min;
		if (tmp != cmd->scan_begin_arg)
			err++;
	}

	if (cmd->convert_src & (TRIG_TIMER | TRIG_NOW)) {
		tmp = cmd->convert_arg;
		i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, &divisor1,
					  &divisor2, &cmd->convert_arg,
					  cmd->flags & TRIG_ROUND_MASK);
		if (cmd->convert_arg < this_board->ai_ns_min)
			cmd->convert_arg = this_board->ai_ns_min;
		if (tmp != cmd->convert_arg)
			err++;
		if (cmd->scan_begin_src == TRIG_TIMER
		    && cmd->convert_src == TRIG_NOW) {
			if (cmd->convert_arg == 0) {
				if (cmd->scan_begin_arg <
				    this_board->ai_ns_min *
				    (cmd->scan_end_arg + 2)) {
					cmd->scan_begin_arg =
					    this_board->ai_ns_min *
					    (cmd->scan_end_arg + 2);
					err++;
				}
			} else {
				if (cmd->scan_begin_arg <
				    cmd->convert_arg * cmd->chanlist_len) {
					cmd->scan_begin_arg =
					    cmd->convert_arg *
					    cmd->chanlist_len;
					err++;
				}
			}
		}
	}

	if (err)
		return 4;

	if (cmd->chanlist)
		if (!check_channel_list(dev, s, cmd->chanlist_len,
					cmd->chanlist, 0, 0))
			return 5;	/* incorrect channels list */

	return 0;
}

/*
==============================================================================
*/

/*
 * Size the two DMA buffers for the current command (clamp to the async
 * buffer, align to scan length / 32 bits, honour TRIG_WAKE_EOS) and program
 * the AMCC S5933 write-transfer registers.
 */
static int Compute_and_setup_dma(struct comedi_device *dev)
{
	unsigned int dmalen0, dmalen1, i;

	DPRINTK("adl_pci9118 EDBG: BGN: Compute_and_setup_dma()\n");
	dmalen0 = devpriv->dmabuf_size[0];
	dmalen1 = devpriv->dmabuf_size[1];
	DPRINTK("1 dmalen0=%d dmalen1=%d ai_data_len=%d\n", dmalen0, dmalen1,
		devpriv->ai_data_len);
	/* isn't output buff smaller that our DMA buff?
 */
	if (dmalen0 > (devpriv->ai_data_len)) {
		dmalen0 = devpriv->ai_data_len & ~3L;	/* align to 32bit down */
	}
	if (dmalen1 > (devpriv->ai_data_len)) {
		dmalen1 = devpriv->ai_data_len & ~3L;	/* align to 32bit down */
	}
	DPRINTK("2 dmalen0=%d dmalen1=%d\n", dmalen0, dmalen1);

	/* we want wake up every scan? */
	if (devpriv->ai_flags & TRIG_WAKE_EOS) {
		if (dmalen0 < (devpriv->ai_n_realscanlen << 1)) {
			/* uff, too short DMA buffer, disable EOS support! */
			devpriv->ai_flags &= (~TRIG_WAKE_EOS);
			printk
			    ("comedi%d: WAR: DMA0 buf too short, can't "
			     "support TRIG_WAKE_EOS (%d<%d)\n",
			     dev->minor, dmalen0,
			     devpriv->ai_n_realscanlen << 1);
		} else {
			/* short first DMA buffer to one scan */
			dmalen0 = devpriv->ai_n_realscanlen << 1;
			DPRINTK
			    ("21 dmalen0=%d ai_n_realscanlen=%d "
			     "useeoshandle=%d\n", dmalen0,
			     devpriv->ai_n_realscanlen,
			     devpriv->useeoshandle);
			if (devpriv->useeoshandle)
				dmalen0 += 2;
			if (dmalen0 < 4) {
				printk
				    ("comedi%d: ERR: DMA0 buf len bug? "
				     "(%d<4)\n", dev->minor, dmalen0);
				dmalen0 = 4;
			}
		}
	}
	if (devpriv->ai_flags & TRIG_WAKE_EOS) {
		if (dmalen1 < (devpriv->ai_n_realscanlen << 1)) {
			/* uff, too short DMA buffer, disable EOS support! */
			devpriv->ai_flags &= (~TRIG_WAKE_EOS);
			printk
			    ("comedi%d: WAR: DMA1 buf too short, "
			     "can't support TRIG_WAKE_EOS (%d<%d)\n",
			     dev->minor, dmalen1,
			     devpriv->ai_n_realscanlen << 1);
		} else {
			/* short second DMA buffer to one scan */
			dmalen1 = devpriv->ai_n_realscanlen << 1;
			DPRINTK
			    ("22 dmalen1=%d ai_n_realscanlen=%d "
			     "useeoshandle=%d\n", dmalen1,
			     devpriv->ai_n_realscanlen,
			     devpriv->useeoshandle);
			/* NOTE(review): buf0 gets +=2 above but buf1 gets -=2
			 * here when useeoshandle is set - asymmetry appears
			 * deliberate (odd/even EOS fitting), confirm. */
			if (devpriv->useeoshandle)
				dmalen1 -= 2;
			if (dmalen1 < 4) {
				printk
				    ("comedi%d: ERR: DMA1 buf len bug? "
				     "(%d<4)\n", dev->minor, dmalen1);
				dmalen1 = 4;
			}
		}
	}

	DPRINTK("3 dmalen0=%d dmalen1=%d\n", dmalen0, dmalen1);
	/* transfer without TRIG_WAKE_EOS */
	if (!(devpriv->ai_flags & TRIG_WAKE_EOS)) {
		/* if it's possible then align DMA buffers to length of scan */
		i = dmalen0;
		dmalen0 =
		    (dmalen0 / (devpriv->ai_n_realscanlen << 1)) *
		    (devpriv->ai_n_realscanlen << 1);
		dmalen0 &= ~3L;
		if (!dmalen0)
			dmalen0 = i;	/* uff. very long scan? */
		i = dmalen1;
		dmalen1 =
		    (dmalen1 / (devpriv->ai_n_realscanlen << 1)) *
		    (devpriv->ai_n_realscanlen << 1);
		dmalen1 &= ~3L;
		if (!dmalen1)
			dmalen1 = i;	/* uff. very long scan? */
		/*
		 * if measure isn't neverending then test, if it fits whole
		 * into one or two DMA buffers
		 */
		if (!devpriv->ai_neverending) {
			/* fits whole measure into one DMA buffer? */
			if (dmalen0 >
			    ((devpriv->ai_n_realscanlen << 1) *
			     devpriv->ai_scans)) {
				DPRINTK
				    ("3.0 ai_n_realscanlen=%d ai_scans=%d\n",
				     devpriv->ai_n_realscanlen,
				     devpriv->ai_scans);
				dmalen0 =
				    (devpriv->ai_n_realscanlen << 1) *
				    devpriv->ai_scans;
				DPRINTK("3.1 dmalen0=%d dmalen1=%d\n",
					dmalen0, dmalen1);
				dmalen0 &= ~3L;
			} else {
				/* fits whole measure into two DMA buffer? */
				if (dmalen1 >
				    ((devpriv->ai_n_realscanlen << 1) *
				     devpriv->ai_scans - dmalen0))
					dmalen1 =
					    (devpriv->ai_n_realscanlen << 1) *
					    devpriv->ai_scans - dmalen0;
				DPRINTK("3.2 dmalen0=%d dmalen1=%d\n",
					dmalen0, dmalen1);
				dmalen1 &= ~3L;
			}
		}
	}

	DPRINTK("4 dmalen0=%d dmalen1=%d\n", dmalen0, dmalen1);

	/* these DMA buffer size will be used */
	devpriv->dma_actbuf = 0;
	devpriv->dmabuf_use_size[0] = dmalen0;
	devpriv->dmabuf_use_size[1] = dmalen1;

	DPRINTK("5 dmalen0=%d dmalen1=%d\n", dmalen0, dmalen1);

#if 0
	if (devpriv->ai_n_scanlen < this_board->half_fifo_size) {
		devpriv->dmabuf_panic_size[0] =
		    (this_board->half_fifo_size / devpriv->ai_n_scanlen +
		     1) * devpriv->ai_n_scanlen * sizeof(short);
		devpriv->dmabuf_panic_size[1] =
		    (this_board->half_fifo_size / devpriv->ai_n_scanlen +
		     1) * devpriv->ai_n_scanlen * sizeof(short);
	} else {
		devpriv->dmabuf_panic_size[0] =
		    (devpriv->ai_n_scanlen << 1) % devpriv->dmabuf_size[0];
		devpriv->dmabuf_panic_size[1] =
		    (devpriv->ai_n_scanlen << 1) % devpriv->dmabuf_size[1];
	}
#endif

	outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) & (~EN_A2P_TRANSFERS),
	     devpriv->iobase_a + AMCC_OP_REG_MCSR);	/* stop DMA */
	outl(devpriv->dmabuf_hw[0], devpriv->iobase_a + AMCC_OP_REG_MWAR);
	outl(devpriv->dmabuf_use_size[0],
	     devpriv->iobase_a + AMCC_OP_REG_MWTC);
	/* init DMA transfer */
	outl(0x00000000 | AINT_WRITE_COMPL,
	     devpriv->iobase_a + AMCC_OP_REG_INTCSR);

	outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) | RESET_A2P_FLAGS |
	     A2P_HI_PRIORITY | EN_A2P_TRANSFERS,
	     devpriv->iobase_a + AMCC_OP_REG_MCSR);
	outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | EN_A2P_TRANSFERS,
	     devpriv->iobase_a + AMCC_OP_REG_INTCSR);
						/* allow bus mastering */

	DPRINTK("adl_pci9118 EDBG: END: Compute_and_setup_dma()\n");
	return 0;
}

/*
==============================================================================
*/

/*
 * Configure and optionally start an interrupt-driven (non-DMA) AI command
 * for the mode selected in devpriv->ai_do.  Modes 2 and 4 require DMA and
 * are rejected here.
 */
static int pci9118_ai_docmd_sampl(struct comedi_device *dev,
				  struct comedi_subdevice *s)
{
	DPRINTK("adl_pci9118 EDBG: BGN: pci9118_ai_docmd_sampl(%d,) [%d]\n",
		dev->minor, devpriv->ai_do);
	switch (devpriv->ai_do) {
	case 1:
		devpriv->AdControlReg |= AdControl_TmrTr;
		break;
	case 2:
		comedi_error(dev, "pci9118_ai_docmd_sampl() mode 2 bug!\n");
		return -EIO;
	case 3:
		devpriv->AdControlReg |= AdControl_ExtM;
		break;
	case 4:
		comedi_error(dev, "pci9118_ai_docmd_sampl() mode 4 bug!\n");
		return -EIO;
	default:
		comedi_error(dev,
			     "pci9118_ai_docmd_sampl() mode number bug!\n");
		return -EIO;
	}

	devpriv->int_ai_func = interrupt_pci9118_ai_onesample;
						/* transfer function */

	if (devpriv->ai12_startstop)
		pci9118_exttrg_add(dev, EXTTRG_AI);
						/* activate EXT trigger */

	if ((devpriv->ai_do == 1) || (devpriv->ai_do == 2))
		devpriv->IntControlReg |= Int_Timer;

	devpriv->AdControlReg |= AdControl_Int;

	outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00,
	     devpriv->iobase_a + AMCC_OP_REG_INTCSR);
						/* allow INT in AMCC */

	if (!(devpriv->ai12_startstop & (START_AI_EXT | START_AI_INT))) {
		outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
		outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
		if (devpriv->ai_do != 3) {
			start_pacer(dev, devpriv->ai_do, devpriv->ai_divisor1,
				    devpriv->ai_divisor2);
			devpriv->AdControlReg |= AdControl_SoftG;
		}
		/*
		 * NOTE(review): IntControlReg is written to INTCTRL a second
		 * time here, while the DMA variant writes AdControlReg to
		 * ADCNTRL at this point; AdControlReg (with SoftG) never
		 * reaches the card here - verify intent.
		 */
		outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
	}

	DPRINTK("adl_pci9118 EDBG: END: pci9118_ai_docmd_sampl()\n");
	return 0;
}

/*
==============================================================================
*/

/*
 * Configure and optionally start a DMA-driven AI command for the mode
 * selected in devpriv->ai_do (1=timer, 2=burst, 3=external, 4=about trig).
 */
static int pci9118_ai_docmd_dma(struct comedi_device *dev,
				struct comedi_subdevice *s)
{
	DPRINTK("adl_pci9118 EDBG: BGN: pci9118_ai_docmd_dma(%d,) [%d,%d]\n",
		dev->minor, devpriv->ai_do, devpriv->usedma);
	Compute_and_setup_dma(dev);

	switch (devpriv->ai_do) {
	case 1:
		devpriv->AdControlReg |=
		    ((AdControl_TmrTr | AdControl_Dma) & 0xff);
		break;
	case 2:
		devpriv->AdControlReg |=
		    ((AdControl_TmrTr | AdControl_Dma) & 0xff);
		devpriv->AdFunctionReg =
		    AdFunction_PDTrg | AdFunction_PETrg | AdFunction_BM |
		    AdFunction_BS;
		if (devpriv->usessh && (!devpriv->softsshdelay))
			devpriv->AdFunctionReg |= AdFunction_BSSH;
		outl(devpriv->ai_n_realscanlen, dev->iobase + PCI9118_BURST);
		break;
	case 3:
		devpriv->AdControlReg |=
		    ((AdControl_ExtM | AdControl_Dma) & 0xff);
		devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
		break;
	case 4:
		devpriv->AdControlReg |=
		    ((AdControl_TmrTr | AdControl_Dma) & 0xff);
		devpriv->AdFunctionReg =
		    AdFunction_PDTrg | AdFunction_PETrg | AdFunction_AM;
		outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
		outl(0x30, dev->iobase + PCI9118_CNTCTRL);
		outl((devpriv->dmabuf_hw[0] >> 1) & 0xff,
		     dev->iobase + PCI9118_CNT0);
		outl((devpriv->dmabuf_hw[0] >> 9) & 0xff,
		     dev->iobase + PCI9118_CNT0);
		devpriv->AdFunctionReg |= AdFunction_Start;
		break;
	default:
		comedi_error(dev, "pci9118_ai_docmd_dma() mode number bug!\n");
		return -EIO;
	}

	if (devpriv->ai12_startstop) {
		pci9118_exttrg_add(dev, EXTTRG_AI);
						/* activate EXT trigger */
	}

	devpriv->int_ai_func = interrupt_pci9118_ai_dma;
						/* transfer function */

	outl(0x02000000 | AINT_WRITE_COMPL,
	     devpriv->iobase_a + AMCC_OP_REG_INTCSR);

	if (!(devpriv->ai12_startstop & (START_AI_EXT | START_AI_INT))) {
		outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
		outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
		if (devpriv->ai_do != 3) {
			start_pacer(dev, devpriv->ai_do, devpriv->ai_divisor1,
				    devpriv->ai_divisor2);
			devpriv->AdControlReg |= AdControl_SoftG;
		}
		outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
	}

	DPRINTK("adl_pci9118 EDBG: BGN: pci9118_ai_docmd_dma()\n");
	return 0;
}

/*
==============================================================================
*/

/*
 * Main AI command setup: decode the comedi_cmd into driver state (mode,
 * start/stop conditions, S&H padding, transfer method), validate/program
 * the channel list and hand off to the DMA or interrupt start routine.
 */
static int pci9118_ai_cmd(struct comedi_device *dev,
			  struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int addchans = 0;
	int ret = 0;

	DPRINTK("adl_pci9118 EDBG: BGN: pci9118_ai_cmd(%d,)\n", dev->minor);
	devpriv->ai12_startstop = 0;
	devpriv->ai_flags = cmd->flags;
	devpriv->ai_n_chan = cmd->chanlist_len;
	devpriv->ai_n_scanlen
 = cmd->scan_end_arg;
	devpriv->ai_chanlist = cmd->chanlist;
	devpriv->ai_data = s->async->prealloc_buf;
	devpriv->ai_data_len = s->async->prealloc_bufsz;
	devpriv->ai_timer1 = 0;
	devpriv->ai_timer2 = 0;
	devpriv->ai_add_front = 0;
	devpriv->ai_add_back = 0;
	devpriv->ai_maskerr = 0x10e;

	/* prepare for start/stop conditions */
	if (cmd->start_src == TRIG_EXT)
		devpriv->ai12_startstop |= START_AI_EXT;
	if (cmd->stop_src == TRIG_EXT) {
		devpriv->ai_neverending = 1;
		devpriv->ai12_startstop |= STOP_AI_EXT;
	}
	if (cmd->start_src == TRIG_INT) {
		devpriv->ai12_startstop |= START_AI_INT;
		devpriv->ai_inttrig_start = cmd->start_arg;
		s->async->inttrig = pci9118_ai_inttrig;
	}
#if 0
	if (cmd->stop_src == TRIG_INT) {
		devpriv->ai_neverending = 1;
		devpriv->ai12_startstop |= STOP_AI_INT;
	}
#endif
	if (cmd->stop_src == TRIG_NONE)
		devpriv->ai_neverending = 1;
	if (cmd->stop_src == TRIG_COUNT) {
		devpriv->ai_scans = cmd->stop_arg;
		devpriv->ai_neverending = 0;
	} else {
		devpriv->ai_scans = 0;
	}

	/* use sample&hold signal? */
	if (cmd->convert_src == TRIG_NOW)
		devpriv->usessh = 1;	/* yes */
	else
		devpriv->usessh = 0;	/* no */

	DPRINTK("1 neverending=%d scans=%u usessh=%d ai_startstop=0x%2x\n",
		devpriv->ai_neverending, devpriv->ai_scans, devpriv->usessh,
		devpriv->ai12_startstop);

	/*
	 * use additional sample at end of every scan
	 * to satisty DMA 32 bit transfer?
	 */
	devpriv->ai_add_front = 0;
	devpriv->ai_add_back = 0;
	devpriv->useeoshandle = 0;
	if (devpriv->master) {
		devpriv->usedma = 1;
		if ((cmd->flags & TRIG_WAKE_EOS) &&
		    (devpriv->ai_n_scanlen == 1)) {
			if (cmd->convert_src == TRIG_NOW)
				devpriv->ai_add_back = 1;
			if (cmd->convert_src == TRIG_TIMER) {
				devpriv->usedma = 0;
				/*
				 * use INT transfer if scanlist
				 * have only one channel
				 */
			}
		}
		if ((cmd->flags & TRIG_WAKE_EOS) &&
		    (devpriv->ai_n_scanlen & 1) &&
		    (devpriv->ai_n_scanlen > 1)) {
			if (cmd->scan_begin_src == TRIG_FOLLOW) {
				/*
				 * vpriv->useeoshandle=1; // change DMA transfer
				 * block to fit EOS on every second call
				 */
				devpriv->usedma = 0;
				/*
				 * XXX maybe can be corrected to use 16 bit DMA
				 */
			} else {
				/*
				 * well, we must insert one sample
				 * to end of EOS to meet 32 bit transfer
				 */
				devpriv->ai_add_back = 1;
			}
		}
	} else {	/* interrupt transfer don't need any correction */
		devpriv->usedma = 0;
	}

	/*
	 * we need software S&H signal?
	 * It adds two samples before every scan as minimum
	 */
	if (devpriv->usessh && devpriv->softsshdelay) {
		devpriv->ai_add_front = 2;
		if ((devpriv->usedma == 1) && (devpriv->ai_add_back == 1)) {
			/* move it to front */
			devpriv->ai_add_front++;
			devpriv->ai_add_back = 0;
		}
		if (cmd->convert_arg < this_board->ai_ns_min)
			cmd->convert_arg = this_board->ai_ns_min;
		addchans = devpriv->softsshdelay / cmd->convert_arg;
		if (devpriv->softsshdelay % cmd->convert_arg)
			addchans++;
		if (addchans > (devpriv->ai_add_front - 1)) {
			/* uff, still short */
			devpriv->ai_add_front = addchans + 1;
			if (devpriv->usedma == 1)
				if ((devpriv->ai_add_front +
				     devpriv->ai_n_chan +
				     devpriv->ai_add_back) & 1)
					devpriv->ai_add_front++;
							/* round up to 32 bit */
		}
	}

	/* well, we now know what must be all added */
	devpriv->ai_n_realscanlen =
	    /*
	     * what we must take from card in real
	     * to have ai_n_scanlen on output?
	     */
	    (devpriv->ai_add_front + devpriv->ai_n_chan +
	     devpriv->ai_add_back) * (devpriv->ai_n_scanlen /
				      devpriv->ai_n_chan);

	DPRINTK("2 usedma=%d realscan=%d af=%u n_chan=%d ab=%d n_scanlen=%d\n",
		devpriv->usedma, devpriv->ai_n_realscanlen,
		devpriv->ai_add_front, devpriv->ai_n_chan,
		devpriv->ai_add_back, devpriv->ai_n_scanlen);

	/* check and setup channel list */
	if (!check_channel_list(dev, s, devpriv->ai_n_chan,
				devpriv->ai_chanlist, devpriv->ai_add_front,
				devpriv->ai_add_back))
		return -EINVAL;
	if (!setup_channel_list(dev, s, devpriv->ai_n_chan,
				devpriv->ai_chanlist, 0,
				devpriv->ai_add_front, devpriv->ai_add_back,
				devpriv->usedma, devpriv->useeoshandle))
		return -EINVAL;

	/* compute timers settings */
	/*
	 * simplest way, fr=4Mhz/(tim1*tim2),
	 * channel manipulation without timers effect
	 */
	if (((cmd->scan_begin_src == TRIG_FOLLOW) ||
	     (cmd->scan_begin_src == TRIG_EXT) ||
	     (cmd->scan_begin_src == TRIG_INT)) &&
	    (cmd->convert_src == TRIG_TIMER)) {
		/* both timer is used for one time */
		if (cmd->scan_begin_src == TRIG_EXT)
			devpriv->ai_do = 4;
		else
			devpriv->ai_do = 1;
		pci9118_calc_divisors(devpriv->ai_do, dev, s,
				      &cmd->scan_begin_arg, &cmd->convert_arg,
				      devpriv->ai_flags,
				      devpriv->ai_n_realscanlen,
				      &devpriv->ai_divisor1,
				      &devpriv->ai_divisor2, devpriv->usessh,
				      devpriv->ai_add_front);
		devpriv->ai_timer2 = cmd->convert_arg;
	}

	if ((cmd->scan_begin_src == TRIG_TIMER) &&
	    ((cmd->convert_src == TRIG_TIMER) ||
	     (cmd->convert_src == TRIG_NOW))) {
		/* double timed action */
		if (!devpriv->usedma) {
			comedi_error(dev,
				     "cmd->scan_begin_src=TRIG_TIMER works "
				     "only with bus mastering!");
			return -EIO;
		}
		devpriv->ai_do = 2;
		pci9118_calc_divisors(devpriv->ai_do, dev, s,
				      &cmd->scan_begin_arg, &cmd->convert_arg,
				      devpriv->ai_flags,
				      devpriv->ai_n_realscanlen,
				      &devpriv->ai_divisor1,
				      &devpriv->ai_divisor2, devpriv->usessh,
				      devpriv->ai_add_front);
		devpriv->ai_timer1 = cmd->scan_begin_arg;
		devpriv->ai_timer2 = cmd->convert_arg;
	}

	if ((cmd->scan_begin_src == TRIG_FOLLOW) &&
	    (cmd->convert_src == TRIG_EXT)) {
		devpriv->ai_do = 3;
	}

	start_pacer(dev, -1, 0, 0);	/* stop pacer */

	devpriv->AdControlReg = 0;
	/*
	 * bipolar, S.E., use 8254, stop 8354,
	 * internal trigger, soft trigger, disable DMA
	 */
	outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
	devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
	/*
	 * positive triggers, no S&H, no burst, burst stop,
	 * no post trigger, no about trigger, trigger stop
	 */
	outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
	udelay(1);
	outl(0, dev->iobase + PCI9118_DELFIFO);	/* flush FIFO */
	inl(dev->iobase + PCI9118_ADSTAT);
				/* flush A/D and INT status register */
	inl(dev->iobase + PCI9118_INTSRC);

	devpriv->ai_act_scan = 0;
	devpriv->ai_act_dmapos = 0;
	s->async->cur_chan = 0;
	devpriv->ai_buf_ptr = 0;

	if (devpriv->usedma)
		ret = pci9118_ai_docmd_dma(dev, s);
	else
		ret = pci9118_ai_docmd_sampl(dev, s);

	DPRINTK("adl_pci9118 EDBG: END: pci9118_ai_cmd()\n");
	return ret;
}

/*
==============================================================================
*/

/*
 * Validate a channel/range list: non-empty, fits the subdevice, and all
 * entries agree on single-ended vs differential and bipolar vs unipolar.
 * Returns 1 when valid, 0 otherwise.
 */
static int check_channel_list(struct comedi_device *dev,
			      struct comedi_subdevice *s, int n_chan,
			      unsigned int *chanlist, int frontadd,
			      int backadd)
{
	unsigned int i, differencial = 0, bipolar = 0;

	/* correct channel and range number check itself comedi/range.c */
	if (n_chan < 1) {
		comedi_error(dev, "range/channel list is empty!");
		return 0;
	}
	if ((frontadd + n_chan + backadd) > s->len_chanlist) {
		printk
		    ("comedi%d: range/channel list is too long for "
		     "actual configuration (%d>%d)!", dev->minor, n_chan,
		     s->len_chanlist - frontadd - backadd);
		return 0;
	}

	if (CR_AREF(chanlist[0]) == AREF_DIFF)
		differencial = 1;	/* all input must be diff */
	if (CR_RANGE(chanlist[0]) < PCI9118_BIPOLAR_RANGES)
		bipolar = 1;	/* all input must be bipolar */
	if (n_chan > 1)
		for (i = 1; i < n_chan; i++) {	/* check S.E/diff */
			if ((CR_AREF(chanlist[i]) == AREF_DIFF) !=
			    (differencial)) {
				comedi_error(dev,
					     "Differencial and single ended "
					     "inputs can't be mixtured!");
				return 0;
			}
			if
((CR_RANGE(chanlist[i]) < PCI9118_BIPOLAR_RANGES) != (bipolar)) { comedi_error(dev, "Bipolar and unipolar ranges " "can't be mixtured!"); return 0; } if ((!devpriv->usemux) & (differencial) & (CR_CHAN(chanlist[i]) >= this_board->n_aichand)) { comedi_error(dev, "If AREF_DIFF is used then is " "available only first 8 channels!"); return 0; } } return 1; } /* ============================================================================== */ static int setup_channel_list(struct comedi_device *dev, struct comedi_subdevice *s, int n_chan, unsigned int *chanlist, int rot, int frontadd, int backadd, int usedma, char useeos) { unsigned int i, differencial = 0, bipolar = 0; unsigned int scanquad, gain, ssh = 0x00; DPRINTK ("adl_pci9118 EDBG: BGN: setup_channel_list" "(%d,.,%d,.,%d,%d,%d,%d)\n", dev->minor, n_chan, rot, frontadd, backadd, usedma); if (usedma == 1) { rot = 8; usedma = 0; } if (CR_AREF(chanlist[0]) == AREF_DIFF) differencial = 1; /* all input must be diff */ if (CR_RANGE(chanlist[0]) < PCI9118_BIPOLAR_RANGES) bipolar = 1; /* all input must be bipolar */ /* All is ok, so we can setup channel/range list */ if (!bipolar) { devpriv->AdControlReg |= AdControl_UniP; /* set unibipolar */ } else { devpriv->AdControlReg &= ((~AdControl_UniP) & 0xff); /* enable bipolar */ } if (differencial) { devpriv->AdControlReg |= AdControl_Diff; /* enable diff inputs */ } else { devpriv->AdControlReg &= ((~AdControl_Diff) & 0xff); /* set single ended inputs */ } outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* setup mode */ outl(2, dev->iobase + PCI9118_SCANMOD); /* gods know why this sequence! 
*/ outl(0, dev->iobase + PCI9118_SCANMOD); outl(1, dev->iobase + PCI9118_SCANMOD); #ifdef PCI9118_PARANOIDCHECK devpriv->chanlistlen = n_chan; for (i = 0; i < (PCI9118_CHANLEN + 1); i++) devpriv->chanlist[i] = 0x55aa; #endif if (frontadd) { /* insert channels for S&H */ ssh = devpriv->softsshsample; DPRINTK("FA: %04x: ", ssh); for (i = 0; i < frontadd; i++) { /* store range list to card */ scanquad = CR_CHAN(chanlist[0]); /* get channel number; */ gain = CR_RANGE(chanlist[0]); /* get gain number */ scanquad |= ((gain & 0x03) << 8); outl(scanquad | ssh, dev->iobase + PCI9118_GAIN); DPRINTK("%02x ", scanquad | ssh); ssh = devpriv->softsshhold; } DPRINTK("\n "); } DPRINTK("SL: ", ssh); for (i = 0; i < n_chan; i++) { /* store range list to card */ scanquad = CR_CHAN(chanlist[i]); /* get channel number */ #ifdef PCI9118_PARANOIDCHECK devpriv->chanlist[i ^ usedma] = (scanquad & 0xf) << rot; #endif gain = CR_RANGE(chanlist[i]); /* get gain number */ scanquad |= ((gain & 0x03) << 8); outl(scanquad | ssh, dev->iobase + PCI9118_GAIN); DPRINTK("%02x ", scanquad | ssh); } DPRINTK("\n "); if (backadd) { /* insert channels for fit onto 32bit DMA */ DPRINTK("BA: %04x: ", ssh); for (i = 0; i < backadd; i++) { /* store range list to card */ scanquad = CR_CHAN(chanlist[0]); /* get channel number */ gain = CR_RANGE(chanlist[0]); /* get gain number */ scanquad |= ((gain & 0x03) << 8); outl(scanquad | ssh, dev->iobase + PCI9118_GAIN); DPRINTK("%02x ", scanquad | ssh); } DPRINTK("\n "); } #ifdef PCI9118_PARANOIDCHECK devpriv->chanlist[n_chan ^ usedma] = devpriv->chanlist[0 ^ usedma]; /* for 32bit operations */ if (useeos) { for (i = 1; i < n_chan; i++) { /* store range list to card */ devpriv->chanlist[(n_chan + i) ^ usedma] = (CR_CHAN(chanlist[i]) & 0xf) << rot; } devpriv->chanlist[(2 * n_chan) ^ usedma] = devpriv->chanlist[0 ^ usedma]; /* for 32bit operations */ useeos = 2; } else { useeos = 1; } #ifdef PCI9118_EXTDEBUG DPRINTK("CHL: "); for (i = 0; i <= (useeos * n_chan); i++) 
DPRINTK("%04x ", devpriv->chanlist[i]); DPRINTK("\n "); #endif #endif outl(0, dev->iobase + PCI9118_SCANMOD); /* close scan queue */ /* udelay(100); important delay, or first sample will be crippled */ DPRINTK("adl_pci9118 EDBG: END: setup_channel_list()\n"); return 1; /* we can serve this with scan logic */ } /* ============================================================================== calculate 8254 divisors if they are used for dual timing */ static void pci9118_calc_divisors(char mode, struct comedi_device *dev, struct comedi_subdevice *s, unsigned int *tim1, unsigned int *tim2, unsigned int flags, int chans, unsigned int *div1, unsigned int *div2, char usessh, unsigned int chnsshfront) { DPRINTK ("adl_pci9118 EDBG: BGN: pci9118_calc_divisors" "(%d,%d,.,%u,%u,%u,%d,.,.,,%u,%u)\n", mode, dev->minor, *tim1, *tim2, flags, chans, usessh, chnsshfront); switch (mode) { case 1: case 4: if (*tim2 < this_board->ai_ns_min) *tim2 = this_board->ai_ns_min; i8253_cascade_ns_to_timer(devpriv->i8254_osc_base, div1, div2, tim2, flags & TRIG_ROUND_NEAREST); DPRINTK("OSC base=%u div1=%u div2=%u timer1=%u\n", devpriv->i8254_osc_base, *div1, *div2, *tim1); break; case 2: if (*tim2 < this_board->ai_ns_min) *tim2 = this_board->ai_ns_min; DPRINTK("1 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, *tim2); *div1 = *tim2 / devpriv->i8254_osc_base; /* convert timer (burst) */ DPRINTK("2 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, *tim2); if (*div1 < this_board->ai_pacer_min) *div1 = this_board->ai_pacer_min; DPRINTK("3 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, *tim2); *div2 = *tim1 / devpriv->i8254_osc_base; /* scan timer */ DPRINTK("4 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, *tim2); *div2 = *div2 / *div1; /* major timer is c1*c2 */ DPRINTK("5 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, *tim2); if (*div2 < chans) *div2 = chans; DPRINTK("6 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, 
*tim2); *tim2 = *div1 * devpriv->i8254_osc_base; /* real convert timer */ if (usessh & (chnsshfront == 0)) /* use BSSH signal */ if (*div2 < (chans + 2)) *div2 = chans + 2; DPRINTK("7 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2, *tim1, *tim2); *tim1 = *div1 * *div2 * devpriv->i8254_osc_base; DPRINTK("OSC base=%u div1=%u div2=%u timer1=%u timer2=%u\n", devpriv->i8254_osc_base, *div1, *div2, *tim1, *tim2); break; } DPRINTK("adl_pci9118 EDBG: END: pci9118_calc_divisors(%u,%u)\n", *div1, *div2); } /* ============================================================================== */ static void start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1, unsigned int divisor2) { outl(0x74, dev->iobase + PCI9118_CNTCTRL); outl(0xb4, dev->iobase + PCI9118_CNTCTRL); /* outl(0x30, dev->iobase + PCI9118_CNTCTRL); */ udelay(1); if ((mode == 1) || (mode == 2) || (mode == 4)) { outl(divisor2 & 0xff, dev->iobase + PCI9118_CNT2); outl((divisor2 >> 8) & 0xff, dev->iobase + PCI9118_CNT2); outl(divisor1 & 0xff, dev->iobase + PCI9118_CNT1); outl((divisor1 >> 8) & 0xff, dev->iobase + PCI9118_CNT1); } } /* ============================================================================== */ static int pci9118_exttrg_add(struct comedi_device *dev, unsigned char source) { if (source > 3) return -1; /* incorrect source */ devpriv->exttrg_users |= (1 << source); devpriv->IntControlReg |= Int_DTrg; outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL); outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* allow INT in AMCC */ return 0; } /* ============================================================================== */ static int pci9118_exttrg_del(struct comedi_device *dev, unsigned char source) { if (source > 3) return -1; /* incorrect source */ devpriv->exttrg_users &= ~(1 << source); if (!devpriv->exttrg_users) { /* shutdown ext trg intterrupts */ devpriv->IntControlReg &= ~Int_DTrg; if (!devpriv->IntControlReg) /* 
all IRQ disabled */ outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) & (~0x00001f00), devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* disable int in AMCC */ outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL); } return 0; } /* ============================================================================== */ static int pci9118_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { if (devpriv->usedma) outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) & (~EN_A2P_TRANSFERS), devpriv->iobase_a + AMCC_OP_REG_MCSR); /* stop DMA */ pci9118_exttrg_del(dev, EXTTRG_AI); start_pacer(dev, 0, 0, 0); /* stop 8254 counters */ devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg; outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); /* * positive triggers, no S&H, no burst, * burst stop, no post trigger, * no about trigger, trigger stop */ devpriv->AdControlReg = 0x00; outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* * bipolar, S.E., use 8254, stop 8354, * internal trigger, soft trigger, * disable INT and DMA */ outl(0, dev->iobase + PCI9118_BURST); outl(1, dev->iobase + PCI9118_SCANMOD); outl(2, dev->iobase + PCI9118_SCANMOD); /* reset scan queue */ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */ devpriv->ai_do = 0; devpriv->usedma = 0; devpriv->ai_act_scan = 0; devpriv->ai_act_dmapos = 0; s->async->cur_chan = 0; s->async->inttrig = NULL; devpriv->ai_buf_ptr = 0; devpriv->ai_neverending = 0; devpriv->dma_actbuf = 0; if (!devpriv->IntControlReg) outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* allow INT in AMCC */ return 0; } /* ============================================================================== */ static int pci9118_reset(struct comedi_device *dev) { devpriv->IntControlReg = 0; devpriv->exttrg_users = 0; inl(dev->iobase + PCI9118_INTCTRL); outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL); /* disable interrupts source */ outl(0x30, dev->iobase + 
PCI9118_CNTCTRL); /* outl(0xb4, dev->iobase + PCI9118_CNTCTRL); */ start_pacer(dev, 0, 0, 0); /* stop 8254 counters */ devpriv->AdControlReg = 0; outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* * bipolar, S.E., use 8254, * stop 8354, internal trigger, * soft trigger, * disable INT and DMA */ outl(0, dev->iobase + PCI9118_BURST); outl(1, dev->iobase + PCI9118_SCANMOD); outl(2, dev->iobase + PCI9118_SCANMOD); /* reset scan queue */ devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg; outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); /* * positive triggers, no S&H, * no burst, burst stop, * no post trigger, * no about trigger, * trigger stop */ devpriv->ao_data[0] = 2047; devpriv->ao_data[1] = 2047; outl(devpriv->ao_data[0], dev->iobase + PCI9118_DA1); /* reset A/D outs to 0V */ outl(devpriv->ao_data[1], dev->iobase + PCI9118_DA2); outl(0, dev->iobase + PCI9118_DO); /* reset digi outs to L */ udelay(10); inl(dev->iobase + PCI9118_AD_DATA); outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */ outl(0, dev->iobase + PCI9118_INTSRC); /* remove INT requests */ inl(dev->iobase + PCI9118_ADSTAT); /* flush A/D status register */ inl(dev->iobase + PCI9118_INTSRC); /* flush INT requests */ devpriv->AdControlReg = 0; outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* * bipolar, S.E., use 8254, * stop 8354, internal trigger, * soft trigger, * disable INT and DMA */ devpriv->cnt0_users = 0; devpriv->exttrg_users = 0; return 0; } /* ============================================================================== */ static int pci9118_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret, pages, i; unsigned short master; unsigned int irq; unsigned long iobase_a, iobase_9; struct pci_dev *pcidev; int opt_bus, opt_slot; const char *errstr; unsigned char pci_bus, pci_slot, pci_func; u16 u16w; printk("comedi%d: adl_pci9118: board=%s", dev->minor, this_board->name); opt_bus = it->options[0]; 
opt_slot = it->options[1]; if (it->options[3] & 1) master = 0; /* user don't want use bus master */ else master = 1; ret = alloc_private(dev, sizeof(struct pci9118_private)); if (ret < 0) { printk(" - Allocation failed!\n"); return -ENOMEM; } /* Look for matching PCI device */ errstr = "not found!"; pcidev = NULL; while (NULL != (pcidev = pci_get_device(PCI_VENDOR_ID_AMCC, this_board->device_id, pcidev))) { /* Found matching vendor/device. */ if (opt_bus || opt_slot) { /* Check bus/slot. */ if (opt_bus != pcidev->bus->number || opt_slot != PCI_SLOT(pcidev->devfn)) continue; /* no match */ } /* * Look for device that isn't in use. * Enable PCI device and request regions. */ if (comedi_pci_enable(pcidev, "adl_pci9118")) { errstr = "failed to enable PCI device and request regions!"; continue; } break; } if (!pcidev) { if (opt_bus || opt_slot) { printk(KERN_ERR " - Card at b:s %d:%d %s\n", opt_bus, opt_slot, errstr); } else { printk(KERN_ERR " - Card %s\n", errstr); } return -EIO; } if (master) pci_set_master(pcidev); pci_bus = pcidev->bus->number; pci_slot = PCI_SLOT(pcidev->devfn); pci_func = PCI_FUNC(pcidev->devfn); irq = pcidev->irq; iobase_a = pci_resource_start(pcidev, 0); iobase_9 = pci_resource_start(pcidev, 2); printk(KERN_ERR ", b:s:f=%d:%d:%d, io=0x%4lx, 0x%4lx", pci_bus, pci_slot, pci_func, iobase_9, iobase_a); dev->iobase = iobase_9; dev->board_name = this_board->name; devpriv->pcidev = pcidev; devpriv->iobase_a = iobase_a; pci9118_reset(dev); if (it->options[3] & 2) irq = 0; /* user don't want use IRQ */ if (irq > 0) { if (request_irq(irq, interrupt_pci9118, IRQF_SHARED, "ADLink PCI-9118", dev)) { printk(", unable to allocate IRQ %d, DISABLING IT", irq); irq = 0; /* Can't use IRQ */ } else { printk(", irq=%u", irq); } } else { printk(", IRQ disabled"); } dev->irq = irq; if (master) { /* alloc DMA buffers */ devpriv->dma_doublebuf = 0; for (i = 0; i < 2; i++) { for (pages = 4; pages >= 0; pages--) { devpriv->dmabuf_virt[i] = (short 
*)__get_free_pages(GFP_KERNEL, pages); if (devpriv->dmabuf_virt[i]) break; } if (devpriv->dmabuf_virt[i]) { devpriv->dmabuf_pages[i] = pages; devpriv->dmabuf_size[i] = PAGE_SIZE * pages; devpriv->dmabuf_samples[i] = devpriv->dmabuf_size[i] >> 1; devpriv->dmabuf_hw[i] = virt_to_bus((void *) devpriv->dmabuf_virt[i]); } } if (!devpriv->dmabuf_virt[0]) { printk(", Can't allocate DMA buffer, DMA disabled!"); master = 0; } if (devpriv->dmabuf_virt[1]) devpriv->dma_doublebuf = 1; } devpriv->master = master; if (devpriv->master) printk(", bus master"); else printk(", no bus master"); devpriv->usemux = 0; if (it->options[2] > 0) { devpriv->usemux = it->options[2]; if (devpriv->usemux > 256) devpriv->usemux = 256; /* max 256 channels! */ if (it->options[4] > 0) if (devpriv->usemux > 128) { devpriv->usemux = 128; /* max 128 channels with softare S&H! */ } printk(", ext. mux %d channels", devpriv->usemux); } devpriv->softsshdelay = it->options[4]; if (devpriv->softsshdelay < 0) { /* select sample&hold signal polarity */ devpriv->softsshdelay = -devpriv->softsshdelay; devpriv->softsshsample = 0x80; devpriv->softsshhold = 0x00; } else { devpriv->softsshsample = 0x00; devpriv->softsshhold = 0x80; } printk(".\n"); pci_read_config_word(devpriv->pcidev, PCI_COMMAND, &u16w); pci_write_config_word(devpriv->pcidev, PCI_COMMAND, u16w | 64); /* Enable parity check for parity error */ ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; s = dev->subdevices + 0; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; if (devpriv->usemux) s->n_chan = devpriv->usemux; else s->n_chan = this_board->n_aichan; s->maxdata = this_board->ai_maxdata; s->len_chanlist = this_board->n_aichanlist; s->range_table = this_board->rangelist_ai; s->cancel = pci9118_ai_cancel; s->insn_read = pci9118_insn_read_ai; if (dev->irq) { s->subdev_flags |= SDF_CMD_READ; s->do_cmdtest = pci9118_ai_cmdtest; s->do_cmd = pci9118_ai_cmd; s->munge = 
pci9118_ai_munge; } s = dev->subdevices + 1; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->n_aochan; s->maxdata = this_board->ao_maxdata; s->len_chanlist = this_board->n_aochan; s->range_table = this_board->rangelist_ao; s->insn_write = pci9118_insn_write_ao; s->insn_read = pci9118_insn_read_ao; s = dev->subdevices + 2; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 4; s->maxdata = 1; s->len_chanlist = 4; s->range_table = &range_digital; s->io_bits = 0; /* all bits input */ s->insn_bits = pci9118_insn_bits_di; s = dev->subdevices + 3; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 4; s->maxdata = 1; s->len_chanlist = 4; s->range_table = &range_digital; s->io_bits = 0xf; /* all bits output */ s->insn_bits = pci9118_insn_bits_do; devpriv->valid = 1; devpriv->i8254_osc_base = 250; /* 250ns=4MHz */ devpriv->ai_maskharderr = 0x10a; /* default measure crash condition */ if (it->options[5]) /* disable some requested */ devpriv->ai_maskharderr &= ~it->options[5]; switch (this_board->ai_maxdata) { case 0xffff: devpriv->ai16bits = 1; break; default: devpriv->ai16bits = 0; break; } return 0; } /* ============================================================================== */ static int pci9118_detach(struct comedi_device *dev) { if (dev->private) { if (devpriv->valid) pci9118_reset(dev); if (dev->irq) free_irq(dev->irq, dev); if (devpriv->pcidev) { if (dev->iobase) comedi_pci_disable(devpriv->pcidev); pci_dev_put(devpriv->pcidev); } if (devpriv->dmabuf_virt[0]) free_pages((unsigned long)devpriv->dmabuf_virt[0], devpriv->dmabuf_pages[0]); if (devpriv->dmabuf_virt[1]) free_pages((unsigned long)devpriv->dmabuf_virt[1], devpriv->dmabuf_pages[1]); } return 0; } /* ============================================================================== */ MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi 
low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
ryrzy/yoda-kernel-i9300-JB-update13
drivers/net/chelsio/tp.c
8017
4146
/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */ #include "common.h" #include "regs.h" #include "tp.h" #ifdef CONFIG_CHELSIO_T1_1G #include "fpga_defs.h" #endif struct petp { adapter_t *adapter; }; /* Pause deadlock avoidance parameters */ #define DROP_MSEC 16 #define DROP_PKTS_CNT 1 static void tp_init(adapter_t * ap, const struct tp_params *p, unsigned int tp_clk) { u32 val; if (!t1_is_asic(ap)) return; val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; if (!p->pm_size) val |= F_OFFLOAD_DISABLE; else val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM; writel(val, ap->regs + A_TP_IN_CONFIG); writel(F_TP_OUT_CSPI_CPL | F_TP_OUT_ESPI_ETHERNET | F_TP_OUT_ESPI_GENERATE_IP_CSUM | F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG); writel(V_IP_TTL(64) | F_PATH_MTU /* IP DF bit */ | V_5TUPLE_LOOKUP(p->use_5tuple_mode) | V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG); /* * Enable pause frame deadlock prevention. 
*/ if (is_T2(ap) && ap->params.nports > 1) { u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | V_DROP_TICKS_CNT(drop_ticks) | V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), ap->regs + A_TP_TX_DROP_CONFIG); } } void t1_tp_destroy(struct petp *tp) { kfree(tp); } struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) { struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); if (!tp) return NULL; tp->adapter = adapter; return tp; } void t1_tp_intr_enable(struct petp *tp) { u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); #ifdef CONFIG_CHELSIO_T1_1G if (!t1_is_asic(tp->adapter)) { /* FPGA */ writel(0xffffffff, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); writel(tp_intr | FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_ENABLE); } else #endif { /* We don't use any TP interrupts */ writel(0, tp->adapter->regs + A_TP_INT_ENABLE); writel(tp_intr | F_PL_INTR_TP, tp->adapter->regs + A_PL_ENABLE); } } void t1_tp_intr_disable(struct petp *tp) { u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); #ifdef CONFIG_CHELSIO_T1_1G if (!t1_is_asic(tp->adapter)) { /* FPGA */ writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_ENABLE); } else #endif { writel(0, tp->adapter->regs + A_TP_INT_ENABLE); writel(tp_intr & ~F_PL_INTR_TP, tp->adapter->regs + A_PL_ENABLE); } } void t1_tp_intr_clear(struct petp *tp) { #ifdef CONFIG_CHELSIO_T1_1G if (!t1_is_asic(tp->adapter)) { writel(0xffffffff, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE); return; } #endif writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE); writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE); } int t1_tp_intr_handler(struct petp *tp) { u32 cause; #ifdef CONFIG_CHELSIO_T1_1G /* FPGA doesn't support TP interrupts. 
*/ if (!t1_is_asic(tp->adapter)) return 1; #endif cause = readl(tp->adapter->regs + A_TP_INT_CAUSE); writel(cause, tp->adapter->regs + A_TP_INT_CAUSE); return 0; } static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable) { u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG); if (enable) val |= csum_bit; else val &= ~csum_bit; writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG); } void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable) { set_csum_offload(tp, F_IP_CSUM, enable); } void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable) { set_csum_offload(tp, F_TCP_CSUM, enable); } /* * Initialize TP state. tp_params contains initial settings for some TP * parameters, particularly the one-time PM and CM settings. */ int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk) { adapter_t *adapter = tp->adapter; tp_init(adapter, p, tp_clk); writel(F_TP_RESET, adapter->regs + A_TP_RESET); return 0; }
gpl-2.0
atilag/android_kernel_samsung_smdk4412
drivers/message/fusion/mptspi.c
8017
43777
/* * linux/drivers/message/fusion/mptspi.c * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. * * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NO WARRANTY THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. 
DISCLAIMER OF LIABILITY NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/delay.h> /* for mdelay */ #include <linux/interrupt.h> /* needed for in_interrupt() proto */ #include <linux/reboot.h> /* notifier code */ #include <linux/workqueue.h> #include <linux/raid_class.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include <scsi/scsi_dbg.h> #include "mptbase.h" #include "mptscsih.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT SPI Host driver" #define my_VERSION MPT_LINUX_VERSION_COMMON #define MYNAM "mptspi" MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION(my_VERSION); /* Command line args */ static int mpt_saf_te = MPTSCSIH_SAF_TE; module_param(mpt_saf_te, int, 0); MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); static void mptspi_write_offset(struct 
scsi_target *, int); static void mptspi_write_width(struct scsi_target *, int); static int mptspi_write_spi_device_pg1(struct scsi_target *, struct _CONFIG_PAGE_SCSI_DEVICE_1 *); static struct scsi_transport_template *mptspi_transport_template = NULL; static u8 mptspiDoneCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptspiTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ /** * mptspi_setTargetNegoParms - Update the target negotiation parameters * @hd: Pointer to a SCSI Host Structure * @target: per target private data * @sdev: SCSI device * * Update the target negotiation parameters based on the the Inquiry * data, adapter capabilities, and NVRAM settings. **/ static void mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, struct scsi_device *sdev) { MPT_ADAPTER *ioc = hd->ioc; SpiCfgData *pspi_data = &ioc->spi_data; int id = (int) target->id; int nvram; u8 width = MPT_NARROW; u8 factor = MPT_ASYNC; u8 offset = 0; u8 nfactor; u8 noQas = 1; target->negoFlags = pspi_data->noQas; if (sdev->scsi_level < SCSI_2) { width = 0; factor = MPT_ULTRA2; offset = pspi_data->maxSyncOffset; target->tflags &= ~MPT_TARGET_FLAGS_Q_YES; } else { if (scsi_device_wide(sdev)) width = 1; if (scsi_device_sync(sdev)) { factor = pspi_data->minSyncFactor; if (!scsi_device_dt(sdev)) factor = MPT_ULTRA2; else { if (!scsi_device_ius(sdev) && !scsi_device_qas(sdev)) factor = MPT_ULTRA160; else { factor = MPT_ULTRA320; if (scsi_device_qas(sdev)) { ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Enabling QAS due to " "byte56=%02x on id=%d!\n", ioc->name, scsi_device_qas(sdev), id)); noQas = 0; } if (sdev->type == TYPE_TAPE && scsi_device_ius(sdev)) target->negoFlags |= MPT_TAPE_NEGO_IDP; } } offset = pspi_data->maxSyncOffset; /* If RAID, never disable QAS * else if non RAID, do not disable * QAS if bit 1 is set * bit 1 QAS support, non-raid only * bit 0 IU support */ if (target->raidVolume == 1) noQas = 0; } else { 
factor = MPT_ASYNC; offset = 0; } } if (!sdev->tagged_supported) target->tflags &= ~MPT_TARGET_FLAGS_Q_YES; /* Update tflags based on NVRAM settings. (SCSI only) */ if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) { nvram = pspi_data->nvram[id]; nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8; if (width) width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1; if (offset > 0) { /* Ensure factor is set to the * maximum of: adapter, nvram, inquiry */ if (nfactor) { if (nfactor < pspi_data->minSyncFactor ) nfactor = pspi_data->minSyncFactor; factor = max(factor, nfactor); if (factor == MPT_ASYNC) offset = 0; } else { offset = 0; factor = MPT_ASYNC; } } else { factor = MPT_ASYNC; } } /* Make sure data is consistent */ if ((!width) && (factor < MPT_ULTRA2)) factor = MPT_ULTRA2; /* Save the data to the target structure. */ target->minSyncFactor = factor; target->maxOffset = offset; target->maxWidth = width; spi_min_period(scsi_target(sdev)) = factor; spi_max_offset(scsi_target(sdev)) = offset; spi_max_width(scsi_target(sdev)) = width; target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO; /* Disable unused features. */ if (!width) target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE; if (!offset) target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC; if ( factor > MPT_ULTRA320 ) noQas = 0; if (noQas && (pspi_data->noQas == 0)) { pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS; target->negoFlags |= MPT_TARGET_NO_NEGO_QAS; /* Disable QAS in a mixed configuration case */ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Disabling QAS due to noQas=%02x on id=%d!\n", ioc->name, noQas, id)); } } /** * mptspi_writeIOCPage4 - write IOC Page 4 * @hd: Pointer to a SCSI Host Structure * @channel: channel number * @id: write IOC Page4 for this ID & Bus * * Return: -EAGAIN if unable to obtain a Message Frame * or 0 if success. * * Remark: We do not wait for a return, write pages sequentially. 
**/ static int mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id) { MPT_ADAPTER *ioc = hd->ioc; Config_t *pReq; IOCPage4_t *IOCPage4Ptr; MPT_FRAME_HDR *mf; dma_addr_t dataDma; u16 req_idx; u32 frameOffset; u32 flagsLength; int ii; /* Get a MF for this command. */ if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",ioc->name)); return -EAGAIN; } /* Set the request and the data pointers. * Place data at end of MF. */ pReq = (Config_t *)mf; req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); frameOffset = ioc->req_sz - sizeof(IOCPage4_t); /* Complete the request frame (same for all requests). */ pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; pReq->Reserved = 0; pReq->ChainOffset = 0; pReq->Function = MPI_FUNCTION_CONFIG; pReq->ExtPageLength = 0; pReq->ExtPageType = 0; pReq->MsgFlags = 0; for (ii=0; ii < 8; ii++) { pReq->Reserved2[ii] = 0; } IOCPage4Ptr = ioc->spi_data.pIocPg4; dataDma = ioc->spi_data.IocPg4_dma; ii = IOCPage4Ptr->ActiveSEP++; IOCPage4Ptr->SEP[ii].SEPTargetID = id; IOCPage4Ptr->SEP[ii].SEPBus = channel; pReq->Header = IOCPage4Ptr->Header; pReq->PageAddress = cpu_to_le32(id | (channel << 8 )); /* Add a SGE to the config request. */ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | (IOCPage4Ptr->Header.PageLength + ii) * 4; ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma); ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n", ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, id, channel)); mpt_put_msg_frame(ioc->DoneCtx, ioc, mf); return 0; } /** * mptspi_initTarget - Target, LUN alloc/free functionality. * @hd: Pointer to MPT_SCSI_HOST structure * @vtarget: per target private data * @sdev: SCSI device * * NOTE: It's only SAFE to call this routine if data points to * sane & valid STANDARD INQUIRY data! * * Allocate and initialize memory for this target. * Save inquiry data. 
* **/ static void mptspi_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, struct scsi_device *sdev) { /* Is LUN supported? If so, upper 2 bits will be 0 * in first byte of inquiry data. */ if (sdev->inq_periph_qual != 0) return; if (vtarget == NULL) return; vtarget->type = sdev->type; if ((sdev->type == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) { /* Treat all Processors as SAF-TE if * command line option is set */ vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED; mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id); }else if ((sdev->type == TYPE_PROCESSOR) && !(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) { if (sdev->inquiry_len > 49 ) { if (sdev->inquiry[44] == 'S' && sdev->inquiry[45] == 'A' && sdev->inquiry[46] == 'F' && sdev->inquiry[47] == '-' && sdev->inquiry[48] == 'T' && sdev->inquiry[49] == 'E' ) { vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED; mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id); } } } mptspi_setTargetNegoParms(hd, vtarget, sdev); } /** * mptspi_is_raid - Determines whether target is belonging to volume * @hd: Pointer to a SCSI HOST structure * @id: target device id * * Return: * non-zero = true * zero = false * */ static int mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id) { int i, rc = 0; MPT_ADAPTER *ioc = hd->ioc; if (!ioc->raid_data.pIocPg2) goto out; if (!ioc->raid_data.pIocPg2->NumActiveVolumes) goto out; for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) { rc = 1; goto out; } } out: return rc; } static int mptspi_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct _MPT_SCSI_HOST *hd = shost_priv(shost); VirtTarget *vtarget; MPT_ADAPTER *ioc; if (hd == NULL) return -ENODEV; ioc = hd->ioc; vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); if (!vtarget) return -ENOMEM; vtarget->ioc_id = ioc->id; vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; vtarget->id = (u8)starget->id; vtarget->channel = 
(u8)starget->channel; vtarget->starget = starget; starget->hostdata = vtarget; if (starget->channel == 1) { if (mptscsih_is_phys_disk(ioc, 0, starget->id) == 0) return 0; vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT; /* The real channel for this device is zero */ vtarget->channel = 0; /* The actual physdisknum (for RAID passthrough) */ vtarget->id = mptscsih_raid_id_to_num(ioc, 0, starget->id); } if (starget->channel == 0 && mptspi_is_raid(hd, starget->id)) { vtarget->raidVolume = 1; ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume @ channel=%d id=%d\n", ioc->name, starget->channel, starget->id)); } if (ioc->spi_data.nvram && ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) { u32 nvram = ioc->spi_data.nvram[starget->id]; spi_min_period(starget) = (nvram & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; spi_max_width(starget) = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1; } else { spi_min_period(starget) = ioc->spi_data.minSyncFactor; spi_max_width(starget) = ioc->spi_data.maxBusWidth; } spi_max_offset(starget) = ioc->spi_data.maxSyncOffset; spi_offset(starget) = 0; spi_period(starget) = 0xFF; mptspi_write_width(starget, 0); return 0; } static void mptspi_target_destroy(struct scsi_target *starget) { if (starget->hostdata) kfree(starget->hostdata); starget->hostdata = NULL; } /** * mptspi_print_write_nego - negotiation parameters debug info that is being sent * @hd: Pointer to a SCSI HOST structure * @starget: SCSI target * @ii: negotiation parameters * */ static void mptspi_print_write_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii) { ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Requested = 0x%08x" " ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n", hd->ioc->name, starget->id, ii, ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "", ((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF), ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "", ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "", ii & MPI_SCSIDEVPAGE0_NP_QAS ? 
"QAS ": "", ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "", ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "", ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "", ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "", ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": "")); } /** * mptspi_print_read_nego - negotiation parameters debug info that is being read * @hd: Pointer to a SCSI HOST structure * @starget: SCSI target * @ii: negotiation parameters * */ static void mptspi_print_read_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii) { ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Read = 0x%08x" " ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n", hd->ioc->name, starget->id, ii, ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "", ((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF), ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "", ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "", ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "", ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "", ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "", ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "", ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "", ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? 
"PCOMP ": "")); } static int mptspi_read_spi_device_pg0(struct scsi_target *starget, struct _CONFIG_PAGE_SCSI_DEVICE_0 *pass_pg0) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct _MPT_SCSI_HOST *hd = shost_priv(shost); struct _MPT_ADAPTER *ioc = hd->ioc; struct _CONFIG_PAGE_SCSI_DEVICE_0 *spi_dev_pg0; dma_addr_t spi_dev_pg0_dma; int size; struct _x_config_parms cfg; struct _CONFIG_PAGE_HEADER hdr; int err = -EBUSY; /* No SPI parameters for RAID devices */ if (starget->channel == 0 && mptspi_is_raid(hd, starget->id)) return -1; size = ioc->spi_data.sdp0length * 4; /* if (ioc->spi_data.sdp0length & 1) size += size + 4; size += 2048; */ spi_dev_pg0 = dma_alloc_coherent(&ioc->pcidev->dev, size, &spi_dev_pg0_dma, GFP_KERNEL); if (spi_dev_pg0 == NULL) { starget_printk(KERN_ERR, starget, MYIOC_s_FMT "dma_alloc_coherent for parameters failed\n", ioc->name); return -EINVAL; } memset(&hdr, 0, sizeof(hdr)); hdr.PageVersion = ioc->spi_data.sdp0version; hdr.PageLength = ioc->spi_data.sdp0length; hdr.PageNumber = 0; hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; memset(&cfg, 0, sizeof(cfg)); cfg.cfghdr.hdr = &hdr; cfg.physAddr = spi_dev_pg0_dma; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; cfg.dir = 0; cfg.pageAddr = starget->id; cfg.timeout = 60; if (mpt_config(ioc, &cfg)) { starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name); goto out_free; } err = 0; memcpy(pass_pg0, spi_dev_pg0, size); mptspi_print_read_nego(hd, starget, le32_to_cpu(spi_dev_pg0->NegotiatedParameters)); out_free: dma_free_coherent(&ioc->pcidev->dev, size, spi_dev_pg0, spi_dev_pg0_dma); return err; } static u32 mptspi_getRP(struct scsi_target *starget) { u32 nego = 0; nego |= spi_iu(starget) ? MPI_SCSIDEVPAGE1_RP_IU : 0; nego |= spi_dt(starget) ? MPI_SCSIDEVPAGE1_RP_DT : 0; nego |= spi_qas(starget) ? MPI_SCSIDEVPAGE1_RP_QAS : 0; nego |= spi_hold_mcs(starget) ? MPI_SCSIDEVPAGE1_RP_HOLD_MCS : 0; nego |= spi_wr_flow(starget) ? 
MPI_SCSIDEVPAGE1_RP_WR_FLOW : 0; nego |= spi_rd_strm(starget) ? MPI_SCSIDEVPAGE1_RP_RD_STRM : 0; nego |= spi_rti(starget) ? MPI_SCSIDEVPAGE1_RP_RTI : 0; nego |= spi_pcomp_en(starget) ? MPI_SCSIDEVPAGE1_RP_PCOMP_EN : 0; nego |= (spi_period(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD) & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; nego |= (spi_offset(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET) & MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; nego |= spi_width(starget) ? MPI_SCSIDEVPAGE1_RP_WIDE : 0; return nego; } static void mptspi_read_parameters(struct scsi_target *starget) { int nego; struct _CONFIG_PAGE_SCSI_DEVICE_0 spi_dev_pg0; mptspi_read_spi_device_pg0(starget, &spi_dev_pg0); nego = le32_to_cpu(spi_dev_pg0.NegotiatedParameters); spi_iu(starget) = (nego & MPI_SCSIDEVPAGE0_NP_IU) ? 1 : 0; spi_dt(starget) = (nego & MPI_SCSIDEVPAGE0_NP_DT) ? 1 : 0; spi_qas(starget) = (nego & MPI_SCSIDEVPAGE0_NP_QAS) ? 1 : 0; spi_wr_flow(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WR_FLOW) ? 1 : 0; spi_rd_strm(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RD_STRM) ? 1 : 0; spi_rti(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RTI) ? 1 : 0; spi_pcomp_en(starget) = (nego & MPI_SCSIDEVPAGE0_NP_PCOMP_EN) ? 1 : 0; spi_hold_mcs(starget) = (nego & MPI_SCSIDEVPAGE0_NP_HOLD_MCS) ? 1 : 0; spi_period(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; spi_offset(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 
1 : 0; } int mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) { MPT_ADAPTER *ioc = hd->ioc; MpiRaidActionRequest_t *pReq; MPT_FRAME_HDR *mf; int ret; unsigned long timeleft; mutex_lock(&ioc->internal_cmds.mutex); /* Get and Populate a free Frame */ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!\n", ioc->name, __func__)); ret = -EAGAIN; goto out; } pReq = (MpiRaidActionRequest_t *)mf; if (quiesce) pReq->Action = MPI_RAID_ACTION_QUIESCE_PHYS_IO; else pReq->Action = MPI_RAID_ACTION_ENABLE_PHYS_IO; pReq->Reserved1 = 0; pReq->ChainOffset = 0; pReq->Function = MPI_FUNCTION_RAID_ACTION; pReq->VolumeID = id; pReq->VolumeBus = channel; pReq->PhysDiskNum = 0; pReq->MsgFlags = 0; pReq->Reserved2 = 0; pReq->ActionDataWord = 0; /* Reserved for this action */ ioc->add_sge((char *)&pReq->ActionDataSGE, MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", ioc->name, pReq->Action, channel, id)); INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status) mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ); if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { ret = -ETIME; dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n", ioc->name, __func__)); if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) goto out; if (!timeleft) { printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", ioc->name, __func__); mpt_HardResetHandler(ioc, CAN_SLEEP); mpt_free_msg_frame(ioc, mf); } goto out; } ret = ioc->internal_cmds.completion_code; out: CLEAR_MGMT_STATUS(ioc->internal_cmds.status) mutex_unlock(&ioc->internal_cmds.mutex); return ret; } static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, struct scsi_device *sdev) { VirtTarget *vtarget = scsi_target(sdev)->hostdata; MPT_ADAPTER *ioc = hd->ioc; /* no DV on RAID devices */ if 
(sdev->channel == 0 && mptspi_is_raid(hd, sdev->id)) return; /* If this is a piece of a RAID, then quiesce first */ if (sdev->channel == 1 && mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) { starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT "Integrated RAID quiesce failed\n", ioc->name); return; } hd->spi_pending |= (1 << sdev->id); spi_dv_device(sdev); hd->spi_pending &= ~(1 << sdev->id); if (sdev->channel == 1 && mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0) starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT "Integrated RAID resume failed\n", ioc->name); mptspi_read_parameters(sdev->sdev_target); spi_display_xfer_agreement(sdev->sdev_target); mptspi_read_parameters(sdev->sdev_target); } static int mptspi_slave_alloc(struct scsi_device *sdev) { MPT_SCSI_HOST *hd = shost_priv(sdev->host); VirtTarget *vtarget; VirtDevice *vdevice; struct scsi_target *starget; MPT_ADAPTER *ioc = hd->ioc; if (sdev->channel == 1 && mptscsih_is_phys_disk(ioc, 0, sdev->id) == 0) return -ENXIO; vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); if (!vdevice) { printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", ioc->name, sizeof(VirtDevice)); return -ENOMEM; } vdevice->lun = sdev->lun; sdev->hostdata = vdevice; starget = scsi_target(sdev); vtarget = starget->hostdata; vdevice->vtarget = vtarget; vtarget->num_luns++; if (sdev->channel == 1) sdev->no_uld_attach = 1; return 0; } static int mptspi_slave_configure(struct scsi_device *sdev) { struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host); VirtTarget *vtarget = scsi_target(sdev)->hostdata; int ret; mptspi_initTarget(hd, vtarget, sdev); ret = mptscsih_slave_configure(sdev); if (ret) return ret; ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d min_period=0x%02x" " max_offset=0x%02x max_width=%d\n", hd->ioc->name, sdev->id, spi_min_period(scsi_target(sdev)), spi_max_offset(scsi_target(sdev)), spi_max_width(scsi_target(sdev)))); if ((sdev->channel == 1 || !(mptspi_is_raid(hd, 
sdev->id))) && !spi_initial_dv(sdev->sdev_target)) mptspi_dv_device(hd, sdev); return 0; } static int mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); VirtDevice *vdevice = SCpnt->device->hostdata; MPT_ADAPTER *ioc = hd->ioc; if (!vdevice || !vdevice->vtarget) { SCpnt->result = DID_NO_CONNECT << 16; done(SCpnt); return 0; } if (SCpnt->device->channel == 1 && mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) { SCpnt->result = DID_NO_CONNECT << 16; done(SCpnt); return 0; } if (spi_dv_pending(scsi_target(SCpnt->device))) ddvprintk(ioc, scsi_print_command(SCpnt)); return mptscsih_qcmd(SCpnt,done); } static DEF_SCSI_QCMD(mptspi_qcmd) static void mptspi_slave_destroy(struct scsi_device *sdev) { struct scsi_target *starget = scsi_target(sdev); VirtTarget *vtarget = starget->hostdata; VirtDevice *vdevice = sdev->hostdata; /* Will this be the last lun on a non-raid device? */ if (vtarget->num_luns == 1 && vdevice->configured_lun) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; /* Async Narrow */ pg1.RequestedParameters = 0; pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } mptscsih_slave_destroy(sdev); } static struct scsi_host_template mptspi_driver_template = { .module = THIS_MODULE, .proc_name = "mptspi", .proc_info = mptscsih_proc_info, .name = "MPT SPI Host", .info = mptscsih_info, .queuecommand = mptspi_qcmd, .target_alloc = mptspi_target_alloc, .slave_alloc = mptspi_slave_alloc, .slave_configure = mptspi_slave_configure, .target_destroy = mptspi_target_destroy, .slave_destroy = mptspi_slave_destroy, .change_queue_depth = mptscsih_change_queue_depth, .eh_abort_handler = mptscsih_abort, .eh_device_reset_handler = mptscsih_dev_reset, .eh_bus_reset_handler = mptscsih_bus_reset, .eh_host_reset_handler = mptscsih_host_reset, .bios_param = mptscsih_bios_param, .can_queue = MPT_SCSI_CAN_QUEUE, .this_id = -1, .sg_tablesize = MPT_SCSI_SG_DEPTH, 
.max_sectors = 8192, .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mptscsih_host_attrs, }; static int mptspi_write_spi_device_pg1(struct scsi_target *starget, struct _CONFIG_PAGE_SCSI_DEVICE_1 *pass_pg1) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct _MPT_SCSI_HOST *hd = shost_priv(shost); struct _MPT_ADAPTER *ioc = hd->ioc; struct _CONFIG_PAGE_SCSI_DEVICE_1 *pg1; dma_addr_t pg1_dma; int size; struct _x_config_parms cfg; struct _CONFIG_PAGE_HEADER hdr; int err = -EBUSY; u32 nego_parms; u32 period; struct scsi_device *sdev; int i; /* don't allow updating nego parameters on RAID devices */ if (starget->channel == 0 && mptspi_is_raid(hd, starget->id)) return -1; size = ioc->spi_data.sdp1length * 4; pg1 = dma_alloc_coherent(&ioc->pcidev->dev, size, &pg1_dma, GFP_KERNEL); if (pg1 == NULL) { starget_printk(KERN_ERR, starget, MYIOC_s_FMT "dma_alloc_coherent for parameters failed\n", ioc->name); return -EINVAL; } memset(&hdr, 0, sizeof(hdr)); hdr.PageVersion = ioc->spi_data.sdp1version; hdr.PageLength = ioc->spi_data.sdp1length; hdr.PageNumber = 1; hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; memset(&cfg, 0, sizeof(cfg)); cfg.cfghdr.hdr = &hdr; cfg.physAddr = pg1_dma; cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; cfg.dir = 1; cfg.pageAddr = starget->id; memcpy(pg1, pass_pg1, size); pg1->Header.PageVersion = hdr.PageVersion; pg1->Header.PageLength = hdr.PageLength; pg1->Header.PageNumber = hdr.PageNumber; pg1->Header.PageType = hdr.PageType; nego_parms = le32_to_cpu(pg1->RequestedParameters); period = (nego_parms & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK) >> MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; if (period == 8) { /* Turn on inline data padding for TAPE when running U320 */ for (i = 0 ; i < 16; i++) { sdev = scsi_device_lookup_by_target(starget, i); if (sdev && sdev->type == TYPE_TAPE) { sdev_printk(KERN_DEBUG, sdev, MYIOC_s_FMT "IDP:ON\n", ioc->name); nego_parms |= MPI_SCSIDEVPAGE1_RP_IDP; pg1->RequestedParameters = 
cpu_to_le32(nego_parms); break; } } } mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters)); if (mpt_config(ioc, &cfg)) { starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name); goto out_free; } err = 0; out_free: dma_free_coherent(&ioc->pcidev->dev, size, pg1, pg1_dma); return err; } static void mptspi_write_offset(struct scsi_target *starget, int offset) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; u32 nego; if (offset < 0) offset = 0; if (offset > 255) offset = 255; if (spi_offset(starget) == -1) mptspi_read_parameters(starget); spi_offset(starget) = offset; nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } static void mptspi_write_period(struct scsi_target *starget, int period) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; u32 nego; if (period < 8) period = 8; if (period > 255) period = 255; if (spi_period(starget) == -1) mptspi_read_parameters(starget); if (period == 8) { spi_iu(starget) = 1; spi_dt(starget) = 1; } else if (period == 9) { spi_dt(starget) = 1; } spi_period(starget) = period; nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } static void mptspi_write_dt(struct scsi_target *starget, int dt) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; u32 nego; if (spi_period(starget) == -1) mptspi_read_parameters(starget); if (!dt && spi_period(starget) < 10) spi_period(starget) = 10; spi_dt(starget) = dt; nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } static void mptspi_write_iu(struct scsi_target *starget, int iu) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; u32 nego; if (spi_period(starget) == -1) mptspi_read_parameters(starget); if (!iu && spi_period(starget) < 9) spi_period(starget) = 9; 
spi_iu(starget) = iu; nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } #define MPTSPI_SIMPLE_TRANSPORT_PARM(parm) \ static void mptspi_write_##parm(struct scsi_target *starget, int parm)\ { \ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; \ u32 nego; \ \ spi_##parm(starget) = parm; \ \ nego = mptspi_getRP(starget); \ \ pg1.RequestedParameters = cpu_to_le32(nego); \ pg1.Reserved = 0; \ pg1.Configuration = 0; \ \ mptspi_write_spi_device_pg1(starget, &pg1); \ } MPTSPI_SIMPLE_TRANSPORT_PARM(rd_strm) MPTSPI_SIMPLE_TRANSPORT_PARM(wr_flow) MPTSPI_SIMPLE_TRANSPORT_PARM(rti) MPTSPI_SIMPLE_TRANSPORT_PARM(hold_mcs) MPTSPI_SIMPLE_TRANSPORT_PARM(pcomp_en) static void mptspi_write_qas(struct scsi_target *starget, int qas) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct _MPT_SCSI_HOST *hd = shost_priv(shost); VirtTarget *vtarget = starget->hostdata; u32 nego; if ((vtarget->negoFlags & MPT_TARGET_NO_NEGO_QAS) || hd->ioc->spi_data.noQas) spi_qas(starget) = 0; else spi_qas(starget) = qas; nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } static void mptspi_write_width(struct scsi_target *starget, int width) { struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; u32 nego; if (!width) { spi_dt(starget) = 0; if (spi_period(starget) < 10) spi_period(starget) = 10; } spi_width(starget) = width; nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } struct work_queue_wrapper { struct work_struct work; struct _MPT_SCSI_HOST *hd; int disk; }; static void mpt_work_wrapper(struct work_struct *work) { struct work_queue_wrapper *wqw = container_of(work, struct work_queue_wrapper, work); struct _MPT_SCSI_HOST *hd = wqw->hd; MPT_ADAPTER 
*ioc = hd->ioc; struct Scsi_Host *shost = ioc->sh; struct scsi_device *sdev; int disk = wqw->disk; struct _CONFIG_PAGE_IOC_3 *pg3; kfree(wqw); mpt_findImVolumes(ioc); pg3 = ioc->raid_data.pIocPg3; if (!pg3) return; shost_for_each_device(sdev,shost) { struct scsi_target *starget = scsi_target(sdev); VirtTarget *vtarget = starget->hostdata; /* only want to search RAID components */ if (sdev->channel != 1) continue; /* The id is the raid PhysDiskNum, even if * starget->id is the actual target address */ if(vtarget->id != disk) continue; starget_printk(KERN_INFO, vtarget->starget, MYIOC_s_FMT "Integrated RAID requests DV of new device\n", ioc->name); mptspi_dv_device(hd, sdev); } shost_printk(KERN_INFO, shost, MYIOC_s_FMT "Integrated RAID detects new device %d\n", ioc->name, disk); scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); } static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk) { struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC); MPT_ADAPTER *ioc = hd->ioc; if (!wqw) { shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT "Failed to act on RAID event for physical disk %d\n", ioc->name, disk); return; } INIT_WORK(&wqw->work, mpt_work_wrapper); wqw->hd = hd; wqw->disk = disk; schedule_work(&wqw->work); } static int mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); if (ioc->bus_type != SPI) return 0; if (hd && event == MPI_EVENT_INTEGRATED_RAID) { int reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16; if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) { int disk = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24; mpt_dv_raid(hd, disk); } } return mptscsih_event_process(ioc, pEvReply); } static int mptspi_deny_binding(struct scsi_target *starget) { struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)dev_to_shost(starget->dev.parent)->hostdata; return ((mptspi_is_raid(hd, starget->id)) && starget->channel == 
0) ? 1 : 0; } static struct spi_function_template mptspi_transport_functions = { .get_offset = mptspi_read_parameters, .set_offset = mptspi_write_offset, .show_offset = 1, .get_period = mptspi_read_parameters, .set_period = mptspi_write_period, .show_period = 1, .get_width = mptspi_read_parameters, .set_width = mptspi_write_width, .show_width = 1, .get_iu = mptspi_read_parameters, .set_iu = mptspi_write_iu, .show_iu = 1, .get_dt = mptspi_read_parameters, .set_dt = mptspi_write_dt, .show_dt = 1, .get_qas = mptspi_read_parameters, .set_qas = mptspi_write_qas, .show_qas = 1, .get_wr_flow = mptspi_read_parameters, .set_wr_flow = mptspi_write_wr_flow, .show_wr_flow = 1, .get_rd_strm = mptspi_read_parameters, .set_rd_strm = mptspi_write_rd_strm, .show_rd_strm = 1, .get_rti = mptspi_read_parameters, .set_rti = mptspi_write_rti, .show_rti = 1, .get_pcomp_en = mptspi_read_parameters, .set_pcomp_en = mptspi_write_pcomp_en, .show_pcomp_en = 1, .get_hold_mcs = mptspi_read_parameters, .set_hold_mcs = mptspi_write_hold_mcs, .show_hold_mcs = 1, .deny_binding = mptspi_deny_binding, }; /**************************************************************************** * Supported hardware */ static struct pci_device_id mptspi_pci_table[] = { { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1030, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_ATTO, MPI_MANUFACTPAGE_DEVID_53C1030, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1035, PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, mptspi_pci_table); /* * renegotiate for a given target */ static void mptspi_dv_renegotiate_work(struct work_struct *work) { struct work_queue_wrapper *wqw = container_of(work, struct work_queue_wrapper, work); struct _MPT_SCSI_HOST *hd = wqw->hd; struct scsi_device *sdev; struct scsi_target *starget; struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; u32 nego; MPT_ADAPTER *ioc = hd->ioc; kfree(wqw); if (hd->spi_pending) { shost_for_each_device(sdev, ioc->sh) { 
if (hd->spi_pending & (1 << sdev->id)) continue; starget = scsi_target(sdev); nego = mptspi_getRP(starget); pg1.RequestedParameters = cpu_to_le32(nego); pg1.Reserved = 0; pg1.Configuration = 0; mptspi_write_spi_device_pg1(starget, &pg1); } } else { shost_for_each_device(sdev, ioc->sh) mptspi_dv_device(hd, sdev); } } static void mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) { struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC); if (!wqw) return; INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); wqw->hd = hd; schedule_work(&wqw->work); } /* * spi module reset handler */ static int mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { int rc; rc = mptscsih_ioc_reset(ioc, reset_phase); if ((ioc->bus_type != SPI) || (!rc)) return rc; /* only try to do a renegotiation if we're properly set up * if we get an ioc fault on bringup, ioc->sh will be NULL */ if (reset_phase == MPT_IOC_POST_RESET && ioc->sh) { struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_dv_renegotiate(hd); } return rc; } #ifdef CONFIG_PM /* * spi module resume handler */ static int mptspi_resume(struct pci_dev *pdev) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); int rc; rc = mptscsih_resume(pdev); mptspi_dv_renegotiate(hd); return rc; } #endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptspi_probe - Installs scsi devices per bus. * @pdev: Pointer to pci_dev structure * * Returns 0 for success, non-zero for failure. 
* */ static int mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *sh; MPT_SCSI_HOST *hd; MPT_ADAPTER *ioc; unsigned long flags; int ii; int numSGE = 0; int scale; int ioc_cap; int error=0; int r; if ((r = mpt_attach(pdev,id)) != 0) return r; ioc = pci_get_drvdata(pdev); ioc->DoneCtx = mptspiDoneCtx; ioc->TaskCtx = mptspiTaskCtx; ioc->InternalCtx = mptspiInternalCtx; /* Added sanity check on readiness of the MPT adapter. */ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { printk(MYIOC_s_WARN_FMT "Skipping because it's not operational!\n", ioc->name); error = -ENODEV; goto out_mptspi_probe; } if (!ioc->active) { printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n", ioc->name); error = -ENODEV; goto out_mptspi_probe; } /* Sanity check - ensure at least 1 port is INITIATOR capable */ ioc_cap = 0; for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { if (ioc->pfacts[ii].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) ioc_cap ++; } if (!ioc_cap) { printk(MYIOC_s_WARN_FMT "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", ioc->name, ioc); return 0; } sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); if (!sh) { printk(MYIOC_s_WARN_FMT "Unable to register controller with SCSI subsystem\n", ioc->name); error = -1; goto out_mptspi_probe; } spin_lock_irqsave(&ioc->FreeQlock, flags); /* Attach the SCSI Host to the IOC structure */ ioc->sh = sh; sh->io_port = 0; sh->n_io_port = 0; sh->irq = 0; /* set 16 byte cdb's */ sh->max_cmd_len = 16; /* Yikes! This is important! * Otherwise, by default, linux * only scans target IDs 0-7! * pfactsN->MaxDevices unreliable * (not supported in early * versions of the FW). 
* max_id = 1 + actual max id, * max_lun = 1 + actual last lun, * see hosts.h :o( */ sh->max_id = ioc->devices_per_bus; sh->max_lun = MPT_LAST_LUN + 1; /* * If RAID Firmware Detected, setup virtual channel */ if (ioc->ir_firmware) sh->max_channel = 1; else sh->max_channel = 0; sh->this_id = ioc->pfacts[0].PortSCSIID; /* Required entry. */ sh->unique_id = ioc->id; /* Verify that we won't exceed the maximum * number of chain buffers * We can optimize: ZZ = req_sz/sizeof(SGE) * For 32bit SGE's: * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ * + (req_sz - 64)/sizeof(SGE) * A slightly different algorithm is required for * 64bit SGEs. */ scale = ioc->req_sz/ioc->SGE_size; if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + (ioc->req_sz - 60) / ioc->SGE_size; } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + (ioc->req_sz - 64) / ioc->SGE_size; } if (numSGE < sh->sg_tablesize) { /* Reset this value */ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Resetting sg_tablesize to %d from %d\n", ioc->name, numSGE, sh->sg_tablesize)); sh->sg_tablesize = numSGE; } spin_unlock_irqrestore(&ioc->FreeQlock, flags); hd = shost_priv(sh); hd->ioc = ioc; /* SCSI needs scsi_cmnd lookup table! * (with size equal to req_depth*PtrSz!) 
*/ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC); if (!ioc->ScsiLookup) { error = -ENOMEM; goto out_mptspi_probe; } spin_lock_init(&ioc->scsi_lookup_lock); dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", ioc->name, ioc->ScsiLookup)); ioc->spi_data.Saf_Te = mpt_saf_te; ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "saf_te %x\n", ioc->name, mpt_saf_te)); ioc->spi_data.noQas = 0; hd->last_queue_full = 0; hd->spi_pending = 0; /* Some versions of the firmware don't support page 0; without * that we can't get the parameters */ if (ioc->spi_data.sdp0length != 0) sh->transportt = mptspi_transport_template; error = scsi_add_host (sh, &ioc->pcidev->dev); if(error) { dprintk(ioc, printk(MYIOC_s_ERR_FMT "scsi_add_host failed\n", ioc->name)); goto out_mptspi_probe; } /* * issue internal bus reset */ if (ioc->spi_data.bus_reset) mptscsih_IssueTaskMgmt(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 0, 0, 0, 0, 5); scsi_scan_host(sh); return 0; out_mptspi_probe: mptscsih_remove(pdev); return error; } static struct pci_driver mptspi_driver = { .name = "mptspi", .id_table = mptspi_pci_table, .probe = mptspi_probe, .remove = __devexit_p(mptscsih_remove), .shutdown = mptscsih_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, .resume = mptspi_resume, #endif }; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. * * Returns 0 for success, non-zero for failure. 
*/ static int __init mptspi_init(void) { int error; show_mptmod_ver(my_NAME, my_VERSION); mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions); if (!mptspi_transport_template) return -ENODEV; mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER, "mptscsih_io_done"); mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER, "mptscsih_taskmgmt_complete"); mptspiInternalCtx = mpt_register(mptscsih_scandv_complete, MPTSPI_DRIVER, "mptscsih_scandv_complete"); mpt_event_register(mptspiDoneCtx, mptspi_event_process); mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset); error = pci_register_driver(&mptspi_driver); if (error) spi_release_transport(mptspi_transport_template); return error; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptspi_exit - Unregisters MPT adapter(s) */ static void __exit mptspi_exit(void) { pci_unregister_driver(&mptspi_driver); mpt_reset_deregister(mptspiDoneCtx); mpt_event_deregister(mptspiDoneCtx); mpt_deregister(mptspiInternalCtx); mpt_deregister(mptspiTaskCtx); mpt_deregister(mptspiDoneCtx); spi_release_transport(mptspi_transport_template); } module_init(mptspi_init); module_exit(mptspi_exit);
gpl-2.0
cmenard/T889_Kernel
drivers/w1/masters/omap_hdq.c
8017
18222
/* * drivers/w1/masters/omap_hdq.c * * Copyright (C) 2007 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/sched.h> #include <asm/irq.h> #include <mach/hardware.h> #include "../w1.h" #include "../w1_int.h" #define MOD_NAME "OMAP_HDQ:" #define OMAP_HDQ_REVISION 0x00 #define OMAP_HDQ_TX_DATA 0x04 #define OMAP_HDQ_RX_DATA 0x08 #define OMAP_HDQ_CTRL_STATUS 0x0c #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6) #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5) #define OMAP_HDQ_CTRL_STATUS_GO (1<<4) #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2) #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1) #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0) #define OMAP_HDQ_INT_STATUS 0x10 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2) #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1) #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0) #define OMAP_HDQ_SYSCONFIG 0x14 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1) #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0) #define OMAP_HDQ_SYSSTATUS 0x18 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0) #define OMAP_HDQ_FLAG_CLEAR 0 #define OMAP_HDQ_FLAG_SET 1 #define OMAP_HDQ_TIMEOUT (HZ/5) #define OMAP_HDQ_MAX_USER 4 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue); static int w1_id; struct hdq_data { struct device *dev; void __iomem *hdq_base; /* lock status update */ struct mutex hdq_mutex; int hdq_usecount; struct clk *hdq_ick; struct clk *hdq_fck; u8 hdq_irqstatus; /* device lock */ spinlock_t hdq_spinlock; /* * Used to control the call to omap_hdq_get and omap_hdq_put. * HDQ Protocol: Write the CMD|REG_address first, followed by * the data wrire or read. 
*/ int init_trans; }; static int __devinit omap_hdq_probe(struct platform_device *pdev); static int omap_hdq_remove(struct platform_device *pdev); static struct platform_driver omap_hdq_driver = { .probe = omap_hdq_probe, .remove = omap_hdq_remove, .driver = { .name = "omap_hdq", }, }; static u8 omap_w1_read_byte(void *_hdq); static void omap_w1_write_byte(void *_hdq, u8 byte); static u8 omap_w1_reset_bus(void *_hdq); static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found); static struct w1_bus_master omap_w1_master = { .read_byte = omap_w1_read_byte, .write_byte = omap_w1_write_byte, .reset_bus = omap_w1_reset_bus, .search = omap_w1_search_bus, }; /* HDQ register I/O routines */ static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset) { return __raw_readb(hdq_data->hdq_base + offset); } static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val) { __raw_writeb(val, hdq_data->hdq_base + offset); } static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset, u8 val, u8 mask) { u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask) | (val & mask); __raw_writeb(new_val, hdq_data->hdq_base + offset); return new_val; } /* * Wait for one or more bits in flag change. * HDQ_FLAG_SET: wait until any bit in the flag is set. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared. * return 0 on success and -ETIMEDOUT in the case of timeout. 
*/ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset, u8 flag, u8 flag_set, u8 *status) { int ret = 0; unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; if (flag_set == OMAP_HDQ_FLAG_CLEAR) { /* wait for the flag clear */ while (((*status = hdq_reg_in(hdq_data, offset)) & flag) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } if (*status & flag) ret = -ETIMEDOUT; } else if (flag_set == OMAP_HDQ_FLAG_SET) { /* wait for the flag set */ while (!((*status = hdq_reg_in(hdq_data, offset)) & flag) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } if (!(*status & flag)) ret = -ETIMEDOUT; } else return -EINVAL; return ret; } /* write out a byte and fill *status with HDQ_INT_STATUS */ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) { int ret; u8 tmp_status; unsigned long irqflags; *status = 0; spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); /* clear interrupt flags via a dummy read */ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); /* ISR loads it with new INT_STATUS */ hdq_data->hdq_irqstatus = 0; spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val); /* set the GO bit */ hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); /* wait for the TXCOMPLETE bit */ ret = wait_event_timeout(hdq_wait_queue, hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); if (ret == 0) { dev_dbg(hdq_data->dev, "TX wait elapsed\n"); goto out; } *status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" "TXCOMPLETE/RXCOMPLETE, %x", *status); ret = -ETIMEDOUT; goto out; } /* wait for the GO bit return to zero */ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) { dev_dbg(hdq_data->dev, "timeout waiting GO bit" "return to zero, 
%x", tmp_status); } out: return ret; } /* HDQ Interrupt service routine */ static irqreturn_t hdq_isr(int irq, void *_hdq) { struct hdq_data *hdq_data = _hdq; unsigned long irqflags; spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); if (hdq_data->hdq_irqstatus & (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE | OMAP_HDQ_INT_STATUS_TIMEOUT)) { /* wake up sleeping process */ wake_up(&hdq_wait_queue); } return IRQ_HANDLED; } /* HDQ Mode: always return success */ static u8 omap_w1_reset_bus(void *_hdq) { return 0; } /* W1 search callback function */ static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found) { u64 module_id, rn_le, cs, id; if (w1_id) module_id = w1_id; else module_id = 0x1; rn_le = cpu_to_le64(module_id); /* * HDQ might not obey truly the 1-wire spec. * So calculate CRC based on module parameter. */ cs = w1_calc_crc8((u8 *)&rn_le, 7); id = (cs << 56) | module_id; slave_found(master_dev, id); } static int _omap_hdq_reset(struct hdq_data *hdq_data) { int ret; u8 tmp_status; hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET); /* * Select HDQ mode & enable clocks. * It is observed that INT flags can't be cleared via a read and GO/INIT * won't return to zero if interrupt is disabled. So we always enable * interrupt. 
*/ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); /* wait for reset to complete */ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS, OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x", tmp_status); else { hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_AUTOIDLE); } return ret; } /* Issue break pulse to the device */ static int omap_hdq_break(struct hdq_data *hdq_data) { int ret = 0; u8 tmp_status; unsigned long irqflags; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); ret = -EINTR; goto rtn; } spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); /* clear interrupt flags via a dummy read */ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); /* ISR loads it with new INT_STATUS */ hdq_data->hdq_irqstatus = 0; spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); /* set the INIT and GO bit */ hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO); /* wait for the TIMEOUT bit */ ret = wait_event_timeout(hdq_wait_queue, hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); if (ret == 0) { dev_dbg(hdq_data->dev, "break wait elapsed\n"); ret = -EINTR; goto out; } tmp_status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", tmp_status); ret = -ETIMEDOUT; goto out; } /* * wait for both INIT and GO bits rerurn to zero. * zero wait time expected for interrupt mode. 
*/ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" "return to zero, %x", tmp_status); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return ret; } static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) { int ret = 0; u8 status; unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { ret = -EINTR; goto rtn; } if (!hdq_data->hdq_usecount) { ret = -EINVAL; goto out; } if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); /* * The RX comes immediately after TX. It * triggers another interrupt before we * sleep. So we have to wait for RXCOMPLETE bit. */ while (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0, OMAP_HDQ_CTRL_STATUS_DIR); status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" "RXCOMPLETE, %x", status); ret = -ETIMEDOUT; goto out; } } /* the data is ready. Read it in! 
*/ *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return 0; } /* Enable clocks and set the controller to HDQ mode */ static int omap_hdq_get(struct hdq_data *hdq_data) { int ret = 0; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { ret = -EINTR; goto rtn; } if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) { dev_dbg(hdq_data->dev, "attempt to exceed the max use count"); ret = -EINVAL; goto out; } else { hdq_data->hdq_usecount++; try_module_get(THIS_MODULE); if (1 == hdq_data->hdq_usecount) { if (clk_enable(hdq_data->hdq_ick)) { dev_dbg(hdq_data->dev, "Can not enable ick\n"); ret = -ENODEV; goto clk_err; } if (clk_enable(hdq_data->hdq_fck)) { dev_dbg(hdq_data->dev, "Can not enable fck\n"); clk_disable(hdq_data->hdq_ick); ret = -ENODEV; goto clk_err; } /* make sure HDQ is out of reset */ if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) & OMAP_HDQ_SYSSTATUS_RESETDONE)) { ret = _omap_hdq_reset(hdq_data); if (ret) /* back up the count */ hdq_data->hdq_usecount--; } else { /* select HDQ mode & enable clocks */ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_AUTOIDLE); hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); } } } clk_err: clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return ret; } /* Disable clocks to the module */ static int omap_hdq_put(struct hdq_data *hdq_data) { int ret = 0; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) return -EINTR; if (0 == hdq_data->hdq_usecount) { dev_dbg(hdq_data->dev, "attempt to decrement use count" "when it is zero"); ret = -EINVAL; } else { hdq_data->hdq_usecount--; module_put(THIS_MODULE); if (0 == hdq_data->hdq_usecount) { clk_disable(hdq_data->hdq_ick); clk_disable(hdq_data->hdq_fck); } } mutex_unlock(&hdq_data->hdq_mutex); return ret; } /* Read a byte of data from the 
device */ static u8 omap_w1_read_byte(void *_hdq) { struct hdq_data *hdq_data = _hdq; u8 val = 0; int ret; ret = hdq_read_byte(hdq_data, &val); if (ret) { ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return -EINTR; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); omap_hdq_put(hdq_data); return -1; } /* Write followed by a read, release the module */ if (hdq_data->init_trans) { ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return -EINTR; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); omap_hdq_put(hdq_data); } return val; } /* Write a byte of data to the device */ static void omap_w1_write_byte(void *_hdq, u8 byte) { struct hdq_data *hdq_data = _hdq; int ret; u8 status; /* First write to initialize the transfer */ if (hdq_data->init_trans == 0) omap_hdq_get(hdq_data); ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return; } hdq_data->init_trans++; mutex_unlock(&hdq_data->hdq_mutex); ret = hdq_write_byte(hdq_data, byte, &status); if (ret == 0) { dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status); return; } /* Second write, data transferred. 
Release the module */ if (hdq_data->init_trans > 1) { omap_hdq_put(hdq_data); ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); } return; } static int __devinit omap_hdq_probe(struct platform_device *pdev) { struct hdq_data *hdq_data; struct resource *res; int ret, irq; u8 rev; hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL); if (!hdq_data) { dev_dbg(&pdev->dev, "unable to allocate memory\n"); ret = -ENOMEM; goto err_kmalloc; } hdq_data->dev = &pdev->dev; platform_set_drvdata(pdev, hdq_data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_dbg(&pdev->dev, "unable to get resource\n"); ret = -ENXIO; goto err_resource; } hdq_data->hdq_base = ioremap(res->start, SZ_4K); if (!hdq_data->hdq_base) { dev_dbg(&pdev->dev, "ioremap failed\n"); ret = -EINVAL; goto err_ioremap; } /* get interface & functional clock objects */ hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(hdq_data->hdq_ick)) { dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n"); ret = PTR_ERR(hdq_data->hdq_ick); goto err_ick; } hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); if (IS_ERR(hdq_data->hdq_fck)) { dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n"); ret = PTR_ERR(hdq_data->hdq_fck); goto err_fck; } hdq_data->hdq_usecount = 0; mutex_init(&hdq_data->hdq_mutex); if (clk_enable(hdq_data->hdq_ick)) { dev_dbg(&pdev->dev, "Can not enable ick\n"); ret = -ENODEV; goto err_intfclk; } if (clk_enable(hdq_data->hdq_fck)) { dev_dbg(&pdev->dev, "Can not enable fck\n"); ret = -ENODEV; goto err_fnclk; } rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION); dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. 
Driver in %s mode\n", (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt"); spin_lock_init(&hdq_data->hdq_spinlock); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; goto err_irq; } ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data); if (ret < 0) { dev_dbg(&pdev->dev, "could not request irq\n"); goto err_irq; } omap_hdq_break(hdq_data); /* don't clock the HDQ until it is needed */ clk_disable(hdq_data->hdq_ick); clk_disable(hdq_data->hdq_fck); omap_w1_master.data = hdq_data; ret = w1_add_master_device(&omap_w1_master); if (ret) { dev_dbg(&pdev->dev, "Failure in registering w1 master\n"); goto err_w1; } return 0; err_w1: err_irq: clk_disable(hdq_data->hdq_fck); err_fnclk: clk_disable(hdq_data->hdq_ick); err_intfclk: clk_put(hdq_data->hdq_fck); err_fck: clk_put(hdq_data->hdq_ick); err_ick: iounmap(hdq_data->hdq_base); err_ioremap: err_resource: platform_set_drvdata(pdev, NULL); kfree(hdq_data); err_kmalloc: return ret; } static int omap_hdq_remove(struct platform_device *pdev) { struct hdq_data *hdq_data = platform_get_drvdata(pdev); mutex_lock(&hdq_data->hdq_mutex); if (hdq_data->hdq_usecount) { dev_dbg(&pdev->dev, "removed when use count is not zero\n"); mutex_unlock(&hdq_data->hdq_mutex); return -EBUSY; } mutex_unlock(&hdq_data->hdq_mutex); /* remove module dependency */ clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); free_irq(INT_24XX_HDQ_IRQ, hdq_data); platform_set_drvdata(pdev, NULL); iounmap(hdq_data->hdq_base); kfree(hdq_data); return 0; } static int __init omap_hdq_init(void) { return platform_driver_register(&omap_hdq_driver); } module_init(omap_hdq_init); static void __exit omap_hdq_exit(void) { platform_driver_unregister(&omap_hdq_driver); } module_exit(omap_hdq_exit); module_param(w1_id, int, S_IRUSR); MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection"); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("HDQ driver Library"); MODULE_LICENSE("GPL");
gpl-2.0
alcobar/asuswrt-merlin
release/src-rt/linux/linux-2.6/arch/arm/mach-s3c2440/s3c2440.c
82
1280
/* linux/arch/arm/mach-s3c2440/s3c2440.c * * Copyright (c) 2004-2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Samsung S3C2440 Mobile CPU support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/sysdev.h> #include <linux/clk.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/hardware.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/plat-s3c24xx/s3c2440.h> #include <asm/plat-s3c24xx/devs.h> #include <asm/plat-s3c24xx/cpu.h> static struct sys_device s3c2440_sysdev = { .cls = &s3c2440_sysclass, }; int __init s3c2440_init(void) { printk("S3C2440: Initialising architecture\n"); /* change irq for watchdog */ s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; /* register our system device for everything else */ return sysdev_register(&s3c2440_sysdev); }
gpl-2.0
jongwonk/linux_samsung
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
82
34290
/* * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c * * Copyright (C) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * Kamil Debski, <k.debski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/version.h> #include <linux/videodev2.h> #include <linux/workqueue.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/videobuf2-core.h> #include "s5p_mfc_common.h" #include "s5p_mfc_ctrl.h" #include "s5p_mfc_debug.h" #include "s5p_mfc_dec.h" #include "s5p_mfc_intr.h" #include "s5p_mfc_opr.h" #include "s5p_mfc_pm.h" static struct s5p_mfc_fmt formats[] = { { .name = "4:2:0 2 Planes 16x16 Tiles", .fourcc = V4L2_PIX_FMT_NV12MT_16X16, .codec_mode = S5P_MFC_CODEC_NONE, .type = MFC_FMT_RAW, .num_planes = 2, .versions = MFC_V6_BIT | MFC_V7_BIT, }, { .name = "4:2:0 2 Planes 64x32 Tiles", .fourcc = V4L2_PIX_FMT_NV12MT, .codec_mode = S5P_MFC_CODEC_NONE, .type = MFC_FMT_RAW, .num_planes = 2, .versions = MFC_V5_BIT, }, { .name = "4:2:0 2 Planes Y/CbCr", .fourcc = V4L2_PIX_FMT_NV12M, .codec_mode = S5P_MFC_CODEC_NONE, .type = MFC_FMT_RAW, .num_planes = 2, .versions = MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "4:2:0 2 Planes Y/CrCb", .fourcc = V4L2_PIX_FMT_NV21M, .codec_mode = S5P_MFC_CODEC_NONE, .type = MFC_FMT_RAW, .num_planes = 2, .versions = MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "H264 Encoded Stream", .fourcc = V4L2_PIX_FMT_H264, .codec_mode = S5P_MFC_CODEC_H264_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "H264/MVC Encoded Stream", .fourcc 
= V4L2_PIX_FMT_H264_MVC, .codec_mode = S5P_MFC_CODEC_H264_MVC_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "H263 Encoded Stream", .fourcc = V4L2_PIX_FMT_H263, .codec_mode = S5P_MFC_CODEC_H263_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "MPEG1 Encoded Stream", .fourcc = V4L2_PIX_FMT_MPEG1, .codec_mode = S5P_MFC_CODEC_MPEG2_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "MPEG2 Encoded Stream", .fourcc = V4L2_PIX_FMT_MPEG2, .codec_mode = S5P_MFC_CODEC_MPEG2_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "MPEG4 Encoded Stream", .fourcc = V4L2_PIX_FMT_MPEG4, .codec_mode = S5P_MFC_CODEC_MPEG4_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "XviD Encoded Stream", .fourcc = V4L2_PIX_FMT_XVID, .codec_mode = S5P_MFC_CODEC_MPEG4_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "VC1 Encoded Stream", .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G, .codec_mode = S5P_MFC_CODEC_VC1_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "VC1 RCV Encoded Stream", .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L, .codec_mode = S5P_MFC_CODEC_VC1RCV_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, { .name = "VP8 Encoded Stream", .fourcc = V4L2_PIX_FMT_VP8, .codec_mode = S5P_MFC_CODEC_VP8_DEC, .type = MFC_FMT_DEC, .num_planes = 1, .versions = MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT, }, }; #define NUM_FORMATS ARRAY_SIZE(formats) /* Find selected format description */ static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t) { unsigned int i; for (i = 0; i < 
NUM_FORMATS; i++) { if (formats[i].fourcc == f->fmt.pix_mp.pixelformat && formats[i].type == t) return &formats[i]; } return NULL; } static struct mfc_control controls[] = { { .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY, .type = V4L2_CTRL_TYPE_INTEGER, .name = "H264 Display Delay", .minimum = 0, .maximum = 16383, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "H264 Display Delay Enable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mpeg4 Loop Filter Enable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Slice Interface Enable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Minimum number of cap bufs", .minimum = 1, .maximum = 32, .step = 1, .default_value = 1, .is_volatile = 1, }, }; #define NUM_CTRLS ARRAY_SIZE(controls) /* Check whether a context should be run on hardware */ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) { /* Context is to parse header */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST) return 1; /* Context is to decode a frame */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_RUNNING && ctx->dst_queue_cnt >= ctx->pb_count) return 1; /* Context is to return last frame */ if (ctx->state == MFCINST_FINISHING && ctx->dst_queue_cnt >= ctx->pb_count) return 1; /* Context is to set buffers */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_HEAD_PARSED && ctx->capture_state == QUEUE_BUFS_MMAPED) return 1; /* Resolution change */ if ((ctx->state == MFCINST_RES_CHANGE_INIT || ctx->state == MFCINST_RES_CHANGE_FLUSH) && ctx->dst_queue_cnt >= ctx->pb_count) return 1; if (ctx->state == 
MFCINST_RES_CHANGE_END && ctx->src_queue_cnt >= 1) return 1; mfc_debug(2, "ctx is not ready\n"); return 0; } static struct s5p_mfc_codec_ops decoder_codec_ops = { .pre_seq_start = NULL, .post_seq_start = NULL, .pre_frame_start = NULL, .post_frame_start = NULL, }; /* Query capabilities of the device */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct s5p_mfc_dev *dev = video_drvdata(file); strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1); strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); cap->bus_info[0] = 0; /* * This is only a mem-to-mem video device. The capture and output * device capability flags are left only for backward compatibility * and are scheduled for removal. */ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } /* Enumerate format */ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f, bool out) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_fmt *fmt; int i, j = 0; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (out && formats[i].type != MFC_FMT_DEC) continue; else if (!out && formats[i].type != MFC_FMT_RAW) continue; else if ((dev->variant->version_bit & formats[i].versions) == 0) continue; if (j == f->index) break; ++j; } if (i == ARRAY_SIZE(formats)) return -EINVAL; fmt = &formats[i]; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; return 0; } static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv, struct v4l2_fmtdesc *f) { return vidioc_enum_fmt(file, f, false); } static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return vidioc_enum_fmt(file, f, true); } /* Get format */ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); struct v4l2_pix_format_mplane *pix_mp; 
mfc_debug_enter(); pix_mp = &f->fmt.pix_mp; if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && (ctx->state == MFCINST_GOT_INST || ctx->state == MFCINST_RES_CHANGE_END)) { /* If the MFC is parsing the header, * so wait until it is finished */ s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { /* This is run on CAPTURE (decode output) */ /* Width and height are set to the dimensions of the movie, the buffer is bigger and further processing stages should crop to this rectangle. */ pix_mp->width = ctx->buf_width; pix_mp->height = ctx->buf_height; pix_mp->field = V4L2_FIELD_NONE; pix_mp->num_planes = 2; /* Set pixelformat to the format in which MFC outputs the decoded frame */ pix_mp->pixelformat = ctx->dst_fmt->fourcc; pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size; } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { /* This is run on OUTPUT The buffer contains compressed image so width and height have no meaning */ pix_mp->width = 0; pix_mp->height = 0; pix_mp->field = V4L2_FIELD_NONE; pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size; pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size; pix_mp->pixelformat = ctx->src_fmt->fourcc; pix_mp->num_planes = ctx->src_fmt->num_planes; } else { mfc_err("Format could not be read\n"); mfc_debug(2, "%s-- with error\n", __func__); return -EINVAL; } mfc_debug_leave(); return 0; } /* Try format */ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_fmt *fmt; mfc_debug(2, "Type is %d\n", f->type); if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { fmt = find_format(f, MFC_FMT_DEC); if (!fmt) { mfc_err("Unsupported format for 
source.\n"); return -EINVAL; } if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) { mfc_err("Unknown codec\n"); return -EINVAL; } if ((dev->variant->version_bit & fmt->versions) == 0) { mfc_err("Unsupported format by this MFC version.\n"); return -EINVAL; } } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { fmt = find_format(f, MFC_FMT_RAW); if (!fmt) { mfc_err("Unsupported format for destination.\n"); return -EINVAL; } if ((dev->variant->version_bit & fmt->versions) == 0) { mfc_err("Unsupported format by this MFC version.\n"); return -EINVAL; } } return 0; } /* Set format */ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret = 0; struct v4l2_pix_format_mplane *pix_mp; struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size; mfc_debug_enter(); ret = vidioc_try_fmt(file, priv, f); pix_mp = &f->fmt.pix_mp; if (ret) return ret; if (ctx->vq_src.streaming || ctx->vq_dst.streaming) { v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__); ret = -EBUSY; goto out; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { /* dst_fmt is validated by call to vidioc_try_fmt */ ctx->dst_fmt = find_format(f, MFC_FMT_RAW); ret = 0; goto out; } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { /* src_fmt is validated by call to vidioc_try_fmt */ ctx->src_fmt = find_format(f, MFC_FMT_DEC); ctx->codec_mode = ctx->src_fmt->codec_mode; mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); pix_mp->height = 0; pix_mp->width = 0; if (pix_mp->plane_fmt[0].sizeimage == 0) pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = DEF_CPB_SIZE; else if (pix_mp->plane_fmt[0].sizeimage > buf_size->cpb) ctx->dec_src_buf_size = buf_size->cpb; else ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; pix_mp->plane_fmt[0].bytesperline = 0; ctx->state = MFCINST_INIT; ret = 0; goto out; } else { mfc_err("Wrong type error for S_FMT : %d", f->type); ret = 
-EINVAL; goto out; } out: mfc_debug_leave(); return ret; } static int reqbufs_output(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, struct v4l2_requestbuffers *reqbufs) { int ret = 0; s5p_mfc_clock_on(); if (reqbufs->count == 0) { mfc_debug(2, "Freeing buffers\n"); ret = vb2_reqbufs(&ctx->vq_src, reqbufs); if (ret) goto out; s5p_mfc_close_mfc_inst(dev, ctx); ctx->src_bufs_cnt = 0; ctx->output_state = QUEUE_FREE; } else if (ctx->output_state == QUEUE_FREE) { /* Can only request buffers when we have a valid format set. */ WARN_ON(ctx->src_bufs_cnt != 0); if (ctx->state != MFCINST_INIT) { mfc_err("Reqbufs called in an invalid state\n"); ret = -EINVAL; goto out; } mfc_debug(2, "Allocating %d buffers for OUTPUT queue\n", reqbufs->count); ret = vb2_reqbufs(&ctx->vq_src, reqbufs); if (ret) goto out; ret = s5p_mfc_open_mfc_inst(dev, ctx); if (ret) { reqbufs->count = 0; vb2_reqbufs(&ctx->vq_src, reqbufs); goto out; } ctx->output_state = QUEUE_BUFS_REQUESTED; } else { mfc_err("Buffers have already been requested\n"); ret = -EINVAL; } out: s5p_mfc_clock_off(); if (ret) mfc_err("Failed allocating buffers for OUTPUT queue\n"); return ret; } static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, struct v4l2_requestbuffers *reqbufs) { int ret = 0; s5p_mfc_clock_on(); if (reqbufs->count == 0) { mfc_debug(2, "Freeing buffers\n"); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); if (ret) goto out; s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, ctx); ctx->dst_bufs_cnt = 0; } else if (ctx->capture_state == QUEUE_FREE) { WARN_ON(ctx->dst_bufs_cnt != 0); mfc_debug(2, "Allocating %d buffers for CAPTURE queue\n", reqbufs->count); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); if (ret) goto out; ctx->capture_state = QUEUE_BUFS_REQUESTED; ctx->total_dpb_count = reqbufs->count; ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_codec_buffers, ctx); if (ret) { mfc_err("Failed to allocate decoding buffers\n"); reqbufs->count = 0; vb2_reqbufs(&ctx->vq_dst, reqbufs); ret = 
-ENOMEM; ctx->capture_state = QUEUE_FREE; goto out; } WARN_ON(ctx->dst_bufs_cnt != ctx->total_dpb_count); ctx->capture_state = QUEUE_BUFS_MMAPED; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_INIT_BUFFERS_RET, 0); } else { mfc_err("Buffers have already been requested\n"); ret = -EINVAL; } out: s5p_mfc_clock_off(); if (ret) mfc_err("Failed allocating buffers for CAPTURE queue\n"); return ret; } /* Reqeust buffers */ static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (reqbufs->memory != V4L2_MEMORY_MMAP) { mfc_err("Only V4L2_MEMORY_MAP is supported\n"); return -EINVAL; } if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { return reqbufs_output(dev, ctx, reqbufs); } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { return reqbufs_capture(dev, ctx, reqbufs); } else { mfc_err("Invalid type requested\n"); return -EINVAL; } } /* Query buffer */ static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret; int i; if (buf->memory != V4L2_MEMORY_MMAP) { mfc_err("Only mmaped buffers can be used\n"); return -EINVAL; } mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type); if (ctx->state == MFCINST_GOT_INST && buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { ret = vb2_querybuf(&ctx->vq_src, buf); } else if (ctx->state == MFCINST_RUNNING && buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { ret = vb2_querybuf(&ctx->vq_dst, buf); for (i = 0; i < buf->length; i++) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; } else { mfc_err("vidioc_querybuf called in an inappropriate state\n"); ret = -EINVAL; } mfc_debug_leave(); return ret; } /* Queue a buffer */ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { 
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (ctx->state == MFCINST_ERROR) { mfc_err("Call on QBUF after unrecoverable error\n"); return -EIO; } if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_qbuf(&ctx->vq_src, buf); else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return vb2_qbuf(&ctx->vq_dst, buf); return -EINVAL; } /* Dequeue a buffer */ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret; if (ctx->state == MFCINST_ERROR) { mfc_err("Call on DQBUF after unrecoverable error\n"); return -EIO; } if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ret = vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK); else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { ret = vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK); if (ret == 0 && ctx->state == MFCINST_FINISHED && list_empty(&ctx->vq_dst.done_list)) v4l2_event_queue_fh(&ctx->fh, &ev); } else { ret = -EINVAL; } return ret; } /* Export DMA buffer */ static int vidioc_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_expbuf(&ctx->vq_src, eb); if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return vb2_expbuf(&ctx->vq_dst, eb); return -EINVAL; } /* Stream on */ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret = -EINVAL; mfc_debug_enter(); if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ret = vb2_streamon(&ctx->vq_src, type); else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) ret = vb2_streamon(&ctx->vq_dst, type); mfc_debug_leave(); return ret; } /* Stream off, which equals to a pause */ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (type == 
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_streamoff(&ctx->vq_src, type); else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return vb2_streamoff(&ctx->vq_dst, type); return -EINVAL; } /* Set controls - v4l2 control framework */ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl) { struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); switch (ctrl->id) { case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY: ctx->display_delay = ctrl->val; break; case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE: ctx->display_delay_enable = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: ctx->loop_filter_mpeg4 = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: ctx->slice_interface = ctrl->val; break; default: mfc_err("Invalid control 0x%08x\n", ctrl->id); return -EINVAL; } return 0; } static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) { struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); struct s5p_mfc_dev *dev = ctx->dev; switch (ctrl->id) { case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { ctrl->val = ctx->pb_count; break; } else if (ctx->state != MFCINST_INIT && ctx->state != MFCINST_RES_CHANGE_END) { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); return -EINVAL; } /* Should wait for the header to be parsed */ s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { ctrl->val = ctx->pb_count; } else { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); return -EINVAL; } break; } return 0; } static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = { .s_ctrl = s5p_mfc_dec_s_ctrl, .g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl, }; /* Get cropping information */ static int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *cr) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); struct s5p_mfc_dev *dev = ctx->dev; u32 left, right, top, bottom; if (ctx->state != 
MFCINST_HEAD_PARSED && ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING && ctx->state != MFCINST_FINISHED) { mfc_err("Cannont set crop\n"); return -EINVAL; } if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) { left = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_h, ctx); right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT; left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK; top = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_v, ctx); bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT; top = top & S5P_FIMV_SHARED_CROP_TOP_MASK; cr->c.left = left; cr->c.top = top; cr->c.width = ctx->img_width - left - right; cr->c.height = ctx->img_height - top - bottom; mfc_debug(2, "Cropping info [h264]: l=%d t=%d " "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top, cr->c.width, cr->c.height, right, bottom, ctx->buf_width, ctx->buf_height); } else { cr->c.left = 0; cr->c.top = 0; cr->c.width = ctx->img_width; cr->c.height = ctx->img_height; mfc_debug(2, "Cropping info: w=%d h=%d fw=%d " "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width, ctx->buf_height); } return 0; } static int vidioc_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *buf; unsigned long flags; switch (cmd->cmd) { case V4L2_ENC_CMD_STOP: if (cmd->flags != 0) return -EINVAL; if (!ctx->vq_src.streaming) return -EINVAL; spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { mfc_err("EOS: empty src queue, entering finishing state"); ctx->state = MFCINST_FINISHING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); spin_unlock_irqrestore(&dev->irqlock, flags); s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); } else { mfc_err("EOS: marking last buffer of stream"); buf = list_entry(ctx->src_queue.prev, struct s5p_mfc_buf, list); if (buf->flags & MFC_BUF_FLAG_USED) ctx->state = MFCINST_FINISHING; else buf->flags |= MFC_BUF_FLAG_EOS; spin_unlock_irqrestore(&dev->irqlock, 
flags); } break; default: return -EINVAL; } return 0; } static int vidioc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_EOS: return v4l2_event_subscribe(fh, sub, 2, NULL); case V4L2_EVENT_SOURCE_CHANGE: return v4l2_src_change_event_subscribe(fh, sub); default: return -EINVAL; } } /* v4l2_ioctl_ops */ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane, .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane, .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt, .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt, .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt, .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt, .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt, .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_expbuf = vidioc_expbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_crop = vidioc_g_crop, .vidioc_decoder_cmd = vidioc_decoder_cmd, .vidioc_subscribe_event = vidioc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static int s5p_mfc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *buf_count, unsigned int *plane_count, unsigned int psize[], void *allocators[]) { struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; /* Video output for decoding (source) * this can be set after getting an instance */ if (ctx->state == MFCINST_INIT && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { /* A single plane is required for input */ *plane_count = 1; if (*buf_count < 1) *buf_count = 1; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; /* Video capture for decoding (destination) * this can be set after the header was parsed */ } else if 
(ctx->state == MFCINST_HEAD_PARSED && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { /* Output plane count is 2 - one for Y and one for CbCr */ *plane_count = 2; /* Setup buffer count */ if (*buf_count < ctx->pb_count) *buf_count = ctx->pb_count; if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB) *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; } else { mfc_err("State seems invalid. State = %d, vq->type = %d\n", ctx->state, vq->type); return -EINVAL; } mfc_debug(2, "Buffer count=%d, plane count=%d\n", *buf_count, *plane_count); if (ctx->state == MFCINST_HEAD_PARSED && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { psize[0] = ctx->luma_size; psize[1] = ctx->chroma_size; if (IS_MFCV6_PLUS(dev)) allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; else allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && ctx->state == MFCINST_INIT) { psize[0] = ctx->dec_src_buf_size; allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; } else { mfc_err("This video node is dedicated to decoding. 
Decoding not initialized\n"); return -EINVAL; } return 0; } static void s5p_mfc_unlock(struct vb2_queue *q) { struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; mutex_unlock(&dev->mfc_mutex); } static void s5p_mfc_lock(struct vb2_queue *q) { struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; mutex_lock(&dev->mfc_mutex); } static int s5p_mfc_buf_init(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); unsigned int i; if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { if (ctx->capture_state == QUEUE_BUFS_MMAPED) return 0; for (i = 0; i < ctx->dst_fmt->num_planes; i++) { if (IS_ERR_OR_NULL(ERR_PTR( vb2_dma_contig_plane_dma_addr(vb, i)))) { mfc_err("Plane mem not allocated\n"); return -EINVAL; } } if (vb2_plane_size(vb, 0) < ctx->luma_size || vb2_plane_size(vb, 1) < ctx->chroma_size) { mfc_err("Plane buffer (CAPTURE) is too small\n"); return -EINVAL; } i = vb->v4l2_buf.index; ctx->dst_bufs[i].b = vb; ctx->dst_bufs[i].cookie.raw.luma = vb2_dma_contig_plane_dma_addr(vb, 0); ctx->dst_bufs[i].cookie.raw.chroma = vb2_dma_contig_plane_dma_addr(vb, 1); ctx->dst_bufs_cnt++; } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (IS_ERR_OR_NULL(ERR_PTR( vb2_dma_contig_plane_dma_addr(vb, 0)))) { mfc_err("Plane memory not allocated\n"); return -EINVAL; } if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) { mfc_err("Plane buffer (OUTPUT) is too small\n"); return -EINVAL; } i = vb->v4l2_buf.index; ctx->src_bufs[i].b = vb; ctx->src_bufs[i].cookie.stream = vb2_dma_contig_plane_dma_addr(vb, 0); ctx->src_bufs_cnt++; } else { mfc_err("s5p_mfc_buf_init: unknown queue type\n"); return -EINVAL; } return 0; } static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) { struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; v4l2_ctrl_handler_setup(&ctx->ctrl_handler); if (ctx->state == MFCINST_FINISHING 
|| ctx->state == MFCINST_FINISHED) ctx->state = MFCINST_RUNNING; /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); return 0; } static void s5p_mfc_stop_streaming(struct vb2_queue *q) { unsigned long flags; struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; int aborted = 0; if ((ctx->state == MFCINST_FINISHING || ctx->state == MFCINST_RUNNING) && dev->curr_ctx == ctx->num && dev->hw_lock) { ctx->state = MFCINST_ABORT; s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0); aborted = 1; } if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { spin_lock_irqsave(&dev->irqlock, flags); s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, &ctx->dst_queue, &ctx->vq_dst); INIT_LIST_HEAD(&ctx->dst_queue); ctx->dst_queue_cnt = 0; ctx->dpb_flush_flag = 1; ctx->dec_dst_flag = 0; spin_unlock_irqrestore(&dev->irqlock, flags); if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) { ctx->state = MFCINST_FLUSH; set_work_bit_irqsave(ctx); s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); if (s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0)) mfc_err("Err flushing buffers\n"); } } if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { spin_lock_irqsave(&dev->irqlock, flags); s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, &ctx->src_queue, &ctx->vq_src); INIT_LIST_HEAD(&ctx->src_queue); ctx->src_queue_cnt = 0; spin_unlock_irqrestore(&dev->irqlock, flags); } if (aborted) ctx->state = MFCINST_RUNNING; } static void s5p_mfc_buf_queue(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *mfc_buf; if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index]; mfc_buf->flags &= ~MFC_BUF_FLAG_USED; spin_lock_irqsave(&dev->irqlock, flags); 
list_add_tail(&mfc_buf->list, &ctx->src_queue); ctx->src_queue_cnt++; spin_unlock_irqrestore(&dev->irqlock, flags); } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index]; mfc_buf->flags &= ~MFC_BUF_FLAG_USED; /* Mark destination as available for use by MFC */ spin_lock_irqsave(&dev->irqlock, flags); set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag); list_add_tail(&mfc_buf->list, &ctx->dst_queue); ctx->dst_queue_cnt++; spin_unlock_irqrestore(&dev->irqlock, flags); } else { mfc_err("Unsupported buffer type (%d)\n", vq->type); } if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); } static struct vb2_ops s5p_mfc_dec_qops = { .queue_setup = s5p_mfc_queue_setup, .wait_prepare = s5p_mfc_unlock, .wait_finish = s5p_mfc_lock, .buf_init = s5p_mfc_buf_init, .start_streaming = s5p_mfc_start_streaming, .stop_streaming = s5p_mfc_stop_streaming, .buf_queue = s5p_mfc_buf_queue, }; struct s5p_mfc_codec_ops *get_dec_codec_ops(void) { return &decoder_codec_ops; } struct vb2_ops *get_dec_queue_ops(void) { return &s5p_mfc_dec_qops; } const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void) { return &s5p_mfc_dec_ioctl_ops; } #define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \ && V4L2_CTRL_DRIVER_PRIV(x)) int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx) { struct v4l2_ctrl_config cfg; int i; v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS); if (ctx->ctrl_handler.error) { mfc_err("v4l2_ctrl_handler_init failed\n"); return ctx->ctrl_handler.error; } for (i = 0; i < NUM_CTRLS; i++) { if (IS_MFC51_PRIV(controls[i].id)) { memset(&cfg, 0, sizeof(struct v4l2_ctrl_config)); cfg.ops = &s5p_mfc_dec_ctrl_ops; cfg.id = controls[i].id; cfg.min = controls[i].minimum; cfg.max = controls[i].maximum; cfg.def = controls[i].default_value; cfg.name = controls[i].name; cfg.type = controls[i].type; cfg.step = controls[i].step; cfg.menu_skip_mask = 0; ctx->ctrls[i] = 
v4l2_ctrl_new_custom(&ctx->ctrl_handler, &cfg, NULL); } else { ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_mfc_dec_ctrl_ops, controls[i].id, controls[i].minimum, controls[i].maximum, controls[i].step, controls[i].default_value); } if (ctx->ctrl_handler.error) { mfc_err("Adding control (%d) failed\n", i); return ctx->ctrl_handler.error; } if (controls[i].is_volatile && ctx->ctrls[i]) ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; } return 0; } void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx) { int i; v4l2_ctrl_handler_free(&ctx->ctrl_handler); for (i = 0; i < NUM_CTRLS; i++) ctx->ctrls[i] = NULL; } void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx) { struct v4l2_format f; f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264; ctx->src_fmt = find_format(&f, MFC_FMT_DEC); if (IS_MFCV8(ctx->dev)) f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M; else if (IS_MFCV6_PLUS(ctx->dev)) f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT_16X16; else f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT; ctx->dst_fmt = find_format(&f, MFC_FMT_RAW); mfc_debug(2, "Default src_fmt is %p, dest_fmt is %p\n", ctx->src_fmt, ctx->dst_fmt); }
gpl-2.0
nemomobile/kernel-adaptation-n950-n9
arch/arm/mach-at91/cpuidle.c
338
1975
/* * based on arch/arm/mach-kirkwood/cpuidle.c * * CPU idle support for AT91 SoC * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * The cpu idle uses wait-for-interrupt and RAM self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and RAM self refresh */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <linux/export.h> #include <asm/proc-fns.h> #include <asm/cpuidle.h> #include <mach/cpu.h> #include "pm.h" #define AT91_MAX_STATES 2 static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device); /* Actual code that puts the SoC in different idle states */ static int at91_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { if (cpu_is_at91rm9200()) at91rm9200_standby(); else if (cpu_is_at91sam9g45()) at91sam9g45_standby(); else at91sam9_standby(); return index; } static struct cpuidle_driver at91_idle_driver = { .name = "at91_idle", .owner = THIS_MODULE, .en_core_tk_irqen = 1, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = at91_enter_idle, .exit_latency = 10, .target_residency = 100000, .flags = CPUIDLE_FLAG_TIME_VALID, .name = "RAM_SR", .desc = "WFI and DDR Self Refresh", }, .state_count = AT91_MAX_STATES, }; /* Initialize CPU idle by registering the idle states */ static int at91_init_cpuidle(void) { struct cpuidle_device *device; device = &per_cpu(at91_cpuidle_device, smp_processor_id()); device->state_count = AT91_MAX_STATES; cpuidle_register_driver(&at91_idle_driver); if (cpuidle_register_device(device)) { printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); return -EIO; } return 0; } device_initcall(at91_init_cpuidle);
gpl-2.0
gablg1/ubuntu-vivid-docker-cr
sound/pci/cs46xx/dsp_spos.c
594
56945
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * 2002-07 Benny Sjostrand benny@hostmobility.com */ #include <asm/io.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/asoundef.h> #include "cs46xx.h" #include "cs46xx_lib.h" #include "dsp_spos.h" static int cs46xx_dsp_async_init (struct snd_cs46xx *chip, struct dsp_scb_descriptor * fg_entry); static enum wide_opcode wide_opcodes[] = { WIDE_FOR_BEGIN_LOOP, WIDE_FOR_BEGIN_LOOP2, WIDE_COND_GOTO_ADDR, WIDE_COND_GOTO_CALL, WIDE_TBEQ_COND_GOTO_ADDR, WIDE_TBEQ_COND_CALL_ADDR, WIDE_TBEQ_NCOND_GOTO_ADDR, WIDE_TBEQ_NCOND_CALL_ADDR, WIDE_TBEQ_COND_GOTO1_ADDR, WIDE_TBEQ_COND_CALL1_ADDR, WIDE_TBEQ_NCOND_GOTOI_ADDR, WIDE_TBEQ_NCOND_CALL1_ADDR }; static int shadow_and_reallocate_code (struct snd_cs46xx * chip, u32 * data, u32 size, u32 overlay_begin_address) { unsigned int i = 0, j, nreallocated = 0; u32 hival,loval,address; u32 mop_operands,mop_type,wide_op; struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (snd_BUG_ON(size %2)) return -EINVAL; while (i < size) { loval = data[i++]; hival = data[i++]; if (ins->code.offset > 0) { mop_operands = (hival >> 6) & 0x03fff; mop_type = 
mop_operands >> 10; /* check for wide type instruction */ if (mop_type == 0 && (mop_operands & WIDE_LADD_INSTR_MASK) == 0 && (mop_operands & WIDE_INSTR_MASK) != 0) { wide_op = loval & 0x7f; for (j = 0;j < ARRAY_SIZE(wide_opcodes); ++j) { if (wide_opcodes[j] == wide_op) { /* need to reallocate instruction */ address = (hival & 0x00FFF) << 5; address |= loval >> 15; dev_dbg(chip->card->dev, "handle_wideop[1]: %05x:%05x addr %04x\n", hival, loval, address); if ( !(address & 0x8000) ) { address += (ins->code.offset / 2) - overlay_begin_address; } else { dev_dbg(chip->card->dev, "handle_wideop[1]: ROM symbol not reallocated\n"); } hival &= 0xFF000; loval &= 0x07FFF; hival |= ( (address >> 5) & 0x00FFF); loval |= ( (address << 15) & 0xF8000); address = (hival & 0x00FFF) << 5; address |= loval >> 15; dev_dbg(chip->card->dev, "handle_wideop:[2] %05x:%05x addr %04x\n", hival, loval, address); nreallocated++; } /* wide_opcodes[j] == wide_op */ } /* for */ } /* mod_type == 0 ... */ } /* ins->code.offset > 0 */ ins->code.data[ins->code.size++] = loval; ins->code.data[ins->code.size++] = hival; } dev_dbg(chip->card->dev, "dsp_spos: %d instructions reallocated\n", nreallocated); return nreallocated; } static struct dsp_segment_desc * get_segment_desc (struct dsp_module_desc * module, int seg_type) { int i; for (i = 0;i < module->nsegments; ++i) { if (module->segments[i].segment_type == seg_type) { return (module->segments + i); } } return NULL; }; static int find_free_symbol_index (struct dsp_spos_instance * ins) { int index = ins->symbol_table.nsymbols,i; for (i = ins->symbol_table.highest_frag_index; i < ins->symbol_table.nsymbols; ++i) { if (ins->symbol_table.symbols[i].deleted) { index = i; break; } } return index; } static int add_symbols (struct snd_cs46xx * chip, struct dsp_module_desc * module) { int i; struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (module->symbol_table.nsymbols > 0) { if (!strcmp(module->symbol_table.symbols[0].symbol_name, 
"OVERLAYBEGINADDRESS") && module->symbol_table.symbols[0].symbol_type == SYMBOL_CONSTANT ) { module->overlay_begin_address = module->symbol_table.symbols[0].address; } } for (i = 0;i < module->symbol_table.nsymbols; ++i) { if (ins->symbol_table.nsymbols == (DSP_MAX_SYMBOLS - 1)) { dev_err(chip->card->dev, "dsp_spos: symbol table is full\n"); return -ENOMEM; } if (cs46xx_dsp_lookup_symbol(chip, module->symbol_table.symbols[i].symbol_name, module->symbol_table.symbols[i].symbol_type) == NULL) { ins->symbol_table.symbols[ins->symbol_table.nsymbols] = module->symbol_table.symbols[i]; ins->symbol_table.symbols[ins->symbol_table.nsymbols].address += ((ins->code.offset / 2) - module->overlay_begin_address); ins->symbol_table.symbols[ins->symbol_table.nsymbols].module = module; ins->symbol_table.symbols[ins->symbol_table.nsymbols].deleted = 0; if (ins->symbol_table.nsymbols > ins->symbol_table.highest_frag_index) ins->symbol_table.highest_frag_index = ins->symbol_table.nsymbols; ins->symbol_table.nsymbols++; } else { #if 0 dev_dbg(chip->card->dev, "dsp_spos: symbol <%s> duplicated, probably nothing wrong with that (Cirrus?)\n", module->symbol_table.symbols[i].symbol_name); */ #endif } } return 0; } static struct dsp_symbol_entry * add_symbol (struct snd_cs46xx * chip, char * symbol_name, u32 address, int type) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_symbol_entry * symbol = NULL; int index; if (ins->symbol_table.nsymbols == (DSP_MAX_SYMBOLS - 1)) { dev_err(chip->card->dev, "dsp_spos: symbol table is full\n"); return NULL; } if (cs46xx_dsp_lookup_symbol(chip, symbol_name, type) != NULL) { dev_err(chip->card->dev, "dsp_spos: symbol <%s> duplicated\n", symbol_name); return NULL; } index = find_free_symbol_index (ins); strcpy (ins->symbol_table.symbols[index].symbol_name, symbol_name); ins->symbol_table.symbols[index].address = address; ins->symbol_table.symbols[index].symbol_type = type; ins->symbol_table.symbols[index].module = NULL; 
ins->symbol_table.symbols[index].deleted = 0; symbol = (ins->symbol_table.symbols + index); if (index > ins->symbol_table.highest_frag_index) ins->symbol_table.highest_frag_index = index; if (index == ins->symbol_table.nsymbols) ins->symbol_table.nsymbols++; /* no frag. in list */ return symbol; } struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip) { struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL); if (ins == NULL) return NULL; /* better to use vmalloc for this big table */ ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) * DSP_MAX_SYMBOLS); ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL); ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL); if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) { cs46xx_dsp_spos_destroy(chip); goto error; } ins->symbol_table.nsymbols = 0; ins->symbol_table.highest_frag_index = 0; ins->code.offset = 0; ins->code.size = 0; ins->nscb = 0; ins->ntask = 0; ins->nmodules = 0; /* default SPDIF input sample rate to 48000 khz */ ins->spdif_in_sample_rate = 48000; /* maximize volume */ ins->dac_volume_right = 0x8000; ins->dac_volume_left = 0x8000; ins->spdif_input_volume_right = 0x8000; ins->spdif_input_volume_left = 0x8000; /* set left and right validity bits and default channel status */ ins->spdif_csuv_default = ins->spdif_csuv_stream = /* byte 0 */ ((unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF & 0xff)) << 24) | /* byte 1 */ ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) | /* byte 3 */ (unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) | /* left and right validity bits */ (1 << 13) | (1 << 12); return ins; error: kfree(ins->modules); kfree(ins->code.data); vfree(ins->symbol_table.symbols); kfree(ins); return NULL; } void cs46xx_dsp_spos_destroy (struct snd_cs46xx * chip) { int i; struct dsp_spos_instance * ins = 
chip->dsp_spos_instance; if (snd_BUG_ON(!ins)) return; mutex_lock(&chip->spos_mutex); for (i = 0; i < ins->nscb; ++i) { if (ins->scbs[i].deleted) continue; cs46xx_dsp_proc_free_scb_desc ( (ins->scbs + i) ); #ifdef CONFIG_PM_SLEEP kfree(ins->scbs[i].data); #endif } kfree(ins->code.data); vfree(ins->symbol_table.symbols); kfree(ins->modules); kfree(ins); mutex_unlock(&chip->spos_mutex); } static int dsp_load_parameter(struct snd_cs46xx *chip, struct dsp_segment_desc *parameter) { u32 doffset, dsize; if (!parameter) { dev_dbg(chip->card->dev, "dsp_spos: module got no parameter segment\n"); return 0; } doffset = (parameter->offset * 4 + DSP_PARAMETER_BYTE_OFFSET); dsize = parameter->size * 4; dev_dbg(chip->card->dev, "dsp_spos: downloading parameter data to chip (%08x-%08x)\n", doffset,doffset + dsize); if (snd_cs46xx_download (chip, parameter->data, doffset, dsize)) { dev_err(chip->card->dev, "dsp_spos: failed to download parameter data to DSP\n"); return -EINVAL; } return 0; } static int dsp_load_sample(struct snd_cs46xx *chip, struct dsp_segment_desc *sample) { u32 doffset, dsize; if (!sample) { dev_dbg(chip->card->dev, "dsp_spos: module got no sample segment\n"); return 0; } doffset = (sample->offset * 4 + DSP_SAMPLE_BYTE_OFFSET); dsize = sample->size * 4; dev_dbg(chip->card->dev, "dsp_spos: downloading sample data to chip (%08x-%08x)\n", doffset,doffset + dsize); if (snd_cs46xx_download (chip,sample->data,doffset,dsize)) { dev_err(chip->card->dev, "dsp_spos: failed to sample data to DSP\n"); return -EINVAL; } return 0; } int cs46xx_dsp_load_module (struct snd_cs46xx * chip, struct dsp_module_desc * module) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_segment_desc * code = get_segment_desc (module,SEGTYPE_SP_PROGRAM); u32 doffset, dsize; int err; if (ins->nmodules == DSP_MAX_MODULES - 1) { dev_err(chip->card->dev, "dsp_spos: to many modules loaded into DSP\n"); return -ENOMEM; } dev_dbg(chip->card->dev, "dsp_spos: loading module %s into 
DSP\n", module->module_name); if (ins->nmodules == 0) { dev_dbg(chip->card->dev, "dsp_spos: clearing parameter area\n"); snd_cs46xx_clear_BA1(chip, DSP_PARAMETER_BYTE_OFFSET, DSP_PARAMETER_BYTE_SIZE); } err = dsp_load_parameter(chip, get_segment_desc(module, SEGTYPE_SP_PARAMETER)); if (err < 0) return err; if (ins->nmodules == 0) { dev_dbg(chip->card->dev, "dsp_spos: clearing sample area\n"); snd_cs46xx_clear_BA1(chip, DSP_SAMPLE_BYTE_OFFSET, DSP_SAMPLE_BYTE_SIZE); } err = dsp_load_sample(chip, get_segment_desc(module, SEGTYPE_SP_SAMPLE)); if (err < 0) return err; if (ins->nmodules == 0) { dev_dbg(chip->card->dev, "dsp_spos: clearing code area\n"); snd_cs46xx_clear_BA1(chip, DSP_CODE_BYTE_OFFSET, DSP_CODE_BYTE_SIZE); } if (code == NULL) { dev_dbg(chip->card->dev, "dsp_spos: module got no code segment\n"); } else { if (ins->code.offset + code->size > DSP_CODE_BYTE_SIZE) { dev_err(chip->card->dev, "dsp_spos: no space available in DSP\n"); return -ENOMEM; } module->load_address = ins->code.offset; module->overlay_begin_address = 0x000; /* if module has a code segment it must have symbol table */ if (snd_BUG_ON(!module->symbol_table.symbols)) return -ENOMEM; if (add_symbols(chip,module)) { dev_err(chip->card->dev, "dsp_spos: failed to load symbol table\n"); return -ENOMEM; } doffset = (code->offset * 4 + ins->code.offset * 4 + DSP_CODE_BYTE_OFFSET); dsize = code->size * 4; dev_dbg(chip->card->dev, "dsp_spos: downloading code to chip (%08x-%08x)\n", doffset,doffset + dsize); module->nfixups = shadow_and_reallocate_code(chip,code->data,code->size,module->overlay_begin_address); if (snd_cs46xx_download (chip,(ins->code.data + ins->code.offset),doffset,dsize)) { dev_err(chip->card->dev, "dsp_spos: failed to download code to DSP\n"); return -EINVAL; } ins->code.offset += code->size; } /* NOTE: module segments and symbol table must be statically allocated. 
Case that module data is not generated by the ospparser */ ins->modules[ins->nmodules] = *module; ins->nmodules++; return 0; } struct dsp_symbol_entry * cs46xx_dsp_lookup_symbol (struct snd_cs46xx * chip, char * symbol_name, int symbol_type) { int i; struct dsp_spos_instance * ins = chip->dsp_spos_instance; for ( i = 0; i < ins->symbol_table.nsymbols; ++i ) { if (ins->symbol_table.symbols[i].deleted) continue; if (!strcmp(ins->symbol_table.symbols[i].symbol_name,symbol_name) && ins->symbol_table.symbols[i].symbol_type == symbol_type) { return (ins->symbol_table.symbols + i); } } #if 0 dev_err(chip->card->dev, "dsp_spos: symbol <%s> type %02x not found\n", symbol_name,symbol_type); #endif return NULL; } #ifdef CONFIG_PROC_FS static struct dsp_symbol_entry * cs46xx_dsp_lookup_symbol_addr (struct snd_cs46xx * chip, u32 address, int symbol_type) { int i; struct dsp_spos_instance * ins = chip->dsp_spos_instance; for ( i = 0; i < ins->symbol_table.nsymbols; ++i ) { if (ins->symbol_table.symbols[i].deleted) continue; if (ins->symbol_table.symbols[i].address == address && ins->symbol_table.symbols[i].symbol_type == symbol_type) { return (ins->symbol_table.symbols + i); } } return NULL; } static void cs46xx_dsp_proc_symbol_table_read (struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_cs46xx *chip = entry->private_data; struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i; snd_iprintf(buffer, "SYMBOLS:\n"); for ( i = 0; i < ins->symbol_table.nsymbols; ++i ) { char *module_str = "system"; if (ins->symbol_table.symbols[i].deleted) continue; if (ins->symbol_table.symbols[i].module != NULL) { module_str = ins->symbol_table.symbols[i].module->module_name; } snd_iprintf(buffer, "%04X <%02X> %s [%s]\n", ins->symbol_table.symbols[i].address, ins->symbol_table.symbols[i].symbol_type, ins->symbol_table.symbols[i].symbol_name, module_str); } } static void cs46xx_dsp_proc_modules_read (struct snd_info_entry *entry, struct snd_info_buffer *buffer) { 
struct snd_cs46xx *chip = entry->private_data; struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i,j; mutex_lock(&chip->spos_mutex); snd_iprintf(buffer, "MODULES:\n"); for ( i = 0; i < ins->nmodules; ++i ) { snd_iprintf(buffer, "\n%s:\n", ins->modules[i].module_name); snd_iprintf(buffer, " %d symbols\n", ins->modules[i].symbol_table.nsymbols); snd_iprintf(buffer, " %d fixups\n", ins->modules[i].nfixups); for (j = 0; j < ins->modules[i].nsegments; ++ j) { struct dsp_segment_desc * desc = (ins->modules[i].segments + j); snd_iprintf(buffer, " segment %02x offset %08x size %08x\n", desc->segment_type,desc->offset, desc->size); } } mutex_unlock(&chip->spos_mutex); } static void cs46xx_dsp_proc_task_tree_read (struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_cs46xx *chip = entry->private_data; struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i, j, col; void __iomem *dst = chip->region.idx[1].remap_addr + DSP_PARAMETER_BYTE_OFFSET; mutex_lock(&chip->spos_mutex); snd_iprintf(buffer, "TASK TREES:\n"); for ( i = 0; i < ins->ntask; ++i) { snd_iprintf(buffer,"\n%04x %s:\n",ins->tasks[i].address,ins->tasks[i].task_name); for (col = 0,j = 0;j < ins->tasks[i].size; j++,col++) { u32 val; if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } val = readl(dst + (ins->tasks[i].address + j) * sizeof(u32)); snd_iprintf(buffer,"%08x ",val); } } snd_iprintf(buffer,"\n"); mutex_unlock(&chip->spos_mutex); } static void cs46xx_dsp_proc_scb_read (struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_cs46xx *chip = entry->private_data; struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i; mutex_lock(&chip->spos_mutex); snd_iprintf(buffer, "SCB's:\n"); for ( i = 0; i < ins->nscb; ++i) { if (ins->scbs[i].deleted) continue; snd_iprintf(buffer,"\n%04x %s:\n\n",ins->scbs[i].address,ins->scbs[i].scb_name); if (ins->scbs[i].parent_scb_ptr != NULL) { snd_iprintf(buffer,"parent [%s:%04x] ", 
ins->scbs[i].parent_scb_ptr->scb_name, ins->scbs[i].parent_scb_ptr->address); } else snd_iprintf(buffer,"parent [none] "); snd_iprintf(buffer,"sub_list_ptr [%s:%04x]\nnext_scb_ptr [%s:%04x] task_entry [%s:%04x]\n", ins->scbs[i].sub_list_ptr->scb_name, ins->scbs[i].sub_list_ptr->address, ins->scbs[i].next_scb_ptr->scb_name, ins->scbs[i].next_scb_ptr->address, ins->scbs[i].task_entry->symbol_name, ins->scbs[i].task_entry->address); } snd_iprintf(buffer,"\n"); mutex_unlock(&chip->spos_mutex); } static void cs46xx_dsp_proc_parameter_dump_read (struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_cs46xx *chip = entry->private_data; /*struct dsp_spos_instance * ins = chip->dsp_spos_instance; */ unsigned int i, col = 0; void __iomem *dst = chip->region.idx[1].remap_addr + DSP_PARAMETER_BYTE_OFFSET; struct dsp_symbol_entry * symbol; for (i = 0;i < DSP_PARAMETER_BYTE_SIZE; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if ( (symbol = cs46xx_dsp_lookup_symbol_addr (chip,i / sizeof(u32), SYMBOL_PARAMETER)) != NULL) { col = 0; snd_iprintf (buffer,"\n%s:\n",symbol->symbol_name); } if (col == 0) { snd_iprintf(buffer, "%04X ", i / (unsigned int)sizeof(u32)); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } } static void cs46xx_dsp_proc_sample_dump_read (struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_cs46xx *chip = entry->private_data; int i,col = 0; void __iomem *dst = chip->region.idx[2].remap_addr; snd_iprintf(buffer,"PCMREADER:\n"); for (i = PCM_READER_BUF1;i < PCM_READER_BUF1 + 0x30; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\nMIX_SAMPLE_BUF1:\n"); col = 0; for (i = MIX_SAMPLE_BUF1;i < MIX_SAMPLE_BUF1 + 0x40; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } 
snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\nSRC_TASK_SCB1:\n"); col = 0; for (i = 0x2480 ; i < 0x2480 + 0x40 ; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\nSPDIFO_BUFFER:\n"); col = 0; for (i = SPDIFO_IP_OUTPUT_BUFFER1;i < SPDIFO_IP_OUTPUT_BUFFER1 + 0x30; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\n...\n"); col = 0; for (i = SPDIFO_IP_OUTPUT_BUFFER1+0xD0;i < SPDIFO_IP_OUTPUT_BUFFER1 + 0x110; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\nOUTPUT_SNOOP:\n"); col = 0; for (i = OUTPUT_SNOOP_BUFFER;i < OUTPUT_SNOOP_BUFFER + 0x40; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\nCODEC_INPUT_BUF1: \n"); col = 0; for (i = CODEC_INPUT_BUF1;i < CODEC_INPUT_BUF1 + 0x40; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } #if 0 snd_iprintf(buffer,"\nWRITE_BACK_BUF1: \n"); col = 0; for (i = WRITE_BACK_BUF1;i < WRITE_BACK_BUF1 + 0x40; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) { snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } #endif snd_iprintf(buffer,"\nSPDIFI_IP_OUTPUT_BUFFER1: \n"); col = 0; for (i = SPDIFI_IP_OUTPUT_BUFFER1;i < SPDIFI_IP_OUTPUT_BUFFER1 + 0x80; i += sizeof(u32),col ++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } if (col == 0) 
{ snd_iprintf(buffer, "%04X ",i); } snd_iprintf(buffer,"%08X ",readl(dst + i)); } snd_iprintf(buffer,"\n"); } int cs46xx_dsp_proc_init (struct snd_card *card, struct snd_cs46xx *chip) { struct snd_info_entry *entry; struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i; ins->snd_card = card; if ((entry = snd_info_create_card_entry(card, "dsp", card->proc_root)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_dsp_dir = entry; if (!ins->proc_dsp_dir) return -ENOMEM; if ((entry = snd_info_create_card_entry(card, "spos_symbols", ins->proc_dsp_dir)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = chip; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = cs46xx_dsp_proc_symbol_table_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_sym_info_entry = entry; if ((entry = snd_info_create_card_entry(card, "spos_modules", ins->proc_dsp_dir)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = chip; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = cs46xx_dsp_proc_modules_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_modules_info_entry = entry; if ((entry = snd_info_create_card_entry(card, "parameter", ins->proc_dsp_dir)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = chip; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = cs46xx_dsp_proc_parameter_dump_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_parameter_dump_info_entry = entry; if ((entry = snd_info_create_card_entry(card, "sample", ins->proc_dsp_dir)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = chip; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = 
cs46xx_dsp_proc_sample_dump_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_sample_dump_info_entry = entry; if ((entry = snd_info_create_card_entry(card, "task_tree", ins->proc_dsp_dir)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = chip; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = cs46xx_dsp_proc_task_tree_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_task_info_entry = entry; if ((entry = snd_info_create_card_entry(card, "scb_info", ins->proc_dsp_dir)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = chip; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = cs46xx_dsp_proc_scb_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } ins->proc_scb_info_entry = entry; mutex_lock(&chip->spos_mutex); /* register/update SCB's entries on proc */ for (i = 0; i < ins->nscb; ++i) { if (ins->scbs[i].deleted) continue; cs46xx_dsp_proc_register_scb_desc (chip, (ins->scbs + i)); } mutex_unlock(&chip->spos_mutex); return 0; } int cs46xx_dsp_proc_done (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i; snd_info_free_entry(ins->proc_sym_info_entry); ins->proc_sym_info_entry = NULL; snd_info_free_entry(ins->proc_modules_info_entry); ins->proc_modules_info_entry = NULL; snd_info_free_entry(ins->proc_parameter_dump_info_entry); ins->proc_parameter_dump_info_entry = NULL; snd_info_free_entry(ins->proc_sample_dump_info_entry); ins->proc_sample_dump_info_entry = NULL; snd_info_free_entry(ins->proc_scb_info_entry); ins->proc_scb_info_entry = NULL; snd_info_free_entry(ins->proc_task_info_entry); ins->proc_task_info_entry = NULL; mutex_lock(&chip->spos_mutex); for (i = 0; i < ins->nscb; ++i) { if (ins->scbs[i].deleted) continue; cs46xx_dsp_proc_free_scb_desc ( (ins->scbs + i) ); } mutex_unlock(&chip->spos_mutex); 
	snd_info_free_entry(ins->proc_dsp_dir);
	ins->proc_dsp_dir = NULL;

	return 0;
}
#endif /* CONFIG_PROC_FS */

/*
 * Copy a task-tree image into DSP parameter RAM.  "dest" is a 32-bit
 * word offset relative to DSP_PARAMETER_BYTE_OFFSET, "size" the number
 * of 32-bit words to write.
 */
static void _dsp_create_task_tree (struct snd_cs46xx *chip, u32 * task_data,
				   u32 dest, int size)
{
	void __iomem *spdst = chip->region.idx[1].remap_addr +
		DSP_PARAMETER_BYTE_OFFSET + dest * sizeof(u32);
	int i;

	for (i = 0; i < size; ++i) {
		dev_dbg(chip->card->dev, "addr %p, val %08x\n",
			spdst, task_data[i]);
		writel(task_data[i],spdst);
		spdst += sizeof(u32);
	}
}

/*
 * Copy one SCB image (fixed size of 0x10 words) into DSP parameter RAM
 * at word offset "dest".
 */
static void _dsp_create_scb (struct snd_cs46xx *chip, u32 * scb_data, u32 dest)
{
	void __iomem *spdst = chip->region.idx[1].remap_addr +
		DSP_PARAMETER_BYTE_OFFSET + dest * sizeof(u32);
	int i;

	for (i = 0; i < 0x10; ++i) {
		dev_dbg(chip->card->dev, "addr %p, val %08x\n",
			spdst, scb_data[i]);
		writel(scb_data[i],spdst);
		spdst += sizeof(u32);
	}
}

/*
 * Return the index of the first deleted (reusable) SCB descriptor slot,
 * scanning from the lowest known fragmented index; falls back to
 * ins->nscb (i.e. append at the end) when no hole exists.
 */
static int find_free_scb_index (struct dsp_spos_instance * ins)
{
	int index = ins->nscb, i;

	for (i = ins->scb_highest_frag_index; i < ins->nscb; ++i) {
		if (ins->scbs[i].deleted) {
			index = i;
			break;
		}
	}

	return index;
}

/*
 * Allocate and initialize an SCB descriptor slot for the SCB named
 * "name" living at DSP address "dest", and register a matching
 * SYMBOL_PARAMETER symbol for it.  Returns the descriptor, or NULL
 * when the descriptor table is full.
 */
static struct dsp_scb_descriptor * _map_scb (struct snd_cs46xx *chip, char * name, u32 dest)
{
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
	struct dsp_scb_descriptor * desc = NULL;
	int index;

	if (ins->nscb == DSP_MAX_SCB_DESC - 1) {
		dev_err(chip->card->dev,
			"dsp_spos: got no place for other SCB\n");
		return NULL;
	}

	index = find_free_scb_index (ins);

	memset(&ins->scbs[index], 0, sizeof(ins->scbs[index]));
	/* NOTE(review): unbounded copy — callers must pass names shorter
	   than the scb_name field; confirm against callers */
	strcpy(ins->scbs[index].scb_name, name);
	ins->scbs[index].address = dest;
	ins->scbs[index].index = index;
	ins->scbs[index].ref_count = 1;

	desc = (ins->scbs + index);
	ins->scbs[index].scb_symbol = add_symbol (chip, name, dest, SYMBOL_PARAMETER);

	if (index > ins->scb_highest_frag_index)
		ins->scb_highest_frag_index = index;

	if (index == ins->nscb)
		ins->nscb++;

	return desc;
}

/*
 * Allocate a task descriptor for a task tree at DSP address "dest" of
 * "size" words; also registers a parameter symbol when a name is given.
 * Returns the descriptor, or NULL when the task table is full.
 */
static struct dsp_task_descriptor *
_map_task_tree (struct snd_cs46xx *chip, char * name, u32 dest, u32 size)
{
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
struct dsp_task_descriptor * desc = NULL; if (ins->ntask == DSP_MAX_TASK_DESC - 1) { dev_err(chip->card->dev, "dsp_spos: got no place for other TASK\n"); return NULL; } if (name) strcpy(ins->tasks[ins->ntask].task_name, name); else strcpy(ins->tasks[ins->ntask].task_name, "(NULL)"); ins->tasks[ins->ntask].address = dest; ins->tasks[ins->ntask].size = size; /* quick find in list */ ins->tasks[ins->ntask].index = ins->ntask; desc = (ins->tasks + ins->ntask); ins->ntask++; if (name) add_symbol (chip,name,dest,SYMBOL_PARAMETER); return desc; } #define SCB_BYTES (0x10 * 4) struct dsp_scb_descriptor * cs46xx_dsp_create_scb (struct snd_cs46xx *chip, char * name, u32 * scb_data, u32 dest) { struct dsp_scb_descriptor * desc; #ifdef CONFIG_PM_SLEEP /* copy the data for resume */ scb_data = kmemdup(scb_data, SCB_BYTES, GFP_KERNEL); if (!scb_data) return NULL; #endif desc = _map_scb (chip,name,dest); if (desc) { desc->data = scb_data; _dsp_create_scb(chip,scb_data,dest); } else { dev_err(chip->card->dev, "dsp_spos: failed to map SCB\n"); #ifdef CONFIG_PM_SLEEP kfree(scb_data); #endif } return desc; } static struct dsp_task_descriptor * cs46xx_dsp_create_task_tree (struct snd_cs46xx *chip, char * name, u32 * task_data, u32 dest, int size) { struct dsp_task_descriptor * desc; desc = _map_task_tree (chip,name,dest,size); if (desc) { desc->data = task_data; _dsp_create_task_tree(chip,task_data,dest,size); } else { dev_err(chip->card->dev, "dsp_spos: failed to map TASK\n"); } return desc; } int cs46xx_dsp_scb_and_task_init (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_symbol_entry * fg_task_tree_header_code; struct dsp_symbol_entry * task_tree_header_code; struct dsp_symbol_entry * task_tree_thread; struct dsp_symbol_entry * null_algorithm; struct dsp_symbol_entry * magic_snoop_task; struct dsp_scb_descriptor * timing_master_scb; struct dsp_scb_descriptor * codec_out_scb; struct dsp_scb_descriptor * codec_in_scb; struct 
dsp_scb_descriptor * src_task_scb; struct dsp_scb_descriptor * master_mix_scb; struct dsp_scb_descriptor * rear_mix_scb; struct dsp_scb_descriptor * record_mix_scb; struct dsp_scb_descriptor * write_back_scb; struct dsp_scb_descriptor * vari_decimate_scb; struct dsp_scb_descriptor * rear_codec_out_scb; struct dsp_scb_descriptor * clfe_codec_out_scb; struct dsp_scb_descriptor * magic_snoop_scb; int fifo_addr, fifo_span, valid_slots; static struct dsp_spos_control_block sposcb = { /* 0 */ HFG_TREE_SCB,HFG_STACK, /* 1 */ SPOSCB_ADDR,BG_TREE_SCB_ADDR, /* 2 */ DSP_SPOS_DC,0, /* 3 */ DSP_SPOS_DC,DSP_SPOS_DC, /* 4 */ 0,0, /* 5 */ DSP_SPOS_UU,0, /* 6 */ FG_TASK_HEADER_ADDR,0, /* 7 */ 0,0, /* 8 */ DSP_SPOS_UU,DSP_SPOS_DC, /* 9 */ 0, /* A */ 0,HFG_FIRST_EXECUTE_MODE, /* B */ DSP_SPOS_UU,DSP_SPOS_UU, /* C */ DSP_SPOS_DC_DC, /* D */ DSP_SPOS_DC_DC, /* E */ DSP_SPOS_DC_DC, /* F */ DSP_SPOS_DC_DC }; cs46xx_dsp_create_task_tree(chip, "sposCB", (u32 *)&sposcb, SPOSCB_ADDR, 0x10); null_algorithm = cs46xx_dsp_lookup_symbol(chip, "NULLALGORITHM", SYMBOL_CODE); if (null_algorithm == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol NULLALGORITHM not found\n"); return -EIO; } fg_task_tree_header_code = cs46xx_dsp_lookup_symbol(chip, "FGTASKTREEHEADERCODE", SYMBOL_CODE); if (fg_task_tree_header_code == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol FGTASKTREEHEADERCODE not found\n"); return -EIO; } task_tree_header_code = cs46xx_dsp_lookup_symbol(chip, "TASKTREEHEADERCODE", SYMBOL_CODE); if (task_tree_header_code == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol TASKTREEHEADERCODE not found\n"); return -EIO; } task_tree_thread = cs46xx_dsp_lookup_symbol(chip, "TASKTREETHREAD", SYMBOL_CODE); if (task_tree_thread == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol TASKTREETHREAD not found\n"); return -EIO; } magic_snoop_task = cs46xx_dsp_lookup_symbol(chip, "MAGICSNOOPTASK", SYMBOL_CODE); if (magic_snoop_task == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol MAGICSNOOPTASK 
not found\n"); return -EIO; } { /* create the null SCB */ static struct dsp_generic_scb null_scb = { { 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, NULL_SCB_ADDR, NULL_SCB_ADDR, 0, 0, 0, 0, 0, { 0,0, 0,0, } }; null_scb.entry_point = null_algorithm->address; ins->the_null_scb = cs46xx_dsp_create_scb(chip, "nullSCB", (u32 *)&null_scb, NULL_SCB_ADDR); ins->the_null_scb->task_entry = null_algorithm; ins->the_null_scb->sub_list_ptr = ins->the_null_scb; ins->the_null_scb->next_scb_ptr = ins->the_null_scb; ins->the_null_scb->parent_scb_ptr = NULL; cs46xx_dsp_proc_register_scb_desc (chip,ins->the_null_scb); } { /* setup foreground task tree */ static struct dsp_task_tree_control_block fg_task_tree_hdr = { { FG_TASK_HEADER_ADDR | (DSP_SPOS_DC << 0x10), DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, 0x0000,DSP_SPOS_DC, DSP_SPOS_DC, DSP_SPOS_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC,DSP_SPOS_DC }, { BG_TREE_SCB_ADDR,TIMINGMASTER_SCB_ADDR, 0, FG_TASK_HEADER_ADDR + TCBData, }, { 4,0, 1,0, 2,SPOSCB_ADDR + HFGFlags, 0,0, FG_TASK_HEADER_ADDR + TCBContextBlk,FG_STACK }, { DSP_SPOS_DC,0, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DCDC, DSP_SPOS_UU,1, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC }, { FG_INTERVAL_TIMER_PERIOD,DSP_SPOS_UU, 0,0 } }; fg_task_tree_hdr.links.entry_point = fg_task_tree_header_code->address; fg_task_tree_hdr.context_blk.stack0 = task_tree_thread->address; cs46xx_dsp_create_task_tree(chip,"FGtaskTreeHdr",(u32 *)&fg_task_tree_hdr,FG_TASK_HEADER_ADDR,0x35); } { /* setup foreground task tree */ static struct dsp_task_tree_control_block 
bg_task_tree_hdr = { { DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC, DSP_SPOS_DC, DSP_SPOS_DC, DSP_SPOS_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC_DC, DSP_SPOS_DC,DSP_SPOS_DC }, { NULL_SCB_ADDR,NULL_SCB_ADDR, /* Set up the background to do nothing */ 0, BG_TREE_SCB_ADDR + TCBData, }, { 9999,0, 0,1, 0,SPOSCB_ADDR + HFGFlags, 0,0, BG_TREE_SCB_ADDR + TCBContextBlk,BG_STACK }, { DSP_SPOS_DC,0, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DC,DSP_SPOS_DC, DSP_SPOS_DCDC, DSP_SPOS_UU,1, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC, DSP_SPOS_DCDC }, { BG_INTERVAL_TIMER_PERIOD,DSP_SPOS_UU, 0,0 } }; bg_task_tree_hdr.links.entry_point = task_tree_header_code->address; bg_task_tree_hdr.context_blk.stack0 = task_tree_thread->address; cs46xx_dsp_create_task_tree(chip,"BGtaskTreeHdr",(u32 *)&bg_task_tree_hdr,BG_TREE_SCB_ADDR,0x35); } /* create timing master SCB */ timing_master_scb = cs46xx_dsp_create_timing_master_scb(chip); /* create the CODEC output task */ codec_out_scb = cs46xx_dsp_create_codec_out_scb(chip,"CodecOutSCB_I",0x0010,0x0000, MASTERMIX_SCB_ADDR, CODECOUT_SCB_ADDR,timing_master_scb, SCB_ON_PARENT_SUBLIST_SCB); if (!codec_out_scb) goto _fail_end; /* create the master mix SCB */ master_mix_scb = cs46xx_dsp_create_mix_only_scb(chip,"MasterMixSCB", MIX_SAMPLE_BUF1,MASTERMIX_SCB_ADDR, codec_out_scb, SCB_ON_PARENT_SUBLIST_SCB); ins->master_mix_scb = master_mix_scb; if (!master_mix_scb) goto _fail_end; /* create codec in */ codec_in_scb = cs46xx_dsp_create_codec_in_scb(chip,"CodecInSCB",0x0010,0x00A0, CODEC_INPUT_BUF1, CODECIN_SCB_ADDR,codec_out_scb, 
SCB_ON_PARENT_NEXT_SCB); if (!codec_in_scb) goto _fail_end; ins->codec_in_scb = codec_in_scb; /* create write back scb */ write_back_scb = cs46xx_dsp_create_mix_to_ostream_scb(chip,"WriteBackSCB", WRITE_BACK_BUF1,WRITE_BACK_SPB, WRITEBACK_SCB_ADDR, timing_master_scb, SCB_ON_PARENT_NEXT_SCB); if (!write_back_scb) goto _fail_end; { static struct dsp_mix2_ostream_spb mix2_ostream_spb = { 0x00020000, 0x0000ffff }; if (!cs46xx_dsp_create_task_tree(chip, NULL, (u32 *)&mix2_ostream_spb, WRITE_BACK_SPB, 2)) goto _fail_end; } /* input sample converter */ vari_decimate_scb = cs46xx_dsp_create_vari_decimate_scb(chip,"VariDecimateSCB", VARI_DECIMATE_BUF0, VARI_DECIMATE_BUF1, VARIDECIMATE_SCB_ADDR, write_back_scb, SCB_ON_PARENT_SUBLIST_SCB); if (!vari_decimate_scb) goto _fail_end; /* create the record mixer SCB */ record_mix_scb = cs46xx_dsp_create_mix_only_scb(chip,"RecordMixerSCB", MIX_SAMPLE_BUF2, RECORD_MIXER_SCB_ADDR, vari_decimate_scb, SCB_ON_PARENT_SUBLIST_SCB); ins->record_mixer_scb = record_mix_scb; if (!record_mix_scb) goto _fail_end; valid_slots = snd_cs46xx_peekBA0(chip, BA0_ACOSV); if (snd_BUG_ON(chip->nr_ac97_codecs != 1 && chip->nr_ac97_codecs != 2)) goto _fail_end; if (chip->nr_ac97_codecs == 1) { /* output on slot 5 and 11 on primary CODEC */ fifo_addr = 0x20; fifo_span = 0x60; /* enable slot 5 and 11 */ valid_slots |= ACOSV_SLV5 | ACOSV_SLV11; } else { /* output on slot 7 and 8 on secondary CODEC */ fifo_addr = 0x40; fifo_span = 0x10; /* enable slot 7 and 8 */ valid_slots |= ACOSV_SLV7 | ACOSV_SLV8; } /* create CODEC tasklet for rear speakers output*/ rear_codec_out_scb = cs46xx_dsp_create_codec_out_scb(chip,"CodecOutSCB_Rear",fifo_span,fifo_addr, REAR_MIXER_SCB_ADDR, REAR_CODECOUT_SCB_ADDR,codec_in_scb, SCB_ON_PARENT_NEXT_SCB); if (!rear_codec_out_scb) goto _fail_end; /* create the rear PCM channel mixer SCB */ rear_mix_scb = cs46xx_dsp_create_mix_only_scb(chip,"RearMixerSCB", MIX_SAMPLE_BUF3, REAR_MIXER_SCB_ADDR, rear_codec_out_scb, 
SCB_ON_PARENT_SUBLIST_SCB); ins->rear_mix_scb = rear_mix_scb; if (!rear_mix_scb) goto _fail_end; if (chip->nr_ac97_codecs == 2) { /* create CODEC tasklet for rear Center/LFE output slot 6 and 9 on seconadry CODEC */ clfe_codec_out_scb = cs46xx_dsp_create_codec_out_scb(chip,"CodecOutSCB_CLFE",0x0030,0x0030, CLFE_MIXER_SCB_ADDR, CLFE_CODEC_SCB_ADDR, rear_codec_out_scb, SCB_ON_PARENT_NEXT_SCB); if (!clfe_codec_out_scb) goto _fail_end; /* create the rear PCM channel mixer SCB */ ins->center_lfe_mix_scb = cs46xx_dsp_create_mix_only_scb(chip,"CLFEMixerSCB", MIX_SAMPLE_BUF4, CLFE_MIXER_SCB_ADDR, clfe_codec_out_scb, SCB_ON_PARENT_SUBLIST_SCB); if (!ins->center_lfe_mix_scb) goto _fail_end; /* enable slot 6 and 9 */ valid_slots |= ACOSV_SLV6 | ACOSV_SLV9; } else { clfe_codec_out_scb = rear_codec_out_scb; ins->center_lfe_mix_scb = rear_mix_scb; } /* enable slots depending on CODEC configuration */ snd_cs46xx_pokeBA0(chip, BA0_ACOSV, valid_slots); /* the magic snooper */ magic_snoop_scb = cs46xx_dsp_create_magic_snoop_scb (chip,"MagicSnoopSCB_I",OUTPUTSNOOP_SCB_ADDR, OUTPUT_SNOOP_BUFFER, codec_out_scb, clfe_codec_out_scb, SCB_ON_PARENT_NEXT_SCB); if (!magic_snoop_scb) goto _fail_end; ins->ref_snoop_scb = magic_snoop_scb; /* SP IO access */ if (!cs46xx_dsp_create_spio_write_scb(chip,"SPIOWriteSCB",SPIOWRITE_SCB_ADDR, magic_snoop_scb, SCB_ON_PARENT_NEXT_SCB)) goto _fail_end; /* SPDIF input sampel rate converter */ src_task_scb = cs46xx_dsp_create_src_task_scb(chip,"SrcTaskSCB_SPDIFI", ins->spdif_in_sample_rate, SRC_OUTPUT_BUF1, SRC_DELAY_BUF1,SRCTASK_SCB_ADDR, master_mix_scb, SCB_ON_PARENT_SUBLIST_SCB,1); if (!src_task_scb) goto _fail_end; cs46xx_src_unlink(chip,src_task_scb); /* NOTE: when we now how to detect the SPDIF input sample rate we will use this SRC to adjust it */ ins->spdif_in_src = src_task_scb; cs46xx_dsp_async_init(chip,timing_master_scb); return 0; _fail_end: dev_err(chip->card->dev, "dsp_spos: failed to setup SCB's in DSP\n"); return -EINVAL; } static int 
cs46xx_dsp_async_init (struct snd_cs46xx *chip, struct dsp_scb_descriptor * fg_entry) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_symbol_entry * s16_async_codec_input_task; struct dsp_symbol_entry * spdifo_task; struct dsp_symbol_entry * spdifi_task; struct dsp_scb_descriptor * spdifi_scb_desc, * spdifo_scb_desc, * async_codec_scb_desc; s16_async_codec_input_task = cs46xx_dsp_lookup_symbol(chip, "S16_ASYNCCODECINPUTTASK", SYMBOL_CODE); if (s16_async_codec_input_task == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol S16_ASYNCCODECINPUTTASK not found\n"); return -EIO; } spdifo_task = cs46xx_dsp_lookup_symbol(chip, "SPDIFOTASK", SYMBOL_CODE); if (spdifo_task == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol SPDIFOTASK not found\n"); return -EIO; } spdifi_task = cs46xx_dsp_lookup_symbol(chip, "SPDIFITASK", SYMBOL_CODE); if (spdifi_task == NULL) { dev_err(chip->card->dev, "dsp_spos: symbol SPDIFITASK not found\n"); return -EIO; } { /* 0xBC0 */ struct dsp_spdifoscb spdifo_scb = { /* 0 */ DSP_SPOS_UUUU, { /* 1 */ 0xb0, /* 2 */ 0, /* 3 */ 0, /* 4 */ 0, }, /* NOTE: the SPDIF output task read samples in mono format, the AsynchFGTxSCB task writes to buffer in stereo format */ /* 5 */ RSCONFIG_SAMPLE_16MONO + RSCONFIG_MODULO_256, /* 6 */ ( SPDIFO_IP_OUTPUT_BUFFER1 << 0x10 ) | 0xFFFC, /* 7 */ 0,0, /* 8 */ 0, /* 9 */ FG_TASK_HEADER_ADDR, NULL_SCB_ADDR, /* A */ spdifo_task->address, SPDIFO_SCB_INST + SPDIFOFIFOPointer, { /* B */ 0x0040, /*DSP_SPOS_UUUU,*/ /* C */ 0x20ff, /*DSP_SPOS_UUUU,*/ }, /* D */ 0x804c,0, /* SPDIFOFIFOPointer:SPDIFOStatRegAddr; */ /* E */ 0x0108,0x0001, /* SPDIFOStMoFormat:SPDIFOFIFOBaseAddr; */ /* F */ DSP_SPOS_UUUU /* SPDIFOFree; */ }; /* 0xBB0 */ struct dsp_spdifiscb spdifi_scb = { /* 0 */ DSP_SPOS_UULO,DSP_SPOS_UUHI, /* 1 */ 0, /* 2 */ 0, /* 3 */ 1,4000, /* SPDIFICountLimit SPDIFICount */ /* 4 */ DSP_SPOS_UUUU, /* SPDIFIStatusData */ /* 5 */ 0,DSP_SPOS_UUHI, /* StatusData, Free4 */ /* 6 */ DSP_SPOS_UUUU, /* Free3 */ /* 
7 */ DSP_SPOS_UU,DSP_SPOS_DC, /* Free2 BitCount*/ /* 8 */ DSP_SPOS_UUUU, /* TempStatus */ /* 9 */ SPDIFO_SCB_INST, NULL_SCB_ADDR, /* A */ spdifi_task->address, SPDIFI_SCB_INST + SPDIFIFIFOPointer, /* NOTE: The SPDIF input task write the sample in mono format from the HW FIFO, the AsynchFGRxSCB task reads them in stereo */ /* B */ RSCONFIG_SAMPLE_16MONO + RSCONFIG_MODULO_128, /* C */ (SPDIFI_IP_OUTPUT_BUFFER1 << 0x10) | 0xFFFC, /* D */ 0x8048,0, /* E */ 0x01f0,0x0001, /* F */ DSP_SPOS_UUUU /* SPDIN_STATUS monitor */ }; /* 0xBA0 */ struct dsp_async_codec_input_scb async_codec_input_scb = { /* 0 */ DSP_SPOS_UUUU, /* 1 */ 0, /* 2 */ 0, /* 3 */ 1,4000, /* 4 */ 0x0118,0x0001, /* 5 */ RSCONFIG_SAMPLE_16MONO + RSCONFIG_MODULO_64, /* 6 */ (ASYNC_IP_OUTPUT_BUFFER1 << 0x10) | 0xFFFC, /* 7 */ DSP_SPOS_UU,0x3, /* 8 */ DSP_SPOS_UUUU, /* 9 */ SPDIFI_SCB_INST,NULL_SCB_ADDR, /* A */ s16_async_codec_input_task->address, HFG_TREE_SCB + AsyncCIOFIFOPointer, /* B */ RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_64, /* C */ (ASYNC_IP_OUTPUT_BUFFER1 << 0x10), /*(ASYNC_IP_OUTPUT_BUFFER1 << 0x10) | 0xFFFC,*/ #ifdef UseASER1Input /* short AsyncCIFIFOPointer:AsyncCIStatRegAddr; Init. 0000:8042: for ASER1 0000:8044: for ASER2 */ /* D */ 0x8042,0, /* short AsyncCIStMoFormat:AsyncCIFIFOBaseAddr; Init 1 stero:8050 ASER1 Init 0 mono:8070 ASER2 Init 1 Stereo : 0100 ASER1 (Set by script) */ /* E */ 0x0100,0x0001, #endif #ifdef UseASER2Input /* short AsyncCIFIFOPointer:AsyncCIStatRegAddr; Init. 
0000:8042: for ASER1 0000:8044: for ASER2 */ /* D */ 0x8044,0, /* short AsyncCIStMoFormat:AsyncCIFIFOBaseAddr; Init 1 stero:8050 ASER1 Init 0 mono:8070 ASER2 Init 1 Stereo : 0100 ASER1 (Set by script) */ /* E */ 0x0110,0x0001, #endif /* short AsyncCIOutputBufModulo:AsyncCIFree; AsyncCIOutputBufModulo: The modulo size for the output buffer of this task */ /* F */ 0, /* DSP_SPOS_UUUU */ }; spdifo_scb_desc = cs46xx_dsp_create_scb(chip,"SPDIFOSCB",(u32 *)&spdifo_scb,SPDIFO_SCB_INST); if (snd_BUG_ON(!spdifo_scb_desc)) return -EIO; spdifi_scb_desc = cs46xx_dsp_create_scb(chip,"SPDIFISCB",(u32 *)&spdifi_scb,SPDIFI_SCB_INST); if (snd_BUG_ON(!spdifi_scb_desc)) return -EIO; async_codec_scb_desc = cs46xx_dsp_create_scb(chip,"AsynCodecInputSCB",(u32 *)&async_codec_input_scb, HFG_TREE_SCB); if (snd_BUG_ON(!async_codec_scb_desc)) return -EIO; async_codec_scb_desc->parent_scb_ptr = NULL; async_codec_scb_desc->next_scb_ptr = spdifi_scb_desc; async_codec_scb_desc->sub_list_ptr = ins->the_null_scb; async_codec_scb_desc->task_entry = s16_async_codec_input_task; spdifi_scb_desc->parent_scb_ptr = async_codec_scb_desc; spdifi_scb_desc->next_scb_ptr = spdifo_scb_desc; spdifi_scb_desc->sub_list_ptr = ins->the_null_scb; spdifi_scb_desc->task_entry = spdifi_task; spdifo_scb_desc->parent_scb_ptr = spdifi_scb_desc; spdifo_scb_desc->next_scb_ptr = fg_entry; spdifo_scb_desc->sub_list_ptr = ins->the_null_scb; spdifo_scb_desc->task_entry = spdifo_task; /* this one is faked, as the parnet of SPDIFO task is the FG task tree */ fg_entry->parent_scb_ptr = spdifo_scb_desc; /* for proc fs */ cs46xx_dsp_proc_register_scb_desc (chip,spdifo_scb_desc); cs46xx_dsp_proc_register_scb_desc (chip,spdifi_scb_desc); cs46xx_dsp_proc_register_scb_desc (chip,async_codec_scb_desc); /* Async MASTER ENABLE, affects both SPDIF input and output */ snd_cs46xx_pokeBA0(chip, BA0_ASER_MASTER, 0x1 ); } return 0; } static void cs46xx_dsp_disable_spdif_hw (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = 
		chip->dsp_spos_instance;

	/* set SPDIF output FIFO slot */
	snd_cs46xx_pokeBA0(chip, BA0_ASER_FADDR, 0);

	/* SPDIF output MASTER ENABLE */
	cs46xx_poke_via_dsp (chip,SP_SPDOUT_CONTROL, 0);

	/* right and left validate bit */
	/*cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV, ins->spdif_csuv_default);*/
	cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV, 0x0);

	/* clear fifo pointer */
	cs46xx_poke_via_dsp (chip,SP_SPDIN_FIFOPTR, 0x0);

	/* monitor state */
	ins->spdif_status_out &= ~DSP_SPDIF_STATUS_HW_ENABLED;
}

/*
 * Turn on the SPDIF output hardware: the logic is first reset by
 * disabling it, then the output FIFO slot, master enable and the
 * channel-status/validity defaults are programmed.  Always returns 0.
 */
int cs46xx_dsp_enable_spdif_hw (struct snd_cs46xx *chip)
{
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	/* if hw-ctrl already enabled, turn off to reset logic ... */
	cs46xx_dsp_disable_spdif_hw (chip);
	udelay(50);

	/* set SPDIF output FIFO slot */
	snd_cs46xx_pokeBA0(chip, BA0_ASER_FADDR, ( 0x8000 | ((SP_SPDOUT_FIFO >> 4) << 4) ));

	/* SPDIF output MASTER ENABLE */
	cs46xx_poke_via_dsp (chip,SP_SPDOUT_CONTROL, 0x80000000);

	/* right and left validate bit */
	cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV, ins->spdif_csuv_default);

	/* monitor state */
	ins->spdif_status_out |= DSP_SPDIF_STATUS_HW_ENABLED;

	return 0;
}

/*
 * Enable SPDIF capture: powers up the amplifier, enables the input
 * control logic on first use, then creates and links the asynchronous
 * receiver SCB in front of the SPDIF-in sample-rate converter.
 * Returns 0 on success or -EINVAL when already enabled or when the
 * SPDIF-in SRC is missing.
 */
int cs46xx_dsp_enable_spdif_in (struct snd_cs46xx *chip)
{
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	/* turn on amplifier */
	chip->active_ctrl(chip, 1);
	chip->amplifier_ctrl(chip, 1);

	if (snd_BUG_ON(ins->asynch_rx_scb))
		return -EINVAL;
	if (snd_BUG_ON(!ins->spdif_in_src))
		return -EINVAL;

	mutex_lock(&chip->spos_mutex);

	if ( ! (ins->spdif_status_out & DSP_SPDIF_STATUS_INPUT_CTRL_ENABLED) ) {
		/* time countdown enable */
		cs46xx_poke_via_dsp (chip,SP_ASER_COUNTDOWN, 0x80000005);
		/* NOTE: 80000005 value is just magic.  With all values
		   that I've tested this one seem to give the best result.
		   Got no explication why.
		   (Benny) */

		/* SPDIF input MASTER ENABLE */
		cs46xx_poke_via_dsp (chip,SP_SPDIN_CONTROL, 0x800003ff);

		ins->spdif_status_out |= DSP_SPDIF_STATUS_INPUT_CTRL_ENABLED;
	}

	/* create and start the asynchronous receiver SCB */
	ins->asynch_rx_scb = cs46xx_dsp_create_asynch_fg_rx_scb(chip,"AsynchFGRxSCB",
								ASYNCRX_SCB_ADDR,
								SPDIFI_SCB_INST,
								SPDIFI_IP_OUTPUT_BUFFER1,
								ins->spdif_in_src,
								SCB_ON_PARENT_SUBLIST_SCB);

	spin_lock_irq(&chip->reg_lock);

	/* reset SPDIF input sample buffer pointer */
	/*snd_cs46xx_poke (chip, (SPDIFI_SCB_INST + 0x0c) << 2,
	  (SPDIFI_IP_OUTPUT_BUFFER1 << 0x10) | 0xFFFC);*/

	/* reset FIFO ptr */
	/*cs46xx_poke_via_dsp (chip,SP_SPDIN_FIFOPTR, 0x0);*/
	cs46xx_src_link(chip,ins->spdif_in_src);

	/* unmute SRC volume */
	cs46xx_dsp_scb_set_volume (chip,ins->spdif_in_src,0x7fff,0x7fff);

	spin_unlock_irq(&chip->reg_lock);

	/* set SPDIF input sample rate and unmute
	   NOTE: only 48khz support for SPDIF input this time */
	/* cs46xx_dsp_set_src_sample_rate(chip,ins->spdif_in_src,48000); */

	/* monitor state */
	ins->spdif_status_in = 1;

	mutex_unlock(&chip->spos_mutex);

	return 0;
}

/*
 * Disable SPDIF capture: removes the asynchronous receiver SCB, unlinks
 * the SPDIF-in SRC, and drops the amplifier reference taken on enable.
 * Returns 0 on success or -EINVAL when capture was not enabled.
 */
int cs46xx_dsp_disable_spdif_in (struct snd_cs46xx *chip)
{
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	if (snd_BUG_ON(!ins->asynch_rx_scb))
		return -EINVAL;
	if (snd_BUG_ON(!ins->spdif_in_src))
		return -EINVAL;

	mutex_lock(&chip->spos_mutex);

	/* Remove the asynchronous receiver SCB */
	cs46xx_dsp_remove_scb (chip,ins->asynch_rx_scb);
	ins->asynch_rx_scb = NULL;

	cs46xx_src_unlink(chip,ins->spdif_in_src);

	/* monitor state */
	ins->spdif_status_in = 0;
	mutex_unlock(&chip->spos_mutex);

	/* restore amplifier */
	chip->active_ctrl(chip, -1);
	chip->amplifier_ctrl(chip, -1);

	return 0;
}

/*
 * Start wave capture from the DSP reference-snoop SCB by attaching a
 * PCMSerialInput record source.  Returns 0 on success or -EINVAL when
 * capture is already running or the snoop SCB does not exist.
 */
int cs46xx_dsp_enable_pcm_capture (struct snd_cs46xx *chip)
{
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	if (snd_BUG_ON(ins->pcm_input))
		return -EINVAL;
	if (snd_BUG_ON(!ins->ref_snoop_scb))
		return -EINVAL;

	mutex_lock(&chip->spos_mutex);
	ins->pcm_input =
cs46xx_add_record_source(chip,ins->ref_snoop_scb,PCMSERIALIN_PCM_SCB_ADDR, "PCMSerialInput_Wave"); mutex_unlock(&chip->spos_mutex); return 0; } int cs46xx_dsp_disable_pcm_capture (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (snd_BUG_ON(!ins->pcm_input)) return -EINVAL; mutex_lock(&chip->spos_mutex); cs46xx_dsp_remove_scb (chip,ins->pcm_input); ins->pcm_input = NULL; mutex_unlock(&chip->spos_mutex); return 0; } int cs46xx_dsp_enable_adc_capture (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (snd_BUG_ON(ins->adc_input)) return -EINVAL; if (snd_BUG_ON(!ins->codec_in_scb)) return -EINVAL; mutex_lock(&chip->spos_mutex); ins->adc_input = cs46xx_add_record_source(chip,ins->codec_in_scb,PCMSERIALIN_SCB_ADDR, "PCMSerialInput_ADC"); mutex_unlock(&chip->spos_mutex); return 0; } int cs46xx_dsp_disable_adc_capture (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (snd_BUG_ON(!ins->adc_input)) return -EINVAL; mutex_lock(&chip->spos_mutex); cs46xx_dsp_remove_scb (chip,ins->adc_input); ins->adc_input = NULL; mutex_unlock(&chip->spos_mutex); return 0; } int cs46xx_poke_via_dsp (struct snd_cs46xx *chip, u32 address, u32 data) { u32 temp; int i; /* santiy check the parameters. (These numbers are not 100% correct. They are a rough guess from looking at the controller spec.) */ if (address < 0x8000 || address >= 0x9000) return -EINVAL; /* initialize the SP_IO_WRITE SCB with the data. 
*/ temp = ( address << 16 ) | ( address & 0x0000FFFF); /* offset 0 <-- address2 : address1 */ snd_cs46xx_poke(chip,( SPIOWRITE_SCB_ADDR << 2), temp); snd_cs46xx_poke(chip,((SPIOWRITE_SCB_ADDR + 1) << 2), data); /* offset 1 <-- data1 */ snd_cs46xx_poke(chip,((SPIOWRITE_SCB_ADDR + 2) << 2), data); /* offset 1 <-- data2 */ /* Poke this location to tell the task to start */ snd_cs46xx_poke(chip,((SPIOWRITE_SCB_ADDR + 6) << 2), SPIOWRITE_SCB_ADDR << 0x10); /* Verify that the task ran */ for (i=0; i<25; i++) { udelay(125); temp = snd_cs46xx_peek(chip,((SPIOWRITE_SCB_ADDR + 6) << 2)); if (temp == 0x00000000) break; } if (i == 25) { dev_err(chip->card->dev, "dsp_spos: SPIOWriteTask not responding\n"); return -EBUSY; } return 0; } int cs46xx_dsp_set_dac_volume (struct snd_cs46xx * chip, u16 left, u16 right) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * scb; mutex_lock(&chip->spos_mutex); /* main output */ scb = ins->master_mix_scb->sub_list_ptr; while (scb != ins->the_null_scb) { cs46xx_dsp_scb_set_volume (chip,scb,left,right); scb = scb->next_scb_ptr; } /* rear output */ scb = ins->rear_mix_scb->sub_list_ptr; while (scb != ins->the_null_scb) { cs46xx_dsp_scb_set_volume (chip,scb,left,right); scb = scb->next_scb_ptr; } ins->dac_volume_left = left; ins->dac_volume_right = right; mutex_unlock(&chip->spos_mutex); return 0; } int cs46xx_dsp_set_iec958_volume (struct snd_cs46xx * chip, u16 left, u16 right) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; mutex_lock(&chip->spos_mutex); if (ins->asynch_rx_scb != NULL) cs46xx_dsp_scb_set_volume (chip,ins->asynch_rx_scb, left,right); ins->spdif_input_volume_left = left; ins->spdif_input_volume_right = right; mutex_unlock(&chip->spos_mutex); return 0; } #ifdef CONFIG_PM_SLEEP int cs46xx_dsp_resume(struct snd_cs46xx * chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i, err; /* clear parameter, sample and code areas */ snd_cs46xx_clear_BA1(chip, 
DSP_PARAMETER_BYTE_OFFSET, DSP_PARAMETER_BYTE_SIZE); snd_cs46xx_clear_BA1(chip, DSP_SAMPLE_BYTE_OFFSET, DSP_SAMPLE_BYTE_SIZE); snd_cs46xx_clear_BA1(chip, DSP_CODE_BYTE_OFFSET, DSP_CODE_BYTE_SIZE); for (i = 0; i < ins->nmodules; i++) { struct dsp_module_desc *module = &ins->modules[i]; struct dsp_segment_desc *seg; u32 doffset, dsize; seg = get_segment_desc(module, SEGTYPE_SP_PARAMETER); err = dsp_load_parameter(chip, seg); if (err < 0) return err; seg = get_segment_desc(module, SEGTYPE_SP_SAMPLE); err = dsp_load_sample(chip, seg); if (err < 0) return err; seg = get_segment_desc(module, SEGTYPE_SP_PROGRAM); if (!seg) continue; doffset = seg->offset * 4 + module->load_address * 4 + DSP_CODE_BYTE_OFFSET; dsize = seg->size * 4; err = snd_cs46xx_download(chip, ins->code.data + module->load_address, doffset, dsize); if (err < 0) return err; } for (i = 0; i < ins->ntask; i++) { struct dsp_task_descriptor *t = &ins->tasks[i]; _dsp_create_task_tree(chip, t->data, t->address, t->size); } for (i = 0; i < ins->nscb; i++) { struct dsp_scb_descriptor *s = &ins->scbs[i]; if (s->deleted) continue; _dsp_create_scb(chip, s->data, s->address); } for (i = 0; i < ins->nscb; i++) { struct dsp_scb_descriptor *s = &ins->scbs[i]; if (s->deleted) continue; if (s->updated) cs46xx_dsp_spos_update_scb(chip, s); if (s->volume_set) cs46xx_dsp_scb_set_volume(chip, s, s->volume[0], s->volume[1]); } if (ins->spdif_status_out & DSP_SPDIF_STATUS_HW_ENABLED) { cs46xx_dsp_enable_spdif_hw(chip); snd_cs46xx_poke(chip, (ins->ref_snoop_scb->address + 2) << 2, (OUTPUT_SNOOP_BUFFER + 0x10) << 0x10); if (ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN) cs46xx_poke_via_dsp(chip, SP_SPDOUT_CSUV, ins->spdif_csuv_stream); } if (chip->dsp_spos_instance->spdif_status_in) { cs46xx_poke_via_dsp(chip, SP_ASER_COUNTDOWN, 0x80000005); cs46xx_poke_via_dsp(chip, SP_SPDIN_CONTROL, 0x800003ff); } return 0; } #endif
gpl-2.0
cattleprod/GT-I9100
arch/powerpc/kvm/book3s_emulate.c
850
13746
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/kvm_book3s.h> #include <asm/reg.h> #define OP_19_XOP_RFID 18 #define OP_19_XOP_RFI 50 #define OP_31_XOP_MFMSR 83 #define OP_31_XOP_MTMSR 146 #define OP_31_XOP_MTMSRD 178 #define OP_31_XOP_MTSR 210 #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 #define OP_31_XOP_TLBIE 306 #define OP_31_XOP_SLBMTE 402 #define OP_31_XOP_SLBIE 434 #define OP_31_XOP_SLBIA 498 #define OP_31_XOP_MFSR 595 #define OP_31_XOP_MFSRIN 659 #define OP_31_XOP_DCBA 758 #define OP_31_XOP_SLBMFEV 851 #define OP_31_XOP_EIOIO 854 #define OP_31_XOP_SLBMFEE 915 /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ #define OP_31_XOP_DCBZ 1010 #define OP_LFS 48 #define OP_LFD 50 #define OP_STFS 52 #define OP_STFD 54 #define SPRN_GQR0 912 #define SPRN_GQR1 913 #define SPRN_GQR2 914 #define SPRN_GQR3 915 #define SPRN_GQR4 916 #define SPRN_GQR5 917 #define SPRN_GQR6 918 #define SPRN_GQR7 919 /* Book3S_32 defines mfsrin(v) - but that messes up our abstract * function pointers, so let's just disable the define. 
*/ #undef mfsrin int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; switch (get_op(inst)) { case 19: switch (get_xop(inst)) { case OP_19_XOP_RFID: case OP_19_XOP_RFI: kvmppc_set_pc(vcpu, vcpu->arch.srr0); kvmppc_set_msr(vcpu, vcpu->arch.srr1); *advance = 0; break; default: emulated = EMULATE_FAIL; break; } break; case 31: switch (get_xop(inst)) { case OP_31_XOP_MFMSR: kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); break; case OP_31_XOP_MTMSRD: { ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); if (inst & 0x10000) { vcpu->arch.msr &= ~(MSR_RI | MSR_EE); vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); } else kvmppc_set_msr(vcpu, rs); break; } case OP_31_XOP_MTMSR: kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); break; case OP_31_XOP_MFSR: { int srnum; srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32); if (vcpu->arch.mmu.mfsrin) { u32 sr; sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_set_gpr(vcpu, get_rt(inst), sr); } break; } case OP_31_XOP_MFSRIN: { int srnum; srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf; if (vcpu->arch.mmu.mfsrin) { u32 sr; sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_set_gpr(vcpu, get_rt(inst), sr); } break; } case OP_31_XOP_MTSR: vcpu->arch.mmu.mtsrin(vcpu, (inst >> 16) & 0xf, kvmppc_get_gpr(vcpu, get_rs(inst))); break; case OP_31_XOP_MTSRIN: vcpu->arch.mmu.mtsrin(vcpu, (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, kvmppc_get_gpr(vcpu, get_rs(inst))); break; case OP_31_XOP_TLBIE: case OP_31_XOP_TLBIEL: { bool large = (inst & 0x00200000) ? 
true : false; ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst)); vcpu->arch.mmu.tlbie(vcpu, addr, large); break; } case OP_31_XOP_EIOIO: break; case OP_31_XOP_SLBMTE: if (!vcpu->arch.mmu.slbmte) return EMULATE_FAIL; vcpu->arch.mmu.slbmte(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)), kvmppc_get_gpr(vcpu, get_rb(inst))); break; case OP_31_XOP_SLBIE: if (!vcpu->arch.mmu.slbie) return EMULATE_FAIL; vcpu->arch.mmu.slbie(vcpu, kvmppc_get_gpr(vcpu, get_rb(inst))); break; case OP_31_XOP_SLBIA: if (!vcpu->arch.mmu.slbia) return EMULATE_FAIL; vcpu->arch.mmu.slbia(vcpu); break; case OP_31_XOP_SLBMFEE: if (!vcpu->arch.mmu.slbmfee) { emulated = EMULATE_FAIL; } else { ulong t, rb; rb = kvmppc_get_gpr(vcpu, get_rb(inst)); t = vcpu->arch.mmu.slbmfee(vcpu, rb); kvmppc_set_gpr(vcpu, get_rt(inst), t); } break; case OP_31_XOP_SLBMFEV: if (!vcpu->arch.mmu.slbmfev) { emulated = EMULATE_FAIL; } else { ulong t, rb; rb = kvmppc_get_gpr(vcpu, get_rb(inst)); t = vcpu->arch.mmu.slbmfev(vcpu, rb); kvmppc_set_gpr(vcpu, get_rt(inst), t); } break; case OP_31_XOP_DCBA: /* Gets treated as NOP */ break; case OP_31_XOP_DCBZ: { ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); ulong ra = 0; ulong addr, vaddr; u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; u32 dsisr; int r; if (get_ra(inst)) ra = kvmppc_get_gpr(vcpu, get_ra(inst)); addr = (ra + rb) & ~31ULL; if (!(vcpu->arch.msr & MSR_SF)) addr &= 0xffffffff; vaddr = addr; r = kvmppc_st(vcpu, &addr, 32, zeros, true); if ((r == -ENOENT) || (r == -EPERM)) { *advance = 0; vcpu->arch.dear = vaddr; to_svcpu(vcpu)->fault_dar = vaddr; dsisr = DSISR_ISSTORE; if (r == -ENOENT) dsisr |= DSISR_NOHPTE; else if (r == -EPERM) dsisr |= DSISR_PROTFAULT; to_book3s(vcpu)->dsisr = dsisr; to_svcpu(vcpu)->fault_dsisr = dsisr; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); } break; } default: emulated = EMULATE_FAIL; } break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) emulated = kvmppc_emulate_paired_single(run, vcpu); return emulated; } void 
kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, u32 val) { if (upper) { /* Upper BAT */ u32 bl = (val >> 2) & 0x7ff; bat->bepi_mask = (~bl << 17); bat->bepi = val & 0xfffe0000; bat->vs = (val & 2) ? 1 : 0; bat->vp = (val & 1) ? 1 : 0; bat->raw = (bat->raw & 0xffffffff00000000ULL) | val; } else { /* Lower BAT */ bat->brpn = val & 0xfffe0000; bat->wimg = (val >> 3) & 0xf; bat->pp = val & 3; bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32); } } static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; switch (sprn) { case SPRN_IBAT0U ... SPRN_IBAT3L: bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; break; case SPRN_IBAT4U ... SPRN_IBAT7L: bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; break; case SPRN_DBAT0U ... SPRN_DBAT3L: bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; break; case SPRN_DBAT4U ... SPRN_DBAT7L: bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; break; default: BUG(); } if (sprn % 2) return bat->raw >> 32; else return bat->raw; } static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; switch (sprn) { case SPRN_IBAT0U ... SPRN_IBAT3L: bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; break; case SPRN_IBAT4U ... SPRN_IBAT7L: bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; break; case SPRN_DBAT0U ... SPRN_DBAT3L: bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; break; case SPRN_DBAT4U ... 
SPRN_DBAT7L: bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; break; default: BUG(); } kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); } int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) { int emulated = EMULATE_DONE; ulong spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_SDR1: to_book3s(vcpu)->sdr1 = spr_val; break; case SPRN_DSISR: to_book3s(vcpu)->dsisr = spr_val; break; case SPRN_DAR: vcpu->arch.dear = spr_val; break; case SPRN_HIOR: to_book3s(vcpu)->hior = spr_val; break; case SPRN_IBAT0U ... SPRN_IBAT3L: case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L: kvmppc_write_bat(vcpu, sprn, (u32)spr_val); /* BAT writes happen so rarely that we're ok to flush * everything here */ kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); break; case SPRN_HID0: to_book3s(vcpu)->hid[0] = spr_val; break; case SPRN_HID1: to_book3s(vcpu)->hid[1] = spr_val; break; case SPRN_HID2: to_book3s(vcpu)->hid[2] = spr_val; break; case SPRN_HID2_GEKKO: to_book3s(vcpu)->hid[2] = spr_val; /* HID2.PSE controls paired single on gekko */ switch (vcpu->arch.pvr) { case 0x00080200: /* lonestar 2.0 */ case 0x00088202: /* lonestar 2.2 */ case 0x70000100: /* gekko 1.0 */ case 0x00080100: /* gekko 2.0 */ case 0x00083203: /* gekko 2.3a */ case 0x00083213: /* gekko 2.3b */ case 0x00083204: /* gekko 2.4 */ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ case 0x00087200: /* broadway */ if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) { /* Native paired singles */ } else if (spr_val & (1 << 29)) { /* HID2.PSE */ vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE; kvmppc_giveup_ext(vcpu, MSR_FP); } else { vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE; } break; } break; case SPRN_HID4: case SPRN_HID4_GEKKO: to_book3s(vcpu)->hid[4] = spr_val; break; case SPRN_HID5: to_book3s(vcpu)->hid[5] = spr_val; /* guest HID5 set can change is_dcbz32 */ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV)) 
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: case SPRN_GQR3: case SPRN_GQR4: case SPRN_GQR5: case SPRN_GQR6: case SPRN_GQR7: to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; break; case SPRN_ICTC: case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: case SPRN_MMCR0_GEKKO: case SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: case SPRN_PMC2_GEKKO: case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: break; default: printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn); #ifndef DEBUG_SPR emulated = EMULATE_FAIL; #endif break; } return emulated; } int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { int emulated = EMULATE_DONE; switch (sprn) { case SPRN_IBAT0U ... SPRN_IBAT3L: case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L: kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn)); break; case SPRN_SDR1: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); break; case SPRN_DSISR: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); break; case SPRN_DAR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; case SPRN_HIOR: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); break; case SPRN_HID0: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]); break; case SPRN_HID1: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); break; case SPRN_HID2: case SPRN_HID2_GEKKO: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); break; case SPRN_HID4: case SPRN_HID4_GEKKO: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); break; case SPRN_HID5: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: case SPRN_GQR3: case SPRN_GQR4: case SPRN_GQR5: case SPRN_GQR6: case SPRN_GQR7: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]); break; case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: case SPRN_MMCR0_GEKKO: case 
SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: case SPRN_PMC2_GEKKO: case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: kvmppc_set_gpr(vcpu, rt, 0); break; default: printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn); #ifndef DEBUG_SPR emulated = EMULATE_FAIL; #endif break; } return emulated; } u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) { u32 dsisr = 0; /* * This is what the spec says about DSISR bits (not mentioned = 0): * * 12:13 [DS] Set to bits 30:31 * 15:16 [X] Set to bits 29:30 * 17 [X] Set to bit 25 * [D/DS] Set to bit 5 * 18:21 [X] Set to bits 21:24 * [D/DS] Set to bits 1:4 * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS) * 27:31 Set to bits 11:15 (RA) */ switch (get_op(inst)) { /* D-form */ case OP_LFS: case OP_LFD: case OP_STFD: case OP_STFS: dsisr |= (inst >> 12) & 0x4000; /* bit 17 */ dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */ break; /* X-form */ case 31: dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */ dsisr |= (inst << 8) & 0x04000; /* bit 17 */ dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */ break; default: printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); break; } dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */ return dsisr; } ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) { ulong dar = 0; ulong ra; switch (get_op(inst)) { case OP_LFS: case OP_LFD: case OP_STFD: case OP_STFS: ra = get_ra(inst); if (ra) dar = kvmppc_get_gpr(vcpu, ra); dar += (s32)((s16)inst); break; case 31: ra = get_ra(inst); if (ra) dar = kvmppc_get_gpr(vcpu, ra); dar += kvmppc_get_gpr(vcpu, get_rb(inst)); break; default: printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); break; } return dar; }
gpl-2.0
m2mselect/owrt
DLpatch/linux-3.18.29/arch/arc/kernel/time.c
850
6927
/* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * vineetg: Jan 1011 * -sched_clock( ) no longer jiffies based. Uses the same clocksource * as gtod * * Rajeshwarr/Vineetg: Mar 2008 * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code) * for arch independent gettimeofday() * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers * * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c */ /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1 * Each can programmed to go from @count to @limit and optionally * interrupt when that happens. * A write to Control Register clears the Interrupt * * We've designated TIMER0 for events (clockevents) * while TIMER1 for free running (clocksource) * * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1 */ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/init.h> #include <linux/timex.h> #include <linux/profile.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <asm/irq.h> #include <asm/arcregs.h> #include <asm/clk.h> #include <asm/mach_desc.h> /* Timer related Aux registers */ #define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ #define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */ #define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */ #define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */ #define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */ #define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */ #define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */ #define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */ #define ARC_TIMER_MAX 0xFFFFFFFF /********** Clock Source Device 
*********/ #ifdef CONFIG_ARC_HAS_RTSC int arc_counter_setup(void) { /* * For SMP this needs to be 0. However Kconfig glue doesn't * enable this option for SMP configs */ return 1; } static cycle_t arc_counter_read(struct clocksource *cs) { unsigned long flags; union { #ifdef CONFIG_CPU_BIG_ENDIAN struct { u32 high, low; }; #else struct { u32 low, high; }; #endif cycle_t full; } stamp; flags = arch_local_irq_save(); __asm__ __volatile( " .extCoreRegister tsch, 58, r, cannot_shortcut \n" " rtsc %0, 0 \n" " mov %1, 0 \n" : "=r" (stamp.low), "=r" (stamp.high)); arch_local_irq_restore(flags); return stamp.full; } static struct clocksource arc_counter = { .name = "ARC RTSC", .rating = 300, .read = arc_counter_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; #else /* !CONFIG_ARC_HAS_RTSC */ static bool is_usable_as_clocksource(void) { #ifdef CONFIG_SMP return 0; #else return 1; #endif } /* * set 32bit TIMER1 to keep counting monotonically and wraparound */ int arc_counter_setup(void) { write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); write_aux_reg(ARC_REG_TIMER1_CNT, 0); write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); return is_usable_as_clocksource(); } static cycle_t arc_counter_read(struct clocksource *cs) { return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT); } static struct clocksource arc_counter = { .name = "ARC Timer1", .rating = 300, .read = arc_counter_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; #endif /********** Clock Event Device *********/ /* * Arm the timer to interrupt after @cycles * The distinction for oneshot/periodic is done in arc_event_timer_ack() below */ static void arc_timer_event_setup(unsigned int cycles) { write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles); write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH); } static int arc_clkevent_set_next_event(unsigned long delta, struct clock_event_device *dev) { 
arc_timer_event_setup(delta); return 0; } static void arc_clkevent_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: /* * At X Hz, 1 sec = 1000ms -> X cycles; * 10ms -> X / 100 cycles */ arc_timer_event_setup(arc_get_core_freq() / HZ); break; case CLOCK_EVT_MODE_ONESHOT: break; default: break; } return; } static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { .name = "ARC Timer0", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .mode = CLOCK_EVT_MODE_UNUSED, .rating = 300, .irq = TIMER0_IRQ, /* hardwired, no need for resources */ .set_next_event = arc_clkevent_set_next_event, .set_mode = arc_clkevent_set_mode, }; static irqreturn_t timer_irq_handler(int irq, void *dev_id) { /* * Note that generic IRQ core could have passed @evt for @dev_id if * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() */ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; /* * Any write to CTRL reg ACks the interrupt, we rewrite the * Count when [N]ot [H]alted bit. 
* And re-arm it if perioid by [I]nterrupt [E]nable bit */ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); evt->event_handler(evt); return IRQ_HANDLED; } /* * Setup the local event timer for @cpu */ void arc_local_timer_setup() { struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); int cpu = smp_processor_id(); evt->cpumask = cpumask_of(cpu); clockevents_config_and_register(evt, arc_get_core_freq(), 0, ARC_TIMER_MAX); /* setup the per-cpu timer IRQ handler - for all cpus */ arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler, "Timer0 (per-cpu-tick)", evt); } /* * Called from start_kernel() - boot CPU only * * -Sets up h/w timers as applicable on boot cpu * -Also sets up any global state needed for timer subsystem: * - for "counting" timer, registers a clocksource, usable across CPUs * (provided that underlying counter h/w is synchronized across cores) * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic) */ void __init time_init(void) { /* * sets up the timekeeping free-flowing counter which also returns * whether the counter is usable as clocksource */ if (arc_counter_setup()) /* * CLK upto 4.29 GHz can be safely represented in 32 bits * because Max 32 bit number is 4,294,967,295 */ clocksource_register_hz(&arc_counter, arc_get_core_freq()); /* sets up the periodic event timer */ arc_local_timer_setup(); if (machine_desc->init_time) machine_desc->init_time(); }
gpl-2.0
jab2/android_kernel_lge_l45c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
850
20196
/* * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * Author: Tom Tucker <tom@opengridcomputing.com> */ #include <linux/sunrpc/debug.h> #include <linux/sunrpc/rpc_rdma.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <linux/sunrpc/svc_rdma.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT /* * Replace the pages in the rq_argpages array with the pages from the SGE in * the RDMA_RECV completion. The SGL should contain full pages up until the * last one. */ static void rdma_build_arg_xdr(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *ctxt, u32 byte_count) { struct page *page; u32 bc; int sge_no; /* Swap the page in the SGE with the page in argpages */ page = ctxt->pages[0]; put_page(rqstp->rq_pages[0]); rqstp->rq_pages[0] = page; /* Set up the XDR head */ rqstp->rq_arg.head[0].iov_base = page_address(page); rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length); rqstp->rq_arg.len = byte_count; rqstp->rq_arg.buflen = byte_count; /* Compute bytes past head in the SGL */ bc = byte_count - rqstp->rq_arg.head[0].iov_len; /* If data remains, store it in the pagelist */ rqstp->rq_arg.page_len = bc; rqstp->rq_arg.page_base = 0; rqstp->rq_arg.pages = &rqstp->rq_pages[1]; sge_no = 1; while (bc && sge_no < ctxt->count) { page = ctxt->pages[sge_no]; put_page(rqstp->rq_pages[sge_no]); rqstp->rq_pages[sge_no] = page; bc -= min(bc, ctxt->sge[sge_no].length); rqstp->rq_arg.buflen += ctxt->sge[sge_no].length; sge_no++; } rqstp->rq_respages = &rqstp->rq_pages[sge_no]; /* We should never run out of SGE because the limit is defined to * support the max allowed RPC data length */ BUG_ON(bc && (sge_no == ctxt->count)); BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len) != byte_count); BUG_ON(rqstp->rq_arg.len != byte_count); /* If not all pages were used from the SGL, free the remaining ones */ bc = sge_no; while (sge_no < ctxt->count) { page = ctxt->pages[sge_no++]; put_page(page); } ctxt->count = bc; /* Set up tail */ 
rqstp->rq_arg.tail[0].iov_base = NULL; rqstp->rq_arg.tail[0].iov_len = 0; } /* Encode a read-chunk-list as an array of IB SGE * * Assumptions: * - chunk[0]->position points to pages[0] at an offset of 0 * - pages[] is not physically or virtually contiguous and consists of * PAGE_SIZE elements. * * Output: * - sge array pointing into pages[] array. * - chunk_sge array specifying sge index and count for each * chunk in the read list * */ static int map_read_chunks(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, struct rpcrdma_msg *rmsgp, struct svc_rdma_req_map *rpl_map, struct svc_rdma_req_map *chl_map, int ch_count, int byte_count) { int sge_no; int sge_bytes; int page_off; int page_no; int ch_bytes; int ch_no; struct rpcrdma_read_chunk *ch; sge_no = 0; page_no = 0; page_off = 0; ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; ch_no = 0; ch_bytes = ch->rc_target.rs_length; head->arg.head[0] = rqstp->rq_arg.head[0]; head->arg.tail[0] = rqstp->rq_arg.tail[0]; head->arg.pages = &head->pages[head->count]; head->hdr_count = head->count; /* save count of hdr pages */ head->arg.page_base = 0; head->arg.page_len = ch_bytes; head->arg.len = rqstp->rq_arg.len + ch_bytes; head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes; head->count++; chl_map->ch[0].start = 0; while (byte_count) { rpl_map->sge[sge_no].iov_base = page_address(rqstp->rq_arg.pages[page_no]) + page_off; sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes); rpl_map->sge[sge_no].iov_len = sge_bytes; /* * Don't bump head->count here because the same page * may be used by multiple SGE. 
*/ head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no]; rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1]; byte_count -= sge_bytes; ch_bytes -= sge_bytes; sge_no++; /* * If all bytes for this chunk have been mapped to an * SGE, move to the next SGE */ if (ch_bytes == 0) { chl_map->ch[ch_no].count = sge_no - chl_map->ch[ch_no].start; ch_no++; ch++; chl_map->ch[ch_no].start = sge_no; ch_bytes = ch->rc_target.rs_length; /* If bytes remaining account for next chunk */ if (byte_count) { head->arg.page_len += ch_bytes; head->arg.len += ch_bytes; head->arg.buflen += ch_bytes; } } /* * If this SGE consumed all of the page, move to the * next page */ if ((sge_bytes + page_off) == PAGE_SIZE) { page_no++; page_off = 0; /* * If there are still bytes left to map, bump * the page count */ if (byte_count) head->count++; } else page_off += sge_bytes; } BUG_ON(byte_count != 0); return sge_no; } /* Map a read-chunk-list to an XDR and fast register the page-list. * * Assumptions: * - chunk[0] position points to pages[0] at an offset of 0 * - pages[] will be made physically contiguous by creating a one-off memory * region using the fastreg verb. * - byte_count is # of bytes in read-chunk-list * - ch_count is # of chunks in read-chunk-list * * Output: * - sge array pointing into pages[] array. 
* - chunk_sge array specifying sge index and count for each * chunk in the read list */ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, struct rpcrdma_msg *rmsgp, struct svc_rdma_req_map *rpl_map, struct svc_rdma_req_map *chl_map, int ch_count, int byte_count) { int page_no; int ch_no; u32 offset; struct rpcrdma_read_chunk *ch; struct svc_rdma_fastreg_mr *frmr; int ret = 0; frmr = svc_rdma_get_frmr(xprt); if (IS_ERR(frmr)) return -ENOMEM; head->frmr = frmr; head->arg.head[0] = rqstp->rq_arg.head[0]; head->arg.tail[0] = rqstp->rq_arg.tail[0]; head->arg.pages = &head->pages[head->count]; head->hdr_count = head->count; /* save count of hdr pages */ head->arg.page_base = 0; head->arg.page_len = byte_count; head->arg.len = rqstp->rq_arg.len + byte_count; head->arg.buflen = rqstp->rq_arg.buflen + byte_count; /* Fast register the page list */ frmr->kva = page_address(rqstp->rq_arg.pages[0]); frmr->direction = DMA_FROM_DEVICE; frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE); frmr->map_len = byte_count; frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT; for (page_no = 0; page_no < frmr->page_list_len; page_no++) { frmr->page_list->page_list[page_no] = ib_dma_map_single(xprt->sc_cm_id->device, page_address(rqstp->rq_arg.pages[page_no]), PAGE_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, frmr->page_list->page_list[page_no])) goto fatal_err; atomic_inc(&xprt->sc_dma_used); head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no]; } head->count += page_no; /* rq_respages points one past arg pages */ rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; /* Create the reply and chunk maps */ offset = 0; ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; for (ch_no = 0; ch_no < ch_count; ch_no++) { rpl_map->sge[ch_no].iov_base = frmr->kva + offset; rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length; chl_map->ch[ch_no].count = 1; 
chl_map->ch[ch_no].start = ch_no; offset += ch->rc_target.rs_length; ch++; } ret = svc_rdma_fastreg(xprt, frmr); if (ret) goto fatal_err; return ch_no; fatal_err: printk("svcrdma: error fast registering xdr for xprt %p", xprt); svc_rdma_put_frmr(xprt, frmr); return -EIO; } static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt, struct svc_rdma_op_ctxt *ctxt, struct svc_rdma_fastreg_mr *frmr, struct kvec *vec, u64 *sgl_offset, int count) { int i; ctxt->count = count; ctxt->direction = DMA_FROM_DEVICE; for (i = 0; i < count; i++) { ctxt->sge[i].length = 0; /* in case map fails */ if (!frmr) { ctxt->sge[i].addr = ib_dma_map_single(xprt->sc_cm_id->device, vec[i].iov_base, vec[i].iov_len, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[i].addr)) return -EINVAL; ctxt->sge[i].lkey = xprt->sc_dma_lkey; atomic_inc(&xprt->sc_dma_used); } else { ctxt->sge[i].addr = (unsigned long)vec[i].iov_base; ctxt->sge[i].lkey = frmr->mr->lkey; } ctxt->sge[i].length = vec[i].iov_len; *sgl_offset = *sgl_offset + vec[i].iov_len; } return 0; } static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) { if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) == RDMA_TRANSPORT_IWARP) && sge_count > 1) return 1; else return min_t(int, sge_count, xprt->sc_max_sge); } /* * Use RDMA_READ to read data from the advertised client buffer into the * XDR stream starting at rq_arg.head[0].iov_base. * Each chunk in the array * contains the following fields: * discrim - '1', This isn't used for data placement * position - The xdr stream offset (the same for every chunk) * handle - RMR for client memory region * length - data transfer length * offset - 64 bit tagged offset in remote memory region * * On our side, we need to read into a pagelist. The first page immediately * follows the RPC header. * * This function returns: * 0 - No error and no read-list found. * * 1 - Successful read-list processing. 
The data is not yet in * the pagelist and therefore the RPC request must be deferred. The * I/O completion will enqueue the transport again and * svc_rdma_recvfrom will complete the request. * * <0 - Error processing/posting read-list. * * NOTE: The ctxt must not be touched after the last WR has been posted * because the I/O completion processing may occur on another * processor and free / modify the context. Ne touche pas! */ static int rdma_read_xdr(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *hdr_ctxt) { struct ib_send_wr read_wr; struct ib_send_wr inv_wr; int err = 0; int ch_no; int ch_count; int byte_count; int sge_count; u64 sgl_offset; struct rpcrdma_read_chunk *ch; struct svc_rdma_op_ctxt *ctxt = NULL; struct svc_rdma_req_map *rpl_map; struct svc_rdma_req_map *chl_map; /* If no read list is present, return 0 */ ch = svc_rdma_get_read_chunk(rmsgp); if (!ch) return 0; svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count); if (ch_count > RPCSVC_MAXPAGES) return -EINVAL; /* Allocate temporary reply and chunk maps */ rpl_map = svc_rdma_get_req_map(); chl_map = svc_rdma_get_req_map(); if (!xprt->sc_frmr_pg_list_len) sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp, rpl_map, chl_map, ch_count, byte_count); else sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp, rpl_map, chl_map, ch_count, byte_count); if (sge_count < 0) { err = -EIO; goto out; } sgl_offset = 0; ch_no = 0; for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; ch->rc_discrim != 0; ch++, ch_no++) { next_sge: ctxt = svc_rdma_get_context(xprt); ctxt->direction = DMA_FROM_DEVICE; ctxt->frmr = hdr_ctxt->frmr; ctxt->read_hdr = NULL; clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); /* Prepare READ WR */ memset(&read_wr, 0, sizeof read_wr); read_wr.wr_id = (unsigned long)ctxt; read_wr.opcode = IB_WR_RDMA_READ; ctxt->wr_op = read_wr.opcode; read_wr.send_flags = 
IB_SEND_SIGNALED; read_wr.wr.rdma.rkey = ch->rc_target.rs_handle; read_wr.wr.rdma.remote_addr = get_unaligned(&(ch->rc_target.rs_offset)) + sgl_offset; read_wr.sg_list = ctxt->sge; read_wr.num_sge = rdma_read_max_sge(xprt, chl_map->ch[ch_no].count); err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr, &rpl_map->sge[chl_map->ch[ch_no].start], &sgl_offset, read_wr.num_sge); if (err) { svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 0); goto out; } if (((ch+1)->rc_discrim == 0) && (read_wr.num_sge == chl_map->ch[ch_no].count)) { /* * Mark the last RDMA_READ with a bit to * indicate all RPC data has been fetched from * the client and the RPC needs to be enqueued. */ set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); if (hdr_ctxt->frmr) { set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); /* * Invalidate the local MR used to map the data * sink. */ if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) { read_wr.opcode = IB_WR_RDMA_READ_WITH_INV; ctxt->wr_op = read_wr.opcode; read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; } else { /* Prepare INVALIDATE WR */ memset(&inv_wr, 0, sizeof inv_wr); inv_wr.opcode = IB_WR_LOCAL_INV; inv_wr.send_flags = IB_SEND_SIGNALED; inv_wr.ex.invalidate_rkey = hdr_ctxt->frmr->mr->lkey; read_wr.next = &inv_wr; } } ctxt->read_hdr = hdr_ctxt; } /* Post the read */ err = svc_rdma_send(xprt, &read_wr); if (err) { printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n", err); set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); svc_rdma_put_context(ctxt, 0); goto out; } atomic_inc(&rdma_stat_read); if (read_wr.num_sge < chl_map->ch[ch_no].count) { chl_map->ch[ch_no].count -= read_wr.num_sge; chl_map->ch[ch_no].start += read_wr.num_sge; goto next_sge; } sgl_offset = 0; err = 1; } out: svc_rdma_put_req_map(rpl_map); svc_rdma_put_req_map(chl_map); /* Detach arg pages. svc_recv will replenish them */ for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++) rqstp->rq_pages[ch_no] = NULL; /* * Detach res pages. 
svc_release must see a resused count of * zero or it will attempt to put them. */ while (rqstp->rq_resused) rqstp->rq_respages[--rqstp->rq_resused] = NULL; return err; } static int rdma_read_complete(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head) { int page_no; int ret; BUG_ON(!head); /* Copy RPC pages */ for (page_no = 0; page_no < head->count; page_no++) { put_page(rqstp->rq_pages[page_no]); rqstp->rq_pages[page_no] = head->pages[page_no]; } /* Point rq_arg.pages past header */ rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; rqstp->rq_arg.page_len = head->arg.page_len; rqstp->rq_arg.page_base = head->arg.page_base; /* rq_respages starts after the last arg page */ rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; rqstp->rq_resused = 0; /* Rebuild rq_arg head and tail. */ rqstp->rq_arg.head[0] = head->arg.head[0]; rqstp->rq_arg.tail[0] = head->arg.tail[0]; rqstp->rq_arg.len = head->arg.len; rqstp->rq_arg.buflen = head->arg.buflen; /* Free the context */ svc_rdma_put_context(head, 0); /* XXX: What should this be? */ rqstp->rq_prot = IPPROTO_MAX; svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt); ret = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len + rqstp->rq_arg.tail[0].iov_len; dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, " "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n", ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, rqstp->rq_arg.head[0].iov_len); return ret; } /* * Set up the rqstp thread context to point to the RQ buffer. If * necessary, pull additional data from the client with an RDMA_READ * request. 
*/ int svc_rdma_recvfrom(struct svc_rqst *rqstp) { struct svc_xprt *xprt = rqstp->rq_xprt; struct svcxprt_rdma *rdma_xprt = container_of(xprt, struct svcxprt_rdma, sc_xprt); struct svc_rdma_op_ctxt *ctxt = NULL; struct rpcrdma_msg *rmsgp; int ret = 0; int len; dprintk("svcrdma: rqstp=%p\n", rqstp); spin_lock_bh(&rdma_xprt->sc_rq_dto_lock); if (!list_empty(&rdma_xprt->sc_read_complete_q)) { ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); } if (ctxt) { spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); return rdma_read_complete(rqstp, ctxt); } if (!list_empty(&rdma_xprt->sc_rq_dto_q)) { ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); } else { atomic_inc(&rdma_stat_rq_starve); clear_bit(XPT_DATA, &xprt->xpt_flags); ctxt = NULL; } spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); if (!ctxt) { /* This is the EAGAIN path. The svc_recv routine will * return -EAGAIN, the nfsd thread will go to call into * svc_recv again and we shouldn't be on the active * transport list */ if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) goto close_out; BUG_ON(ret); goto out; } dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", ctxt, rdma_xprt, rqstp, ctxt->wc_status); BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); atomic_inc(&rdma_stat_recv); /* Build up the XDR from the receive buffers. */ rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); /* Decode the RDMA header. */ len = svc_rdma_xdr_decode_req(&rmsgp, rqstp); rqstp->rq_xprt_hlen = len; /* If the request is invalid, reply with an error */ if (len < 0) { if (len == -ENOSYS) svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS); goto close_out; } /* Read read-list data. */ ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt); if (ret > 0) { /* read-list posted, defer until data received from client. */ goto defer; } if (ret < 0) { /* Post of read-list failed, free context. 
*/ svc_rdma_put_context(ctxt, 1); return 0; } ret = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len + rqstp->rq_arg.tail[0].iov_len; svc_rdma_put_context(ctxt, 0); out: dprintk("svcrdma: ret = %d, rq_arg.len =%d, " "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n", ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, rqstp->rq_arg.head[0].iov_len); rqstp->rq_prot = IPPROTO_MAX; svc_xprt_copy_addrs(rqstp, xprt); return ret; close_out: if (ctxt) svc_rdma_put_context(ctxt, 1); dprintk("svcrdma: transport %p is closing\n", xprt); /* * Set the close bit and enqueue it. svc_recv will see the * close bit and call svc_xprt_delete */ set_bit(XPT_CLOSE, &xprt->xpt_flags); defer: return 0; }
gpl-2.0
inferiorhumanorgans/android_kernel_thunderc
drivers/isdn/hardware/eicon/capimain.c
850
3336
/* $Id: capimain.c,v 1.24 2003/09/09 06:51:05 schindler Exp $ * * ISDN interface module for Eicon active cards DIVA. * CAPI Interface * * Copyright 2000-2003 by Armin Schindler (mac@melware.de) * Copyright 2000-2003 Cytronics & Melware (info@melware.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include <linux/module.h> #include <linux/init.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include "os_capi.h" #include "platform.h" #include "di_defs.h" #include "capi20.h" #include "divacapi.h" #include "cp_vers.h" #include "capifunc.h" static char *main_revision = "$Revision: 1.24 $"; static char *DRIVERNAME = "Eicon DIVA - CAPI Interface driver (http://www.melware.net)"; static char *DRIVERLNAME = "divacapi"; MODULE_DESCRIPTION("CAPI driver for Eicon DIVA cards"); MODULE_AUTHOR("Cytronics & Melware, Eicon Networks"); MODULE_SUPPORTED_DEVICE("CAPI and DIVA card drivers"); MODULE_LICENSE("GPL"); /* * get revision number from revision string */ static char *getrev(const char *revision) { char *rev; char *p; if ((p = strchr(revision, ':'))) { rev = p + 2; p = strchr(rev, '$'); *--p = 0; } else rev = "1.0"; return rev; } /* * alloc a message buffer */ diva_os_message_buffer_s *diva_os_alloc_message_buffer(unsigned long size, void **data_buf) { diva_os_message_buffer_s *dmb = alloc_skb(size, GFP_ATOMIC); if (dmb) { *data_buf = skb_put(dmb, size); } return (dmb); } /* * free a message buffer */ void diva_os_free_message_buffer(diva_os_message_buffer_s * dmb) { kfree_skb(dmb); } /* * proc function for controller info */ static int diva_ctl_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl) { diva_card *card = (diva_card *) ctrl->driverdata; int len = 0; len += sprintf(page + len, "%s\n", ctrl->name); len += sprintf(page + len, "Serial No. 
: %s\n", ctrl->serial); len += sprintf(page + len, "Id : %d\n", card->Id); len += sprintf(page + len, "Channels : %d\n", card->d.channels); if (off + count >= len) *eof = 1; if (len < off) return 0; *start = page + off; return ((count < len - off) ? count : len - off); } /* * set additional os settings in capi_ctr struct */ void diva_os_set_controller_struct(struct capi_ctr *ctrl) { ctrl->driver_name = DRIVERLNAME; ctrl->load_firmware = NULL; ctrl->reset_ctr = NULL; ctrl->ctr_read_proc = diva_ctl_read_proc; ctrl->owner = THIS_MODULE; } /* * module init */ static int DIVA_INIT_FUNCTION divacapi_init(void) { char tmprev[32]; int ret = 0; sprintf(DRIVERRELEASE_CAPI, "%d.%d%s", DRRELMAJOR, DRRELMINOR, DRRELEXTRA); printk(KERN_INFO "%s\n", DRIVERNAME); printk(KERN_INFO "%s: Rel:%s Rev:", DRIVERLNAME, DRIVERRELEASE_CAPI); strcpy(tmprev, main_revision); printk("%s Build: %s(%s)\n", getrev(tmprev), diva_capi_common_code_build, DIVA_BUILD); if (!(init_capifunc())) { printk(KERN_ERR "%s: failed init capi_driver.\n", DRIVERLNAME); ret = -EIO; } return ret; } /* * module exit */ static void DIVA_EXIT_FUNCTION divacapi_exit(void) { finit_capifunc(); printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME); } module_init(divacapi_init); module_exit(divacapi_exit);
gpl-2.0
ntb-ch/linux
arch/sh/mm/cache.c
1362
9475
/* * arch/sh/mm/cache.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2002 - 2010 Paul Mundt * * Released under the terms of the GNU GPL v2.0. */ #include <linux/mm.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/module.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> void (*local_flush_cache_all)(void *args) = cache_noop; void (*local_flush_cache_mm)(void *args) = cache_noop; void (*local_flush_cache_dup_mm)(void *args) = cache_noop; void (*local_flush_cache_page)(void *args) = cache_noop; void (*local_flush_cache_range)(void *args) = cache_noop; void (*local_flush_dcache_page)(void *args) = cache_noop; void (*local_flush_icache_range)(void *args) = cache_noop; void (*local_flush_icache_page)(void *args) = cache_noop; void (*local_flush_cache_sigtramp)(void *args) = cache_noop; void (*__flush_wback_region)(void *start, int size); EXPORT_SYMBOL(__flush_wback_region); void (*__flush_purge_region)(void *start, int size); EXPORT_SYMBOL(__flush_purge_region); void (*__flush_invalidate_region)(void *start, int size); EXPORT_SYMBOL(__flush_invalidate_region); static inline void noop__flush_region(void *start, int size) { } static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info, int wait) { preempt_disable(); /* * It's possible that this gets called early on when IRQs are * still disabled due to ioremapping by the boot CPU, so don't * even attempt IPIs unless there are other CPUs online. 
*/ if (num_online_cpus() > 1) smp_call_function(func, info, wait); func(info); preempt_enable(); } void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && test_bit(PG_dcache_clean, &page->flags)) { void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(vto, src, len); kunmap_coherent(vto); } else { memcpy(dst, src, len); if (boot_cpu_data.dcache.n_aliases) clear_bit(PG_dcache_clean, &page->flags); } if (vma->vm_flags & VM_EXEC) flush_cache_page(vma, vaddr, page_to_pfn(page)); } void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && test_bit(PG_dcache_clean, &page->flags)) { void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(dst, vfrom, len); kunmap_coherent(vfrom); } else { memcpy(dst, src, len); if (boot_cpu_data.dcache.n_aliases) clear_bit(PG_dcache_clean, &page->flags); } } void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *vfrom, *vto; vto = kmap_atomic(to); if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && test_bit(PG_dcache_clean, &from->flags)) { vfrom = kmap_coherent(from, vaddr); copy_page(vto, vfrom); kunmap_coherent(vfrom); } else { vfrom = kmap_atomic(from); copy_page(vto, vfrom); kunmap_atomic(vfrom); } if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || (vma->vm_flags & VM_EXEC)) __flush_purge_region(vto, PAGE_SIZE); kunmap_atomic(vto); /* Make sure this page is cleared on other CPU's too before using it */ smp_wmb(); } EXPORT_SYMBOL(copy_user_highpage); void clear_user_highpage(struct page *page, unsigned long vaddr) { void *kaddr = kmap_atomic(page); clear_page(kaddr); if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) 
__flush_purge_region(kaddr, PAGE_SIZE); kunmap_atomic(kaddr); } EXPORT_SYMBOL(clear_user_highpage); void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct page *page; unsigned long pfn = pte_pfn(pte); if (!boot_cpu_data.dcache.n_aliases) return; page = pfn_to_page(pfn); if (pfn_valid(pfn)) { int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags); if (dirty) __flush_purge_region(page_address(page), PAGE_SIZE); } } void __flush_anon_page(struct page *page, unsigned long vmaddr) { unsigned long addr = (unsigned long) page_address(page); if (pages_do_alias(addr, vmaddr)) { if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && test_bit(PG_dcache_clean, &page->flags)) { void *kaddr; kaddr = kmap_coherent(page, vmaddr); /* XXX.. For now kunmap_coherent() does a purge */ /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */ kunmap_coherent(kaddr); } else __flush_purge_region((void *)addr, PAGE_SIZE); } } void flush_cache_all(void) { cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); } EXPORT_SYMBOL(flush_cache_all); void flush_cache_mm(struct mm_struct *mm) { if (boot_cpu_data.dcache.n_aliases == 0) return; cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); } void flush_cache_dup_mm(struct mm_struct *mm) { if (boot_cpu_data.dcache.n_aliases == 0) return; cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); } void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { struct flusher_data data; data.vma = vma; data.addr1 = addr; data.addr2 = pfn; cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1); } void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct flusher_data data; data.vma = vma; data.addr1 = start; data.addr2 = end; cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); } EXPORT_SYMBOL(flush_cache_range); void flush_dcache_page(struct page *page) { cacheop_on_each_cpu(local_flush_dcache_page, page, 1); } 
EXPORT_SYMBOL(flush_dcache_page); void flush_icache_range(unsigned long start, unsigned long end) { struct flusher_data data; data.vma = NULL; data.addr1 = start; data.addr2 = end; cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1); } EXPORT_SYMBOL(flush_icache_range); void flush_icache_page(struct vm_area_struct *vma, struct page *page) { /* Nothing uses the VMA, so just pass the struct page along */ cacheop_on_each_cpu(local_flush_icache_page, page, 1); } void flush_cache_sigtramp(unsigned long address) { cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1); } static void compute_alias(struct cache_info *c) { c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; } static void __init emit_cache_params(void) { printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n", boot_cpu_data.icache.ways, boot_cpu_data.icache.sets, boot_cpu_data.icache.way_incr); printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", boot_cpu_data.icache.entry_mask, boot_cpu_data.icache.alias_mask, boot_cpu_data.icache.n_aliases); printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n", boot_cpu_data.dcache.ways, boot_cpu_data.dcache.sets, boot_cpu_data.dcache.way_incr); printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", boot_cpu_data.dcache.entry_mask, boot_cpu_data.dcache.alias_mask, boot_cpu_data.dcache.n_aliases); /* * Emit Secondary Cache parameters if the CPU has a probed L2. 
*/ if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n", boot_cpu_data.scache.ways, boot_cpu_data.scache.sets, boot_cpu_data.scache.way_incr); printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", boot_cpu_data.scache.entry_mask, boot_cpu_data.scache.alias_mask, boot_cpu_data.scache.n_aliases); } } void __init cpu_cache_init(void) { unsigned int cache_disabled = 0; #ifdef SH_CCR cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE); #endif compute_alias(&boot_cpu_data.icache); compute_alias(&boot_cpu_data.dcache); compute_alias(&boot_cpu_data.scache); __flush_wback_region = noop__flush_region; __flush_purge_region = noop__flush_region; __flush_invalidate_region = noop__flush_region; /* * No flushing is necessary in the disabled cache case so we can * just keep the noop functions in local_flush_..() and __flush_..() */ if (unlikely(cache_disabled)) goto skip; if (boot_cpu_data.family == CPU_FAMILY_SH2) { extern void __weak sh2_cache_init(void); sh2_cache_init(); } if (boot_cpu_data.family == CPU_FAMILY_SH2A) { extern void __weak sh2a_cache_init(void); sh2a_cache_init(); } if (boot_cpu_data.family == CPU_FAMILY_SH3) { extern void __weak sh3_cache_init(void); sh3_cache_init(); if ((boot_cpu_data.type == CPU_SH7705) && (boot_cpu_data.dcache.sets == 512)) { extern void __weak sh7705_cache_init(void); sh7705_cache_init(); } } if ((boot_cpu_data.family == CPU_FAMILY_SH4) || (boot_cpu_data.family == CPU_FAMILY_SH4A) || (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) { extern void __weak sh4_cache_init(void); sh4_cache_init(); if ((boot_cpu_data.type == CPU_SH7786) || (boot_cpu_data.type == CPU_SHX3)) { extern void __weak shx3_cache_init(void); shx3_cache_init(); } } if (boot_cpu_data.family == CPU_FAMILY_SH5) { extern void __weak sh5_cache_init(void); sh5_cache_init(); } skip: emit_cache_params(); }
gpl-2.0
0ps/inception
zlib/trees.c
1874
44027
/* trees.c -- output deflated data using Huffman coding * Copyright (C) 1995-2005 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process uses several Huffman trees. The more * common source values are represented by shorter bit sequences. * * Each code tree is stored in a compressed form which is itself * a Huffman encoding of the lengths of all the code strings (in * ascending order by source values). The actual code strings are * reconstructed from the lengths in the inflate process, as described * in the deflate specification. * * REFERENCES * * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc * * Storer, James A. * Data Compression: Methods and Theory, pp. 49-50. * Computer Science Press, 1988. ISBN 0-7167-8156-5. * * Sedgewick, R. * Algorithms, p290. * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ /* @(#) $Id$ */ /* #define GEN_TREES_H */ #include "deflate.h" #ifdef DEBUG # include <ctype.h> #endif /* =========================================================================== * Constants */ #define MAX_BL_BITS 7 /* Bit length codes must not exceed MAX_BL_BITS bits */ #define END_BLOCK 256 /* end of block literal code */ #define REP_3_6 16 /* repeat previous bit length 3-6 times (2 bits of repeat count) */ #define REPZ_3_10 17 /* repeat a zero length 3-10 times (3 bits of repeat count) */ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; local const int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; local const uch 
bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. */ #define Buf_size (8 * 2*sizeof(char)) /* Number of bits used within bi_buf. (bi_buf might be implemented on * more than 16 bits on some systems.) */ /* =========================================================================== * Local data. These are initialized only once. */ #define DIST_CODE_LEN 512 /* see definition of array dist_code below */ #if defined(GEN_TREES_H) || !defined(STDC) /* non ANSI compilers may not accept trees.h */ local ct_data static_ltree[L_CODES+2]; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see _tr_init * below). */ local ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ uch _dist_code[DIST_CODE_LEN]; /* Distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. 
*/ uch _length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ local int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ local int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ #else # include "trees.h" #endif /* GEN_TREES_H */ struct static_tree_desc_s { const ct_data *static_tree; /* static tree or NULL */ const intf *extra_bits; /* extra bits for each code or NULL */ int extra_base; /* base index for extra_bits */ int elems; /* max number of elements in the tree */ int max_length; /* max bit length for the codes */ }; local static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; local static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; local static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Local (static) routines in this file. 
*/ local void tr_static_init OF((void)); local void init_block OF((deflate_state *s)); local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); local void build_tree OF((deflate_state *s, tree_desc *desc)); local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); local int build_bl_tree OF((deflate_state *s)); local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, int blcodes)); local void compress_block OF((deflate_state *s, ct_data *ltree, ct_data *dtree)); local void set_data_type OF((deflate_state *s)); local unsigned bi_reverse OF((unsigned value, int length)); local void bi_windup OF((deflate_state *s)); local void bi_flush OF((deflate_state *s)); local void copy_block OF((deflate_state *s, charf *buf, unsigned len, int header)); #ifdef GEN_TREES_H local void gen_trees_header OF((void)); #endif #ifndef DEBUG # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) /* Send a code of the given tree. c and tree must not have side effects */ #else /* DEBUG */ # define send_code(s, c, tree) \ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ send_bits(s, tree[c].Code, tree[c].Len); } #endif /* =========================================================================== * Output a short LSB first on the stream. * IN assertion: there is enough room in pendingBuf. */ #define put_short(s, w) { \ put_byte(s, (uch)((w) & 0xff)); \ put_byte(s, (uch)((ush)(w) >> 8)); \ } /* =========================================================================== * Send a value on a given number of bits. * IN assertion: length <= 16 and value fits in length bits. 
*/ #ifdef DEBUG local void send_bits OF((deflate_state *s, int value, int length)); local void send_bits(s, value, length) deflate_state *s; int value; /* value to send */ int length; /* number of bits */ { Tracevv((stderr," l %2d v %4x ", length, value)); Assert(length > 0 && length <= 15, "invalid length"); s->bits_sent += (ulg)length; /* If not enough room in bi_buf, use (valid) bits from bi_buf and * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) * unused bits in value. */ if (s->bi_valid > (int)Buf_size - length) { s->bi_buf |= (value << s->bi_valid); put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { s->bi_buf |= value << s->bi_valid; s->bi_valid += length; } } #else /* !DEBUG */ #define send_bits(s, value, length) \ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = value;\ s->bi_buf |= (val << s->bi_valid);\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ s->bi_buf |= (value) << s->bi_valid;\ s->bi_valid += len;\ }\ } #endif /* DEBUG */ /* the arguments must not have side effects */ /* =========================================================================== * Initialize the various 'constant' tables. 
*/ local void tr_static_init() { #if defined(GEN_TREES_H) || !defined(STDC) static int static_init_done = 0; int n; /* iterates over tree elements */ int bits; /* bit counter */ int length; /* length value */ int code; /* code value */ int dist; /* distance index */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ if (static_init_done) return; /* For some embedded targets, global variables are not initialized: */ static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; static_d_desc.static_tree = static_dtree; static_d_desc.extra_bits = extra_dbits; static_bl_desc.extra_bits = extra_blbits; /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; for (code = 0; code < LENGTH_CODES-1; code++) { base_length[code] = length; for (n = 0; n < (1<<extra_lbits[code]); n++) { _length_code[length++] = (uch)code; } } Assert (length == 256, "tr_static_init: length != 256"); /* Note that the length 255 (match length 258) can be represented * in two different ways: code 284 + 5 bits or code 285, so we * overwrite length_code[255] to use the best encoding: */ _length_code[length-1] = (uch)code; /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ dist = 0; for (code = 0 ; code < 16; code++) { base_dist[code] = dist; for (n = 0; n < (1<<extra_dbits[code]); n++) { _dist_code[dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: dist != 256"); dist >>= 7; /* from now on, all distances are divided by 128 */ for ( ; code < D_CODES; code++) { base_dist[code] = dist << 7; for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { _dist_code[256 + dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: 256+dist != 512"); /* Construct the codes of the static literal tree */ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; n = 0; while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; while (n <= 279) 
/* NOTE(review): extraction artifact -- from here on this is zlib's trees.c
 * (Huffman tree construction for deflate), not the ISDN driver named in the
 * file header. Each long line below packs many statements and often ends in
 * the middle of a comment or expression; code left byte-identical. */
static_ltree[n++].Len = 7, bl_count[7]++; while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; /* Codes 286 and 287 do not exist, but we must include them in the * tree construction to get a canonical Huffman tree (longest code * all ones) */ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); /* The static distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; static_dtree[n].Code = bi_reverse((unsigned)n, 5); } static_init_done = 1; # ifdef GEN_TREES_H gen_trees_header(); # endif #endif /* defined(GEN_TREES_H) || !defined(STDC) */ } /* =========================================================================== * Generate the file trees.h describing the static trees. */ #ifdef GEN_TREES_H # ifndef DEBUG # include <stdio.h> # endif # define SEPARATOR(i, last, width) \ ((i) == (last)? "\n};\n\n" : \ ((i) % (width) == (width)-1 ? ",\n" : ", ")) void gen_trees_header() { FILE *header = fopen("trees.h", "w"); int i; Assert (header != NULL, "Can't open trees.h"); fprintf(header, "/* header created automatically with -DGEN_TREES_H */\n\n"); fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); for (i = 0; i < L_CODES+2; i++) { fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); } fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); } fprintf(header, "const uch _dist_code[DIST_CODE_LEN] = {\n"); for (i = 0; i < DIST_CODE_LEN; i++) { fprintf(header, "%2u%s", _dist_code[i], SEPARATOR(i, DIST_CODE_LEN-1, 20)); } fprintf(header, "const uch _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { fprintf(header, "%2u%s", _length_code[i], SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); } fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); for (i = 0; i < LENGTH_CODES; i++)
/* gen_trees_header() continues: emit base_length[], then base_dist[], then close trees.h */
{ fprintf(header, "%1u%s", base_length[i], SEPARATOR(i, LENGTH_CODES-1, 20)); } fprintf(header, "local const int base_dist[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "%5u%s", base_dist[i], SEPARATOR(i, D_CODES-1, 10)); } fclose(header); } #endif /* GEN_TREES_H */ /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ void _tr_init(s) deflate_state *s; { tr_static_init(); s->l_desc.dyn_tree = s->dyn_ltree; s->l_desc.stat_desc = &static_l_desc; s->d_desc.dyn_tree = s->dyn_dtree; s->d_desc.stat_desc = &static_d_desc; s->bl_desc.dyn_tree = s->bl_tree; s->bl_desc.stat_desc = &static_bl_desc; s->bi_buf = 0; s->bi_valid = 0; s->last_eob_len = 8; /* enough lookahead for inflate */ #ifdef DEBUG s->compressed_len = 0L; s->bits_sent = 0L; #endif /* Initialize the first block of the first file: */ init_block(s); } /* =========================================================================== * Initialize a new block. */ local void init_block(s) deflate_state *s; { int n; /* iterates over tree elements */ /* Initialize the trees. */ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; s->dyn_ltree[END_BLOCK].Freq = 1; s->opt_len = s->static_len = 0L; s->last_lit = s->matches = 0; } #define SMALLEST 1 /* Index within the heap array of least frequent node in the Huffman tree */ /* =========================================================================== * Remove the smallest element from the heap and recreate the heap with * one less element. Updates heap and heap_len.
 */ #define pqremove(s, tree, top) \ {\ top = s->heap[SMALLEST]; \ s->heap[SMALLEST] = s->heap[s->heap_len--]; \ pqdownheap(s, tree, SMALLEST); \ } /* =========================================================================== * Compares to subtrees, using the tree depth as tie breaker when * the subtrees have equal frequency. This minimizes the worst case length. */ #define smaller(tree, n, m, depth) \ (tree[n].Freq < tree[m].Freq || \ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) /* =========================================================================== * Restore the heap property by moving down the tree starting at node k, * exchanging a node with the smallest of its two sons if necessary, stopping * when the heap property is re-established (each father smaller than its * two sons). */ local void pqdownheap(s, tree, k) deflate_state *s; ct_data *tree; /* the tree to restore */ int k; /* node to move down */ { int v = s->heap[k]; int j = k << 1; /* left son of k */ while (j <= s->heap_len) { /* Set j to the smallest of the two sons: */ if (j < s->heap_len && smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { j++; } /* Exit if v is smaller than both sons */ if (smaller(tree, v, s->heap[j], s->depth)) break; /* Exchange v with the smallest son */ s->heap[k] = s->heap[j]; k = j; /* And continue down the tree, setting j to the left son of k */ j <<= 1; } s->heap[k] = v; } /* =========================================================================== * Compute the optimal bit lengths for a tree and update the total bit length * for the current block. * IN assertion: the fields freq and dad are set, heap[heap_max] and * above are the tree nodes sorted by increasing frequency. * OUT assertions: the field len is set to the optimal bit length, the * array bl_count contains the frequencies for each bit length. * The length opt_len is updated; static_len is also updated if stree is * not null.
 */ local void gen_bitlen(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; int max_code = desc->max_code; const ct_data *stree = desc->stat_desc->static_tree; const intf *extra = desc->stat_desc->extra_bits; int base = desc->stat_desc->extra_base; int max_length = desc->stat_desc->max_length; int h; /* heap index */ int n, m; /* iterate over the tree elements */ int bits; /* bit length */ int xbits; /* extra bits */ ush f; /* frequency */ int overflow = 0; /* number of elements with bit length too large */ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; /* In a first pass, compute the optimal bit lengths (which may * overflow in the case of the bit length tree). */ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ for (h = s->heap_max+1; h < HEAP_SIZE; h++) { n = s->heap[h]; bits = tree[tree[n].Dad].Len + 1; if (bits > max_length) bits = max_length, overflow++; tree[n].Len = (ush)bits; /* We overwrite tree[n].Dad which is no longer needed */ if (n > max_code) continue; /* not a leaf node */ s->bl_count[bits]++; xbits = 0; if (n >= base) xbits = extra[n-base]; f = tree[n].Freq; s->opt_len += (ulg)f * (bits + xbits); if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); } if (overflow == 0) return; Trace((stderr,"\nbit length overflow\n")); /* This happens for example on obj2 and pic of the Calgary corpus */ /* Find the first bit length which could increase: */ do { bits = max_length-1; while (s->bl_count[bits] == 0) bits--; s->bl_count[bits]--; /* move one leaf down the tree */ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ s->bl_count[max_length]--; /* The brother of the overflow item also moves one step up, * but this does not affect bl_count[max_length] */ overflow -= 2; } while (overflow > 0); /* Now recompute all bit lengths, scanning in increasing frequency. * h is still equal to HEAP_SIZE.
 (It is simpler to reconstruct all * lengths instead of fixing only the wrong ones. This idea is taken * from 'ar' written by Haruhiko Okumura.) */ for (bits = max_length; bits != 0; bits--) { n = s->bl_count[bits]; while (n != 0) { m = s->heap[--h]; if (m > max_code) continue; if ((unsigned) tree[m].Len != (unsigned) bits) { Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); s->opt_len += ((long)bits - (long)tree[m].Len) *(long)tree[m].Freq; tree[m].Len = (ush)bits; } n--; } } } /* =========================================================================== * Generate the codes for a given tree and bit counts (which need not be * optimal). * IN assertion: the array bl_count contains the bit length statistics for * the given tree and the field len is set for all tree elements. * OUT assertion: the field code is set for all tree elements of non * zero code length. */ local void gen_codes (tree, max_code, bl_count) ct_data *tree; /* the tree to decorate */ int max_code; /* largest code with non zero frequency */ ushf *bl_count; /* number of codes at each bit length */ { ush next_code[MAX_BITS+1]; /* next code value for each bit length */ ush code = 0; /* running code value */ int bits; /* bit index */ int n; /* code index */ /* The distribution counts are first used to generate the code values * without bit reversal. */ for (bits = 1; bits <= MAX_BITS; bits++) { next_code[bits] = code = (code + bl_count[bits-1]) << 1; } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. */ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, "inconsistent bit counts"); Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); for (n = 0; n <= max_code; n++) { int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ tree[n].Code = bi_reverse(next_code[len]++, len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ?
n : ' '), len, tree[n].Code, next_code[len]-1)); } } /* =========================================================================== * Construct one Huffman tree and assigns the code bit strings and lengths. * Update the total bit length for the current block. * IN assertion: the field freq is set for all tree elements. * OUT assertions: the fields len and code are set to the optimal bit length * and corresponding code. The length opt_len is updated; static_len is * also updated if stree is not null. The field max_code is set. */ local void build_tree(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; const ct_data *stree = desc->stat_desc->static_tree; int elems = desc->stat_desc->elems; int n, m; /* iterate over heap elements */ int max_code = -1; /* largest code with non zero frequency */ int node; /* new node being created */ /* Construct the initial heap, with least frequent element in * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. * heap[0] is not used. */ s->heap_len = 0, s->heap_max = HEAP_SIZE; for (n = 0; n < elems; n++) { if (tree[n].Freq != 0) { s->heap[++(s->heap_len)] = max_code = n; s->depth[n] = 0; } else { tree[n].Len = 0; } } /* The pkzip format requires that at least one distance code exists, * and that at least one bit should be sent even if there is only one * possible code. So to avoid special checks later on we force at least * two codes of non zero frequency. */ while (s->heap_len < 2) { node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); tree[node].Freq = 1; s->depth[node] = 0; s->opt_len--; if (stree) s->static_len -= stree[node].Len; /* node is 0 or 1 so it does not have extra bits */ } desc->max_code = max_code; /* The elements heap[heap_len/2+1 ..
 heap_len] are leaves of the tree, * establish sub-heaps of increasing lengths: */ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); /* Construct the Huffman tree by repeatedly combining the least two * frequent nodes. */ node = elems; /* next internal node of the tree */ do { pqremove(s, tree, n); /* n = node of least frequency */ m = s->heap[SMALLEST]; /* m = node of next least frequency */ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ s->heap[--(s->heap_max)] = m; /* Create a new node father of n and m */ tree[node].Freq = tree[n].Freq + tree[m].Freq; s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ? s->depth[n] : s->depth[m]) + 1); tree[n].Dad = tree[m].Dad = (ush)node; #ifdef DUMP_BL_TREE if (tree == s->bl_tree) { fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); } #endif /* and insert the new node in the heap */ s->heap[SMALLEST] = node++; pqdownheap(s, tree, SMALLEST); } while (s->heap_len >= 2); s->heap[--(s->heap_max)] = s->heap[SMALLEST]; /* At this point, the fields freq and dad are set. We can now * generate the bit lengths. */ gen_bitlen(s, (tree_desc *)desc); /* The field len is now set, we can generate the bit codes */ gen_codes ((ct_data *)tree, max_code, s->bl_count); } /* =========================================================================== * Scan a literal or distance tree to determine the frequencies of the codes * in the bit length tree.
*/
local void scan_tree (s, tree, max_code)
    deflate_state *s;
    ct_data *tree;   /* the tree to be scanned */
    int max_code;    /* and its largest code of non zero frequency */
{
    int idx;                    /* walks every code of the tree */
    int prev_len = -1;          /* length emitted for the previous run */
    int cur_len;                /* length of the code being examined */
    int next_len = tree[0].Len; /* length of the code that follows */
    int run = 0;                /* how many times cur_len repeated so far */
    int run_max = 7;            /* longest run foldable into one symbol */
    int run_min = 4;            /* shortest run worth a repeat symbol */

    if (next_len == 0) run_max = 138, run_min = 3;
    tree[max_code+1].Len = (ush)0xffff; /* guard */

    for (idx = 0; idx <= max_code; idx++) {
        cur_len = next_len;
        next_len = tree[idx+1].Len;
        if (++run < run_max && cur_len == next_len) {
            continue;                           /* run still growing */
        } else if (run < run_min) {
            s->bl_tree[cur_len].Freq += run;    /* too short: count each */
        } else if (cur_len != 0) {
            if (cur_len != prev_len) s->bl_tree[cur_len].Freq++;
            s->bl_tree[REP_3_6].Freq++;         /* repeat previous length */
        } else if (run <= 10) {
            s->bl_tree[REPZ_3_10].Freq++;       /* short run of zeros */
        } else {
            s->bl_tree[REPZ_11_138].Freq++;     /* long run of zeros */
        }
        run = 0;
        prev_len = cur_len;
        if (next_len == 0) {
            run_max = 138, run_min = 3;
        } else if (cur_len == next_len) {
            run_max = 6, run_min = 3;
        } else {
            run_max = 7, run_min = 4;
        }
    }
}

/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.
*/
/* NOTE(review): emission twin of scan_tree above -- replays the same run
 * detection, but sends bl_tree codes plus extra repeat bits instead of
 * counting frequencies. */
local void send_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ /* tree[max_code+1].Len = -1; */ /* guard already set */ if (nextlen == 0) max_count = 138, min_count = 3; for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { do { send_code(s, curlen, s->bl_tree); } while (--count != 0); } else if (curlen != 0) { if (curlen != prevlen) { send_code(s, curlen, s->bl_tree); count--; } Assert(count >= 3 && count <= 6, " 3_6?"); send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); } else if (count <= 10) { send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); } else { send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Construct the Huffman tree for the bit lengths and return the index in * bl_order of the last bit length code to send.
*/
local int build_bl_tree(s)
    deflate_state *s;
{
    int last_index;  /* index of last bit length code of non zero freq */

    /* Tally how often each code length occurs in the two main trees. */
    scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
    scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);

    /* Turn those tallies into the bit length (code length) tree. */
    build_tree(s, (tree_desc *)(&(s->bl_desc)));
    /* opt_len now includes the length of the tree representations, except
     * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
     */

    /* Trim trailing zero-length entries, but always keep at least 4 bit
     * length codes: the pkzip format requires a minimum of 4 (appnote.txt
     * says 3, the value actually used is 4). */
    for (last_index = BL_CODES-1; last_index >= 3; last_index--) {
        if (s->bl_tree[bl_order[last_index]].Len != 0) break;
    }

    /* Account for the bit length tree and the 5+5+4 header count bits. */
    s->opt_len += 3*(last_index+1) + 5+5+4;
    Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
            s->opt_len, s->static_len));

    return last_index;
}

/* ===========================================================================
 * Send the header for a block using dynamic Huffman trees: the counts, the
 * lengths of the bit length codes, the literal tree and the distance tree.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
*/
/* NOTE(review): writes the three counts (lcodes/dcodes/blcodes), the 3-bit
 * lengths of the bit length codes in bl_order, then the literal and distance
 * trees via send_tree above. */
local void send_all_trees(s, lcodes, dcodes, blcodes) deflate_state *s; int lcodes, dcodes, blcodes; /* number of codes for each tree */ { int rank; /* index in bl_order */ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes"); Tracev((stderr, "\nbl counts: ")); send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ send_bits(s, dcodes-1, 5); send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ for (rank = 0; rank < blcodes; rank++) { Tracev((stderr, "\nbl code %2d ", bl_order[rank])); send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); } Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); } /* =========================================================================== * Send a stored block */ void _tr_stored_block(s, buf, stored_len, eof) deflate_state *s; charf *buf; /* input block */ ulg stored_len; /* length of input block */ int eof; /* true if this is the last block for a file */ { send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ #ifdef DEBUG s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; s->compressed_len += (stored_len + 4) << 3; #endif copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ } /* =========================================================================== * Send one empty static block to give enough lookahead for inflate. * This takes 10 bits, of which 7 may remain in the bit buffer. * The current inflate code requires 9 bits of lookahead. If the * last two codes for the previous block (real code plus EOB) were coded * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode * the last real code.
 In this case we send two empty static blocks instead * of one. (There are no problems if the previous block is stored or fixed.) * To simplify the code, we assume the worst case of last real code encoded * on one bit only. */ void _tr_align(s) deflate_state *s; { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); #ifdef DEBUG s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ #endif bi_flush(s); /* Of the 10 bits for the empty block, we have already sent * (10 - bi_valid) bits. The lookahead for the last real code (before * the EOB of the previous block) was thus at least one plus the length * of the EOB plus what we have just sent of the empty static block. */ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); #ifdef DEBUG s->compressed_len += 10L; #endif bi_flush(s); } s->last_eob_len = 7; } /* =========================================================================== * Determine the best encoding for the current block: dynamic trees, static * trees or store, and output the encoded block to the zip file.
 */ void _tr_flush_block(s, buf, stored_len, eof) deflate_state *s; charf *buf; /* input block, or NULL if too old */ ulg stored_len; /* length of input block */ int eof; /* true if this is the last block for a file */ { ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ int max_blindex = 0; /* index of last bit length code of non zero freq */ /* Build the Huffman trees unless a stored block is forced */ if (s->level > 0) { /* Check if the file is binary or text */ if (stored_len > 0 && s->strm->data_type == Z_UNKNOWN) set_data_type(s); /* Construct the literal and distance trees */ build_tree(s, (tree_desc *)(&(s->l_desc))); Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, s->static_len)); build_tree(s, (tree_desc *)(&(s->d_desc))); Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, s->static_len)); /* At this point, opt_len and static_len are the total bit lengths of * the compressed block data, excluding the tree representations. */ /* Build the bit length tree for the above two trees, and get the index * in bl_order of the last bit length code to send. */ max_blindex = build_bl_tree(s); /* Determine the best encoding. Compute the block lengths in bytes. */ opt_lenb = (s->opt_len+3+7)>>3; static_lenb = (s->static_len+3+7)>>3; Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, s->last_lit)); if (static_lenb <= opt_lenb) opt_lenb = static_lenb; } else { Assert(buf != (char*)0, "lost buf"); opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ } #ifdef FORCE_STORED if (buf != (char*)0) { /* force stored block */ #else if (stored_len+4 <= opt_lenb && buf != (char*)0) { /* 4: two words for the lengths */ #endif /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. * Otherwise we can't have processed more than WSIZE input bytes since * the last block flush, because compression would have been * successful.
 If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. */ _tr_stored_block(s, buf, stored_len, eof); #ifdef FORCE_STATIC } else if (static_lenb >= 0) { /* force static trees */ #else } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { #endif send_bits(s, (STATIC_TREES<<1)+eof, 3); compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); #ifdef DEBUG s->compressed_len += 3 + s->static_len; #endif } else { send_bits(s, (DYN_TREES<<1)+eof, 3); send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, max_blindex+1); compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); #ifdef DEBUG s->compressed_len += 3 + s->opt_len; #endif } Assert (s->compressed_len == s->bits_sent, "bad compressed size"); /* The above check is made mod 2^32, for files larger than 512 MB * and uLong implemented on 32 bits. */ init_block(s); if (eof) { bi_windup(s); #ifdef DEBUG s->compressed_len += 7; /* align on byte boundary */ #endif } Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, s->compressed_len-7*eof)); } /* =========================================================================== * Save the match info and tally the frequency counts. Return true if * the current block must be flushed.
 * NOTE(review): _tr_tally below also updates dyn_ltree/dyn_dtree frequencies.
 */ int _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ { s->d_buf[s->last_lit] = (ush)dist; s->l_buf[s->last_lit++] = (uch)lc; if (dist == 0) { /* lc is the unmatched char */ s->dyn_ltree[lc].Freq++; } else { s->matches++; /* Here, lc is the match length - MIN_MATCH */ dist--; /* dist = match distance - 1 */ Assert((ush)dist < (ush)MAX_DIST(s) && (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; s->dyn_dtree[d_code(dist)].Freq++; } #ifdef TRUNCATE_BLOCK /* Try to guess if it is profitable to stop the current block here */ if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { /* Compute an upper bound for the compressed length */ ulg out_length = (ulg)s->last_lit*8L; ulg in_length = (ulg)((long)s->strstart - s->block_start); int dcode; for (dcode = 0; dcode < D_CODES; dcode++) { out_length += (ulg)s->dyn_dtree[dcode].Freq * (5L+extra_dbits[dcode]); } out_length >>= 3; Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", s->last_lit, in_length, out_length, 100L - out_length*100L/in_length)); if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; } #endif return (s->last_lit == s->lit_bufsize-1); /* We avoid equality with lit_bufsize because of wraparound at 64K * on 16 bit machines and because stored blocks are restricted to * 64K-1 bytes.
 */ } /* =========================================================================== * Send the block data compressed using the given Huffman trees */ local void compress_block(s, ltree, dtree) deflate_state *s; ct_data *ltree; /* literal tree */ ct_data *dtree; /* distance tree */ { unsigned dist; /* distance of matched string */ int lc; /* match length or unmatched char (if dist == 0) */ unsigned lx = 0; /* running index in l_buf */ unsigned code; /* the code to send */ int extra; /* number of extra bits to send */ if (s->last_lit != 0) do { dist = s->d_buf[lx]; lc = s->l_buf[lx++]; if (dist == 0) { send_code(s, lc, ltree); /* send a literal byte */ Tracecv(isgraph(lc), (stderr," '%c' ", lc)); } else { /* Here, lc is the match length - MIN_MATCH */ code = _length_code[lc]; send_code(s, code+LITERALS+1, ltree); /* send the length code */ extra = extra_lbits[code]; if (extra != 0) { lc -= base_length[code]; send_bits(s, lc, extra); /* send the extra length bits */ } dist--; /* dist is now the match distance - 1 */ code = d_code(dist); Assert (code < D_CODES, "bad d_code"); send_code(s, code, dtree); /* send the distance code */ extra = extra_dbits[code]; if (extra != 0) { dist -= base_dist[code]; send_bits(s, dist, extra); /* send the extra distance bits */ } } /* literal or match pair ? */ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, "pendingBuf overflow"); } while (lx < s->last_lit); send_code(s, END_BLOCK, ltree); s->last_eob_len = ltree[END_BLOCK].Len; } /* =========================================================================== * Set the data type to BINARY or TEXT, using a crude approximation: * set it to Z_TEXT if all symbols are either printable characters (33 to 255) * or white spaces (9 to 13, or 32); or set it to Z_BINARY otherwise. * IN assertion: the fields Freq of dyn_ltree are set.
*/ local void set_data_type(s) deflate_state *s; { int n; for (n = 0; n < 9; n++) if (s->dyn_ltree[n].Freq != 0) break; if (n == 9) for (n = 14; n < 32; n++) if (s->dyn_ltree[n].Freq != 0) break; s->strm->data_type = (n == 32) ? Z_TEXT : Z_BINARY; } /* =========================================================================== * Reverse the first len bits of a code, using straightforward code (a faster * method would use a table) * IN assertion: 1 <= len <= 15 */ local unsigned bi_reverse(code, len) unsigned code; /* the value to invert */ int len; /* its bit length */ { register unsigned res = 0; do { res |= code & 1; code >>= 1, res <<= 1; } while (--len > 0); return res >> 1; } /* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. */ local void bi_flush(s) deflate_state *s; { if (s->bi_valid == 16) { put_short(s, s->bi_buf); s->bi_buf = 0; s->bi_valid = 0; } else if (s->bi_valid >= 8) { put_byte(s, (Byte)s->bi_buf); s->bi_buf >>= 8; s->bi_valid -= 8; } } /* =========================================================================== * Flush the bit buffer and align the output on a byte boundary */ local void bi_windup(s) deflate_state *s; { if (s->bi_valid > 8) { put_short(s, s->bi_buf); } else if (s->bi_valid > 0) { put_byte(s, (Byte)s->bi_buf); } s->bi_buf = 0; s->bi_valid = 0; #ifdef DEBUG s->bits_sent = (s->bits_sent+7) & ~7; #endif } /* =========================================================================== * Copy a stored block, storing first the length and its * one's complement if requested. 
*/
local void copy_block(s, buf, len, header)
    deflate_state *s;
    charf    *buf;    /* the input data */
    unsigned len;     /* its length */
    int      header;  /* true if block header must be written */
{
    unsigned remaining = len;

    bi_windup(s);        /* align on byte boundary */
    s->last_eob_len = 8; /* enough lookahead for inflate */

    if (header) {
        /* stored-block header: length then its one's complement */
        put_short(s, (ush)len);
        put_short(s, (ush)~len);
#ifdef DEBUG
        s->bits_sent += 2*16;
#endif
    }
#ifdef DEBUG
    s->bits_sent += (ulg)len<<3;
#endif
    for (; remaining > 0; remaining--) {
        put_byte(s, *buf++);
    }
}
gpl-2.0
wurikiji/ttFS
hunbag/linux-3.10.61/kernel/events/ring_buffer.c
1874
9447
/* NOTE(review): a second embedded file begins here -- kernel/events/ring_buffer.c
 * (perf events mmap ring buffer). Function definitions straddle the packed
 * lines below (e.g. ring_buffer_init is split mid-declaration), so the code
 * is left byte-identical. */
/* * Performance events ring-buffer code: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * For licensing details see kernel-base/COPYING */ #include <linux/perf_event.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include "internal.h" static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, unsigned long offset, unsigned long head) { unsigned long sz = perf_data_size(rb); unsigned long mask = sz - 1; /* * check if user-writable * overwrite : over-write its own tail * !overwrite: buffer possibly drops events. */ if (rb->overwrite) return true; /* * verify that payload is not bigger than buffer * otherwise masking logic may fail to detect * the "not enough space" condition */ if ((head - offset) > sz) return false; offset = (offset - tail) & mask; head = (head - tail) & mask; if ((int)(head - offset) < 0) return false; return true; } static void perf_output_wakeup(struct perf_output_handle *handle) { atomic_set(&handle->rb->poll, POLL_IN); handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending); } /* * We need to ensure a later event_id doesn't publish a head when a former * event isn't done writing. However since we need to deal with NMIs we * cannot fully serialize things. * * We only publish the head (and generate a wakeup) when the outer-most * event completes. */ static void perf_output_get_handle(struct perf_output_handle *handle) { struct ring_buffer *rb = handle->rb; preempt_disable(); local_inc(&rb->nest); handle->wakeup = local_read(&rb->wakeup); } static void perf_output_put_handle(struct perf_output_handle *handle) { struct ring_buffer *rb = handle->rb; unsigned long head; again: head = local_read(&rb->head); /* * IRQ/NMI can happen here, which means we can miss a head update.
 */ if (!local_dec_and_test(&rb->nest)) goto out; /* * Since the mmap() consumer (userspace) can run on a different CPU: * * kernel user * * READ ->data_tail READ ->data_head * smp_mb() (A) smp_rmb() (C) * WRITE $data READ $data * smp_wmb() (B) smp_mb() (D) * STORE ->data_head WRITE ->data_tail * * Where A pairs with D, and B pairs with C. * * I don't think A needs to be a full barrier because we won't in fact * write data until we see the store from userspace. So we simply don't * issue the data WRITE until we observe it. Be conservative for now. * * OTOH, D needs to be a full barrier since it separates the data READ * from the tail WRITE. * * For B a WMB is sufficient since it separates two WRITEs, and for C * an RMB is sufficient since it separates two READs. * * See perf_output_begin(). */ smp_wmb(); rb->user_page->data_head = head; /* * Now check if we missed an update, rely on the (compiler) * barrier in atomic_dec_and_test() to re-read rb->head. */ if (unlikely(head != local_read(&rb->head))) { local_inc(&rb->nest); goto again; } if (handle->wakeup != local_read(&rb->wakeup)) perf_output_wakeup(handle); out: preempt_enable(); } int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size) { struct ring_buffer *rb; unsigned long tail, offset, head; int have_lost; struct perf_sample_data sample_data; struct { struct perf_event_header header; u64 id; u64 lost; } lost_event; rcu_read_lock(); /* * For inherited events we send all the output towards the parent.
 */ if (event->parent) event = event->parent; rb = rcu_dereference(event->rb); if (!rb) goto out; handle->rb = rb; handle->event = event; if (!rb->nr_pages) goto out; have_lost = local_read(&rb->lost); if (have_lost) { lost_event.header.size = sizeof(lost_event); perf_event_header__init_id(&lost_event.header, &sample_data, event); size += lost_event.header.size; } perf_output_get_handle(handle); do { /* * Userspace could choose to issue a mb() before updating the * tail pointer. So that all reads will be completed before the * write is issued. * * See perf_output_put_handle(). */ tail = ACCESS_ONCE(rb->user_page->data_tail); smp_mb(); offset = head = local_read(&rb->head); head += size; if (unlikely(!perf_output_space(rb, tail, offset, head))) goto fail; } while (local_cmpxchg(&rb->head, offset, head) != offset); if (head - local_read(&rb->wakeup) > rb->watermark) local_add(rb->watermark, &rb->wakeup); handle->page = offset >> (PAGE_SHIFT + page_order(rb)); handle->page &= rb->nr_pages - 1; handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1); handle->addr = rb->data_pages[handle->page]; handle->addr += handle->size; handle->size = (PAGE_SIZE << page_order(rb)) - handle->size; if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; lost_event.header.misc = 0; lost_event.id = event->id; lost_event.lost = local_xchg(&rb->lost, 0); perf_output_put(handle, lost_event); perf_event__output_id_sample(event, handle, &sample_data); } return 0; fail: local_inc(&rb->lost); perf_output_put_handle(handle); out: rcu_read_unlock(); return -ENOSPC; } unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len) { return __output_copy(handle, buf, len); } unsigned int perf_output_skip(struct perf_output_handle *handle, unsigned int len) { return __output_skip(handle, NULL, len); } void perf_output_end(struct perf_output_handle *handle) { perf_output_put_handle(handle); rcu_read_unlock(); } static void ring_buffer_init(struct
 ring_buffer *rb, long watermark, int flags) { long max_size = perf_data_size(rb); if (watermark) rb->watermark = min(max_size, watermark); if (!rb->watermark) rb->watermark = max_size / 2; if (flags & RING_BUFFER_WRITABLE) rb->overwrite = 0; else rb->overwrite = 1; atomic_set(&rb->refcount, 1); INIT_LIST_HEAD(&rb->event_list); spin_lock_init(&rb->event_lock); } #ifndef CONFIG_PERF_USE_VMALLOC /* * Back perf_mmap() with regular GFP_KERNEL-0 pages. */ struct page * perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) { if (pgoff > rb->nr_pages) return NULL; if (pgoff == 0) return virt_to_page(rb->user_page); return virt_to_page(rb->data_pages[pgoff - 1]); } static void *perf_mmap_alloc_page(int cpu) { struct page *page; int node; node = (cpu == -1) ? cpu : cpu_to_node(cpu); page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); if (!page) return NULL; return page_address(page); } struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) { struct ring_buffer *rb; unsigned long size; int i; size = sizeof(struct ring_buffer); size += nr_pages * sizeof(void *); rb = kzalloc(size, GFP_KERNEL); if (!rb) goto fail; rb->user_page = perf_mmap_alloc_page(cpu); if (!rb->user_page) goto fail_user_page; for (i = 0; i < nr_pages; i++) { rb->data_pages[i] = perf_mmap_alloc_page(cpu); if (!rb->data_pages[i]) goto fail_data_pages; } rb->nr_pages = nr_pages; ring_buffer_init(rb, watermark, flags); return rb; fail_data_pages: for (i--; i >= 0; i--) free_page((unsigned long)rb->data_pages[i]); free_page((unsigned long)rb->user_page); fail_user_page: kfree(rb); fail: return NULL; } static void perf_mmap_free_page(unsigned long addr) { struct page *page = virt_to_page((void *)addr); page->mapping = NULL; __free_page(page); } void rb_free(struct ring_buffer *rb) { int i; perf_mmap_free_page((unsigned long)rb->user_page); for (i = 0; i < rb->nr_pages; i++) perf_mmap_free_page((unsigned long)rb->data_pages[i]); kfree(rb); } #else static int
 data_page_nr(struct ring_buffer *rb) { return rb->nr_pages << page_order(rb); } struct page * perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) { /* The '>' counts in the user page. */ if (pgoff > data_page_nr(rb)) return NULL; return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); } static void perf_mmap_unmark_page(void *addr) { struct page *page = vmalloc_to_page(addr); page->mapping = NULL; } static void rb_free_work(struct work_struct *work) { struct ring_buffer *rb; void *base; int i, nr; rb = container_of(work, struct ring_buffer, work); nr = data_page_nr(rb); base = rb->user_page; /* The '<=' counts in the user page. */ for (i = 0; i <= nr; i++) perf_mmap_unmark_page(base + (i * PAGE_SIZE)); vfree(base); kfree(rb); } void rb_free(struct ring_buffer *rb) { schedule_work(&rb->work); } struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) { struct ring_buffer *rb; unsigned long size; void *all_buf; size = sizeof(struct ring_buffer); size += sizeof(void *); rb = kzalloc(size, GFP_KERNEL); if (!rb) goto fail; INIT_WORK(&rb->work, rb_free_work); all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); if (!all_buf) goto fail_all_buf; rb->user_page = all_buf; rb->data_pages[0] = all_buf + PAGE_SIZE; rb->page_order = ilog2(nr_pages); rb->nr_pages = !!nr_pages; ring_buffer_init(rb, watermark, flags); return rb; fail_all_buf: kfree(rb); fail: return NULL; } #endif /* CONFIG_PERF_USE_VMALLOC */
gpl-2.0
eoghan2t9/android_kernel_asus_moorefield
arch/ia64/kernel/kprobes.c
2130
30380
/* * Kernel Probes (KProbes) * arch/ia64/kernel/kprobes.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * Copyright (C) Intel Corporation, 2005 * * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy * <anil.s.keshavamurthy@intel.com> adapted from i386 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/preempt.h> #include <linux/moduleloader.h> #include <linux/kdebug.h> #include <asm/pgtable.h> #include <asm/sections.h> #include <asm/uaccess.h> extern void jprobe_inst_return(void); DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; enum instruction_type {A, I, M, F, B, L, X, u}; static enum instruction_type bundle_encoding[32][3] = { { M, I, I }, /* 00 */ { M, I, I }, /* 01 */ { M, I, I }, /* 02 */ { M, I, I }, /* 03 */ { M, L, X }, /* 04 */ { M, L, X }, /* 05 */ { u, u, u }, /* 06 */ { u, u, u }, /* 07 */ { M, M, I }, /* 08 */ { M, M, I }, /* 09 */ { M, M, I }, /* 0A */ { M, M, I }, /* 0B */ { M, F, I }, /* 0C */ { M, F, I }, /* 0D */ { M, M, F }, /* 0E */ { M, M, F }, /* 0F */ { M, I, B }, /* 10 */ { M, I, B }, /* 11 */ { M, B, B }, /* 12 */ { M, B, B }, /* 13 */ 
{ u, u, u }, /* 14 */ { u, u, u }, /* 15 */ { B, B, B }, /* 16 */ { B, B, B }, /* 17 */ { M, M, B }, /* 18 */ { M, M, B }, /* 19 */ { u, u, u }, /* 1A */ { u, u, u }, /* 1B */ { M, F, B }, /* 1C */ { M, F, B }, /* 1D */ { u, u, u }, /* 1E */ { u, u, u }, /* 1F */ }; /* Insert a long branch code */ static void __kprobes set_brl_inst(void *from, void *to) { s64 rel = ((s64) to - (s64) from) >> 4; bundle_t *brl; brl = (bundle_t *) ((u64) from & ~0xf); brl->quad0.template = 0x05; /* [MLX](stop) */ brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */ brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2; brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46); /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff); } /* * In this function we check to see if the instruction * is IP relative instruction and update the kprobe * inst flag accordingly */ static void __kprobes update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p) { p->ainsn.inst_flag = 0; p->ainsn.target_br_reg = 0; p->ainsn.slot = slot; /* Check for Break instruction * Bits 37:40 Major opcode to be zero * Bits 27:32 X6 to be zero * Bits 32:35 X3 to be zero */ if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) { /* is a break instruction */ p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; return; } if (bundle_encoding[template][slot] == B) { switch (major_opcode) { case INDIRECT_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; case IP_RELATIVE_PREDICT_OPCODE: case IP_RELATIVE_BRANCH_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; break; case IP_RELATIVE_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; } } else if (bundle_encoding[template][slot] == X) { switch (major_opcode) { case 
LONG_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; } } return; } /* * In this function we check to see if the instruction * (qp) cmpx.crel.ctype p1,p2=r2,r3 * on which we are inserting kprobe is cmp instruction * with ctype as unc. */ static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst) { cmp_inst_t cmp_inst; uint ctype_unc = 0; if (!((bundle_encoding[template][slot] == I) || (bundle_encoding[template][slot] == M))) goto out; if (!((major_opcode == 0xC) || (major_opcode == 0xD) || (major_opcode == 0xE))) goto out; cmp_inst.l = kprobe_inst; if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) { /* Integer compare - Register Register (A6 type)*/ if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) ctype_unc = 1; } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) { /* Integer compare - Immediate Register (A8 type)*/ if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) ctype_unc = 1; } out: return ctype_unc; } /* * In this function we check to see if the instruction * on which we are inserting kprobe is supported. 
* Returns qp value if supported * Returns -EINVAL if unsupported */ static int __kprobes unsupported_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, unsigned long addr) { int qp; qp = kprobe_inst & 0x3f; if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) { if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on cmp unc " "instruction on slot 1 at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } else if (bundle_encoding[template][slot] == I) { if (major_opcode == 0) { /* * Check for Integer speculation instruction * - Bit 33-35 to be equal to 0x1 */ if (((kprobe_inst >> 33) & 0x7) == 1) { printk(KERN_WARNING "Kprobes on speculation inst at <0x%lx> not supported\n", addr); return -EINVAL; } /* * IP relative mov instruction * - Bit 27-35 to be equal to 0x30 */ if (((kprobe_inst >> 27) & 0x1FF) == 0x30) { printk(KERN_WARNING "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n", addr); return -EINVAL; } } else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) && (kprobe_inst & (0x1UL << 12))) { /* test bit instructions, tbit,tnat,tf * bit 33-36 to be equal to 0 * bit 12 to be equal to 1 */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on test bit " "instruction on slot at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } } else if (bundle_encoding[template][slot] == B) { if (major_opcode == 7) { /* IP-Relative Predict major code is 7 */ printk(KERN_WARNING "Kprobes on IP-Relative" "Predict is not supported\n"); return -EINVAL; } else if (major_opcode == 2) { /* Indirect Predict, major code is 2 * bit 27-32 to be equal to 10 or 11 */ int x6=(kprobe_inst >> 27) & 0x3F; if ((x6 == 0x10) || (x6 == 0x11)) { printk(KERN_WARNING "Kprobes on " "Indirect Predict is not supported\n"); return -EINVAL; } } } /* kernel does not use float instruction, here for safety kprobe * will judge whether it is fcmp/flass/float approximation instruction */ else if 
(unlikely(bundle_encoding[template][slot] == F)) { if ((major_opcode == 4 || major_opcode == 5) && (kprobe_inst & (0x1 << 12))) { /* fcmp/fclass unc instruction */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on fcmp/fclass " "instruction on slot at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } if ((major_opcode == 0 || major_opcode == 1) && (kprobe_inst & (0x1UL << 33))) { /* float Approximation instruction */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on float Approx " "instr at <0x%lx> is not supported\n", addr); return -EINVAL; } qp = 0; } } return qp; } /* * In this function we override the bundle with * the break instruction at the given slot. */ static void __kprobes prepare_break_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p, int qp) { unsigned long break_inst = BREAK_INST; bundle_t *bundle = &p->opcode.bundle; /* * Copy the original kprobe_inst qualifying predicate(qp) * to the break instruction */ break_inst |= qp; switch (slot) { case 0: bundle->quad0.slot0 = break_inst; break; case 1: bundle->quad0.slot1_p0 = break_inst; bundle->quad1.slot1_p1 = break_inst >> (64-46); break; case 2: bundle->quad1.slot2 = break_inst; break; } /* * Update the instruction flag, so that we can * emulate the instruction properly after we * single step on original instruction */ update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p); } static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot, unsigned long *kprobe_inst, uint *major_opcode) { unsigned long kprobe_inst_p0, kprobe_inst_p1; unsigned int template; template = bundle->quad0.template; switch (slot) { case 0: *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT); *kprobe_inst = bundle->quad0.slot0; break; case 1: *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT); kprobe_inst_p0 = bundle->quad0.slot1_p0; kprobe_inst_p1 = bundle->quad1.slot1_p1; *kprobe_inst = kprobe_inst_p0 | 
(kprobe_inst_p1 << (64-46)); break; case 2: *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT); *kprobe_inst = bundle->quad1.slot2; break; } } /* Returns non-zero if the addr is in the Interrupt Vector Table */ static int __kprobes in_ivt_functions(unsigned long addr) { return (addr >= (unsigned long)__start_ivt_text && addr < (unsigned long)__end_ivt_text); } static int __kprobes valid_kprobe_addr(int template, int slot, unsigned long addr) { if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { printk(KERN_WARNING "Attempting to insert unaligned kprobe " "at 0x%lx\n", addr); return -EINVAL; } if (in_ivt_functions(addr)) { printk(KERN_WARNING "Kprobes can't be inserted inside " "IVT functions at 0x%lx\n", addr); return -EINVAL; } return 0; } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { unsigned int i; i = atomic_add_return(1, &kcb->prev_kprobe_index); kcb->prev_kprobe[i-1].kp = kprobe_running(); kcb->prev_kprobe[i-1].status = kcb->kprobe_status; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { unsigned int i; i = atomic_read(&kcb->prev_kprobe_index); __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp; kcb->kprobe_status = kcb->prev_kprobe[i-1].status; atomic_sub(1, &kcb->prev_kprobe_index); } static void __kprobes set_current_kprobe(struct kprobe *p, struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; } static void kretprobe_trampoline(void) { } /* * At this point the target function has been tricked into * returning into our trampoline. 
Lookup the associated instance * and then: * - call the handler function * - cleanup by marking the instance as unused * - long jump back to the original return address */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = ((struct fnptr *)kretprobe_trampoline)->ip; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same * function, the first instance's ret_addr will point to the * real return address, and all the rest will point to * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; orig_ret_address = (unsigned long)ri->ret_addr; if (orig_ret_address != trampoline_address) /* * This is the real return address. Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } regs->cr_iip = orig_ret_address; hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; if (ri->rp && ri->rp->handler) ri->rp->handler(ri, regs); orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) /* * This is the real return address. 
Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } kretprobe_assert(ri, orig_ret_address, trampoline_address); reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler * to run (and have re-enabled preemption) */ return 1; } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->b0; /* Replace the return addr with trampoline addr */ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; } /* Check the instruction in the slot is break */ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot) { unsigned int major_opcode; unsigned int template = bundle->quad0.template; unsigned long kprobe_inst; /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ if (slot == 1 && bundle_encoding[template][1] == L) slot++; /* Get Kprobe probe instruction at given slot*/ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); /* For break instruction, * Bits 37:40 Major opcode to be zero * Bits 27:32 X6 to be zero * Bits 32:35 X3 to be zero */ if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) { /* Not a break instruction */ return 0; } /* Is a break instruction */ return 1; } /* * In this function, we check whether the target bundle modifies IP or * it triggers an exception. If so, it cannot be boostable. 
*/ static int __kprobes can_boost(bundle_t *bundle, uint slot, unsigned long bundle_addr) { unsigned int template = bundle->quad0.template; do { if (search_exception_tables(bundle_addr + slot) || __is_ia64_break_inst(bundle, slot)) return 0; /* exception may occur in this bundle*/ } while ((++slot) < 3); template &= 0x1e; if (template >= 0x10 /* including B unit */ || template == 0x04 /* including X unit */ || template == 0x06) /* undefined */ return 0; return 1; } /* Prepare long jump bundle and disables other boosters if need */ static void __kprobes prepare_booster(struct kprobe *p) { unsigned long addr = (unsigned long)p->addr & ~0xFULL; unsigned int slot = (unsigned long)p->addr & 0xf; struct kprobe *other_kp; if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) { set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1); p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE; } /* disables boosters in previous slots */ for (; addr < (unsigned long)p->addr; addr++) { other_kp = get_kprobe((void *)addr); if (other_kp) other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE; } } int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long) p->addr; unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); unsigned long kprobe_inst=0; unsigned int slot = addr & 0xf, template, major_opcode = 0; bundle_t *bundle; int qp; bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; template = bundle->quad0.template; if(valid_kprobe_addr(template, slot, addr)) return -EINVAL; /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ if (slot == 1 && bundle_encoding[template][1] == L) slot++; /* Get kprobe_inst and major_opcode from the bundle */ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr); if (qp < 0) return -EINVAL; p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t)); memcpy(p->ainsn.insn, 
kprobe_addr, sizeof(kprobe_opcode_t)); prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); prepare_booster(p); return 0; } void __kprobes arch_arm_kprobe(struct kprobe *p) { unsigned long arm_addr; bundle_t *src, *dest; arm_addr = ((unsigned long)p->addr) & ~0xFUL; dest = &((kprobe_opcode_t *)arm_addr)->bundle; src = &p->opcode.bundle; flush_icache_range((unsigned long)p->ainsn.insn, (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t) * MAX_INSN_SIZE); switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; break; case 1: dest->quad1.slot1_p1 = src->quad1.slot1_p1; break; case 2: dest->quad1.slot2 = src->quad1.slot2; break; } flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_disarm_kprobe(struct kprobe *p) { unsigned long arm_addr; bundle_t *src, *dest; arm_addr = ((unsigned long)p->addr) & ~0xFUL; dest = &((kprobe_opcode_t *)arm_addr)->bundle; /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */ src = &p->ainsn.insn->bundle; switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; break; case 1: dest->quad1.slot1_p1 = src->quad1.slot1_p1; break; case 2: dest->quad1.slot2 = src->quad1.slot2; break; } flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); p->ainsn.insn = NULL; } } /* * We are resuming execution after a single step fault, so the pt_regs * structure reflects the register state after we executed the instruction * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust * the ip to point back to the original stack address. To set the IP address * to original stack address, handle the case where we need to fixup the * relative IP address and/or fixup branch register. 
*/ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle); unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; unsigned long template; int slot = ((unsigned long)p->addr & 0xf); template = p->ainsn.insn->bundle.quad0.template; if (slot == 1 && bundle_encoding[template][1] == L) slot = 2; if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) { if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { /* Fix relative IP address */ regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr; } if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) { /* * Fix target branch register, software convention is * to use either b0 or b6 or b7, so just checking * only those registers */ switch (p->ainsn.target_br_reg) { case 0: if ((regs->b0 == bundle_addr) || (regs->b0 == bundle_addr + 0x10)) { regs->b0 = (regs->b0 - bundle_addr) + resume_addr; } break; case 6: if ((regs->b6 == bundle_addr) || (regs->b6 == bundle_addr + 0x10)) { regs->b6 = (regs->b6 - bundle_addr) + resume_addr; } break; case 7: if ((regs->b7 == bundle_addr) || (regs->b7 == bundle_addr + 0x10)) { regs->b7 = (regs->b7 - bundle_addr) + resume_addr; } break; } /* end switch */ } goto turn_ss_off; } if (slot == 2) { if (regs->cr_iip == bundle_addr + 0x10) { regs->cr_iip = resume_addr + 0x10; } } else { if (regs->cr_iip == bundle_addr) { regs->cr_iip = resume_addr; } } turn_ss_off: /* Turn off Single Step bit */ ia64_psr(regs)->ss = 0; } static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle; unsigned long slot = (unsigned long)p->addr & 0xf; /* single step inline if break instruction */ if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) regs->cr_iip = (unsigned long)p->addr & ~0xFULL; else regs->cr_iip = bundle_addr & ~0xFULL; if (slot > 2) slot = 0; ia64_psr(regs)->ri = slot; /* turn on single stepping */ ia64_psr(regs)->ss = 1; } static 
int __kprobes is_ia64_break_inst(struct pt_regs *regs) { unsigned int slot = ia64_psr(regs)->ri; unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; bundle_t bundle; memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); return __is_ia64_break_inst(&bundle, slot); } static int __kprobes pre_kprobes_handler(struct die_args *args) { struct kprobe *p; int ret = 0; struct pt_regs *regs = args->regs; kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs); struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); kcb = get_kprobe_ctlblk(); /* Handle recursion cases */ if (kprobe_running()) { p = get_kprobe(addr); if (p) { if ((kcb->kprobe_status == KPROBE_HIT_SS) && (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { ia64_psr(regs)->ss = 0; goto no_kprobe; } /* We have reentered the pre_kprobe_handler(), since * another probe was hit while within the handler. * We here save the original kprobes variables and * just single step on the instruction of the new probe * without calling any user handlers. */ save_previous_kprobe(kcb); set_current_kprobe(p, kcb); kprobes_inc_nmissed_count(p); prepare_ss(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; } else if (args->err == __IA64_BREAK_JPROBE) { /* * jprobe instrumented function just completed */ p = __get_cpu_var(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) { goto ss_probe; } } else if (!is_ia64_break_inst(regs)) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ ret = 1; goto no_kprobe; } else { /* Not our break */ goto no_kprobe; } } p = get_kprobe(addr); if (!p) { if (!is_ia64_break_inst(regs)) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. In either case, no further * handling of this interrupt is appropriate. 
*/ ret = 1; } /* Not one of our break, let kernel handle it */ goto no_kprobe; } set_current_kprobe(p, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (p->pre_handler && p->pre_handler(p, regs)) /* * Our pre-handler is specifically requesting that we just * do a return. This is used for both the jprobe pre-handler * and the kretprobe trampoline */ return 1; ss_probe: #if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL; /* turn single stepping off */ ia64_psr(regs)->ss = 0; reset_current_kprobe(); preempt_enable_no_resched(); return 1; } #endif prepare_ss(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; no_kprobe: preempt_enable_no_resched(); return ret; } static int __kprobes post_kprobes_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } resume_execution(cur, regs); /*Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } reset_current_kprobe(); out: preempt_enable_no_resched(); return 1; } int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); switch(kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the instruction pointer points back to * the probe address and allow the page fault handler * to continue as a normal page fault. 
*/ regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else reset_current_kprobe(); preempt_enable_no_resched(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, * we can also use npre/npostfault count for accouting * these specific fault cases. */ kprobes_inc_nmissed_count(cur); /* * We come here because instructions in the pre/post * handler caused the page_fault, this could happen * if handler tries to access user space by * copy_from_user(), get_user() etc. Let the * user-specified handler try to fix it first. */ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) return 1; /* * In case the user-specified fault handler returned * zero, try to fix up. */ if (ia64_done_with_exception(regs)) return 1; /* * Let ia64_do_page_fault() fix it. */ break; default: break; } return 0; } int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; if (args->regs && user_mode(args->regs)) return ret; switch(val) { case DIE_BREAK: /* err is break number from ia64_bad_break() */ if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12) || args->err == __IA64_BREAK_JPROBE || args->err == 0) if (pre_kprobes_handler(args)) ret = NOTIFY_STOP; break; case DIE_FAULT: /* err is vector number from ia64_fault() */ if (args->err == 36) if (post_kprobes_handler(args->regs)) ret = NOTIFY_STOP; break; default: break; } return ret; } struct param_bsp_cfm { unsigned long ip; unsigned long *bsp; unsigned long cfm; }; static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg) { unsigned long ip; struct param_bsp_cfm *lp = arg; do { unw_get_ip(info, &ip); if (ip == 0) break; if (ip == lp->ip) { unw_get_bsp(info, (unsigned long*)&lp->bsp); unw_get_cfm(info, (unsigned long*)&lp->cfm); 
return; } } while (unw_unwind(info) >= 0); lp->bsp = NULL; lp->cfm = 0; return; } unsigned long arch_deref_entry_point(void *entry) { return ((struct fnptr *)entry)->ip; } int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); unsigned long addr = arch_deref_entry_point(jp->entry); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct param_bsp_cfm pa; int bytes; /* * Callee owns the argument space and could overwrite it, eg * tail call optimization. So to be absolutely safe * we save the argument space before transferring the control * to instrumented jprobe function which runs in * the process context */ pa.ip = regs->cr_iip; unw_init_running(ia64_get_bsp_cfm, &pa); bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f) - (char *)pa.bsp; memcpy( kcb->jprobes_saved_stacked_regs, pa.bsp, bytes ); kcb->bsp = pa.bsp; kcb->cfm = pa.cfm; /* save architectural state */ kcb->jprobe_saved_regs = *regs; /* after rfi, execute the jprobe instrumented function */ regs->cr_iip = addr & ~0xFULL; ia64_psr(regs)->ri = addr & 0xf; regs->r1 = ((struct fnptr *)(jp->entry))->gp; /* * fix the return address to our jprobe_inst_return() function * in the jprobes.S file */ regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip; return 1; } /* ia64 does not need this */ void __kprobes jprobe_return(void) { } int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); int bytes; /* restoring architectural state */ *regs = kcb->jprobe_saved_regs; /* restoring the original argument space */ flush_register_stack(); bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f) - (char *)kcb->bsp; memcpy( kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes ); invalidate_stacked_regs(); preempt_enable_no_resched(); return 1; } static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; int __init arch_init_kprobes(void) { 
trampoline_p.addr = (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip; return register_kprobe(&trampoline_p); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip) return 1; return 0; }
gpl-2.0
googyanas/Googy-Kernel
drivers/staging/altera-stapl/altera.c
2386
57360
/*
 * altera.c
 *
 * altera FPGA driver
 *
 * Copyright (C) Altera Corporation 1998-2001
 * Copyright (C) 2010,2011 NetUP Inc.
 * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"

static int debug = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debugging information");

MODULE_DESCRIPTION("altera FPGA kernel module");
MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>");
MODULE_LICENSE("GPL");

/*
 * Debug printk gated on the "debug" module parameter.
 * NOTE(review): the expansion is an if-statement without an else, so
 * using dprintk() as the body of an outer if/else risks a dangling-else
 * mis-pairing; callers must brace accordingly.
 */
#define dprintk(args...) \
	if (debug) { \
		printk(KERN_DEBUG args); \
	}

/* Opcodes of the byte-code virtual machine interpreted by this driver. */
enum altera_fpga_opcode {
	OP_NOP = 0,
	OP_DUP,
	OP_SWP,
	OP_ADD,
	OP_SUB,
	OP_MULT,
	OP_DIV,
	OP_MOD,
	OP_SHL,
	OP_SHR,
	OP_NOT,
	OP_AND,
	OP_OR,
	OP_XOR,
	OP_INV,
	OP_GT,
	OP_LT,
	OP_RET,
	OP_CMPS,
	OP_PINT,
	OP_PRNT,
	OP_DSS,
	OP_DSSC,
	OP_ISS,
	OP_ISSC,
	OP_DPR = 0x1c,
	OP_DPRL,
	OP_DPO,
	OP_DPOL,
	OP_IPR,
	OP_IPRL,
	OP_IPO,
	OP_IPOL,
	OP_PCHR,
	OP_EXIT,
	OP_EQU,
	OP_POPT,
	OP_ABS = 0x2c,
	OP_BCH0,
	OP_PSH0 = 0x2f,
	OP_PSHL = 0x40,
	OP_PSHV,
	OP_JMP,
	OP_CALL,
	OP_NEXT,
	OP_PSTR,
	OP_SINT = 0x47,
	OP_ST,
	OP_ISTP,
	OP_DSTP,
	OP_SWPN,
	OP_DUPN,
	OP_POPV,
	OP_POPE,
	OP_POPA,
	OP_JMPZ,
	OP_DS,
	OP_IS,
	OP_DPRA,
	OP_DPOA,
	OP_IPRA,
	OP_IPOA,
	OP_EXPT,
	OP_PSHE,
	OP_PSHA,
	OP_DYNA,
	OP_EXPV = 0x5c,
	OP_COPY = 0x80,
	OP_REVA,
	OP_DSC,
	OP_ISC,
	OP_WAIT,
	OP_VS,
	OP_CMPA = 0xc0,
	OP_VSC,
};

/* Singly-linked list node pairing a procedure name with its attribute byte. */
struct altera_procinfo {
	char *name;			/* procedure name */
	u8 attrs;			/* procedure attribute byte */
	struct altera_procinfo *next;
};

/*
 * This function checks if enough parameters are available on the stack.
 * Returns 1 when at least @count entries are present; otherwise stores
 * -EOVERFLOW through @status and returns 0 so the caller skips the
 * operation.
 */
static int altera_check_stack(int stack_ptr, int count, int *status)
{
	if (stack_ptr < count) {
		*status = -EOVERFLOW;
		return 0;
	}

	return 1;
}

/* Log an exported integer key/value pair (debug output only). */
static void altera_export_int(char *key, s32 value)
{
	dprintk("Export: key = \"%s\", value = %d\n", key, value);
}

#define HEX_LINE_CHARS 72
#define HEX_LINE_BITS (HEX_LINE_CHARS * 4)

/*
 * Log an exported Boolean (bit) array of @count bits as hex digits,
 * wrapping at HEX_LINE_CHARS characters per output line.  Bits are
 * read LSB-first from @data and packed four-per-hex-digit, emitted
 * most-significant digit first.
 */
static void altera_export_bool_array(char *key, u8 *data, s32 count)
{
	char string[HEX_LINE_CHARS + 1];
	s32 i, offset;
	u32 size, line, lines, linebits, value, j, k;

	if (count > HEX_LINE_BITS) {
		dprintk("Export: key = \"%s\", %d bits, value = HEX\n",
							key, count);
		lines = (count + (HEX_LINE_BITS - 1)) / HEX_LINE_BITS;

		for (line = 0; line < lines; ++line) {
			if (line < (lines - 1)) {
				linebits = HEX_LINE_BITS;
				size = HEX_LINE_CHARS;
				offset = count - ((line + 1) * HEX_LINE_BITS);
			} else {
				linebits = count - ((lines - 1) * HEX_LINE_BITS);
				size = (linebits + 3) / 4;
				offset = 0L;
			}

			string[size] = '\0';
			j = size - 1;
			value = 0;

			for (k = 0; k < linebits; ++k) {
				i = k + offset;
				if (data[i >> 3] & (1 << (i & 7)))
					value |= (1 << (i & 3));
				if ((i & 3)
== 3) { sprintf(&string[j], "%1x", value); value = 0; --j; } } if ((k & 3) > 0) sprintf(&string[j], "%1x", value); dprintk("%s\n", string); } } else { size = (count + 3) / 4; string[size] = '\0'; j = size - 1; value = 0; for (i = 0; i < count; ++i) { if (data[i >> 3] & (1 << (i & 7))) value |= (1 << (i & 3)); if ((i & 3) == 3) { sprintf(&string[j], "%1x", value); value = 0; --j; } } if ((i & 3) > 0) sprintf(&string[j], "%1x", value); dprintk("Export: key = \"%s\", %d bits, value = HEX %s\n", key, count, string); } } static int altera_execute(struct altera_state *astate, u8 *p, s32 program_size, s32 *error_address, int *exit_code, int *format_version) { struct altera_config *aconf = astate->config; char *msg_buff = astate->msg_buff; long *stack = astate->stack; int status = 0; u32 first_word = 0L; u32 action_table = 0L; u32 proc_table = 0L; u32 str_table = 0L; u32 sym_table = 0L; u32 data_sect = 0L; u32 code_sect = 0L; u32 debug_sect = 0L; u32 action_count = 0L; u32 proc_count = 0L; u32 sym_count = 0L; long *vars = NULL; s32 *var_size = NULL; char *attrs = NULL; u8 *proc_attributes = NULL; u32 pc; u32 opcode_address; u32 args[3]; u32 opcode; u32 name_id; u8 charbuf[4]; long long_tmp; u32 variable_id; u8 *charptr_tmp; u8 *charptr_tmp2; long *longptr_tmp; int version = 0; int delta = 0; int stack_ptr = 0; u32 arg_count; int done = 0; int bad_opcode = 0; u32 count; u32 index; u32 index2; s32 long_count; s32 long_idx; s32 long_idx2; u32 i; u32 j; u32 uncomp_size; u32 offset; u32 value; int current_proc = 0; int reverse; char *name; dprintk("%s\n", __func__); /* Read header information */ if (program_size > 52L) { first_word = get_unaligned_be32(&p[0]); version = (first_word & 1L); *format_version = version + 1; delta = version * 8; action_table = get_unaligned_be32(&p[4]); proc_table = get_unaligned_be32(&p[8]); str_table = get_unaligned_be32(&p[4 + delta]); sym_table = get_unaligned_be32(&p[16 + delta]); data_sect = get_unaligned_be32(&p[20 + delta]); code_sect = 
get_unaligned_be32(&p[24 + delta]); debug_sect = get_unaligned_be32(&p[28 + delta]); action_count = get_unaligned_be32(&p[40 + delta]); proc_count = get_unaligned_be32(&p[44 + delta]); sym_count = get_unaligned_be32(&p[48 + (2 * delta)]); } if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) { done = 1; status = -EIO; goto exit_done; } if (sym_count <= 0) goto exit_done; vars = kzalloc(sym_count * sizeof(long), GFP_KERNEL); if (vars == NULL) status = -ENOMEM; if (status == 0) { var_size = kzalloc(sym_count * sizeof(s32), GFP_KERNEL); if (var_size == NULL) status = -ENOMEM; } if (status == 0) { attrs = kzalloc(sym_count, GFP_KERNEL); if (attrs == NULL) status = -ENOMEM; } if ((status == 0) && (version > 0)) { proc_attributes = kzalloc(proc_count, GFP_KERNEL); if (proc_attributes == NULL) status = -ENOMEM; } if (status != 0) goto exit_done; delta = version * 2; for (i = 0; i < sym_count; ++i) { offset = (sym_table + ((11 + delta) * i)); value = get_unaligned_be32(&p[offset + 3 + delta]); attrs[i] = p[offset]; /* * use bit 7 of attribute byte to indicate that * this buffer was dynamically allocated * and should be freed later */ attrs[i] &= 0x7f; var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]); /* * Attribute bits: * bit 0: 0 = read-only, 1 = read-write * bit 1: 0 = not compressed, 1 = compressed * bit 2: 0 = not initialized, 1 = initialized * bit 3: 0 = scalar, 1 = array * bit 4: 0 = Boolean, 1 = integer * bit 5: 0 = declared variable, * 1 = compiler created temporary variable */ if ((attrs[i] & 0x0c) == 0x04) /* initialized scalar variable */ vars[i] = value; else if ((attrs[i] & 0x1e) == 0x0e) { /* initialized compressed Boolean array */ uncomp_size = get_unaligned_le32(&p[data_sect + value]); /* allocate a buffer for the uncompressed data */ vars[i] = (long)kzalloc(uncomp_size, GFP_KERNEL); if (vars[i] == 0L) status = -ENOMEM; else { /* set flag so buffer will be freed later */ attrs[i] |= 0x80; /* uncompress the data */ if 
(altera_shrink(&p[data_sect + value], var_size[i], (u8 *)vars[i], uncomp_size, version) != uncomp_size) /* decompression failed */ status = -EIO; else var_size[i] = uncomp_size * 8L; } } else if ((attrs[i] & 0x1e) == 0x0c) { /* initialized Boolean array */ vars[i] = value + data_sect + (long)p; } else if ((attrs[i] & 0x1c) == 0x1c) { /* initialized integer array */ vars[i] = value + data_sect; } else if ((attrs[i] & 0x0c) == 0x08) { /* uninitialized array */ /* flag attrs so that memory is freed */ attrs[i] |= 0x80; if (var_size[i] > 0) { u32 size; if (attrs[i] & 0x10) /* integer array */ size = (var_size[i] * sizeof(s32)); else /* Boolean array */ size = ((var_size[i] + 7L) / 8L); vars[i] = (long)kzalloc(size, GFP_KERNEL); if (vars[i] == 0) { status = -ENOMEM; } else { /* zero out memory */ for (j = 0; j < size; ++j) ((u8 *)(vars[i]))[j] = 0; } } else vars[i] = 0; } else vars[i] = 0; } exit_done: if (status != 0) done = 1; altera_jinit(astate); pc = code_sect; msg_buff[0] = '\0'; /* * For JBC version 2, we will execute the procedures corresponding to * the selected ACTION */ if (version > 0) { if (aconf->action == NULL) { status = -EINVAL; done = 1; } else { int action_found = 0; for (i = 0; (i < action_count) && !action_found; ++i) { name_id = get_unaligned_be32(&p[action_table + (12 * i)]); name = &p[str_table + name_id]; if (strnicmp(aconf->action, name, strlen(name)) == 0) { action_found = 1; current_proc = get_unaligned_be32(&p[action_table + (12 * i) + 8]); } } if (!action_found) { status = -EINVAL; done = 1; } } if (status == 0) { int first_time = 1; i = current_proc; while ((i != 0) || first_time) { first_time = 0; /* check procedure attribute byte */ proc_attributes[i] = (p[proc_table + (13 * i) + 8] & 0x03); /* * BIT0 - OPTIONAL * BIT1 - RECOMMENDED * BIT6 - FORCED OFF * BIT7 - FORCED ON */ i = get_unaligned_be32(&p[proc_table + (13 * i) + 4]); } /* * Set current_proc to the first procedure * to be executed */ i = current_proc; while ((i != 0) && 
((proc_attributes[i] == 1) || ((proc_attributes[i] & 0xc0) == 0x40))) { i = get_unaligned_be32(&p[proc_table + (13 * i) + 4]); } if ((i != 0) || ((i == 0) && (current_proc == 0) && ((proc_attributes[0] != 1) && ((proc_attributes[0] & 0xc0) != 0x40)))) { current_proc = i; pc = code_sect + get_unaligned_be32(&p[proc_table + (13 * i) + 9]); if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; } else /* there are no procedures to execute! */ done = 1; } } msg_buff[0] = '\0'; while (!done) { opcode = (p[pc] & 0xff); opcode_address = pc; ++pc; if (debug > 1) printk("opcode: %02x\n", opcode); arg_count = (opcode >> 6) & 3; for (i = 0; i < arg_count; ++i) { args[i] = get_unaligned_be32(&p[pc]); pc += 4; } switch (opcode) { case OP_NOP: break; case OP_DUP: if (altera_check_stack(stack_ptr, 1, &status)) { stack[stack_ptr] = stack[stack_ptr - 1]; ++stack_ptr; } break; case OP_SWP: if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } break; case OP_ADD: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] += stack[stack_ptr]; } break; case OP_SUB: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] -= stack[stack_ptr]; } break; case OP_MULT: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] *= stack[stack_ptr]; } break; case OP_DIV: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] /= stack[stack_ptr]; } break; case OP_MOD: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] %= stack[stack_ptr]; } break; case OP_SHL: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] <<= stack[stack_ptr]; } break; case OP_SHR: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] >>= stack[stack_ptr]; } break; case OP_NOT: if (altera_check_stack(stack_ptr, 1, 
&status)) stack[stack_ptr - 1] ^= (-1L); break; case OP_AND: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] &= stack[stack_ptr]; } break; case OP_OR: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] |= stack[stack_ptr]; } break; case OP_XOR: if (altera_check_stack(stack_ptr, 2, &status)) { --stack_ptr; stack[stack_ptr - 1] ^= stack[stack_ptr]; } break; case OP_INV: if (!altera_check_stack(stack_ptr, 1, &status)) break; stack[stack_ptr - 1] = stack[stack_ptr - 1] ? 0L : 1L; break; case OP_GT: if (!altera_check_stack(stack_ptr, 2, &status)) break; --stack_ptr; stack[stack_ptr - 1] = (stack[stack_ptr - 1] > stack[stack_ptr]) ? 1L : 0L; break; case OP_LT: if (!altera_check_stack(stack_ptr, 2, &status)) break; --stack_ptr; stack[stack_ptr - 1] = (stack[stack_ptr - 1] < stack[stack_ptr]) ? 1L : 0L; break; case OP_RET: if ((version > 0) && (stack_ptr == 0)) { /* * We completed one of the main procedures * of an ACTION. * Find the next procedure * to be executed and jump to it. * If there are no more procedures, then EXIT. */ i = get_unaligned_be32(&p[proc_table + (13 * current_proc) + 4]); while ((i != 0) && ((proc_attributes[i] == 1) || ((proc_attributes[i] & 0xc0) == 0x40))) i = get_unaligned_be32(&p[proc_table + (13 * i) + 4]); if (i == 0) { /* no procedures to execute! 
*/ done = 1; *exit_code = 0; /* success */ } else { current_proc = i; pc = code_sect + get_unaligned_be32( &p[proc_table + (13 * i) + 9]); if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; } } else if (altera_check_stack(stack_ptr, 1, &status)) { pc = stack[--stack_ptr] + code_sect; if ((pc <= code_sect) || (pc >= debug_sect)) status = -ERANGE; } break; case OP_CMPS: /* * Array short compare * ...stack 0 is source 1 value * ...stack 1 is source 2 value * ...stack 2 is mask value * ...stack 3 is count */ if (altera_check_stack(stack_ptr, 4, &status)) { s32 a = stack[--stack_ptr]; s32 b = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; count = stack[stack_ptr - 1]; if ((count < 1) || (count > 32)) status = -ERANGE; else { long_tmp &= ((-1L) >> (32 - count)); stack[stack_ptr - 1] = ((a & long_tmp) == (b & long_tmp)) ? 1L : 0L; } } break; case OP_PINT: /* * PRINT add integer * ...stack 0 is integer value */ if (!altera_check_stack(stack_ptr, 1, &status)) break; sprintf(&msg_buff[strlen(msg_buff)], "%ld", stack[--stack_ptr]); break; case OP_PRNT: /* PRINT finish */ if (debug) printk(msg_buff, "\n"); msg_buff[0] = '\0'; break; case OP_DSS: /* * DRSCAN short * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; count = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_drscan(astate, count, charbuf, 0); break; case OP_DSSC: /* * DRSCAN short with capture * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; count = stack[stack_ptr - 1]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_swap_dr(astate, count, charbuf, 0, charbuf, 0); stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); break; case OP_ISS: /* * IRSCAN short * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = 
stack[--stack_ptr]; count = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_irscan(astate, count, charbuf, 0); break; case OP_ISSC: /* * IRSCAN short with capture * ...stack 0 is scan data * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; count = stack[stack_ptr - 1]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_swap_ir(astate, count, charbuf, 0, charbuf, 0); stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); break; case OP_DPR: if (!altera_check_stack(stack_ptr, 1, &status)) break; count = stack[--stack_ptr]; status = altera_set_dr_pre(&astate->js, count, 0, NULL); break; case OP_DPRL: /* * DRPRE with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (!altera_check_stack(stack_ptr, 2, &status)) break; count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_dr_pre(&astate->js, count, 0, charbuf); break; case OP_DPO: /* * DRPOST * ...stack 0 is count */ if (altera_check_stack(stack_ptr, 1, &status)) { count = stack[--stack_ptr]; status = altera_set_dr_post(&astate->js, count, 0, NULL); } break; case OP_DPOL: /* * DRPOST with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (!altera_check_stack(stack_ptr, 2, &status)) break; count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_dr_post(&astate->js, count, 0, charbuf); break; case OP_IPR: if (altera_check_stack(stack_ptr, 1, &status)) { count = stack[--stack_ptr]; status = altera_set_ir_pre(&astate->js, count, 0, NULL); } break; case OP_IPRL: /* * IRPRE with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (altera_check_stack(stack_ptr, 2, &status)) { count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_ir_pre(&astate->js, count, 0, charbuf); } 
break; case OP_IPO: /* * IRPOST * ...stack 0 is count */ if (altera_check_stack(stack_ptr, 1, &status)) { count = stack[--stack_ptr]; status = altera_set_ir_post(&astate->js, count, 0, NULL); } break; case OP_IPOL: /* * IRPOST with literal data * ...stack 0 is count * ...stack 1 is literal data */ if (!altera_check_stack(stack_ptr, 2, &status)) break; count = stack[--stack_ptr]; long_tmp = stack[--stack_ptr]; put_unaligned_le32(long_tmp, &charbuf[0]); status = altera_set_ir_post(&astate->js, count, 0, charbuf); break; case OP_PCHR: if (altera_check_stack(stack_ptr, 1, &status)) { u8 ch; count = strlen(msg_buff); ch = (char) stack[--stack_ptr]; if ((ch < 1) || (ch > 127)) { /* * character code out of range * instead of flagging an error, * force the value to 127 */ ch = 127; } msg_buff[count] = ch; msg_buff[count + 1] = '\0'; } break; case OP_EXIT: if (altera_check_stack(stack_ptr, 1, &status)) *exit_code = stack[--stack_ptr]; done = 1; break; case OP_EQU: if (!altera_check_stack(stack_ptr, 2, &status)) break; --stack_ptr; stack[stack_ptr - 1] = (stack[stack_ptr - 1] == stack[stack_ptr]) ? 
1L : 0L; break; case OP_POPT: if (altera_check_stack(stack_ptr, 1, &status)) --stack_ptr; break; case OP_ABS: if (!altera_check_stack(stack_ptr, 1, &status)) break; if (stack[stack_ptr - 1] < 0) stack[stack_ptr - 1] = 0 - stack[stack_ptr - 1]; break; case OP_BCH0: /* * Batch operation 0 * SWP * SWPN 7 * SWP * SWPN 6 * DUPN 8 * SWPN 2 * SWP * DUPN 6 * DUPN 6 */ /* SWP */ if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWPN 7 */ index = 7 + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWP */ if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWPN 6 */ index = 6 + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* DUPN 8 */ index = 8 + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } /* SWPN 2 */ index = 2 + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* SWP */ if (altera_check_stack(stack_ptr, 2, &status)) { long_tmp = stack[stack_ptr - 2]; stack[stack_ptr - 2] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } /* DUPN 6 */ index = 6 + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } /* DUPN 6 */ index = 6 + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } break; case OP_PSH0: stack[stack_ptr++] = 0; break; case OP_PSHL: stack[stack_ptr++] = 
(s32) args[0]; break; case OP_PSHV: stack[stack_ptr++] = vars[args[0]]; break; case OP_JMP: pc = args[0] + code_sect; if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; break; case OP_CALL: stack[stack_ptr++] = pc; pc = args[0] + code_sect; if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; break; case OP_NEXT: /* * Process FOR / NEXT loop * ...argument 0 is variable ID * ...stack 0 is step value * ...stack 1 is end value * ...stack 2 is top address */ if (altera_check_stack(stack_ptr, 3, &status)) { s32 step = stack[stack_ptr - 1]; s32 end = stack[stack_ptr - 2]; s32 top = stack[stack_ptr - 3]; s32 iterator = vars[args[0]]; int break_out = 0; if (step < 0) { if (iterator <= end) break_out = 1; } else if (iterator >= end) break_out = 1; if (break_out) { stack_ptr -= 3; } else { vars[args[0]] = iterator + step; pc = top + code_sect; if ((pc < code_sect) || (pc >= debug_sect)) status = -ERANGE; } } break; case OP_PSTR: /* * PRINT add string * ...argument 0 is string ID */ count = strlen(msg_buff); strlcpy(&msg_buff[count], &p[str_table + args[0]], ALTERA_MESSAGE_LENGTH - count); break; case OP_SINT: /* * STATE intermediate state * ...argument 0 is state code */ status = altera_goto_jstate(astate, args[0]); break; case OP_ST: /* * STATE final state * ...argument 0 is state code */ status = altera_goto_jstate(astate, args[0]); break; case OP_ISTP: /* * IRSTOP state * ...argument 0 is state code */ status = altera_set_irstop(&astate->js, args[0]); break; case OP_DSTP: /* * DRSTOP state * ...argument 0 is state code */ status = altera_set_drstop(&astate->js, args[0]); break; case OP_SWPN: /* * Exchange top with Nth stack value * ...argument 0 is 0-based stack entry * to swap with top element */ index = (args[0]) + 1; if (altera_check_stack(stack_ptr, index, &status)) { long_tmp = stack[stack_ptr - index]; stack[stack_ptr - index] = stack[stack_ptr - 1]; stack[stack_ptr - 1] = long_tmp; } break; case OP_DUPN: /* * Duplicate Nth stack value * 
...argument 0 is 0-based stack entry to duplicate */ index = (args[0]) + 1; if (altera_check_stack(stack_ptr, index, &status)) { stack[stack_ptr] = stack[stack_ptr - index]; ++stack_ptr; } break; case OP_POPV: /* * Pop stack into scalar variable * ...argument 0 is variable ID * ...stack 0 is value */ if (altera_check_stack(stack_ptr, 1, &status)) vars[args[0]] = stack[--stack_ptr]; break; case OP_POPE: /* * Pop stack into integer array element * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is value */ if (!altera_check_stack(stack_ptr, 2, &status)) break; variable_id = args[0]; /* * If variable is read-only, * convert to writable array */ if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x1c)) { /* Allocate a writable buffer for this array */ count = var_size[variable_id]; long_tmp = vars[variable_id]; longptr_tmp = kzalloc(count * sizeof(long), GFP_KERNEL); vars[variable_id] = (long)longptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* copy previous contents into buffer */ for (i = 0; i < count; ++i) { longptr_tmp[i] = get_unaligned_be32(&p[long_tmp]); long_tmp += sizeof(long); } /* * set bit 7 - buffer was * dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } /* check that variable is a writable integer array */ if ((attrs[variable_id] & 0x1c) != 0x18) status = -ERANGE; else { longptr_tmp = (long *)vars[variable_id]; /* pop the array index */ index = stack[--stack_ptr]; /* pop the value and store it into the array */ longptr_tmp[index] = stack[--stack_ptr]; } break; case OP_POPA: /* * Pop stack into Boolean array * ...argument 0 is variable ID * ...stack 0 is count * ...stack 1 is array index * ...stack 2 is value */ if (!altera_check_stack(stack_ptr, 3, &status)) break; variable_id = args[0]; /* * If variable is read-only, * convert to writable array */ if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x0c)) { 
/* Allocate a writable buffer for this array */ long_tmp = (var_size[variable_id] + 7L) >> 3L; charptr_tmp2 = (u8 *)vars[variable_id]; charptr_tmp = kzalloc(long_tmp, GFP_KERNEL); vars[variable_id] = (long)charptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* zero the buffer */ for (long_idx = 0L; long_idx < long_tmp; ++long_idx) { charptr_tmp[long_idx] = 0; } /* copy previous contents into buffer */ for (long_idx = 0L; long_idx < var_size[variable_id]; ++long_idx) { long_idx2 = long_idx; if (charptr_tmp2[long_idx2 >> 3] & (1 << (long_idx2 & 7))) { charptr_tmp[long_idx >> 3] |= (1 << (long_idx & 7)); } } /* * set bit 7 - buffer was * dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } /* * check that variable is * a writable Boolean array */ if ((attrs[variable_id] & 0x1c) != 0x08) { status = -ERANGE; break; } charptr_tmp = (u8 *)vars[variable_id]; /* pop the count (number of bits to copy) */ long_count = stack[--stack_ptr]; /* pop the array index */ long_idx = stack[--stack_ptr]; reverse = 0; if (version > 0) { /* * stack 0 = array right index * stack 1 = array left index */ if (long_idx > long_count) { reverse = 1; long_tmp = long_count; long_count = 1 + long_idx - long_count; long_idx = long_tmp; /* reverse POPA is not supported */ status = -ERANGE; break; } else long_count = 1 + long_count - long_idx; } /* pop the data */ long_tmp = stack[--stack_ptr]; if (long_count < 1) { status = -ERANGE; break; } for (i = 0; i < long_count; ++i) { if (long_tmp & (1L << (s32) i)) charptr_tmp[long_idx >> 3L] |= (1L << (long_idx & 7L)); else charptr_tmp[long_idx >> 3L] &= ~(1L << (long_idx & 7L)); ++long_idx; } break; case OP_JMPZ: /* * Pop stack and branch if zero * ...argument 0 is address * ...stack 0 is condition value */ if (altera_check_stack(stack_ptr, 1, &status)) { if (stack[--stack_ptr] == 0) { pc = args[0] + code_sect; if ((pc < code_sect) || (pc 
>= debug_sect)) status = -ERANGE; } } break; case OP_DS: case OP_IS: /* * DRSCAN * IRSCAN * ...argument 0 is scan data variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_idx = stack[--stack_ptr]; long_count = stack[--stack_ptr]; reverse = 0; if (version > 0) { /* * stack 0 = array right index * stack 1 = array left index * stack 2 = count */ long_tmp = long_count; long_count = stack[--stack_ptr]; if (long_idx > long_tmp) { reverse = 1; long_idx = long_tmp; } } charptr_tmp = (u8 *)vars[args[0]]; if (reverse) { /* * allocate a buffer * and reverse the data order */ charptr_tmp2 = charptr_tmp; charptr_tmp = kzalloc((long_count >> 3) + 1, GFP_KERNEL); if (charptr_tmp == NULL) { status = -ENOMEM; break; } long_tmp = long_idx + long_count - 1; long_idx2 = 0; while (long_idx2 < long_count) { if (charptr_tmp2[long_tmp >> 3] & (1 << (long_tmp & 7))) charptr_tmp[long_idx2 >> 3] |= (1 << (long_idx2 & 7)); else charptr_tmp[long_idx2 >> 3] &= ~(1 << (long_idx2 & 7)); --long_tmp; ++long_idx2; } } if (opcode == 0x51) /* DS */ status = altera_drscan(astate, long_count, charptr_tmp, long_idx); else /* IS */ status = altera_irscan(astate, long_count, charptr_tmp, long_idx); if (reverse) kfree(charptr_tmp); break; case OP_DPRA: /* * DRPRE with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_dr_pre(&astate->js, count, index, charptr_tmp); break; case OP_DPOA: /* * DRPOST with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) 
/* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_dr_post(&astate->js, count, index, charptr_tmp); break; case OP_IPRA: /* * IRPRE with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_ir_pre(&astate->js, count, index, charptr_tmp); break; case OP_IPOA: /* * IRPOST with array data * ...argument 0 is variable ID * ...stack 0 is array index * ...stack 1 is count */ if (!altera_check_stack(stack_ptr, 2, &status)) break; index = stack[--stack_ptr]; count = stack[--stack_ptr]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; charptr_tmp = (u8 *)vars[args[0]]; status = altera_set_ir_post(&astate->js, count, index, charptr_tmp); break; case OP_EXPT: /* * EXPORT * ...argument 0 is string ID * ...stack 0 is integer expression */ if (altera_check_stack(stack_ptr, 1, &status)) { name = &p[str_table + args[0]]; long_tmp = stack[--stack_ptr]; altera_export_int(name, long_tmp); } break; case OP_PSHE: /* * Push integer array element * ...argument 0 is variable ID * ...stack 0 is array index */ if (!altera_check_stack(stack_ptr, 1, &status)) break; variable_id = args[0]; index = stack[stack_ptr - 1]; /* check variable type */ if ((attrs[variable_id] & 0x1f) == 0x19) { /* writable integer array */ longptr_tmp = (long *)vars[variable_id]; stack[stack_ptr - 1] = longptr_tmp[index]; } else if ((attrs[variable_id] & 0x1f) == 0x1c) { /* read-only integer array */ long_tmp = vars[variable_id] + (index * sizeof(long)); stack[stack_ptr - 1] = get_unaligned_be32(&p[long_tmp]); } else status = -ERANGE; break; case OP_PSHA: 
/* * Push Boolean array * ...argument 0 is variable ID * ...stack 0 is count * ...stack 1 is array index */ if (!altera_check_stack(stack_ptr, 2, &status)) break; variable_id = args[0]; /* check that variable is a Boolean array */ if ((attrs[variable_id] & 0x18) != 0x08) { status = -ERANGE; break; } charptr_tmp = (u8 *)vars[variable_id]; /* pop the count (number of bits to copy) */ count = stack[--stack_ptr]; /* pop the array index */ index = stack[stack_ptr - 1]; if (version > 0) /* * stack 0 = array right index * stack 1 = array left index */ count = 1 + count - index; if ((count < 1) || (count > 32)) { status = -ERANGE; break; } long_tmp = 0L; for (i = 0; i < count; ++i) if (charptr_tmp[(i + index) >> 3] & (1 << ((i + index) & 7))) long_tmp |= (1L << i); stack[stack_ptr - 1] = long_tmp; break; case OP_DYNA: /* * Dynamically change size of array * ...argument 0 is variable ID * ...stack 0 is new size */ if (!altera_check_stack(stack_ptr, 1, &status)) break; variable_id = args[0]; long_tmp = stack[--stack_ptr]; if (long_tmp > var_size[variable_id]) { var_size[variable_id] = long_tmp; if (attrs[variable_id] & 0x10) /* allocate integer array */ long_tmp *= sizeof(long); else /* allocate Boolean array */ long_tmp = (long_tmp + 7) >> 3; /* * If the buffer was previously allocated, * free it */ if (attrs[variable_id] & 0x80) { kfree((void *)vars[variable_id]); vars[variable_id] = 0; } /* * Allocate a new buffer * of the requested size */ vars[variable_id] = (long) kzalloc(long_tmp, GFP_KERNEL); if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* * Set the attribute bit to indicate that * this buffer was dynamically allocated and * should be freed later */ attrs[variable_id] |= 0x80; /* zero out memory */ count = ((var_size[variable_id] + 7L) / 8L); charptr_tmp = (u8 *)(vars[variable_id]); for (index = 0; index < count; ++index) charptr_tmp[index] = 0; } break; case OP_EXPV: /* * Export Boolean array * ...argument 0 is string ID * ...stack 0 is variable ID * 
...stack 1 is array right index * ...stack 2 is array left index */ if (!altera_check_stack(stack_ptr, 3, &status)) break; if (version == 0) { /* EXPV is not supported in JBC 1.0 */ bad_opcode = 1; break; } name = &p[str_table + args[0]]; variable_id = stack[--stack_ptr]; long_idx = stack[--stack_ptr];/* right indx */ long_idx2 = stack[--stack_ptr];/* left indx */ if (long_idx > long_idx2) { /* reverse indices not supported */ status = -ERANGE; break; } long_count = 1 + long_idx2 - long_idx; charptr_tmp = (u8 *)vars[variable_id]; charptr_tmp2 = NULL; if ((long_idx & 7L) != 0) { s32 k = long_idx; charptr_tmp2 = kzalloc(((long_count + 7L) / 8L), GFP_KERNEL); if (charptr_tmp2 == NULL) { status = -ENOMEM; break; } for (i = 0; i < long_count; ++i) { if (charptr_tmp[k >> 3] & (1 << (k & 7))) charptr_tmp2[i >> 3] |= (1 << (i & 7)); else charptr_tmp2[i >> 3] &= ~(1 << (i & 7)); ++k; } charptr_tmp = charptr_tmp2; } else if (long_idx != 0) charptr_tmp = &charptr_tmp[long_idx >> 3]; altera_export_bool_array(name, charptr_tmp, long_count); /* free allocated buffer */ if ((long_idx & 7L) != 0) kfree(charptr_tmp2); break; case OP_COPY: { /* * Array copy * ...argument 0 is dest ID * ...argument 1 is source ID * ...stack 0 is count * ...stack 1 is dest index * ...stack 2 is source index */ s32 copy_count; s32 copy_index; s32 copy_index2; s32 destleft; s32 src_count; s32 dest_count; int src_reverse = 0; int dest_reverse = 0; if (!altera_check_stack(stack_ptr, 3, &status)) break; copy_count = stack[--stack_ptr]; copy_index = stack[--stack_ptr]; copy_index2 = stack[--stack_ptr]; reverse = 0; if (version > 0) { /* * stack 0 = source right index * stack 1 = source left index * stack 2 = destination right index * stack 3 = destination left index */ destleft = stack[--stack_ptr]; if (copy_count > copy_index) { src_reverse = 1; reverse = 1; src_count = 1 + copy_count - copy_index; /* copy_index = source start index */ } else { src_count = 1 + copy_index - copy_count; /* source start index 
*/ copy_index = copy_count; } if (copy_index2 > destleft) { dest_reverse = 1; reverse = !reverse; dest_count = 1 + copy_index2 - destleft; /* destination start index */ copy_index2 = destleft; } else dest_count = 1 + destleft - copy_index2; copy_count = (src_count < dest_count) ? src_count : dest_count; if ((src_reverse || dest_reverse) && (src_count != dest_count)) /* * If either the source or destination * is reversed, we can't tolerate * a length mismatch, because we * "left justify" arrays when copying. * This won't work correctly * with reversed arrays. */ status = -ERANGE; } count = copy_count; index = copy_index; index2 = copy_index2; /* * If destination is a read-only array, * allocate a buffer and convert it to a writable array */ variable_id = args[1]; if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x0c)) { /* Allocate a writable buffer for this array */ long_tmp = (var_size[variable_id] + 7L) >> 3L; charptr_tmp2 = (u8 *)vars[variable_id]; charptr_tmp = kzalloc(long_tmp, GFP_KERNEL); vars[variable_id] = (long)charptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* zero the buffer */ for (long_idx = 0L; long_idx < long_tmp; ++long_idx) charptr_tmp[long_idx] = 0; /* copy previous contents into buffer */ for (long_idx = 0L; long_idx < var_size[variable_id]; ++long_idx) { long_idx2 = long_idx; if (charptr_tmp2[long_idx2 >> 3] & (1 << (long_idx2 & 7))) charptr_tmp[long_idx >> 3] |= (1 << (long_idx & 7)); } /* set bit 7 - buffer was dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } charptr_tmp = (u8 *)vars[args[1]]; charptr_tmp2 = (u8 *)vars[args[0]]; /* check if destination is a writable Boolean array */ if ((attrs[args[1]] & 0x1c) != 0x08) { status = -ERANGE; break; } if (count < 1) { status = -ERANGE; break; } if (reverse) index2 += (count - 1); for (i = 0; i < count; ++i) { if (charptr_tmp2[index >> 3] & (1 << (index & 7))) 
charptr_tmp[index2 >> 3] |= (1 << (index2 & 7)); else charptr_tmp[index2 >> 3] &= ~(1 << (index2 & 7)); ++index; if (reverse) --index2; else ++index2; } break; } case OP_DSC: case OP_ISC: { /* * DRSCAN with capture * IRSCAN with capture * ...argument 0 is scan data variable ID * ...argument 1 is capture variable ID * ...stack 0 is capture index * ...stack 1 is scan data index * ...stack 2 is count */ s32 scan_right, scan_left; s32 capture_count = 0; s32 scan_count = 0; s32 capture_index; s32 scan_index; if (!altera_check_stack(stack_ptr, 3, &status)) break; capture_index = stack[--stack_ptr]; scan_index = stack[--stack_ptr]; if (version > 0) { /* * stack 0 = capture right index * stack 1 = capture left index * stack 2 = scan right index * stack 3 = scan left index * stack 4 = count */ scan_right = stack[--stack_ptr]; scan_left = stack[--stack_ptr]; capture_count = 1 + scan_index - capture_index; scan_count = 1 + scan_left - scan_right; scan_index = scan_right; } long_count = stack[--stack_ptr]; /* * If capture array is read-only, allocate a buffer * and convert it to a writable array */ variable_id = args[1]; if ((version > 0) && ((attrs[variable_id] & 0x9c) == 0x0c)) { /* Allocate a writable buffer for this array */ long_tmp = (var_size[variable_id] + 7L) >> 3L; charptr_tmp2 = (u8 *)vars[variable_id]; charptr_tmp = kzalloc(long_tmp, GFP_KERNEL); vars[variable_id] = (long)charptr_tmp; if (vars[variable_id] == 0) { status = -ENOMEM; break; } /* zero the buffer */ for (long_idx = 0L; long_idx < long_tmp; ++long_idx) charptr_tmp[long_idx] = 0; /* copy previous contents into buffer */ for (long_idx = 0L; long_idx < var_size[variable_id]; ++long_idx) { long_idx2 = long_idx; if (charptr_tmp2[long_idx2 >> 3] & (1 << (long_idx2 & 7))) charptr_tmp[long_idx >> 3] |= (1 << (long_idx & 7)); } /* * set bit 7 - buffer was * dynamically allocated */ attrs[variable_id] |= 0x80; /* clear bit 2 - variable is writable */ attrs[variable_id] &= ~0x04; attrs[variable_id] |= 0x01; } 
charptr_tmp = (u8 *)vars[args[0]]; charptr_tmp2 = (u8 *)vars[args[1]]; if ((version > 0) && ((long_count > capture_count) || (long_count > scan_count))) { status = -ERANGE; break; } /* * check that capture array * is a writable Boolean array */ if ((attrs[args[1]] & 0x1c) != 0x08) { status = -ERANGE; break; } if (status == 0) { if (opcode == 0x82) /* DSC */ status = altera_swap_dr(astate, long_count, charptr_tmp, scan_index, charptr_tmp2, capture_index); else /* ISC */ status = altera_swap_ir(astate, long_count, charptr_tmp, scan_index, charptr_tmp2, capture_index); } break; } case OP_WAIT: /* * WAIT * ...argument 0 is wait state * ...argument 1 is end state * ...stack 0 is cycles * ...stack 1 is microseconds */ if (!altera_check_stack(stack_ptr, 2, &status)) break; long_tmp = stack[--stack_ptr]; if (long_tmp != 0L) status = altera_wait_cycles(astate, long_tmp, args[0]); long_tmp = stack[--stack_ptr]; if ((status == 0) && (long_tmp != 0L)) status = altera_wait_msecs(astate, long_tmp, args[0]); if ((status == 0) && (args[1] != args[0])) status = altera_goto_jstate(astate, args[1]); if (version > 0) { --stack_ptr; /* throw away MAX cycles */ --stack_ptr; /* throw away MAX microseconds */ } break; case OP_CMPA: { /* * Array compare * ...argument 0 is source 1 ID * ...argument 1 is source 2 ID * ...argument 2 is mask ID * ...stack 0 is source 1 index * ...stack 1 is source 2 index * ...stack 2 is mask index * ...stack 3 is count */ s32 a, b; u8 *source1 = (u8 *)vars[args[0]]; u8 *source2 = (u8 *)vars[args[1]]; u8 *mask = (u8 *)vars[args[2]]; u32 index1; u32 index2; u32 mask_index; if (!altera_check_stack(stack_ptr, 4, &status)) break; index1 = stack[--stack_ptr]; index2 = stack[--stack_ptr]; mask_index = stack[--stack_ptr]; long_count = stack[--stack_ptr]; if (version > 0) { /* * stack 0 = source 1 right index * stack 1 = source 1 left index * stack 2 = source 2 right index * stack 3 = source 2 left index * stack 4 = mask right index * stack 5 = mask left index */ s32 
mask_right = stack[--stack_ptr]; s32 mask_left = stack[--stack_ptr]; /* source 1 count */ a = 1 + index2 - index1; /* source 2 count */ b = 1 + long_count - mask_index; a = (a < b) ? a : b; /* mask count */ b = 1 + mask_left - mask_right; a = (a < b) ? a : b; /* source 2 start index */ index2 = mask_index; /* mask start index */ mask_index = mask_right; long_count = a; } long_tmp = 1L; if (long_count < 1) status = -ERANGE; else { count = long_count; for (i = 0; i < count; ++i) { if (mask[mask_index >> 3] & (1 << (mask_index & 7))) { a = source1[index1 >> 3] & (1 << (index1 & 7)) ? 1 : 0; b = source2[index2 >> 3] & (1 << (index2 & 7)) ? 1 : 0; if (a != b) /* failure */ long_tmp = 0L; } ++index1; ++index2; ++mask_index; } } stack[stack_ptr++] = long_tmp; break; } default: /* Unrecognized opcode -- ERROR! */ bad_opcode = 1; break; } if (bad_opcode) status = -ENOSYS; if ((stack_ptr < 0) || (stack_ptr >= ALTERA_STACK_SIZE)) status = -EOVERFLOW; if (status != 0) { done = 1; *error_address = (s32)(opcode_address - code_sect); } } altera_free_buffers(astate); /* Free all dynamically allocated arrays */ if ((attrs != NULL) && (vars != NULL)) for (i = 0; i < sym_count; ++i) if (attrs[i] & 0x80) kfree((void *)vars[i]); kfree(vars); kfree(var_size); kfree(attrs); kfree(proc_attributes); return status; } static int altera_get_note(u8 *p, s32 program_size, s32 *offset, char *key, char *value, int length) /* * Gets key and value of NOTE fields in the JBC file. * Can be called in two modes: if offset pointer is NULL, * then the function searches for note fields which match * the key string provided. If offset is not NULL, then * the function finds the next note field of any key, * starting at the offset specified by the offset pointer. 
* Returns 0 for success, else appropriate error code */ { int status = -ENODATA; u32 note_strings = 0L; u32 note_table = 0L; u32 note_count = 0L; u32 first_word = 0L; int version = 0; int delta = 0; char *key_ptr; char *value_ptr; int i; /* Read header information */ if (program_size > 52L) { first_word = get_unaligned_be32(&p[0]); version = (first_word & 1L); delta = version * 8; note_strings = get_unaligned_be32(&p[8 + delta]); note_table = get_unaligned_be32(&p[12 + delta]); note_count = get_unaligned_be32(&p[44 + (2 * delta)]); } if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) return -EIO; if (note_count <= 0L) return status; if (offset == NULL) { /* * We will search for the first note with a specific key, * and return only the value */ for (i = 0; (i < note_count) && (status != 0); ++i) { key_ptr = &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i)])]; if ((strnicmp(key, key_ptr, strlen(key_ptr)) == 0) && (key != NULL)) { status = 0; value_ptr = &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i) + 4])]; if (value != NULL) strlcpy(value, value_ptr, length); } } } else { /* * We will search for the next note, regardless of the key, * and return both the value and the key */ i = *offset; if ((i >= 0) && (i < note_count)) { status = 0; if (key != NULL) strlcpy(key, &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i)])], length); if (value != NULL) strlcpy(value, &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i) + 4])], length); *offset = i + 1; } } return status; } static int altera_check_crc(u8 *p, s32 program_size) { int status = 0; u16 local_expected = 0, local_actual = 0, shift_reg = 0xffff; int bit, feedback; u8 databyte; u32 i; u32 crc_section = 0L; u32 first_word = 0L; int version = 0; int delta = 0; if (program_size > 52L) { first_word = get_unaligned_be32(&p[0]); version = (first_word & 1L); delta = version * 8; crc_section = get_unaligned_be32(&p[32 + delta]); } if ((first_word != 0x4A414D00L) 
&& (first_word != 0x4A414D01L)) status = -EIO; if (crc_section >= program_size) status = -EIO; if (status == 0) { local_expected = (u16)get_unaligned_be16(&p[crc_section]); for (i = 0; i < crc_section; ++i) { databyte = p[i]; for (bit = 0; bit < 8; bit++) { feedback = (databyte ^ shift_reg) & 0x01; shift_reg >>= 1; if (feedback) shift_reg ^= 0x8408; databyte >>= 1; } } local_actual = (u16)~shift_reg; if (local_expected != local_actual) status = -EILSEQ; } if (debug || status) { switch (status) { case 0: printk(KERN_INFO "%s: CRC matched: %04x\n", __func__, local_actual); break; case -EILSEQ: printk(KERN_ERR "%s: CRC mismatch: expected %04x, " "actual %04x\n", __func__, local_expected, local_actual); break; case -ENODATA: printk(KERN_ERR "%s: expected CRC not found, " "actual CRC = %04x\n", __func__, local_actual); break; case -EIO: printk(KERN_ERR "%s: error: format isn't " "recognized.\n", __func__); break; default: printk(KERN_ERR "%s: CRC function returned error " "code %d\n", __func__, status); break; } } return status; } static int altera_get_file_info(u8 *p, s32 program_size, int *format_version, int *action_count, int *procedure_count) { int status = -EIO; u32 first_word = 0; int version = 0; if (program_size <= 52L) return status; first_word = get_unaligned_be32(&p[0]); if ((first_word == 0x4A414D00L) || (first_word == 0x4A414D01L)) { status = 0; version = (first_word & 1L); *format_version = version + 1; if (version > 0) { *action_count = get_unaligned_be32(&p[48]); *procedure_count = get_unaligned_be32(&p[52]); } } return status; } static int altera_get_act_info(u8 *p, s32 program_size, int index, char **name, char **description, struct altera_procinfo **proc_list) { int status = -EIO; struct altera_procinfo *procptr = NULL; struct altera_procinfo *tmpptr = NULL; u32 first_word = 0L; u32 action_table = 0L; u32 proc_table = 0L; u32 str_table = 0L; u32 note_strings = 0L; u32 action_count = 0L; u32 proc_count = 0L; u32 act_name_id = 0L; u32 act_desc_id = 0L; 
u32 act_proc_id = 0L; u32 act_proc_name = 0L; u8 act_proc_attribute = 0; if (program_size <= 52L) return status; /* Read header information */ first_word = get_unaligned_be32(&p[0]); if (first_word != 0x4A414D01L) return status; action_table = get_unaligned_be32(&p[4]); proc_table = get_unaligned_be32(&p[8]); str_table = get_unaligned_be32(&p[12]); note_strings = get_unaligned_be32(&p[16]); action_count = get_unaligned_be32(&p[48]); proc_count = get_unaligned_be32(&p[52]); if (index >= action_count) return status; act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]); act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]); act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]); *name = &p[str_table + act_name_id]; if (act_desc_id < (note_strings - str_table)) *description = &p[str_table + act_desc_id]; do { act_proc_name = get_unaligned_be32( &p[proc_table + (13 * act_proc_id)]); act_proc_attribute = (p[proc_table + (13 * act_proc_id) + 8] & 0x03); procptr = (struct altera_procinfo *) kzalloc(sizeof(struct altera_procinfo), GFP_KERNEL); if (procptr == NULL) status = -ENOMEM; else { procptr->name = &p[str_table + act_proc_name]; procptr->attrs = act_proc_attribute; procptr->next = NULL; /* add record to end of linked list */ if (*proc_list == NULL) *proc_list = procptr; else { tmpptr = *proc_list; while (tmpptr->next != NULL) tmpptr = tmpptr->next; tmpptr->next = procptr; } } act_proc_id = get_unaligned_be32( &p[proc_table + (13 * act_proc_id) + 4]); } while ((act_proc_id != 0) && (act_proc_id < proc_count)); return status; } int altera_init(struct altera_config *config, const struct firmware *fw) { struct altera_state *astate = NULL; struct altera_procinfo *proc_list = NULL; struct altera_procinfo *procptr = NULL; char *key = NULL; char *value = NULL; char *action_name = NULL; char *description = NULL; int exec_result = 0; int exit_code = 0; int format_version = 0; int action_count = 0; int procedure_count = 0; int index = 
0; s32 offset = 0L; s32 error_address = 0L; key = kzalloc(33 * sizeof(char), GFP_KERNEL); if (!key) return -ENOMEM; value = kzalloc(257 * sizeof(char), GFP_KERNEL); if (!value) return -ENOMEM; astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL); if (!astate) return -ENOMEM; astate->config = config; if (!astate->config->jtag_io) { dprintk(KERN_INFO "%s: using byteblaster!\n", __func__); astate->config->jtag_io = netup_jtag_io_lpt; } altera_check_crc((u8 *)fw->data, fw->size); if (debug) { altera_get_file_info((u8 *)fw->data, fw->size, &format_version, &action_count, &procedure_count); printk(KERN_INFO "%s: File format is %s ByteCode format\n", __func__, (format_version == 2) ? "Jam STAPL" : "pre-standardized Jam 1.1"); while (altera_get_note((u8 *)fw->data, fw->size, &offset, key, value, 256) == 0) printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n", __func__, key, value); } if (debug && (format_version == 2) && (action_count > 0)) { printk(KERN_INFO "%s: Actions available:\n", __func__); for (index = 0; index < action_count; ++index) { altera_get_act_info((u8 *)fw->data, fw->size, index, &action_name, &description, &proc_list); if (description == NULL) printk(KERN_INFO "%s: %s\n", __func__, action_name); else printk(KERN_INFO "%s: %s \"%s\"\n", __func__, action_name, description); procptr = proc_list; while (procptr != NULL) { if (procptr->attrs != 0) printk(KERN_INFO "%s: %s (%s)\n", __func__, procptr->name, (procptr->attrs == 1) ? 
"optional" : "recommended"); proc_list = procptr->next; kfree(procptr); procptr = proc_list; } } printk(KERN_INFO "\n"); } exec_result = altera_execute(astate, (u8 *)fw->data, fw->size, &error_address, &exit_code, &format_version); if (exit_code) exec_result = -EREMOTEIO; if ((format_version == 2) && (exec_result == -EINVAL)) { if (astate->config->action == NULL) printk(KERN_ERR "%s: error: no action specified for " "Jam STAPL file.\nprogram terminated.\n", __func__); else printk(KERN_ERR "%s: error: action \"%s\"" " is not supported " "for this Jam STAPL file.\n" "Program terminated.\n", __func__, astate->config->action); } else if (exec_result) printk(KERN_ERR "%s: error %d\n", __func__, exec_result); kfree(key); kfree(value); kfree(astate); return 0; } EXPORT_SYMBOL(altera_init);
gpl-2.0
HyochanPyo/kernel_3.18.9
crypto/rmd320.c
2386
13402
/* * Cryptographic API. * * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest. * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <asm/byteorder.h> #include "ripemd.h" struct rmd320_ctx { u64 byte_count; u32 state[10]; __le32 buffer[16]; }; #define K1 RMD_K1 #define K2 RMD_K2 #define K3 RMD_K3 #define K4 RMD_K4 #define K5 RMD_K5 #define KK1 RMD_K6 #define KK2 RMD_K7 #define KK3 RMD_K8 #define KK4 RMD_K9 #define KK5 RMD_K1 #define F1(x, y, z) (x ^ y ^ z) /* XOR */ #define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ #define F3(x, y, z) ((x | ~y) ^ z) #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } static void rmd320_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp; /* Initialize left lane */ aa = state[0]; bb = state[1]; cc = state[2]; dd = state[3]; ee = state[4]; /* Initialize right lane */ aaa = state[5]; bbb = state[6]; ccc = state[7]; ddd = state[8]; eee = state[9]; /* round 1: left lane */ ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11); ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14); ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15); ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12); ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5); ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8); ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7); ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9); ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11); ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13); ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14); ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15); ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6); ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7); ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9); ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8); /* round 1: right lane */ ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14); ROUND(ccc, ddd, 
eee, aaa, bbb, F5, KK1, in[10], 14); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6); /* Swap contents of "a" registers */ tmp = aa; aa = aaa; aaa = tmp; /* round 2: left lane" */ ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7); ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6); ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8); ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13); ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11); ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9); ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7); ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15); ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7); ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12); ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15); ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9); ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11); ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7); ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13); ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12); /* round 2: right lane */ ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11); /* Swap contents of "b" registers */ tmp = bb; bb = bbb; bbb = tmp; /* round 3: left lane" */ ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11); ROUND(cc, dd, ee, aa, bb, F3, K3, 
in[10], 13); ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6); ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7); ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14); ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9); ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13); ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15); ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14); ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8); ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13); ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6); ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5); ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12); ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7); ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5); /* round 3: right lane */ ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5); /* Swap contents of "c" registers */ tmp = cc; cc = ccc; ccc = tmp; /* round 4: left lane" */ ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11); ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12); ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14); ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15); ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14); ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15); ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9); ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8); ROUND(ee, aa, bb, cc, dd, 
F4, K4, in[13], 9); ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14); ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5); ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6); ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8); ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6); ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5); ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12); /* round 4: right lane */ ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8); /* Swap contents of "d" registers */ tmp = dd; dd = ddd; ddd = tmp; /* round 5: left lane" */ ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9); ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15); ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5); ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11); ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6); ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8); ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13); ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12); ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5); ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12); ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13); ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14); ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11); ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8); ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5); ROUND(bb, cc, dd, ee, 
aa, F5, K5, in[13], 6); /* round 5: right lane */ ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11); /* Swap contents of "e" registers */ tmp = ee; ee = eee; eee = tmp; /* combine results */ state[0] += aa; state[1] += bb; state[2] += cc; state[3] += dd; state[4] += ee; state[5] += aaa; state[6] += bbb; state[7] += ccc; state[8] += ddd; state[9] += eee; return; } static int rmd320_init(struct shash_desc *desc) { struct rmd320_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; rctx->state[0] = RMD_H0; rctx->state[1] = RMD_H1; rctx->state[2] = RMD_H2; rctx->state[3] = RMD_H3; rctx->state[4] = RMD_H4; rctx->state[5] = RMD_H5; rctx->state[6] = RMD_H6; rctx->state[7] = RMD_H7; rctx->state[8] = RMD_H8; rctx->state[9] = RMD_H9; memset(rctx->buffer, 0, sizeof(rctx->buffer)); return 0; } static int rmd320_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct rmd320_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; /* Enough space in buffer? 
If so copy and we're done */ if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); rmd320_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); rmd320_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } memcpy(rctx->buffer, data, len); out: return 0; } /* Add padding and return the message digest. */ static int rmd320_final(struct shash_desc *desc, u8 *out) { struct rmd320_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? (56 - index) : ((64+56) - index); rmd320_update(desc, padding, padlen); /* Append length */ rmd320_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 10; i++) dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); return 0; } static struct shash_alg alg = { .digestsize = RMD320_DIGEST_SIZE, .init = rmd320_init, .update = rmd320_update, .final = rmd320_final, .descsize = sizeof(struct rmd320_ctx), .base = { .cra_name = "rmd320", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = RMD320_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init rmd320_mod_init(void) { return crypto_register_shash(&alg); } static void __exit rmd320_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(rmd320_mod_init); module_exit(rmd320_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); MODULE_ALIAS_CRYPTO("rmd320");
gpl-2.0
quancao/kernel-imx-controlboard
drivers/ata/pata_sch.c
2642
5167
/*
 *  pata_sch.c - Intel SCH PATA controllers
 *
 *  Copyright (c) 2008 Alek Du <alek.du@intel.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License 2 as published
 *  by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/*
 *  Supports:
 *    Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
 *    http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME	"pata_sch"
#define DRV_VERSION	"0.2"

/* Per-device timing register layout -- see SCH datasheet page 351 */
enum {
	D0TIM	= 0x80,		/* Device 0 Timing Register */
	D1TIM	= 0x84,		/* Device 1 Timing Register */
	PM	= 0x07,		/* PIO Mode Bit Mask */
	MDM	= (0x03 << 8),	/* Multi-word DMA Mode Bit Mask */
	UDM	= (0x07 << 16),	/* Ultra DMA Mode Bit Mask */
	PPE	= (1 << 30),	/* Prefetch/Post Enable */
	USD	= (1 << 31),	/* Use Synchronous DMA */
};

static int sch_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent);
static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);

static const struct pci_device_id sch_pci_tbl[] = {
	/* Intel SCH PATA Controller */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
	{ }	/* terminate list */
};

static struct pci_driver sch_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sch_pci_tbl,
	.probe		= sch_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};

static struct scsi_host_template sch_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Standard BMDMA operations, plus SCH-specific PIO/DMA timing hooks. */
static struct ata_port_operations sch_pata_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_unknown,
	.set_piomode		= sch_set_piomode,
	.set_dmamode		= sch_set_dmamode,
};

static struct ata_port_info sch_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
	.port_ops	= &sch_pata_ops,
};

MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/**
 *	sch_set_piomode - Initialize host controller PATA PIO timings
 *	@ap:	Port whose timings we are configuring
 *	@adev:	ATA device
 *
 *	Set PIO mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
	unsigned int port	= adev->devno ? D1TIM : D0TIM;
	unsigned int data;

	pci_read_config_dword(dev, port, &data);
	/* see SCH datasheet page 351 */
	/* set PIO mode */
	data &= ~(PM | PPE);
	data |= pio;
	/* enable PPE (prefetch/post) for block devices only; ATAPI is left
	 * without prefetch */
	if (adev->class == ATA_DEV_ATA)
		data |= PPE;
	pci_write_config_dword(dev, port, data);
}

/**
 *	sch_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap:	Port whose timings we are configuring
 *	@adev:	ATA device
 *
 *	Set MW/UDMA mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned int dma_mode	= adev->dma_mode;
	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
	unsigned int port	= adev->devno ? D1TIM : D0TIM;
	unsigned int data;

	pci_read_config_dword(dev, port, &data);
	/* see SCH datasheet page 351 */
	if (dma_mode >= XFER_UDMA_0) {
		/* enable Synchronous DMA mode */
		data |= USD;
		data &= ~UDM;
		data |= (dma_mode - XFER_UDMA_0) << 16;
	} else {
		/* must be MWDMA mode, since we masked SWDMA already */
		data &= ~(USD | MDM);
		data |= (dma_mode - XFER_MW_DMA_0) << 8;
	}
	pci_write_config_dword(dev, port, data);
}

/**
 *	sch_init_one - Register SCH ATA PCI device with kernel services
 *	@pdev:	PCI device to register
 *	@ent:	Entry in sch_pci_tbl matching with @pdev
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */

static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &sch_port_info, NULL };

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}

module_pci_driver(sch_pci_driver);
gpl-2.0
sudosurootdev/kernel_lge_g3-wip
mm/swap.c
3154
21023
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Per-CPU batches of pages awaiting LRU insertion, one pagevec per LRU list */
static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
/* Per-CPU batch of pages to be rotated to the tail of the inactive list */
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
/* Per-CPU batch of pages being forcibly deactivated */
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

/* Final release of a non-compound page: detach from LRU, then free it. */
static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

/* Final release of a compound head page: run its registered destructor. */
static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/*
 * Slow-path put for compound pages.  Must cope with the page being a THP
 * tail whose head may be split (by __split_huge_page_refcount) at any time;
 * the compound_lock on the head serializes against that split.
 */
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

/* Drop a reference; compound pages take the THP-aware slow path above. */
void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * Apply move_fn to every page in pvec under that page's zone->lru_lock,
 * batching the lock across consecutive pages belonging to the same zone,
 * then release the pagevec's references and reinitialise it.
 */
static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/* Move an inactive, evictable LRU page to the tail of its list. */
static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

/*
 * Bump the zone's (and, when present, the memcg's) scanned/rotated
 * counters used to balance file vs anon reclaim pressure.
 */
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/* Move an inactive LRU page to its active list; caller holds lru_lock. */
static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

/* SMP: batch activations per-CPU to amortize zone->lru_lock acquisition. */
void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

/* UP: no batching needed, activate directly under the zone lock. */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);

/*
 * Queue @page for addition to the given LRU list via the per-CPU pagevec;
 * the pagevec is flushed to the LRU once it fills up.
 */
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg.
 * munlock, munmap or exit, while it's not on the lru, we want to add the
 * page while it's locked or otherwise "invisible" to other tasks.  This
 * is difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could reclaim asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, why it moves inactive's head, the VM expects the page would
 * be written out by flusher threads as this is much more effective
 * than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback.
		 * It can make readahead confusing.  But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback has already ended; move the
		 * page to the tail of the inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages such as mprotect,
	 * unevictable page deactivation for accelerating reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

/* Drain this CPU's pagevecs (preemption disabled across the drain). */
void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			/* compound path may sleep on compound_lock; drop
			 * the zone lock first */
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(zone, page, page_off_lru(page));
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
		       struct page *page, struct page *page_tail)
{
	int uninitialized_var(active);
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(zone, page_tail, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* Insert one batched page into the target LRU list; lru_lock is held. */
static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(zone, page, lru);
	update_page_reclaim_stat(zone, page, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

/* As pagevec_lookup(), but only returns pages with the given radix tag. */
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system means that we
	 * _really_ don't want to cluster much more
	 */
}
gpl-2.0
RenderBroken/ghost_render_kernel
mm/swap.c
3154
21023
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Per-CPU batches of pages awaiting LRU insertion, one pagevec per LRU list */
static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
/* Per-CPU batch of pages to be rotated to the tail of the inactive list */
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
/* Per-CPU batch of pages being forcibly deactivated */
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

/* Final release of a non-compound page: detach from LRU, then free it. */
static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

/* Final release of a compound head page: run its registered destructor. */
static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/*
 * Slow-path put for compound pages.  Must cope with the page being a THP
 * tail whose head may be split (by __split_huge_page_refcount) at any time;
 * the compound_lock on the head serializes against that split.
 */
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

/* Drop a reference; compound pages take the THP-aware slow path above. */
void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * Apply move_fn to every page in pvec under that page's zone->lru_lock,
 * batching the lock across consecutive pages belonging to the same zone,
 * then release the pagevec's references and reinitialise it.
 */
static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/* Move an inactive, evictable LRU page to the tail of its list. */
static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

/*
 * Bump the zone's (and, when present, the memcg's) scanned/rotated
 * counters used to balance file vs anon reclaim pressure.
 */
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/* Move an inactive LRU page to its active list; caller holds lru_lock. */
static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

/* SMP: batch activations per-CPU to amortize zone->lru_lock acquisition. */
void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

/* UP: no batching needed, activate directly under the zone lock. */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);

/*
 * Queue @page for addition to the given LRU list via the per-CPU pagevec;
 * the pagevec is flushed to the LRU once it fills up.
 */
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg.
 * munlock, munmap or exit, while it's not on the lru, we want to add the
 * page while it's locked or otherwise "invisible" to other tasks.  This
 * is difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could reclaim asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, why it moves inactive's head, the VM expects the page would
 * be written out by flusher threads as this is much more effective
 * than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback.
		 * It can make readahead confusing.  But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback has already ended; move the
		 * page to the tail of the inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages such as mprotect,
	 * unevictable page deactivation for accelerating reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

/* Drain this CPU's pagevecs (preemption disabled across the drain). */
void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
*/ void release_pages(struct page **pages, int nr, int cold) { int i; LIST_HEAD(pages_to_free); struct zone *zone = NULL; unsigned long uninitialized_var(flags); for (i = 0; i < nr; i++) { struct page *page = pages[i]; if (unlikely(PageCompound(page))) { if (zone) { spin_unlock_irqrestore(&zone->lru_lock, flags); zone = NULL; } put_compound_page(page); continue; } if (!put_page_testzero(page)) continue; if (PageLRU(page)) { struct zone *pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); zone = pagezone; spin_lock_irqsave(&zone->lru_lock, flags); } VM_BUG_ON(!PageLRU(page)); __ClearPageLRU(page); del_page_from_lru_list(zone, page, page_off_lru(page)); } list_add(&page->lru, &pages_to_free); } if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); free_hot_cold_page_list(&pages_to_free, cold); } EXPORT_SYMBOL(release_pages); /* * The pages which we're about to release may be in the deferred lru-addition * queues. That would prevent them from really being freed right now. That's * OK from a correctness point of view but is inefficient - those pages may be * cache-warm and we want to give them back to the page allocator ASAP. * * So __pagevec_release() will drain those queues here. __pagevec_lru_add() * and __pagevec_lru_add_active() call release_pages() directly to avoid * mutual recursion. 
*/ void __pagevec_release(struct pagevec *pvec) { lru_add_drain(); release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); pagevec_reinit(pvec); } EXPORT_SYMBOL(__pagevec_release); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* used by __split_huge_page_refcount() */ void lru_add_page_tail(struct zone* zone, struct page *page, struct page *page_tail) { int uninitialized_var(active); enum lru_list lru; const int file = 0; VM_BUG_ON(!PageHead(page)); VM_BUG_ON(PageCompound(page_tail)); VM_BUG_ON(PageLRU(page_tail)); VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock)); SetPageLRU(page_tail); if (page_evictable(page_tail, NULL)) { if (PageActive(page)) { SetPageActive(page_tail); active = 1; lru = LRU_ACTIVE_ANON; } else { active = 0; lru = LRU_INACTIVE_ANON; } } else { SetPageUnevictable(page_tail); lru = LRU_UNEVICTABLE; } if (likely(PageLRU(page))) list_add_tail(&page_tail->lru, &page->lru); else { struct list_head *list_head; /* * Head page has not yet been counted, as an hpage, * so we must account for each subpage individually. * * Use the standard add function to put page_tail on the list, * but then correct its position so they all end up in order. */ add_page_to_lru_list(zone, page_tail, lru); list_head = page_tail->lru.prev; list_move_tail(&page_tail->lru, list_head); } if (!PageUnevictable(page)) update_page_reclaim_stat(zone, page_tail, file, active); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static void __pagevec_lru_add_fn(struct page *page, void *arg) { enum lru_list lru = (enum lru_list)arg; struct zone *zone = page_zone(page); int file = is_file_lru(lru); int active = is_active_lru(lru); VM_BUG_ON(PageActive(page)); VM_BUG_ON(PageUnevictable(page)); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); if (active) SetPageActive(page); add_page_to_lru_list(zone, page, lru); update_page_reclaim_stat(zone, page, file, active); } /* * Add the passed pages to the LRU, then drop the caller's refcount * on them. Reinitialises the caller's pagevec. 
*/ void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { VM_BUG_ON(is_unevictable_lru(lru)); pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru); } EXPORT_SYMBOL(__pagevec_lru_add); /** * pagevec_lookup - gang pagecache lookup * @pvec: Where the resulting pages are placed * @mapping: The address_space to search * @start: The starting page index * @nr_pages: The maximum number of pages * * pagevec_lookup() will search for and return a group of up to @nr_pages pages * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a * reference against the pages in @pvec. * * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * * pagevec_lookup() returns the number of pages which were found. */ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages) { pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); return pagevec_count(pvec); } EXPORT_SYMBOL(pagevec_lookup); unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages) { pvec->nr = find_get_pages_tag(mapping, index, tag, nr_pages, pvec->pages); return pagevec_count(pvec); } EXPORT_SYMBOL(pagevec_lookup_tag); /* * Perform any setup for the swap system */ void __init swap_setup(void) { unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); #ifdef CONFIG_SWAP bdi_init(swapper_space.backing_dev_info); #endif /* Use a smaller cluster for small-memory machines */ if (megs < 16) page_cluster = 2; else page_cluster = 3; /* * Right now other parts of the system means that we * _really_ don't want to cluster much more */ }
gpl-2.0
alexey6600/M8_Sense_7.00
drivers/mtd/mtd_blkdevs.c
3410
13896
/* * Interface to Linux block layer for MTD 'translation layers'. * * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/list.h> #include <linux/fs.h> #include <linux/mtd/blktrans.h> #include <linux/mtd/mtd.h> #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/spinlock.h> #include <linux/hdreg.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <asm/uaccess.h> #include "mtdcore.h" static LIST_HEAD(blktrans_majors); static DEFINE_MUTEX(blktrans_ref_mutex); static void blktrans_dev_release(struct kref *kref) { struct mtd_blktrans_dev *dev = container_of(kref, struct mtd_blktrans_dev, ref); dev->disk->private_data = NULL; blk_cleanup_queue(dev->rq); put_disk(dev->disk); list_del(&dev->list); kfree(dev); } static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk) { struct mtd_blktrans_dev *dev; mutex_lock(&blktrans_ref_mutex); dev = disk->private_data; if (!dev) goto unlock; kref_get(&dev->ref); unlock: mutex_unlock(&blktrans_ref_mutex); return dev; } static void blktrans_dev_put(struct mtd_blktrans_dev *dev) { mutex_lock(&blktrans_ref_mutex); kref_put(&dev->ref, blktrans_dev_release); 
mutex_unlock(&blktrans_ref_mutex); } static int do_blktrans_request(struct mtd_blktrans_ops *tr, struct mtd_blktrans_dev *dev, struct request *req) { unsigned long block, nsect; char *buf; block = blk_rq_pos(req) << 9 >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift; buf = req->buffer; if (req->cmd_type != REQ_TYPE_FS) return -EIO; if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk)) return -EIO; if (req->cmd_flags & REQ_DISCARD) return tr->discard(dev, block, nsect); switch(rq_data_dir(req)) { case READ: for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->readsect(dev, block, buf)) return -EIO; rq_flush_dcache_pages(req); return 0; case WRITE: if (!tr->writesect) return -EIO; rq_flush_dcache_pages(req); for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->writesect(dev, block, buf)) return -EIO; return 0; default: printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); return -EIO; } } int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) { if (kthread_should_stop()) return 1; return dev->bg_stop; } EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); static int mtd_blktrans_thread(void *arg) { struct mtd_blktrans_dev *dev = arg; struct mtd_blktrans_ops *tr = dev->tr; struct request_queue *rq = dev->rq; struct request *req = NULL; int background_done = 0; spin_lock_irq(rq->queue_lock); while (!kthread_should_stop()) { int res; dev->bg_stop = false; if (!req && !(req = blk_fetch_request(rq))) { if (tr->background && !background_done) { spin_unlock_irq(rq->queue_lock); mutex_lock(&dev->lock); tr->background(dev); mutex_unlock(&dev->lock); spin_lock_irq(rq->queue_lock); /* * Do background processing just once per idle * period. 
*/ background_done = !dev->bg_stop; continue; } set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) set_current_state(TASK_RUNNING); spin_unlock_irq(rq->queue_lock); schedule(); spin_lock_irq(rq->queue_lock); continue; } spin_unlock_irq(rq->queue_lock); mutex_lock(&dev->lock); res = do_blktrans_request(dev->tr, dev, req); mutex_unlock(&dev->lock); spin_lock_irq(rq->queue_lock); if (!__blk_end_request_cur(req, res)) req = NULL; background_done = 0; } if (req) __blk_end_request_all(req, -EIO); spin_unlock_irq(rq->queue_lock); return 0; } static void mtd_blktrans_request(struct request_queue *rq) { struct mtd_blktrans_dev *dev; struct request *req = NULL; dev = rq->queuedata; if (!dev) while ((req = blk_fetch_request(rq)) != NULL) __blk_end_request_all(req, -ENODEV); else { dev->bg_stop = true; wake_up_process(dev->thread); } } static int blktrans_open(struct block_device *bdev, fmode_t mode) { struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); int ret = 0; if (!dev) return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/ mutex_lock(&dev->lock); if (dev->open) goto unlock; kref_get(&dev->ref); __module_get(dev->tr->owner); if (!dev->mtd) goto unlock; if (dev->tr->open) { ret = dev->tr->open(dev); if (ret) goto error_put; } ret = __get_mtd_device(dev->mtd); if (ret) goto error_release; dev->file_mode = mode; unlock: dev->open++; mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; error_release: if (dev->tr->release) dev->tr->release(dev); error_put: module_put(dev->tr->owner); kref_put(&dev->ref, blktrans_dev_release); mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; } static int blktrans_release(struct gendisk *disk, fmode_t mode) { struct mtd_blktrans_dev *dev = blktrans_dev_get(disk); int ret = 0; if (!dev) return ret; mutex_lock(&dev->lock); if (--dev->open) goto unlock; kref_put(&dev->ref, blktrans_dev_release); module_put(dev->tr->owner); if (dev->mtd) { ret = dev->tr->release ? 
dev->tr->release(dev) : 0; __put_mtd_device(dev->mtd); } unlock: mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; } static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); int ret = -ENXIO; if (!dev) return ret; mutex_lock(&dev->lock); if (!dev->mtd) goto unlock; ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0; unlock: mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; } static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); int ret = -ENXIO; if (!dev) return ret; mutex_lock(&dev->lock); if (!dev->mtd) goto unlock; switch (cmd) { case BLKFLSBUF: ret = dev->tr->flush ? dev->tr->flush(dev) : 0; break; default: ret = -ENOTTY; } unlock: mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; } static const struct block_device_operations mtd_blktrans_ops = { .owner = THIS_MODULE, .open = blktrans_open, .release = blktrans_release, .ioctl = blktrans_ioctl, .getgeo = blktrans_getgeo, }; int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) { struct mtd_blktrans_ops *tr = new->tr; struct mtd_blktrans_dev *d; int last_devnum = -1; struct gendisk *gd; int ret; if (mutex_trylock(&mtd_table_mutex)) { mutex_unlock(&mtd_table_mutex); BUG(); } mutex_lock(&blktrans_ref_mutex); list_for_each_entry(d, &tr->devs, list) { if (new->devnum == -1) { /* Use first free number */ if (d->devnum != last_devnum+1) { /* Found a free devnum. 
Plug it in here */ new->devnum = last_devnum+1; list_add_tail(&new->list, &d->list); goto added; } } else if (d->devnum == new->devnum) { /* Required number taken */ mutex_unlock(&blktrans_ref_mutex); return -EBUSY; } else if (d->devnum > new->devnum) { /* Required number was free */ list_add_tail(&new->list, &d->list); goto added; } last_devnum = d->devnum; } ret = -EBUSY; if (new->devnum == -1) new->devnum = last_devnum+1; /* Check that the device and any partitions will get valid * minor numbers and that the disk naming code below can cope * with this number. */ if (new->devnum > (MINORMASK >> tr->part_bits) || (tr->part_bits && new->devnum >= 27 * 26)) { mutex_unlock(&blktrans_ref_mutex); goto error1; } list_add_tail(&new->list, &tr->devs); added: mutex_unlock(&blktrans_ref_mutex); mutex_init(&new->lock); kref_init(&new->ref); if (!tr->writesect) new->readonly = 1; /* Create gendisk */ ret = -ENOMEM; gd = alloc_disk(1 << tr->part_bits); if (!gd) goto error2; new->disk = gd; gd->private_data = new; gd->major = tr->major; gd->first_minor = (new->devnum) << tr->part_bits; gd->fops = &mtd_blktrans_ops; if (tr->part_bits) if (new->devnum < 26) snprintf(gd->disk_name, sizeof(gd->disk_name), "%s%c", tr->name, 'a' + new->devnum); else snprintf(gd->disk_name, sizeof(gd->disk_name), "%s%c%c", tr->name, 'a' - 1 + new->devnum / 26, 'a' + new->devnum % 26); else snprintf(gd->disk_name, sizeof(gd->disk_name), "%s%d", tr->name, new->devnum); set_capacity(gd, (new->size * tr->blksize) >> 9); /* Create the request queue */ spin_lock_init(&new->queue_lock); new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock); if (!new->rq) goto error3; new->rq->queuedata = new; /* * Empirical measurements revealed that read ahead values larger than * 4 slowed down boot time, so start out with this small value. 
*/ new->rq->backing_dev_info.ra_pages = (4 * 1024) / PAGE_CACHE_SIZE; blk_queue_logical_block_size(new->rq, tr->blksize); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq); if (tr->discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); new->rq->limits.max_discard_sectors = UINT_MAX; } gd->queue = new->rq; /* Create processing thread */ /* TODO: workqueue ? */ new->thread = kthread_run(mtd_blktrans_thread, new, "%s%d", tr->name, new->mtd->index); if (IS_ERR(new->thread)) { ret = PTR_ERR(new->thread); goto error4; } gd->driverfs_dev = &new->mtd->dev; if (new->readonly) set_disk_ro(gd, 1); add_disk(gd); if (new->disk_attributes) { ret = sysfs_create_group(&disk_to_dev(gd)->kobj, new->disk_attributes); WARN_ON(ret); } return 0; error4: blk_cleanup_queue(new->rq); error3: put_disk(new->disk); error2: list_del(&new->list); error1: return ret; } int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) { unsigned long flags; if (mutex_trylock(&mtd_table_mutex)) { mutex_unlock(&mtd_table_mutex); BUG(); } if (old->disk_attributes) sysfs_remove_group(&disk_to_dev(old->disk)->kobj, old->disk_attributes); /* Stop new requests to arrive */ del_gendisk(old->disk); /* Stop the thread */ kthread_stop(old->thread); /* Kill current requests */ spin_lock_irqsave(&old->queue_lock, flags); old->rq->queuedata = NULL; blk_start_queue(old->rq); spin_unlock_irqrestore(&old->queue_lock, flags); /* If the device is currently open, tell trans driver to close it, then put mtd device, and don't touch it again */ mutex_lock(&old->lock); if (old->open) { if (old->tr->release) old->tr->release(old); __put_mtd_device(old->mtd); } old->mtd = NULL; mutex_unlock(&old->lock); blktrans_dev_put(old); return 0; } static void blktrans_notify_remove(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; struct mtd_blktrans_dev *dev, *next; list_for_each_entry(tr, &blktrans_majors, list) list_for_each_entry_safe(dev, next, &tr->devs, list) if (dev->mtd == mtd) tr->remove_dev(dev); } static void 
blktrans_notify_add(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; if (mtd->type == MTD_ABSENT) return; list_for_each_entry(tr, &blktrans_majors, list) tr->add_mtd(tr, mtd); } static struct mtd_notifier blktrans_notifier = { .add = blktrans_notify_add, .remove = blktrans_notify_remove, }; int register_mtd_blktrans(struct mtd_blktrans_ops *tr) { struct mtd_info *mtd; int ret; /* Register the notifier if/when the first device type is registered, to prevent the link/init ordering from fucking us over. */ if (!blktrans_notifier.list.next) register_mtd_user(&blktrans_notifier); mutex_lock(&mtd_table_mutex); ret = register_blkdev(tr->major, tr->name); if (ret < 0) { printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", tr->name, tr->major, ret); mutex_unlock(&mtd_table_mutex); return ret; } if (ret) tr->major = ret; tr->blkshift = ffs(tr->blksize) - 1; INIT_LIST_HEAD(&tr->devs); list_add(&tr->list, &blktrans_majors); mtd_for_each_device(mtd) if (mtd->type != MTD_ABSENT) tr->add_mtd(tr, mtd); mutex_unlock(&mtd_table_mutex); return 0; } int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) { struct mtd_blktrans_dev *dev, *next; mutex_lock(&mtd_table_mutex); /* Remove it from the list of active majors */ list_del(&tr->list); list_for_each_entry_safe(dev, next, &tr->devs, list) tr->remove_dev(dev); unregister_blkdev(tr->major, tr->name); mutex_unlock(&mtd_table_mutex); BUG_ON(!list_empty(&tr->devs)); return 0; } static void __exit mtd_blktrans_exit(void) { /* No race here -- if someone's currently in register_mtd_blktrans we're screwed anyway. 
*/ if (blktrans_notifier.list.next) unregister_mtd_user(&blktrans_notifier); } module_exit(mtd_blktrans_exit); EXPORT_SYMBOL_GPL(register_mtd_blktrans); EXPORT_SYMBOL_GPL(deregister_mtd_blktrans); EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev); EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
gpl-2.0
MattCrystal/HTC-One---4.4-Linaro
sound/soc/fsl/fsl_dma.c
4946
32057
/* * Freescale DMA ALSA SoC PCM driver * * Author: Timur Tabi <timur@freescale.com> * * Copyright 2007-2010 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * This driver implements ASoC support for the Elo DMA controller, which is * the DMA controller on Freescale 83xx, 85xx, and 86xx SOCs. In ALSA terms, * the PCM driver is what handles the DMA buffer. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/gfp.h> #include <linux/of_platform.h> #include <linux/list.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/io.h> #include "fsl_dma.h" #include "fsl_ssi.h" /* For the offset of stx0 and srx0 */ /* * The formats that the DMA controller supports, which is anything * that is 8, 16, or 32 bits. */ #define FSLDMA_PCM_FORMATS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_U8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_U16_LE | \ SNDRV_PCM_FMTBIT_U16_BE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S24_BE | \ SNDRV_PCM_FMTBIT_U24_LE | \ SNDRV_PCM_FMTBIT_U24_BE | \ SNDRV_PCM_FMTBIT_S32_LE | \ SNDRV_PCM_FMTBIT_S32_BE | \ SNDRV_PCM_FMTBIT_U32_LE | \ SNDRV_PCM_FMTBIT_U32_BE) #define FSLDMA_PCM_RATES (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_192000 | \ SNDRV_PCM_RATE_CONTINUOUS) struct dma_object { struct snd_soc_platform_driver dai; dma_addr_t ssi_stx_phys; dma_addr_t ssi_srx_phys; unsigned int ssi_fifo_depth; struct ccsr_dma_channel __iomem *channel; unsigned int irq; bool assigned; char path[1]; }; /* * The number of DMA links to use. Two is the bare minimum, but if you * have really small links you might need more. 
*/ #define NUM_DMA_LINKS 2 /** fsl_dma_private: p-substream DMA data * * Each substream has a 1-to-1 association with a DMA channel. * * The link[] array is first because it needs to be aligned on a 32-byte * boundary, so putting it first will ensure alignment without padding the * structure. * * @link[]: array of link descriptors * @dma_channel: pointer to the DMA channel's registers * @irq: IRQ for this DMA channel * @substream: pointer to the substream object, needed by the ISR * @ssi_sxx_phys: bus address of the STX or SRX register to use * @ld_buf_phys: physical address of the LD buffer * @current_link: index into link[] of the link currently being processed * @dma_buf_phys: physical address of the DMA buffer * @dma_buf_next: physical address of the next period to process * @dma_buf_end: physical address of the byte after the end of the DMA * @buffer period_size: the size of a single period * @num_periods: the number of periods in the DMA buffer */ struct fsl_dma_private { struct fsl_dma_link_descriptor link[NUM_DMA_LINKS]; struct ccsr_dma_channel __iomem *dma_channel; unsigned int irq; struct snd_pcm_substream *substream; dma_addr_t ssi_sxx_phys; unsigned int ssi_fifo_depth; dma_addr_t ld_buf_phys; unsigned int current_link; dma_addr_t dma_buf_phys; dma_addr_t dma_buf_next; dma_addr_t dma_buf_end; size_t period_size; unsigned int num_periods; }; /** * fsl_dma_hardare: define characteristics of the PCM hardware. * * The PCM hardware is the Freescale DMA controller. This structure defines * the capabilities of that hardware. * * Since the sampling rate and data format are not controlled by the DMA * controller, we specify no limits for those values. The only exception is * period_bytes_min, which is set to a reasonably low value to prevent the * DMA controller from generating too many interrupts per second. * * Since each link descriptor has a 32-bit byte count field, we set * period_bytes_max to the largest 32-bit number. 
We also have no maximum * number of periods. * * Note that we specify SNDRV_PCM_INFO_JOINT_DUPLEX here, but only because a * limitation in the SSI driver requires the sample rates for playback and * capture to be the same. */ static const struct snd_pcm_hardware fsl_dma_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_JOINT_DUPLEX | SNDRV_PCM_INFO_PAUSE, .formats = FSLDMA_PCM_FORMATS, .rates = FSLDMA_PCM_RATES, .rate_min = 5512, .rate_max = 192000, .period_bytes_min = 512, /* A reasonable limit */ .period_bytes_max = (u32) -1, .periods_min = NUM_DMA_LINKS, .periods_max = (unsigned int) -1, .buffer_bytes_max = 128 * 1024, /* A reasonable limit */ }; /** * fsl_dma_abort_stream: tell ALSA that the DMA transfer has aborted * * This function should be called by the ISR whenever the DMA controller * halts data transfer. */ static void fsl_dma_abort_stream(struct snd_pcm_substream *substream) { unsigned long flags; snd_pcm_stream_lock_irqsave(substream, flags); if (snd_pcm_running(substream)) snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irqrestore(substream, flags); } /** * fsl_dma_update_pointers - update LD pointers to point to the next period * * As each period is completed, this function changes the the link * descriptor pointers for that period to point to the next period. */ static void fsl_dma_update_pointers(struct fsl_dma_private *dma_private) { struct fsl_dma_link_descriptor *link = &dma_private->link[dma_private->current_link]; /* Update our link descriptors to point to the next period. On a 36-bit * system, we also need to update the ESAD bits. We also set (keep) the * snoop bits. See the comments in fsl_dma_hw_params() about snooping. 
*/ if (dma_private->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { link->source_addr = cpu_to_be32(dma_private->dma_buf_next); #ifdef CONFIG_PHYS_64BIT link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP | upper_32_bits(dma_private->dma_buf_next)); #endif } else { link->dest_addr = cpu_to_be32(dma_private->dma_buf_next); #ifdef CONFIG_PHYS_64BIT link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP | upper_32_bits(dma_private->dma_buf_next)); #endif } /* Update our variables for next time */ dma_private->dma_buf_next += dma_private->period_size; if (dma_private->dma_buf_next >= dma_private->dma_buf_end) dma_private->dma_buf_next = dma_private->dma_buf_phys; if (++dma_private->current_link >= NUM_DMA_LINKS) dma_private->current_link = 0; } /** * fsl_dma_isr: interrupt handler for the DMA controller * * @irq: IRQ of the DMA channel * @dev_id: pointer to the dma_private structure for this DMA channel */ static irqreturn_t fsl_dma_isr(int irq, void *dev_id) { struct fsl_dma_private *dma_private = dev_id; struct snd_pcm_substream *substream = dma_private->substream; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct device *dev = rtd->platform->dev; struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel; irqreturn_t ret = IRQ_NONE; u32 sr, sr2 = 0; /* We got an interrupt, so read the status register to see what we were interrupted for. */ sr = in_be32(&dma_channel->sr); if (sr & CCSR_DMA_SR_TE) { dev_err(dev, "dma transmit error\n"); fsl_dma_abort_stream(substream); sr2 |= CCSR_DMA_SR_TE; ret = IRQ_HANDLED; } if (sr & CCSR_DMA_SR_CH) ret = IRQ_HANDLED; if (sr & CCSR_DMA_SR_PE) { dev_err(dev, "dma programming error\n"); fsl_dma_abort_stream(substream); sr2 |= CCSR_DMA_SR_PE; ret = IRQ_HANDLED; } if (sr & CCSR_DMA_SR_EOLNI) { sr2 |= CCSR_DMA_SR_EOLNI; ret = IRQ_HANDLED; } if (sr & CCSR_DMA_SR_CB) ret = IRQ_HANDLED; if (sr & CCSR_DMA_SR_EOSI) { /* Tell ALSA we completed a period. 
*/ snd_pcm_period_elapsed(substream); /* * Update our link descriptors to point to the next period. We * only need to do this if the number of periods is not equal to * the number of links. */ if (dma_private->num_periods != NUM_DMA_LINKS) fsl_dma_update_pointers(dma_private); sr2 |= CCSR_DMA_SR_EOSI; ret = IRQ_HANDLED; } if (sr & CCSR_DMA_SR_EOLSI) { sr2 |= CCSR_DMA_SR_EOLSI; ret = IRQ_HANDLED; } /* Clear the bits that we set */ if (sr2) out_be32(&dma_channel->sr, sr2); return ret; } /** * fsl_dma_new: initialize this PCM driver. * * This function is called when the codec driver calls snd_soc_new_pcms(), * once for each .dai_link in the machine driver's snd_soc_card * structure. * * snd_dma_alloc_pages() is just a front-end to dma_alloc_coherent(), which * (currently) always allocates the DMA buffer in lowmem, even if GFP_HIGHMEM * is specified. Therefore, any DMA buffers we allocate will always be in low * memory, but we support for 36-bit physical addresses anyway. * * Regardless of where the memory is actually allocated, since the device can * technically DMA to any 36-bit address, we do need to set the DMA mask to 36. */ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; static u64 fsl_dma_dmamask = DMA_BIT_MASK(36); int ret; if (!card->dev->dma_mask) card->dev->dma_mask = &fsl_dma_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = fsl_dma_dmamask; /* Some codecs have separate DAIs for playback and capture, so we * should allocate a DMA buffer only for the streams that are valid. 
*/ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev, fsl_dma_hardware.buffer_bytes_max, &pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer); if (ret) { dev_err(card->dev, "can't alloc playback dma buffer\n"); return ret; } } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev, fsl_dma_hardware.buffer_bytes_max, &pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->dma_buffer); if (ret) { dev_err(card->dev, "can't alloc capture dma buffer\n"); snd_dma_free_pages(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer); return ret; } } return 0; } /** * fsl_dma_open: open a new substream. * * Each substream has its own DMA buffer. * * ALSA divides the DMA buffer into N periods. We create NUM_DMA_LINKS link * descriptors that ping-pong from one period to the next. For example, if * there are six periods and two link descriptors, this is how they look * before playback starts: * * The last link descriptor * ____________ points back to the first * | | * V | * ___ ___ | * | |->| |->| * |___| |___| * | | * | | * V V * _________________________________________ * | | | | | | | The DMA buffer is * | | | | | | | divided into 6 parts * |______|______|______|______|______|______| * * and here's how they look after the first period is finished playing: * * ____________ * | | * V | * ___ ___ | * | |->| |->| * |___| |___| * | | * |______________ * | | * V V * _________________________________________ * | | | | | | | * | | | | | | | * |______|______|______|______|______|______| * * The first link descriptor now points to the third period. The DMA * controller is currently playing the second period. When it finishes, it * will jump back to the first descriptor and play the third period. * * There are four reasons we do this: * * 1. 
The only way to get the DMA controller to automatically restart the * transfer when it gets to the end of the buffer is to use chaining * mode. Basic direct mode doesn't offer that feature. * 2. We need to receive an interrupt at the end of every period. The DMA * controller can generate an interrupt at the end of every link transfer * (aka segment). Making each period into a DMA segment will give us the * interrupts we need. * 3. By creating only two link descriptors, regardless of the number of * periods, we do not need to reallocate the link descriptors if the * number of periods changes. * 4. All of the audio data is still stored in a single, contiguous DMA * buffer, which is what ALSA expects. We're just dividing it into * contiguous parts, and creating a link descriptor for each one. */ static int fsl_dma_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct device *dev = rtd->platform->dev; struct dma_object *dma = container_of(rtd->platform->driver, struct dma_object, dai); struct fsl_dma_private *dma_private; struct ccsr_dma_channel __iomem *dma_channel; dma_addr_t ld_buf_phys; u64 temp_link; /* Pointer to next link descriptor */ u32 mr; unsigned int channel; int ret = 0; unsigned int i; /* * Reject any DMA buffer whose size is not a multiple of the period * size. We need to make sure that the DMA buffer can be evenly divided * into periods. */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { dev_err(dev, "invalid buffer size\n"); return ret; } channel = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 
0 : 1; if (dma->assigned) { dev_err(dev, "dma channel already assigned\n"); return -EBUSY; } dma_private = dma_alloc_coherent(dev, sizeof(struct fsl_dma_private), &ld_buf_phys, GFP_KERNEL); if (!dma_private) { dev_err(dev, "can't allocate dma private data\n"); return -ENOMEM; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dma_private->ssi_sxx_phys = dma->ssi_stx_phys; else dma_private->ssi_sxx_phys = dma->ssi_srx_phys; dma_private->ssi_fifo_depth = dma->ssi_fifo_depth; dma_private->dma_channel = dma->channel; dma_private->irq = dma->irq; dma_private->substream = substream; dma_private->ld_buf_phys = ld_buf_phys; dma_private->dma_buf_phys = substream->dma_buffer.addr; ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "fsldma-audio", dma_private); if (ret) { dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n", dma_private->irq, ret); dma_free_coherent(dev, sizeof(struct fsl_dma_private), dma_private, dma_private->ld_buf_phys); return ret; } dma->assigned = 1; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); snd_soc_set_runtime_hwparams(substream, &fsl_dma_hardware); runtime->private_data = dma_private; /* Program the fixed DMA controller parameters */ dma_channel = dma_private->dma_channel; temp_link = dma_private->ld_buf_phys + sizeof(struct fsl_dma_link_descriptor); for (i = 0; i < NUM_DMA_LINKS; i++) { dma_private->link[i].next = cpu_to_be64(temp_link); temp_link += sizeof(struct fsl_dma_link_descriptor); } /* The last link descriptor points to the first */ dma_private->link[i - 1].next = cpu_to_be64(dma_private->ld_buf_phys); /* Tell the DMA controller where the first link descriptor is */ out_be32(&dma_channel->clndar, CCSR_DMA_CLNDAR_ADDR(dma_private->ld_buf_phys)); out_be32(&dma_channel->eclndar, CCSR_DMA_ECLNDAR_ADDR(dma_private->ld_buf_phys)); /* The manual says the BCR must be clear before enabling EMP */ out_be32(&dma_channel->bcr, 0); /* * Program the mode register for interrupts, external master control, * and 
source/destination hold. Also clear the Channel Abort bit. */ mr = in_be32(&dma_channel->mr) & ~(CCSR_DMA_MR_CA | CCSR_DMA_MR_DAHE | CCSR_DMA_MR_SAHE); /* * We want External Master Start and External Master Pause enabled, * because the SSI is controlling the DMA controller. We want the DMA * controller to be set up in advance, and then we signal only the SSI * to start transferring. * * We want End-Of-Segment Interrupts enabled, because this will generate * an interrupt at the end of each segment (each link descriptor * represents one segment). Each DMA segment is the same thing as an * ALSA period, so this is how we get an interrupt at the end of every * period. * * We want Error Interrupt enabled, so that we can get an error if * the DMA controller is mis-programmed somehow. */ mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN | CCSR_DMA_MR_EMS_EN; /* For playback, we want the destination address to be held. For capture, set the source address to be held. */ mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? CCSR_DMA_MR_DAHE : CCSR_DMA_MR_SAHE; out_be32(&dma_channel->mr, mr); return 0; } /** * fsl_dma_hw_params: continue initializing the DMA links * * This function obtains hardware parameters about the opened stream and * programs the DMA controller accordingly. * * One drawback of big-endian is that when copying integers of different * sizes to a fixed-sized register, the address to which the integer must be * copied is dependent on the size of the integer. * * For example, if P is the address of a 32-bit register, and X is a 32-bit * integer, then X should be copied to address P. However, if X is a 16-bit * integer, then it should be copied to P+2. If X is an 8-bit register, * then it should be copied to P+3. * * So for playback of 8-bit samples, the DMA controller must transfer single * bytes from the DMA buffer to the last byte of the STX0 register, i.e. * offset by 3 bytes. For 16-bit samples, the offset is two bytes. 
* * For 24-bit samples, the offset is 1 byte. However, the DMA controller * does not support 3-byte copies (the DAHTS register supports only 1, 2, 4, * and 8 bytes at a time). So we do not support packed 24-bit samples. * 24-bit data must be padded to 32 bits. */ static int fsl_dma_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; struct fsl_dma_private *dma_private = runtime->private_data; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct device *dev = rtd->platform->dev; /* Number of bits per sample */ unsigned int sample_bits = snd_pcm_format_physical_width(params_format(hw_params)); /* Number of bytes per frame */ unsigned int sample_bytes = sample_bits / 8; /* Bus address of SSI STX register */ dma_addr_t ssi_sxx_phys = dma_private->ssi_sxx_phys; /* Size of the DMA buffer, in bytes */ size_t buffer_size = params_buffer_bytes(hw_params); /* Number of bytes per period */ size_t period_size = params_period_bytes(hw_params); /* Pointer to next period */ dma_addr_t temp_addr = substream->dma_buffer.addr; /* Pointer to DMA controller */ struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel; u32 mr; /* DMA Mode Register */ unsigned int i; /* Initialize our DMA tracking variables */ dma_private->period_size = period_size; dma_private->num_periods = params_periods(hw_params); dma_private->dma_buf_end = dma_private->dma_buf_phys + buffer_size; dma_private->dma_buf_next = dma_private->dma_buf_phys + (NUM_DMA_LINKS * period_size); if (dma_private->dma_buf_next >= dma_private->dma_buf_end) /* This happens if the number of periods == NUM_DMA_LINKS */ dma_private->dma_buf_next = dma_private->dma_buf_phys; mr = in_be32(&dma_channel->mr) & ~(CCSR_DMA_MR_BWC_MASK | CCSR_DMA_MR_SAHTS_MASK | CCSR_DMA_MR_DAHTS_MASK); /* Due to a quirk of the SSI's STX register, the target address * for the DMA operations depends on the sample size. 
So we calculate * that offset here. While we're at it, also tell the DMA controller * how much data to transfer per sample. */ switch (sample_bits) { case 8: mr |= CCSR_DMA_MR_DAHTS_1 | CCSR_DMA_MR_SAHTS_1; ssi_sxx_phys += 3; break; case 16: mr |= CCSR_DMA_MR_DAHTS_2 | CCSR_DMA_MR_SAHTS_2; ssi_sxx_phys += 2; break; case 32: mr |= CCSR_DMA_MR_DAHTS_4 | CCSR_DMA_MR_SAHTS_4; break; default: /* We should never get here */ dev_err(dev, "unsupported sample size %u\n", sample_bits); return -EINVAL; } /* * BWC determines how many bytes are sent/received before the DMA * controller checks the SSI to see if it needs to stop. BWC should * always be a multiple of the frame size, so that we always transmit * whole frames. Each frame occupies two slots in the FIFO. The * parameter for CCSR_DMA_MR_BWC() is rounded down the next power of two * (MR[BWC] can only represent even powers of two). * * To simplify the process, we set BWC to the largest value that is * less than or equal to the FIFO watermark. For playback, this ensures * that we transfer the maximum amount without overrunning the FIFO. * For capture, this ensures that we transfer the maximum amount without * underrunning the FIFO. * * f = SSI FIFO depth * w = SSI watermark value (which equals f - 2) * b = DMA bandwidth count (in bytes) * s = sample size (in bytes, which equals frame_size * 2) * * For playback, we never transmit more than the transmit FIFO * watermark, otherwise we might write more data than the FIFO can hold. * The watermark is equal to the FIFO depth minus two. * * For capture, two equations must hold: * w > f - (b / s) * w >= b / s * * So, b > 2 * s, but b must also be <= s * w. To simplify, we set * b = s * w, which is equal to * (dma_private->ssi_fifo_depth - 2) * sample_bytes. 
*/ mr |= CCSR_DMA_MR_BWC((dma_private->ssi_fifo_depth - 2) * sample_bytes); out_be32(&dma_channel->mr, mr); for (i = 0; i < NUM_DMA_LINKS; i++) { struct fsl_dma_link_descriptor *link = &dma_private->link[i]; link->count = cpu_to_be32(period_size); /* The snoop bit tells the DMA controller whether it should tell * the ECM to snoop during a read or write to an address. For * audio, we use DMA to transfer data between memory and an I/O * device (the SSI's STX0 or SRX0 register). Snooping is only * needed if there is a cache, so we need to snoop memory * addresses only. For playback, that means we snoop the source * but not the destination. For capture, we snoop the * destination but not the source. * * Note that failing to snoop properly is unlikely to cause * cache incoherency if the period size is larger than the * size of L1 cache. This is because filling in one period will * flush out the data for the previous period. So if you * increased period_bytes_min to a large enough size, you might * get more performance by not snooping, and you'll still be * okay. You'll need to update fsl_dma_update_pointers() also. */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { link->source_addr = cpu_to_be32(temp_addr); link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP | upper_32_bits(temp_addr)); link->dest_addr = cpu_to_be32(ssi_sxx_phys); link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_NOSNOOP | upper_32_bits(ssi_sxx_phys)); } else { link->source_addr = cpu_to_be32(ssi_sxx_phys); link->source_attr = cpu_to_be32(CCSR_DMA_ATR_NOSNOOP | upper_32_bits(ssi_sxx_phys)); link->dest_addr = cpu_to_be32(temp_addr); link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP | upper_32_bits(temp_addr)); } temp_addr += period_size; } return 0; } /** * fsl_dma_pointer: determine the current position of the DMA transfer * * This function is called by ALSA when ALSA wants to know where in the * stream buffer the hardware currently is. 
* * For playback, the SAR register contains the physical address of the most * recent DMA transfer. For capture, the value is in the DAR register. * * The base address of the buffer is stored in the source_addr field of the * first link descriptor. */ static snd_pcm_uframes_t fsl_dma_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct fsl_dma_private *dma_private = runtime->private_data; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct device *dev = rtd->platform->dev; struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel; dma_addr_t position; snd_pcm_uframes_t frames; /* Obtain the current DMA pointer, but don't read the ESAD bits if we * only have 32-bit DMA addresses. This function is typically called * in interrupt context, so we need to optimize it. */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { position = in_be32(&dma_channel->sar); #ifdef CONFIG_PHYS_64BIT position |= (u64)(in_be32(&dma_channel->satr) & CCSR_DMA_ATR_ESAD_MASK) << 32; #endif } else { position = in_be32(&dma_channel->dar); #ifdef CONFIG_PHYS_64BIT position |= (u64)(in_be32(&dma_channel->datr) & CCSR_DMA_ATR_ESAD_MASK) << 32; #endif } /* * When capture is started, the SSI immediately starts to fill its FIFO. * This means that the DMA controller is not started until the FIFO is * full. However, ALSA calls this function before that happens, when * MR.DAR is still zero. In this case, just return zero to indicate * that nothing has been received yet. */ if (!position) return 0; if ((position < dma_private->dma_buf_phys) || (position > dma_private->dma_buf_end)) { dev_err(dev, "dma pointer is out of range, halting stream\n"); return SNDRV_PCM_POS_XRUN; } frames = bytes_to_frames(runtime, position - dma_private->dma_buf_phys); /* * If the current address is just past the end of the buffer, wrap it * around. 
*/
	/* A pointer exactly at the end of the buffer is reported as 0. */
	if (frames == runtime->buffer_size)
		frames = 0;

	return frames;
}

/**
 * fsl_dma_hw_free: release resources allocated in fsl_dma_hw_params()
 *
 * Release the resources allocated in fsl_dma_hw_params() and de-program the
 * registers.
 *
 * This function can be called multiple times.
 */
static int fsl_dma_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_dma_private *dma_private = runtime->private_data;

	if (dma_private) {
		struct ccsr_dma_channel __iomem *dma_channel;

		dma_channel = dma_private->dma_channel;

		/* Stop the DMA: set the Channel Abort bit, then clear the
		 * whole mode register so the channel is fully disabled.
		 */
		out_be32(&dma_channel->mr, CCSR_DMA_MR_CA);
		out_be32(&dma_channel->mr, 0);

		/* Reset all the other registers.  Writing all-ones to the
		 * status register clears its write-one-to-clear bits.
		 */
		out_be32(&dma_channel->sr, -1);
		out_be32(&dma_channel->clndar, 0);
		out_be32(&dma_channel->eclndar, 0);
		out_be32(&dma_channel->satr, 0);
		out_be32(&dma_channel->sar, 0);
		out_be32(&dma_channel->datr, 0);
		out_be32(&dma_channel->dar, 0);
		out_be32(&dma_channel->bcr, 0);
		out_be32(&dma_channel->nlndar, 0);
		out_be32(&dma_channel->enlndar, 0);
	}

	return 0;
}

/**
 * fsl_dma_close: close the stream.
 *
 * Frees the per-substream state allocated in fsl_dma_open() (IRQ handler,
 * link-descriptor/private-data buffer) and releases the DMA channel.
 */
static int fsl_dma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_dma_private *dma_private = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;
	struct dma_object *dma =
		container_of(rtd->platform->driver, struct dma_object, dai);

	if (dma_private) {
		if (dma_private->irq)
			free_irq(dma_private->irq, dma_private);

		/* NOTE(review): ld_buf_phys was obtained from
		 * dma_alloc_coherent() in fsl_dma_open(), not from
		 * dma_map_single(), so this dma_unmap_single() looks
		 * mismatched -- confirm against the DMA-API documentation.
		 */
		if (dma_private->ld_buf_phys) {
			dma_unmap_single(dev, dma_private->ld_buf_phys,
					 sizeof(dma_private->link),
					 DMA_TO_DEVICE);
		}

		/* Deallocate the fsl_dma_private structure */
		dma_free_coherent(dev, sizeof(struct fsl_dma_private),
				  dma_private, dma_private->ld_buf_phys);
		substream->runtime->private_data = NULL;
	}

	/* Mark the DMA channel as free so another substream can claim it. */
	dma->assigned = 0;

	return 0;
}

/*
 * Remove this PCM driver.
*/
/* pcm_free callback: release the playback/capture DMA buffers allocated in
 * fsl_dma_new() and clear the stale buffer descriptors.
 */
static void fsl_dma_free_dma_buffers(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
		substream = pcm->streams[i].substream;
		if (substream) {
			snd_dma_free_pages(&substream->dma_buffer);
			substream->dma_buffer.area = NULL;
			substream->dma_buffer.addr = 0;
		}
	}
}

/**
 * find_ssi_node -- returns the SSI node that points to this DMA channel node
 *
 * Although this DMA driver attempts to operate independently of the other
 * devices, it still needs to determine some information about the SSI device
 * that it's working with.  Unfortunately, the device tree does not contain
 * a pointer from the DMA channel node to the SSI node -- the pointer goes the
 * other way.  So we need to scan the device tree for SSI nodes until we find
 * the one that points to the given DMA channel node.  It's ugly, but at least
 * it's contained in this one function.
 */
static struct device_node *find_ssi_node(struct device_node *dma_channel_np)
{
	struct device_node *ssi_np, *np;

	for_each_compatible_node(ssi_np, NULL, "fsl,mpc8610-ssi") {
		/* Check each DMA phandle to see if it points to us.  We
		 * assume that device_node pointers are a valid comparison.
		 * Note that np is dropped (of_node_put) before the compare:
		 * it is used only for identity, never dereferenced.
		 */
		np = of_parse_phandle(ssi_np, "fsl,playback-dma", 0);
		of_node_put(np);
		if (np == dma_channel_np)
			return ssi_np;

		np = of_parse_phandle(ssi_np, "fsl,capture-dma", 0);
		of_node_put(np);
		if (np == dma_channel_np)
			return ssi_np;
	}

	return NULL;
}

/* ALSA PCM operations implemented by this platform DMA driver. */
static struct snd_pcm_ops fsl_dma_ops = {
	.open		= fsl_dma_open,
	.close		= fsl_dma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= fsl_dma_hw_params,
	.hw_free	= fsl_dma_hw_free,
	.pointer	= fsl_dma_pointer,
};

static int __devinit fsl_soc_dma_probe(struct platform_device *pdev)
{
	struct dma_object *dma;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *ssi_np;
	struct resource res;
	const uint32_t *iprop;
	int ret;

	/* Find the SSI node that points to us.
*/
	ssi_np = find_ssi_node(np);
	if (!ssi_np) {
		dev_err(&pdev->dev, "cannot find parent SSI node\n");
		return -ENODEV;
	}

	/* The SSI's register block gives us the STX0/SRX0 FIFO addresses. */
	ret = of_address_to_resource(ssi_np, 0, &res);
	if (ret) {
		dev_err(&pdev->dev, "could not determine resources for %s\n",
			ssi_np->full_name);
		of_node_put(ssi_np);
		return ret;
	}

	/* NOTE(review): sizing assumes the dma_object declaration reserves at
	 * least one byte for 'path' (so strlen() + sizeof(*dma) covers the
	 * NUL terminator of the strcpy below) -- verify against the struct.
	 */
	dma = kzalloc(sizeof(*dma) + strlen(np->full_name), GFP_KERNEL);
	if (!dma) {
		dev_err(&pdev->dev, "could not allocate dma object\n");
		of_node_put(ssi_np);
		return -ENOMEM;
	}

	strcpy(dma->path, np->full_name);
	dma->dai.ops = &fsl_dma_ops;
	dma->dai.pcm_new = fsl_dma_new;
	dma->dai.pcm_free = fsl_dma_free_dma_buffers;

	/* Store the SSI-specific information that we need */
	dma->ssi_stx_phys = res.start + offsetof(struct ccsr_ssi, stx0);
	dma->ssi_srx_phys = res.start + offsetof(struct ccsr_ssi, srx0);

	iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
	if (iprop)
		dma->ssi_fifo_depth = be32_to_cpup(iprop);
	else
                /* Older 8610 DTs didn't have the fifo-depth property */
		dma->ssi_fifo_depth = 8;

	of_node_put(ssi_np);

	ret = snd_soc_register_platform(&pdev->dev, &dma->dai);
	if (ret) {
		dev_err(&pdev->dev, "could not register platform\n");
		kfree(dma);
		return ret;
	}

	/* NOTE(review): neither of_iomap() nor irq_of_parse_and_map() results
	 * are checked here; failures are only detected later at open time --
	 * confirm this is intentional.
	 */
	dma->channel = of_iomap(np, 0);
	dma->irq = irq_of_parse_and_map(np, 0);

	dev_set_drvdata(&pdev->dev, dma);

	return 0;
}

static int __devexit fsl_soc_dma_remove(struct platform_device *pdev)
{
	struct dma_object *dma = dev_get_drvdata(&pdev->dev);

	/* Undo probe in reverse order: unregister, unmap, release IRQ, free. */
	snd_soc_unregister_platform(&pdev->dev);
	iounmap(dma->channel);
	irq_dispose_mapping(dma->irq);
	kfree(dma);

	return 0;
}

static const struct of_device_id fsl_soc_dma_ids[] = {
	{ .compatible = "fsl,ssi-dma-channel", },
	{}
};
MODULE_DEVICE_TABLE(of, fsl_soc_dma_ids);

static struct platform_driver fsl_soc_dma_driver = {
	.driver = {
		.name = "fsl-pcm-audio",
		.owner = THIS_MODULE,
		.of_match_table = fsl_soc_dma_ids,
	},
	.probe = fsl_soc_dma_probe,
	.remove = __devexit_p(fsl_soc_dma_remove),
};

module_platform_driver(fsl_soc_dma_driver);

MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale Elo DMA ASoC PCM Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
Euphoria-OS-Devices/android_kernel_lge_msm8974
drivers/video/console/sticore.c
4946
27082
/* * linux/drivers/video/console/sticore.c - * core code for console driver using HP's STI firmware * * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 2001-2003 Helge Deller <deller@gmx.de> * Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de> * * TODO: * - call STI in virtual mode rather than in real mode * - screen blanking with state_mgmt() in text mode STI ? * - try to make it work on m68k hp workstations ;) * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/font.h> #include <asm/hardware.h> #include <asm/page.h> #include <asm/parisc-device.h> #include <asm/pdc.h> #include <asm/cacheflush.h> #include <asm/grfioctl.h> #include "../sticore.h" #define STI_DRIVERVERSION "Version 0.9a" static struct sti_struct *default_sti __read_mostly; /* number of STI ROMS found and their ptrs to each struct */ static int num_sti_roms __read_mostly; static struct sti_struct *sti_roms[MAX_STI_ROMS] __read_mostly; /* The colour indices used by STI are * 0 - Black * 1 - White * 2 - Red * 3 - Yellow/Brown * 4 - Green * 5 - Cyan * 6 - Blue * 7 - Magenta * * So we have the same colours as VGA (basically one bit each for R, G, B), * but have to translate them, anyway. 
*/ static const u8 col_trans[8] = { 0, 6, 4, 5, 2, 7, 3, 1 }; #define c_fg(sti, c) col_trans[((c>> 8) & 7)] #define c_bg(sti, c) col_trans[((c>>11) & 7)] #define c_index(sti, c) ((c) & 0xff) static const struct sti_init_flags default_init_flags = { .wait = STI_WAIT, .reset = 1, .text = 1, .nontext = 1, .no_chg_bet = 1, .no_chg_bei = 1, .init_cmap_tx = 1, }; static int sti_init_graph(struct sti_struct *sti) { struct sti_init_inptr_ext inptr_ext = { 0, }; struct sti_init_inptr inptr = { .text_planes = 3, /* # of text planes (max 3 for STI) */ .ext_ptr = STI_PTR(&inptr_ext) }; struct sti_init_outptr outptr = { 0, }; unsigned long flags; int ret; spin_lock_irqsave(&sti->lock, flags); ret = STI_CALL(sti->init_graph, &default_init_flags, &inptr, &outptr, sti->glob_cfg); spin_unlock_irqrestore(&sti->lock, flags); if (ret < 0) { printk(KERN_ERR "STI init_graph failed (ret %d, errno %d)\n",ret,outptr.errno); return -1; } sti->text_planes = outptr.text_planes; return 0; } static const struct sti_conf_flags default_conf_flags = { .wait = STI_WAIT, }; static void sti_inq_conf(struct sti_struct *sti) { struct sti_conf_inptr inptr = { 0, }; unsigned long flags; s32 ret; sti->outptr.ext_ptr = STI_PTR(&sti->outptr_ext); do { spin_lock_irqsave(&sti->lock, flags); ret = STI_CALL(sti->inq_conf, &default_conf_flags, &inptr, &sti->outptr, sti->glob_cfg); spin_unlock_irqrestore(&sti->lock, flags); } while (ret == 1); } static const struct sti_font_flags default_font_flags = { .wait = STI_WAIT, .non_text = 0, }; void sti_putc(struct sti_struct *sti, int c, int y, int x) { struct sti_font_inptr inptr = { .font_start_addr= STI_PTR(sti->font->raw), .index = c_index(sti, c), .fg_color = c_fg(sti, c), .bg_color = c_bg(sti, c), .dest_x = x * sti->font_width, .dest_y = y * sti->font_height, }; struct sti_font_outptr outptr = { 0, }; s32 ret; unsigned long flags; do { spin_lock_irqsave(&sti->lock, flags); ret = STI_CALL(sti->font_unpmv, &default_font_flags, &inptr, &outptr, sti->glob_cfg); 
spin_unlock_irqrestore(&sti->lock, flags); } while (ret == 1); } static const struct sti_blkmv_flags clear_blkmv_flags = { .wait = STI_WAIT, .color = 1, .clear = 1, }; void sti_set(struct sti_struct *sti, int src_y, int src_x, int height, int width, u8 color) { struct sti_blkmv_inptr inptr = { .fg_color = color, .bg_color = color, .src_x = src_x, .src_y = src_y, .dest_x = src_x, .dest_y = src_y, .width = width, .height = height, }; struct sti_blkmv_outptr outptr = { 0, }; s32 ret; unsigned long flags; do { spin_lock_irqsave(&sti->lock, flags); ret = STI_CALL(sti->block_move, &clear_blkmv_flags, &inptr, &outptr, sti->glob_cfg); spin_unlock_irqrestore(&sti->lock, flags); } while (ret == 1); } void sti_clear(struct sti_struct *sti, int src_y, int src_x, int height, int width, int c) { struct sti_blkmv_inptr inptr = { .fg_color = c_fg(sti, c), .bg_color = c_bg(sti, c), .src_x = src_x * sti->font_width, .src_y = src_y * sti->font_height, .dest_x = src_x * sti->font_width, .dest_y = src_y * sti->font_height, .width = width * sti->font_width, .height = height* sti->font_height, }; struct sti_blkmv_outptr outptr = { 0, }; s32 ret; unsigned long flags; do { spin_lock_irqsave(&sti->lock, flags); ret = STI_CALL(sti->block_move, &clear_blkmv_flags, &inptr, &outptr, sti->glob_cfg); spin_unlock_irqrestore(&sti->lock, flags); } while (ret == 1); } static const struct sti_blkmv_flags default_blkmv_flags = { .wait = STI_WAIT, }; void sti_bmove(struct sti_struct *sti, int src_y, int src_x, int dst_y, int dst_x, int height, int width) { struct sti_blkmv_inptr inptr = { .src_x = src_x * sti->font_width, .src_y = src_y * sti->font_height, .dest_x = dst_x * sti->font_width, .dest_y = dst_y * sti->font_height, .width = width * sti->font_width, .height = height* sti->font_height, }; struct sti_blkmv_outptr outptr = { 0, }; s32 ret; unsigned long flags; do { spin_lock_irqsave(&sti->lock, flags); ret = STI_CALL(sti->block_move, &default_blkmv_flags, &inptr, &outptr, sti->glob_cfg); 
spin_unlock_irqrestore(&sti->lock, flags); } while (ret == 1); } static void sti_flush(unsigned long start, unsigned long end) { flush_icache_range(start, end); } static void __devinit sti_rom_copy(unsigned long base, unsigned long count, void *dest) { unsigned long dest_start = (unsigned long) dest; /* this still needs to be revisited (see arch/parisc/mm/init.c:246) ! */ while (count >= 4) { count -= 4; *(u32 *)dest = gsc_readl(base); base += 4; dest += 4; } while (count) { count--; *(u8 *)dest = gsc_readb(base); base++; dest++; } sti_flush(dest_start, (unsigned long)dest); } static char default_sti_path[21] __read_mostly; #ifndef MODULE static int __devinit sti_setup(char *str) { if (str) strlcpy (default_sti_path, str, sizeof (default_sti_path)); return 1; } /* Assuming the machine has multiple STI consoles (=graphic cards) which * all get detected by sticon, the user may define with the linux kernel * parameter sti=<x> which of them will be the initial boot-console. * <x> is a number between 0 and MAX_STI_ROMS, with 0 as the default * STI screen. */ __setup("sti=", sti_setup); #endif static char __devinitdata *font_name[MAX_STI_ROMS] = { "VGA8x16", }; static int __devinitdata font_index[MAX_STI_ROMS], font_height[MAX_STI_ROMS], font_width[MAX_STI_ROMS]; #ifndef MODULE static int __devinit sti_font_setup(char *str) { char *x; int i = 0; /* we accept sti_font=VGA8x16, sti_font=10x20, sti_font=10*20 * or sti_font=7 style command lines. */ while (i<MAX_STI_ROMS && str && *str) { if (*str>='0' && *str<='9') { if ((x = strchr(str, 'x')) || (x = strchr(str, '*'))) { font_height[i] = simple_strtoul(str, NULL, 0); font_width[i] = simple_strtoul(x+1, NULL, 0); } else { font_index[i] = simple_strtoul(str, NULL, 0); } } else { font_name[i] = str; /* fb font name */ } if ((x = strchr(str, ','))) *x++ = 0; str = x; i++; } return 1; } /* The optional linux kernel parameter "sti_font" defines which font * should be used by the sticon driver to draw characters to the screen. 
* Possible values are: * - sti_font=<fb_fontname>: * <fb_fontname> is the name of one of the linux-kernel built-in * framebuffer font names (e.g. VGA8x16, SUN22x18). * This is only available if the fonts have been statically compiled * in with e.g. the CONFIG_FONT_8x16 or CONFIG_FONT_SUN12x22 options. * - sti_font=<number> * most STI ROMs have built-in HP specific fonts, which can be selected * by giving the desired number to the sticon driver. * NOTE: This number is machine and STI ROM dependend. * - sti_font=<height>x<width> (e.g. sti_font=16x8) * <height> and <width> gives hints to the height and width of the * font which the user wants. The sticon driver will try to use * a font with this height and width, but if no suitable font is * found, sticon will use the default 8x8 font. */ __setup("sti_font=", sti_font_setup); #endif static void __devinit sti_dump_globcfg(struct sti_glob_cfg *glob_cfg, unsigned int sti_mem_request) { struct sti_glob_cfg_ext *cfg; DPRINTK((KERN_INFO "%d text planes\n" "%4d x %4d screen resolution\n" "%4d x %4d offscreen\n" "%4d x %4d layout\n" "regions at %08x %08x %08x %08x\n" "regions at %08x %08x %08x %08x\n" "reent_lvl %d\n" "save_addr %08x\n", glob_cfg->text_planes, glob_cfg->onscreen_x, glob_cfg->onscreen_y, glob_cfg->offscreen_x, glob_cfg->offscreen_y, glob_cfg->total_x, glob_cfg->total_y, glob_cfg->region_ptrs[0], glob_cfg->region_ptrs[1], glob_cfg->region_ptrs[2], glob_cfg->region_ptrs[3], glob_cfg->region_ptrs[4], glob_cfg->region_ptrs[5], glob_cfg->region_ptrs[6], glob_cfg->region_ptrs[7], glob_cfg->reent_lvl, glob_cfg->save_addr)); /* dump extended cfg */ cfg = PTR_STI((unsigned long)glob_cfg->ext_ptr); DPRINTK(( KERN_INFO "monitor %d\n" "in friendly mode: %d\n" "power consumption %d watts\n" "freq ref %d\n" "sti_mem_addr %08x (size=%d bytes)\n", cfg->curr_mon, cfg->friendly_boot, cfg->power, cfg->freq_ref, cfg->sti_mem_addr, sti_mem_request)); } static void __devinit sti_dump_outptr(struct sti_struct *sti) { 
DPRINTK((KERN_INFO "%d bits per pixel\n" "%d used bits\n" "%d planes\n" "attributes %08x\n", sti->outptr.bits_per_pixel, sti->outptr.bits_used, sti->outptr.planes, sti->outptr.attributes)); } static int __devinit sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address, unsigned long hpa) { struct sti_glob_cfg *glob_cfg; struct sti_glob_cfg_ext *glob_cfg_ext; void *save_addr; void *sti_mem_addr; const int save_addr_size = 1024; /* XXX */ int i; if (!sti->sti_mem_request) sti->sti_mem_request = 256; /* STI default */ glob_cfg = kzalloc(sizeof(*sti->glob_cfg), GFP_KERNEL); glob_cfg_ext = kzalloc(sizeof(*glob_cfg_ext), GFP_KERNEL); save_addr = kzalloc(save_addr_size, GFP_KERNEL); sti_mem_addr = kzalloc(sti->sti_mem_request, GFP_KERNEL); if (!(glob_cfg && glob_cfg_ext && save_addr && sti_mem_addr)) { kfree(glob_cfg); kfree(glob_cfg_ext); kfree(save_addr); kfree(sti_mem_addr); return -ENOMEM; } glob_cfg->ext_ptr = STI_PTR(glob_cfg_ext); glob_cfg->save_addr = STI_PTR(save_addr); for (i=0; i<8; i++) { unsigned long newhpa, len; if (sti->pd) { unsigned char offs = sti->rm_entry[i]; if (offs == 0) continue; if (offs != PCI_ROM_ADDRESS && (offs < PCI_BASE_ADDRESS_0 || offs > PCI_BASE_ADDRESS_5)) { printk (KERN_WARNING "STI pci region mapping for region %d (%02x) can't be mapped\n", i,sti->rm_entry[i]); continue; } newhpa = pci_resource_start (sti->pd, (offs - PCI_BASE_ADDRESS_0) / 4); } else newhpa = (i == 0) ? rom_address : hpa; sti->regions_phys[i] = REGION_OFFSET_TO_PHYS(sti->regions[i], newhpa); len = sti->regions[i].region_desc.length * 4096; if (len) glob_cfg->region_ptrs[i] = sti->regions_phys[i]; DPRINTK(("region #%d: phys %08lx, region_ptr %08x, len=%lukB, " "btlb=%d, sysonly=%d, cache=%d, last=%d\n", i, sti->regions_phys[i], glob_cfg->region_ptrs[i], len/1024, sti->regions[i].region_desc.btlb, sti->regions[i].region_desc.sys_only, sti->regions[i].region_desc.cache, sti->regions[i].region_desc.last)); /* last entry reached ? 
*/ if (sti->regions[i].region_desc.last) break; } if (++i<8 && sti->regions[i].region) printk(KERN_WARNING "%s: *future ptr (0x%8x) not yet supported !\n", __FILE__, sti->regions[i].region); glob_cfg_ext->sti_mem_addr = STI_PTR(sti_mem_addr); sti->glob_cfg = glob_cfg; return 0; } #ifdef CONFIG_FB static struct sti_cooked_font __devinit *sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) { const struct font_desc *fbfont; unsigned int size, bpc; void *dest; struct sti_rom_font *nf; struct sti_cooked_font *cooked_font; if (!fbfont_name || !strlen(fbfont_name)) return NULL; fbfont = find_font(fbfont_name); if (!fbfont) fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0); if (!fbfont) return NULL; DPRINTK((KERN_DEBUG "selected %dx%d fb-font %s\n", fbfont->width, fbfont->height, fbfont->name)); bpc = ((fbfont->width+7)/8) * fbfont->height; size = bpc * 256; size += sizeof(struct sti_rom_font); nf = kzalloc(size, GFP_KERNEL); if (!nf) return NULL; nf->first_char = 0; nf->last_char = 255; nf->width = fbfont->width; nf->height = fbfont->height; nf->font_type = STI_FONT_HPROMAN8; nf->bytes_per_char = bpc; nf->next_font = 0; nf->underline_height = 1; nf->underline_pos = fbfont->height - nf->underline_height; dest = nf; dest += sizeof(struct sti_rom_font); memcpy(dest, fbfont->data, bpc*256); cooked_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL); if (!cooked_font) { kfree(nf); return NULL; } cooked_font->raw = nf; cooked_font->next_font = NULL; cooked_rom->font_start = cooked_font; return cooked_font; } #else static struct sti_cooked_font __devinit *sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) { return NULL; } #endif static struct sti_cooked_font __devinit *sti_select_font(struct sti_cooked_rom *rom, int (*search_font_fnc)(struct sti_cooked_rom *, int, int)) { struct sti_cooked_font *font; int i; int index = num_sti_roms; /* check for framebuffer-font first */ if ((font = sti_select_fbfont(rom, font_name[index]))) 
return font; if (font_width[index] && font_height[index]) font_index[index] = search_font_fnc(rom, font_height[index], font_width[index]); for (font = rom->font_start, i = font_index[index]; font && (i > 0); font = font->next_font, i--); if (font) return font; else return rom->font_start; } static void __devinit sti_dump_rom(struct sti_rom *rom) { printk(KERN_INFO " id %04x-%04x, conforms to spec rev. %d.%02x\n", rom->graphics_id[0], rom->graphics_id[1], rom->revno[0] >> 4, rom->revno[0] & 0x0f); DPRINTK((" supports %d monitors\n", rom->num_mons)); DPRINTK((" font start %08x\n", rom->font_start)); DPRINTK((" region list %08x\n", rom->region_list)); DPRINTK((" init_graph %08x\n", rom->init_graph)); DPRINTK((" bus support %02x\n", rom->bus_support)); DPRINTK((" ext bus support %02x\n", rom->ext_bus_support)); DPRINTK((" alternate code type %d\n", rom->alt_code_type)); } static int __devinit sti_cook_fonts(struct sti_cooked_rom *cooked_rom, struct sti_rom *raw_rom) { struct sti_rom_font *raw_font, *font_start; struct sti_cooked_font *cooked_font; cooked_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL); if (!cooked_font) return 0; cooked_rom->font_start = cooked_font; raw_font = ((void *)raw_rom) + (raw_rom->font_start); font_start = raw_font; cooked_font->raw = raw_font; while (raw_font->next_font) { raw_font = ((void *)font_start) + (raw_font->next_font); cooked_font->next_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL); if (!cooked_font->next_font) return 1; cooked_font = cooked_font->next_font; cooked_font->raw = raw_font; } cooked_font->next_font = NULL; return 1; } static int __devinit sti_search_font(struct sti_cooked_rom *rom, int height, int width) { struct sti_cooked_font *font; int i = 0; for (font = rom->font_start; font; font = font->next_font, i++) { if ((font->raw->width == width) && (font->raw->height == height)) return i; } return 0; } #define BMODE_RELOCATE(offset) offset = (offset) / 4; #define BMODE_LAST_ADDR_OFFS 0x50 static void * __devinit 
sti_bmode_font_raw(struct sti_cooked_font *f) { unsigned char *n, *p, *q; int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font); n = kzalloc (4*size, GFP_KERNEL); if (!n) return NULL; p = n + 3; q = (unsigned char *)f->raw; while (size--) { *p = *q++; p+=4; } return n + 3; } static void __devinit sti_bmode_rom_copy(unsigned long base, unsigned long count, void *dest) { unsigned long dest_start = (unsigned long) dest; while (count) { count--; *(u8 *)dest = gsc_readl(base); base += 4; dest++; } sti_flush(dest_start, (unsigned long)dest); } static struct sti_rom * __devinit sti_get_bmode_rom (unsigned long address) { struct sti_rom *raw; u32 size; struct sti_rom_font *raw_font, *font_start; sti_bmode_rom_copy(address + BMODE_LAST_ADDR_OFFS, sizeof(size), &size); size = (size+3) / 4; raw = kmalloc(size, GFP_KERNEL); if (raw) { sti_bmode_rom_copy(address, size, raw); memmove (&raw->res004, &raw->type[0], 0x3c); raw->type[3] = raw->res004; BMODE_RELOCATE (raw->region_list); BMODE_RELOCATE (raw->font_start); BMODE_RELOCATE (raw->init_graph); BMODE_RELOCATE (raw->state_mgmt); BMODE_RELOCATE (raw->font_unpmv); BMODE_RELOCATE (raw->block_move); BMODE_RELOCATE (raw->inq_conf); raw_font = ((void *)raw) + raw->font_start; font_start = raw_font; while (raw_font->next_font) { BMODE_RELOCATE (raw_font->next_font); raw_font = ((void *)font_start) + raw_font->next_font; } } return raw; } static struct sti_rom __devinit *sti_get_wmode_rom(unsigned long address) { struct sti_rom *raw; unsigned long size; /* read the ROM size directly from the struct in ROM */ size = gsc_readl(address + offsetof(struct sti_rom,last_addr)); raw = kmalloc(size, GFP_KERNEL); if (raw) sti_rom_copy(address, size, raw); return raw; } static int __devinit sti_read_rom(int wordmode, struct sti_struct *sti, unsigned long address) { struct sti_cooked_rom *cooked; struct sti_rom *raw = NULL; unsigned long revno; cooked = kmalloc(sizeof *cooked, GFP_KERNEL); if (!cooked) goto out_err; if (wordmode) raw 
= sti_get_wmode_rom (address); else raw = sti_get_bmode_rom (address); if (!raw) goto out_err; if (!sti_cook_fonts(cooked, raw)) { printk(KERN_ERR "No font found for STI at %08lx\n", address); goto out_err; } if (raw->region_list) memcpy(sti->regions, ((void *)raw)+raw->region_list, sizeof(sti->regions)); address = (unsigned long) STI_PTR(raw); sti->font_unpmv = address + (raw->font_unpmv & 0x03ffffff); sti->block_move = address + (raw->block_move & 0x03ffffff); sti->init_graph = address + (raw->init_graph & 0x03ffffff); sti->inq_conf = address + (raw->inq_conf & 0x03ffffff); sti->rom = cooked; sti->rom->raw = raw; sti->font = sti_select_font(sti->rom, sti_search_font); sti->font_width = sti->font->raw->width; sti->font_height = sti->font->raw->height; if (!wordmode) sti->font->raw = sti_bmode_font_raw(sti->font); sti->sti_mem_request = raw->sti_mem_req; sti->graphics_id[0] = raw->graphics_id[0]; sti->graphics_id[1] = raw->graphics_id[1]; sti_dump_rom(raw); /* check if the ROM routines in this card are compatible */ if (wordmode || sti->graphics_id[1] != 0x09A02587) goto ok; revno = (raw->revno[0] << 8) | raw->revno[1]; switch (sti->graphics_id[0]) { case S9000_ID_HCRX: /* HyperA or HyperB ? 
*/ if (revno == 0x8408 || revno == 0x840b) goto msg_not_supported; break; case CRT_ID_THUNDER: if (revno == 0x8509) goto msg_not_supported; break; case CRT_ID_THUNDER2: if (revno == 0x850c) goto msg_not_supported; } ok: return 1; msg_not_supported: printk(KERN_ERR "Sorry, this GSC/STI card is not yet supported.\n"); printk(KERN_ERR "Please see http://parisc-linux.org/faq/" "graphics-howto.html for more info.\n"); /* fall through */ out_err: kfree(raw); kfree(cooked); return 0; } static struct sti_struct * __devinit sti_try_rom_generic(unsigned long address, unsigned long hpa, struct pci_dev *pd) { struct sti_struct *sti; int ok; u32 sig; if (num_sti_roms >= MAX_STI_ROMS) { printk(KERN_WARNING "maximum number of STI ROMS reached !\n"); return NULL; } sti = kzalloc(sizeof(*sti), GFP_KERNEL); if (!sti) { printk(KERN_ERR "Not enough memory !\n"); return NULL; } spin_lock_init(&sti->lock); test_rom: /* if we can't read the ROM, bail out early. Not being able * to read the hpa is okay, for romless sti */ if (pdc_add_valid(address)) goto out_err; sig = gsc_readl(address); /* check for a PCI ROM structure */ if ((le32_to_cpu(sig)==0xaa55)) { unsigned int i, rm_offset; u32 *rm; i = gsc_readl(address+0x04); if (i != 1) { /* The ROM could have multiple architecture * dependent images (e.g. i386, parisc,...) 
*/ printk(KERN_WARNING "PCI ROM is not a STI ROM type image (0x%8x)\n", i); goto out_err; } sti->pd = pd; i = gsc_readl(address+0x0c); DPRINTK(("PCI ROM size (from header) = %d kB\n", le16_to_cpu(i>>16)*512/1024)); rm_offset = le16_to_cpu(i & 0xffff); if (rm_offset) { /* read 16 bytes from the pci region mapper array */ rm = (u32*) &sti->rm_entry; *rm++ = gsc_readl(address+rm_offset+0x00); *rm++ = gsc_readl(address+rm_offset+0x04); *rm++ = gsc_readl(address+rm_offset+0x08); *rm++ = gsc_readl(address+rm_offset+0x0c); DPRINTK(("PCI region Mapper offset = %08x: ", rm_offset)); for (i=0; i<16; i++) DPRINTK(("%02x ", sti->rm_entry[i])); DPRINTK(("\n")); } address += le32_to_cpu(gsc_readl(address+8)); DPRINTK(("sig %04x, PCI STI ROM at %08lx\n", sig, address)); goto test_rom; } ok = 0; if ((sig & 0xff) == 0x01) { DPRINTK((" byte mode ROM at %08lx, hpa at %08lx\n", address, hpa)); ok = sti_read_rom(0, sti, address); } if ((sig & 0xffff) == 0x0303) { DPRINTK((" word mode ROM at %08lx, hpa at %08lx\n", address, hpa)); ok = sti_read_rom(1, sti, address); } if (!ok) goto out_err; if (sti_init_glob_cfg(sti, address, hpa)) goto out_err; /* not enough memory */ /* disable STI PCI ROM. 
ROM and card RAM overlap and * leaving it enabled would force HPMCs */ if (sti->pd) { unsigned long rom_base; rom_base = pci_resource_start(sti->pd, PCI_ROM_RESOURCE); pci_write_config_dword(sti->pd, PCI_ROM_ADDRESS, rom_base & ~PCI_ROM_ADDRESS_ENABLE); DPRINTK((KERN_DEBUG "STI PCI ROM disabled\n")); } if (sti_init_graph(sti)) goto out_err; sti_inq_conf(sti); sti_dump_globcfg(sti->glob_cfg, sti->sti_mem_request); sti_dump_outptr(sti); printk(KERN_INFO " graphics card name: %s\n", sti->outptr.dev_name ); sti_roms[num_sti_roms] = sti; num_sti_roms++; return sti; out_err: kfree(sti); return NULL; } static void __devinit sticore_check_for_default_sti(struct sti_struct *sti, char *path) { if (strcmp (path, default_sti_path) == 0) default_sti = sti; } /* * on newer systems PDC gives the address of the ROM * in the additional address field addr[1] while on * older Systems the PDC stores it in page0->proc_sti */ static int __devinit sticore_pa_init(struct parisc_device *dev) { char pa_path[21]; struct sti_struct *sti = NULL; int hpa = dev->hpa.start; if (dev->num_addrs && dev->addr[0]) sti = sti_try_rom_generic(dev->addr[0], hpa, NULL); if (!sti) sti = sti_try_rom_generic(hpa, hpa, NULL); if (!sti) sti = sti_try_rom_generic(PAGE0->proc_sti, hpa, NULL); if (!sti) return 1; print_pa_hwpath(dev, pa_path); sticore_check_for_default_sti(sti, pa_path); return 0; } static int __devinit sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent) { #ifdef CONFIG_PCI unsigned long fb_base, rom_base; unsigned int fb_len, rom_len; int err; struct sti_struct *sti; err = pci_enable_device(pd); if (err < 0) { dev_err(&pd->dev, "Cannot enable PCI device\n"); return err; } fb_base = pci_resource_start(pd, 0); fb_len = pci_resource_len(pd, 0); rom_base = pci_resource_start(pd, PCI_ROM_RESOURCE); rom_len = pci_resource_len(pd, PCI_ROM_RESOURCE); if (rom_base) { pci_write_config_dword(pd, PCI_ROM_ADDRESS, rom_base | PCI_ROM_ADDRESS_ENABLE); DPRINTK((KERN_DEBUG "STI PCI ROM enabled 
at 0x%08lx\n", rom_base)); } printk(KERN_INFO "STI PCI graphic ROM found at %08lx (%u kB), fb at %08lx (%u MB)\n", rom_base, rom_len/1024, fb_base, fb_len/1024/1024); DPRINTK((KERN_DEBUG "Trying PCI STI ROM at %08lx, PCI hpa at %08lx\n", rom_base, fb_base)); sti = sti_try_rom_generic(rom_base, fb_base, pd); if (sti) { char pa_path[30]; print_pci_hwpath(pd, pa_path); sticore_check_for_default_sti(sti, pa_path); } if (!sti) { printk(KERN_WARNING "Unable to handle STI device '%s'\n", pci_name(pd)); return -ENODEV; } #endif /* CONFIG_PCI */ return 0; } static void __devexit sticore_pci_remove(struct pci_dev *pd) { BUG(); } static struct pci_device_id sti_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_VISUALIZE_EG) }, { PCI_DEVICE(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_VISUALIZE_FX6) }, { PCI_DEVICE(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_VISUALIZE_FX4) }, { PCI_DEVICE(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_VISUALIZE_FX2) }, { PCI_DEVICE(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_VISUALIZE_FXE) }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, sti_pci_tbl); static struct pci_driver pci_sti_driver = { .name = "sti", .id_table = sti_pci_tbl, .probe = sticore_pci_init, .remove = sticore_pci_remove, }; static struct parisc_device_id sti_pa_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00077 }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00085 }, { 0, } }; static struct parisc_driver pa_sti_driver = { .name = "sti", .id_table = sti_pa_tbl, .probe = sticore_pa_init, }; /* * sti_init_roms() - detects all STI ROMs and stores them in sti_roms[] */ static int sticore_initialized __read_mostly; static void __devinit sti_init_roms(void) { if (sticore_initialized) return; sticore_initialized = 1; printk(KERN_INFO "STI GSC/PCI core graphics driver " STI_DRIVERVERSION "\n"); /* Register drivers for native & PCI cards */ register_parisc_driver(&pa_sti_driver); WARN_ON(pci_register_driver(&pci_sti_driver)); /* if we didn't find the given default sti, take the 
first one */ if (!default_sti) default_sti = sti_roms[0]; } /* * index = 0 gives default sti * index > 0 gives other stis in detection order */ struct sti_struct * sti_get_rom(unsigned int index) { if (!sticore_initialized) sti_init_roms(); if (index == 0) return default_sti; if (index > num_sti_roms) return NULL; return sti_roms[index-1]; } EXPORT_SYMBOL(sti_get_rom); MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer"); MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines"); MODULE_LICENSE("GPL v2");
gpl-2.0
TrustZoneGenericDriver/linux
drivers/ide/ide-cs.c
5202
12339
/*====================================================================== A driver for PCMCIA IDE/ATA disk cards The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/ide.h> #include <linux/major.h> #include <linux/delay.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #define DRV_NAME "ide-cs" /*====================================================================*/ /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ typedef struct ide_info_t { struct pcmcia_device *p_dev; struct ide_host *host; int ndev; } ide_info_t; static void ide_release(struct pcmcia_device *); static int ide_config(struct pcmcia_device *); static void ide_detach(struct pcmcia_device *p_dev); static int ide_probe(struct pcmcia_device *link) { ide_info_t *info; dev_dbg(&link->dev, "ide_attach()\n"); /* Create new ide device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; return ide_config(link); } /* ide_attach */ static void ide_detach(struct pcmcia_device *link) { ide_info_t *info = link->priv; dev_dbg(&link->dev, "ide_detach(0x%p)\n", link); ide_release(link); kfree(info); } /* ide_detach */ static const struct ide_port_ops idecs_port_ops = { .quirkproc = ide_undecoded_slave, }; static const struct ide_port_info idecs_port_info = { .port_ops = &idecs_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .chipset = ide_pci, }; static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct 
pcmcia_device *handle) { struct ide_host *host; ide_hwif_t *hwif; int i, rc; struct ide_hw hw, *hws[] = { &hw }; if (!request_region(io, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", DRV_NAME, io, io + 7); return NULL; } if (!request_region(ctl, 1, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n", DRV_NAME, ctl); release_region(io, 8); return NULL; } memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, io, ctl); hw.irq = irq; hw.dev = &handle->dev; rc = ide_host_add(&idecs_port_info, hws, 1, &host); if (rc) goto out_release; hwif = host->ports[0]; if (hwif->present) return host; /* retry registration in case device is still spinning up */ for (i = 0; i < 10; i++) { msleep(100); ide_port_scan(hwif); if (hwif->present) return host; } return host; out_release: release_region(ctl, 1); release_region(io, 8); return NULL; } static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) { int *is_kme = priv_data; if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH) != IO_DATA_PATH_WIDTH_8) { pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; } pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (pdev->resource[1]->end) { pdev->resource[0]->end = 8; pdev->resource[1]->end = (*is_kme) ? 
2 : 1; } else { if (pdev->resource[0]->end < 16) return -ENODEV; } return pcmcia_request_io(pdev); } static int ide_config(struct pcmcia_device *link) { ide_info_t *info = link->priv; int ret = 0, is_kme = 0; unsigned long io_base, ctl_base; struct ide_host *host; dev_dbg(&link->dev, "ide_config(0x%p)\n", link); is_kme = ((link->manf_id == MANFID_KME) && ((link->card_id == PRODID_KME_KXLC005_A) || (link->card_id == PRODID_KME_KXLC005_B))); if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) { link->config_flags &= ~CONF_AUTO_CHECK_VCC; if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) goto failed; /* No suitable config found */ } io_base = link->resource[0]->start; if (link->resource[1]->end) ctl_base = link->resource[1]->start; else ctl_base = link->resource[0]->start + 0x0e; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; /* disable drive interrupts during IDE probe */ outb(0x02, ctl_base); /* special setup for KXLC005 card */ if (is_kme) outb(0x81, ctl_base+1); host = idecs_register(io_base, ctl_base, link->irq, link); if (host == NULL && resource_size(link->resource[0]) == 0x20) { outb(0x02, ctl_base + 0x10); host = idecs_register(io_base + 0x10, ctl_base + 0x10, link->irq, link); } if (host == NULL) goto failed; info->ndev = 1; info->host = host; dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n", 'a' + host->ports[0]->index * 2, link->vpp / 10, link->vpp % 10); return 0; failed: ide_release(link); return -ENODEV; } /* ide_config */ static void ide_release(struct pcmcia_device *link) { ide_info_t *info = link->priv; struct ide_host *host = info->host; dev_dbg(&link->dev, "ide_release(0x%p)\n", link); if (info->ndev) { ide_hwif_t *hwif = host->ports[0]; unsigned long data_addr, ctl_addr; data_addr = hwif->io_ports.data_addr; ctl_addr = hwif->io_ports.ctl_addr; ide_host_remove(host); info->ndev = 0; release_region(ctl_addr, 1); release_region(data_addr, 8); } pcmcia_disable_device(link); } /* 
ide_release */ static const struct pcmcia_device_id ide_ids[] = { PCMCIA_DEVICE_FUNC_ID(4), PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904), PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */ PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */ PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf), PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591), PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 
0x48e0ab8e), PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c), PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883), PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d), PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", 
"TS4GCF133", 0x709b1bf1, 0x7558f133), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, ide_ids); static struct pcmcia_driver ide_cs_driver = { .owner = THIS_MODULE, .name = "ide-cs", .probe = ide_probe, .remove = ide_detach, .id_table = ide_ids, }; static int __init init_ide_cs(void) { return pcmcia_register_driver(&ide_cs_driver); } static void __exit exit_ide_cs(void) { pcmcia_unregister_driver(&ide_cs_driver); } late_initcall(init_ide_cs); module_exit(exit_ide_cs);
gpl-2.0
vdsirotkin/vds_kernel_cm10.1
arch/sh/kernel/cpu/sh2a/setup-sh7201.c
7506
11678
/* * SH7201 setup * * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/sh_timer.h> #include <linux/io.h> enum { UNUSED = 0, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, ADC_ADI, MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD, MTU24_ABCD, MTU25_UVW, MTU2_TCI3V, MTU2_TCI4V, RTC, WDT, IIC30, IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1, DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, RCAN0, RCAN1, SSI0_SSII, SSI1_SSII, TMR0, TMR1, /* interrupt groups */ PINT, }; static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), INTC_IRQ(ADC_ADI, 92), INTC_IRQ(MTU20_ABCD, 108), INTC_IRQ(MTU20_ABCD, 109), INTC_IRQ(MTU20_ABCD, 110), INTC_IRQ(MTU20_ABCD, 111), INTC_IRQ(MTU20_VEF, 112), INTC_IRQ(MTU20_VEF, 113), INTC_IRQ(MTU20_VEF, 114), INTC_IRQ(MTU21_AB, 116), INTC_IRQ(MTU21_AB, 117), INTC_IRQ(MTU21_VU, 120), INTC_IRQ(MTU21_VU, 121), INTC_IRQ(MTU22_AB, 124), INTC_IRQ(MTU22_AB, 125), INTC_IRQ(MTU22_VU, 128), INTC_IRQ(MTU22_VU, 129), INTC_IRQ(MTU23_ABCD, 132), INTC_IRQ(MTU23_ABCD, 133), INTC_IRQ(MTU23_ABCD, 134), INTC_IRQ(MTU23_ABCD, 135), INTC_IRQ(MTU2_TCI3V, 136), INTC_IRQ(MTU24_ABCD, 140), INTC_IRQ(MTU24_ABCD, 141), 
INTC_IRQ(MTU24_ABCD, 142), INTC_IRQ(MTU24_ABCD, 143), INTC_IRQ(MTU2_TCI4V, 144), INTC_IRQ(MTU25_UVW, 148), INTC_IRQ(MTU25_UVW, 149), INTC_IRQ(MTU25_UVW, 150), INTC_IRQ(RTC, 152), INTC_IRQ(RTC, 153), INTC_IRQ(RTC, 154), INTC_IRQ(WDT, 156), INTC_IRQ(IIC30, 157), INTC_IRQ(IIC30, 158), INTC_IRQ(IIC30, 159), INTC_IRQ(IIC30, 160), INTC_IRQ(IIC30, 161), INTC_IRQ(IIC31, 164), INTC_IRQ(IIC31, 165), INTC_IRQ(IIC31, 166), INTC_IRQ(IIC31, 167), INTC_IRQ(IIC31, 168), INTC_IRQ(IIC32, 170), INTC_IRQ(IIC32, 171), INTC_IRQ(IIC32, 172), INTC_IRQ(IIC32, 173), INTC_IRQ(IIC32, 174), INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177), INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179), INTC_IRQ(SCIF0, 180), INTC_IRQ(SCIF0, 181), INTC_IRQ(SCIF0, 182), INTC_IRQ(SCIF0, 183), INTC_IRQ(SCIF1, 184), INTC_IRQ(SCIF1, 185), INTC_IRQ(SCIF1, 186), INTC_IRQ(SCIF1, 187), INTC_IRQ(SCIF2, 188), INTC_IRQ(SCIF2, 189), INTC_IRQ(SCIF2, 190), INTC_IRQ(SCIF2, 191), INTC_IRQ(SCIF3, 192), INTC_IRQ(SCIF3, 193), INTC_IRQ(SCIF3, 194), INTC_IRQ(SCIF3, 195), INTC_IRQ(SCIF4, 196), INTC_IRQ(SCIF4, 197), INTC_IRQ(SCIF4, 198), INTC_IRQ(SCIF4, 199), INTC_IRQ(SCIF5, 200), INTC_IRQ(SCIF5, 201), INTC_IRQ(SCIF5, 202), INTC_IRQ(SCIF5, 203), INTC_IRQ(SCIF6, 204), INTC_IRQ(SCIF6, 205), INTC_IRQ(SCIF6, 206), INTC_IRQ(SCIF6, 207), INTC_IRQ(SCIF7, 208), INTC_IRQ(SCIF7, 209), INTC_IRQ(SCIF7, 210), INTC_IRQ(SCIF7, 211), INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216), INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218), INTC_IRQ(DMAC7_DMINT7, 219), INTC_IRQ(RCAN0, 228), INTC_IRQ(RCAN0, 229), INTC_IRQ(RCAN0, 230), INTC_IRQ(RCAN0, 231), INTC_IRQ(RCAN0, 232), INTC_IRQ(RCAN1, 234), INTC_IRQ(RCAN1, 235), INTC_IRQ(RCAN1, 236), INTC_IRQ(RCAN1, 237), INTC_IRQ(RCAN1, 238), INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245), INTC_IRQ(TMR0, 246), INTC_IRQ(TMR0, 247), INTC_IRQ(TMR0, 248), INTC_IRQ(TMR1, 252), INTC_IRQ(TMR1, 253), INTC_IRQ(TMR1, 254), }; static struct intc_group groups[] __initdata = { 
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffe9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, { 0xfffe941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, { 0xfffe9420, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI, 0 } }, { 0xfffe9800, 0, 16, 4, /* IPR06 */ { 0, MTU20_ABCD, MTU20_VEF, MTU21_AB } }, { 0xfffe9802, 0, 16, 4, /* IPR07 */ { MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD } }, { 0xfffe9804, 0, 16, 4, /* IPR08 */ { MTU2_TCI3V, MTU24_ABCD, MTU2_TCI4V, MTU25_UVW } }, { 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } }, { 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } }, { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1 } }, { 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } }, { 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4 } }, { 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } }, { 0xfffe9812, 0, 16, 4, /* IPR15 */ { 0, RCAN0, RCAN1, 0 } }, { 0xfffe9814, 0, 16, 4, /* IPR16 */ { SSI0_SSII, SSI1_SSII, TMR0, TMR1 } }, }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xfffe9408, 0, 16, /* PINTER */ { 0, 0, 0, 0, 0, 0, 0, 0, PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups, mask_registers, prio_registers, NULL); static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 180, 180, 180, 180 } }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id 
= SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 184, 184, 184, 184 } }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 188, 188, 188, 188 } }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 192, 192, 192, 192 } }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct plat_sci_port scif4_platform_data = { .mapbase = 0xfffea000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 196, 196, 196, 196 } }; static struct platform_device scif4_device = { .name = "sh-sci", .id = 4, .dev = { .platform_data = &scif4_platform_data, }, }; static struct plat_sci_port scif5_platform_data = { .mapbase = 0xfffea800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 200, 200, 200, 200 } }; static struct platform_device scif5_device = { .name = "sh-sci", .id = 5, .dev = { .platform_data = &scif5_platform_data, }, }; static struct plat_sci_port scif6_platform_data = { .mapbase = 0xfffeb000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 204, 204, 204, 204 } }; static struct platform_device scif6_device = { .name = "sh-sci", .id = 6, .dev = { .platform_data = &scif6_platform_data, }, }; static 
struct plat_sci_port scif7_platform_data = { .mapbase = 0xfffeb800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 208, 208, 208, 208 } }; static struct platform_device scif7_device = { .name = "sh-sci", .id = 7, .dev = { .platform_data = &scif7_platform_data, }, }; static struct resource rtc_resources[] = { [0] = { .start = 0xffff0800, .end = 0xffff2000 + 0x58 - 1, .flags = IORESOURCE_IO, }, [1] = { /* Shared Period/Carry/Alarm IRQ */ .start = 152, .flags = IORESOURCE_IRQ, }, }; static struct platform_device rtc_device = { .name = "sh-rtc", .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, }; static struct sh_timer_config mtu2_0_platform_data = { .channel_offset = -0x80, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource mtu2_0_resources[] = { [0] = { .start = 0xfffe4300, .end = 0xfffe4326, .flags = IORESOURCE_MEM, }, [1] = { .start = 108, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mtu2_0_device = { .name = "sh_mtu2", .id = 0, .dev = { .platform_data = &mtu2_0_platform_data, }, .resource = mtu2_0_resources, .num_resources = ARRAY_SIZE(mtu2_0_resources), }; static struct sh_timer_config mtu2_1_platform_data = { .channel_offset = -0x100, .timer_bit = 1, .clockevent_rating = 200, }; static struct resource mtu2_1_resources[] = { [0] = { .start = 0xfffe4380, .end = 0xfffe4390, .flags = IORESOURCE_MEM, }, [1] = { .start = 116, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mtu2_1_device = { .name = "sh_mtu2", .id = 1, .dev = { .platform_data = &mtu2_1_platform_data, }, .resource = mtu2_1_resources, .num_resources = ARRAY_SIZE(mtu2_1_resources), }; static struct sh_timer_config mtu2_2_platform_data = { .channel_offset = 0x80, .timer_bit = 2, .clockevent_rating = 200, }; static struct resource mtu2_2_resources[] = { [0] = { .start = 0xfffe4000, .end = 0xfffe400a, .flags = IORESOURCE_MEM, }, [1] = { .start = 
124, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mtu2_2_device = { .name = "sh_mtu2", .id = 2, .dev = { .platform_data = &mtu2_2_platform_data, }, .resource = mtu2_2_resources, .num_resources = ARRAY_SIZE(mtu2_2_resources), }; static struct platform_device *sh7201_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, &scif7_device, &rtc_device, &mtu2_0_device, &mtu2_1_device, &mtu2_2_device, }; static int __init sh7201_devices_setup(void) { return platform_add_devices(sh7201_devices, ARRAY_SIZE(sh7201_devices)); } arch_initcall(sh7201_devices_setup); void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } static struct platform_device *sh7201_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, &scif7_device, &mtu2_0_device, &mtu2_1_device, &mtu2_2_device, }; #define STBCR3 0xfffe0408 void __init plat_early_device_setup(void) { /* enable MTU2 clock */ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3); early_platform_add_devices(sh7201_early_devices, ARRAY_SIZE(sh7201_early_devices)); }
gpl-2.0
FrancescoCG/CrazySuperKernel-CM14.1-KLTE
arch/sh/kernel/cpu/sh2a/setup-sh7206.c
7506
10228
/* * SH7206 Setup * * Copyright (C) 2006 Yoshinori Sato * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/sh_timer.h> #include <linux/io.h> enum { UNUSED = 0, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, ADC_ADI0, ADC_ADI1, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S, IIC3, CMT0, CMT1, BSC, WDT, MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V, POE2_OEI3, SCIF0, SCIF1, SCIF2, SCIF3, /* interrupt groups */ PINT, }; static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96), INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144), INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152), INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157), INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159), INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161), INTC_IRQ(MTU0_VEF, 162), INTC_IRQ(MTU1_AB, 164), 
INTC_IRQ(MTU1_AB, 165), INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169), INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173), INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177), INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181), INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183), INTC_IRQ(MTU2_TCI3V, 184), INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189), INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191), INTC_IRQ(MTU2_TCI4V, 192), INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197), INTC_IRQ(MTU5, 198), INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201), INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205), INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207), INTC_IRQ(MTU2S_TCI3V, 208), INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213), INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215), INTC_IRQ(MTU2S_TCI4V, 216), INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221), INTC_IRQ(MTU5S, 222), INTC_IRQ(POE2_OEI3, 224), INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229), INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231), INTC_IRQ(IIC3, 232), INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241), INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 243), INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245), INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247), INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249), INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251), INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253), INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255), }; static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7), }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI0, ADC_ADI1 } }, { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } }, { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { CMT0, CMT1, BSC, WDT } }, { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { 
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU } }, { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU2_AB, MTU2_VU, MTU3_ABCD, MTU2_TCI3V } }, { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU4_ABCD, MTU2_TCI4V, MTU5, POE2_12 } }, { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU3S_ABCD, MTU2S_TCI3V, MTU4S_ABCD, MTU2S_TCI4V } }, { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU5S, POE2_OEI3, IIC3, 0 } }, { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF0, SCIF1, SCIF2, SCIF3 } }, }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xfffe0808, 0, 16, /* PINTER */ { 0, 0, 0, 0, 0, 0, 0, 0, PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups, mask_registers, prio_registers, NULL); static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 240, 240, 240, 240 }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 244, 244, 244, 244 }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 248, 248, 248, 248 }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, 
.irqs = { 252, 252, 252, 252 }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct sh_timer_config cmt0_platform_data = { .channel_offset = 0x02, .timer_bit = 0, .clockevent_rating = 125, .clocksource_rating = 0, /* disabled due to code generation issues */ }; static struct resource cmt0_resources[] = { [0] = { .start = 0xfffec002, .end = 0xfffec007, .flags = IORESOURCE_MEM, }, [1] = { .start = 140, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt0_device = { .name = "sh_cmt", .id = 0, .dev = { .platform_data = &cmt0_platform_data, }, .resource = cmt0_resources, .num_resources = ARRAY_SIZE(cmt0_resources), }; static struct sh_timer_config cmt1_platform_data = { .channel_offset = 0x08, .timer_bit = 1, .clockevent_rating = 125, .clocksource_rating = 0, /* disabled due to code generation issues */ }; static struct resource cmt1_resources[] = { [0] = { .start = 0xfffec008, .end = 0xfffec00d, .flags = IORESOURCE_MEM, }, [1] = { .start = 144, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt1_device = { .name = "sh_cmt", .id = 1, .dev = { .platform_data = &cmt1_platform_data, }, .resource = cmt1_resources, .num_resources = ARRAY_SIZE(cmt1_resources), }; static struct sh_timer_config mtu2_0_platform_data = { .channel_offset = -0x80, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource mtu2_0_resources[] = { [0] = { .start = 0xfffe4300, .end = 0xfffe4326, .flags = IORESOURCE_MEM, }, [1] = { .start = 156, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mtu2_0_device = { .name = "sh_mtu2", .id = 0, .dev = { .platform_data = &mtu2_0_platform_data, }, .resource = mtu2_0_resources, .num_resources = ARRAY_SIZE(mtu2_0_resources), }; static struct sh_timer_config mtu2_1_platform_data = { .channel_offset = -0x100, .timer_bit = 1, .clockevent_rating = 200, }; static struct resource mtu2_1_resources[] = { [0] = { .start = 0xfffe4380, 
.end = 0xfffe4390, .flags = IORESOURCE_MEM, }, [1] = { .start = 164, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mtu2_1_device = { .name = "sh_mtu2", .id = 1, .dev = { .platform_data = &mtu2_1_platform_data, }, .resource = mtu2_1_resources, .num_resources = ARRAY_SIZE(mtu2_1_resources), }; static struct sh_timer_config mtu2_2_platform_data = { .channel_offset = 0x80, .timer_bit = 2, .clockevent_rating = 200, }; static struct resource mtu2_2_resources[] = { [0] = { .start = 0xfffe4000, .end = 0xfffe400a, .flags = IORESOURCE_MEM, }, [1] = { .start = 180, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mtu2_2_device = { .name = "sh_mtu2", .id = 2, .dev = { .platform_data = &mtu2_2_platform_data, }, .resource = mtu2_2_resources, .num_resources = ARRAY_SIZE(mtu2_2_resources), }; static struct platform_device *sh7206_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &cmt0_device, &cmt1_device, &mtu2_0_device, &mtu2_1_device, &mtu2_2_device, }; static int __init sh7206_devices_setup(void) { return platform_add_devices(sh7206_devices, ARRAY_SIZE(sh7206_devices)); } arch_initcall(sh7206_devices_setup); void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } static struct platform_device *sh7206_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &cmt0_device, &cmt1_device, &mtu2_0_device, &mtu2_1_device, &mtu2_2_device, }; #define STBCR3 0xfffe0408 #define STBCR4 0xfffe040c void __init plat_early_device_setup(void) { /* enable CMT clock */ __raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4); /* enable MTU2 clock */ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3); early_platform_add_devices(sh7206_early_devices, ARRAY_SIZE(sh7206_early_devices)); }
gpl-2.0
SeKwonLee/pmfs
drivers/i2c/i2c-boardinfo.c
7506
3082
/* * i2c-boardinfo.c - collect pre-declarations of I2C devices * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301 USA. */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/rwsem.h> #include "i2c-core.h" /* These symbols are exported ONLY FOR the i2c core. * No other users will be supported. */ DECLARE_RWSEM(__i2c_board_lock); EXPORT_SYMBOL_GPL(__i2c_board_lock); LIST_HEAD(__i2c_board_list); EXPORT_SYMBOL_GPL(__i2c_board_list); int __i2c_first_dynamic_bus_num; EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); /** * i2c_register_board_info - statically declare I2C devices * @busnum: identifies the bus to which these devices belong * @info: vector of i2c device descriptors * @len: how many descriptors in the vector; may be zero to reserve * the specified bus number. * * Systems using the Linux I2C driver stack can declare tables of board info * while they initialize. This should be done in board-specific init code * near arch_initcall() time, or equivalent, before any I2C adapter driver is * registered. For example, mainboard init code could define several devices, * as could the init code for each daughtercard in a board stack. * * The I2C devices will be created later, after the adapter for the relevant * bus has been registered. 
After that moment, standard driver model tools * are used to bind "new style" I2C drivers to the devices. The bus number * for any device declared using this routine is not available for dynamic * allocation. * * The board info passed can safely be __initdata, but be careful of embedded * pointers (for platform_data, functions, etc) since that won't be copied. */ int __init i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) { int status; down_write(&__i2c_board_lock); /* dynamic bus numbers will be assigned after the last static one */ if (busnum >= __i2c_first_dynamic_bus_num) __i2c_first_dynamic_bus_num = busnum + 1; for (status = 0; len; len--, info++) { struct i2c_devinfo *devinfo; devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); if (!devinfo) { pr_debug("i2c-core: can't register boardinfo!\n"); status = -ENOMEM; break; } devinfo->busnum = busnum; devinfo->board_info = *info; list_add_tail(&devinfo->list, &__i2c_board_list); } up_write(&__i2c_board_lock); return status; }
gpl-2.0
javelinanddart/Canuck
arch/h8300/kernel/sys_h8300.c
7762
1728
/* * linux/arch/h8300/kernel/sys_h8300.c * * This file contains various random system calls that * have a non-standard calling sequence on the H8/300 * platform. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/ipc.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/traps.h> #include <asm/unistd.h> /* sys_cacheflush -- no support. */ asmlinkage int sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) { return -EINVAL; } asmlinkage int sys_getpagesize(void) { return PAGE_SIZE; } #if defined(CONFIG_SYSCALL_PRINT) asmlinkage void syscall_print(void *dummy,...) { struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4); printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n", ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0); } #endif /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. */ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long res __asm__("er0"); register const char *const *_c __asm__("er3") = envp; register const char *const *_b __asm__("er2") = argv; register const char * _a __asm__("er1") = filename; __asm__ __volatile__ ("mov.l %1,er0\n\t" "trapa #0\n\t" : "=r" (res) : "g" (__NR_execve), "g" (_a), "g" (_b), "g" (_c) : "cc", "memory"); return res; }
gpl-2.0
slz/arco-samsung-kernel-msm7x30
arch/x86/um/mem_32.c
8530
1162
/* * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mm.h> #include <asm/page.h> #include <asm/mman.h> static struct vm_area_struct gate_vma; static int __init gate_vma_init(void) { if (!FIXADDR_USER_START) return 0; gate_vma.vm_mm = NULL; gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; gate_vma.vm_page_prot = __P101; return 0; } __initcall(gate_vma_init); struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return FIXADDR_USER_START ? &gate_vma : NULL; } int in_gate_area_no_mm(unsigned long addr) { if (!FIXADDR_USER_START) return 0; if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) return 1; return 0; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma = get_gate_vma(mm); if (!vma) return 0; return (addr >= vma->vm_start) && (addr < vma->vm_end); }
gpl-2.0
asis92/kernel-lp-lg-d802
tools/perf/scripts/perl/Perf-Trace-Util/Context.c
11602
3691
/* * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the * contents of Context.xs. Do not edit this file, edit Context.xs instead. * * ANY CHANGES MADE HERE WILL BE LOST! * */ #line 1 "Context.xs" /* * Context.xs. XS interfaces for perf script. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "EXTERN.h" #include "perl.h" #include "XSUB.h" #include "../../../perf.h" #include "../../../util/trace-event.h" #ifndef PERL_UNUSED_VAR # define PERL_UNUSED_VAR(var) if (0) var = var #endif #line 42 "Context.c" XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_pc) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif if (items != 1) Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; RETVAL = common_pc(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_flags) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif if (items != 1) Perl_croak(aTHX_ "Usage: %s(%s)", 
"Perf::Trace::Context::common_flags", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; RETVAL = common_flags(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_lock_depth) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif if (items != 1) Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; RETVAL = common_lock_depth(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } #ifdef __cplusplus extern "C" #endif XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */ XS(boot_Perf__Trace__Context) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif const char* file = __FILE__; PERL_UNUSED_VAR(cv); /* -W */ PERL_UNUSED_VAR(items); /* -W */ XS_VERSION_BOOTCHECK ; newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$"); newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$"); newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$"); if (PL_unitcheckav) call_list(PL_scopestack_ix, PL_unitcheckav); XSRETURN_YES; }
gpl-2.0
Sricharanti/sricharan
arch/mips/pci/fixup-tb0226.c
13650
2397
/* * fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups. * * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/pci.h> #include <asm/vr41xx/giu.h> #include <asm/vr41xx/tb0226.h> int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = -1; switch (slot) { case 12: vr41xx_set_irq_trigger(GD82559_1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(GD82559_1_PIN, IRQ_LEVEL_LOW); irq = GD82559_1_IRQ; break; case 13: vr41xx_set_irq_trigger(GD82559_2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(GD82559_2_PIN, IRQ_LEVEL_LOW); irq = GD82559_2_IRQ; break; case 14: switch (pin) { case 1: vr41xx_set_irq_trigger(UPD720100_INTA_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(UPD720100_INTA_PIN, IRQ_LEVEL_LOW); irq = UPD720100_INTA_IRQ; break; case 2: vr41xx_set_irq_trigger(UPD720100_INTB_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(UPD720100_INTB_PIN, IRQ_LEVEL_LOW); irq = UPD720100_INTB_IRQ; break; case 3: vr41xx_set_irq_trigger(UPD720100_INTC_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(UPD720100_INTC_PIN, IRQ_LEVEL_LOW); irq = UPD720100_INTC_IRQ; break; default: break; } break; default: break; } return irq; } /* Do 
platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
syhost/android_kernel_xiaomi_armani
drivers/usb/host/whci/pzl.c
13906
10580
/* * Wireless Host Controller (WHC) periodic schedule management. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include <linux/usb.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" static void update_pzl_pointers(struct whc *whc, int period, u64 addr) { switch (period) { case 0: whc_qset_set_link_ptr(&whc->pz_list[0], addr); whc_qset_set_link_ptr(&whc->pz_list[2], addr); whc_qset_set_link_ptr(&whc->pz_list[4], addr); whc_qset_set_link_ptr(&whc->pz_list[6], addr); whc_qset_set_link_ptr(&whc->pz_list[8], addr); whc_qset_set_link_ptr(&whc->pz_list[10], addr); whc_qset_set_link_ptr(&whc->pz_list[12], addr); whc_qset_set_link_ptr(&whc->pz_list[14], addr); break; case 1: whc_qset_set_link_ptr(&whc->pz_list[1], addr); whc_qset_set_link_ptr(&whc->pz_list[5], addr); whc_qset_set_link_ptr(&whc->pz_list[9], addr); whc_qset_set_link_ptr(&whc->pz_list[13], addr); break; case 2: whc_qset_set_link_ptr(&whc->pz_list[3], addr); whc_qset_set_link_ptr(&whc->pz_list[11], addr); break; case 3: whc_qset_set_link_ptr(&whc->pz_list[7], addr); break; case 4: whc_qset_set_link_ptr(&whc->pz_list[15], addr); break; } } /* * Return the 'period' to use for this qset. The minimum interval for * the endpoint is used so whatever urbs are submitted the device is * polled often enough. 
*/ static int qset_get_period(struct whc *whc, struct whc_qset *qset) { uint8_t bInterval = qset->ep->desc.bInterval; if (bInterval < 6) bInterval = 6; if (bInterval > 10) bInterval = 10; return bInterval - 6; } static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset) { int period; period = qset_get_period(whc, qset); qset_clear(whc, qset); list_move(&qset->list_node, &whc->periodic_list[period]); qset->in_sw_list = true; } static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset) { list_move(&qset->list_node, &whc->periodic_removed_list); qset->in_hw_list = false; qset->in_sw_list = false; } /** * pzl_process_qset - process any recently inactivated or halted qTDs * in a qset. * * After inactive qTDs are removed, new qTDs can be added if the * urb queue still contains URBs. * * Returns the schedule updates required. */ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) { enum whc_update update = 0; uint32_t status = 0; while (qset->ntds) { struct whc_qtd *td; int t; t = qset->td_start; td = &qset->qtd[qset->td_start]; status = le32_to_cpu(td->status); /* * Nothing to do with a still active qTD. */ if (status & QTD_STS_ACTIVE) break; if (status & QTD_STS_HALTED) { /* Ug, an error. */ process_halted_qtd(whc, qset, td); /* A halted qTD always triggers an update because the qset was either removed or reactivated. */ update |= WHC_UPDATE_UPDATED; goto done; } /* Mmm, a completed qTD. */ process_inactive_qtd(whc, qset, td); } if (!qset->remove) update |= qset_add_qtds(whc, qset); done: /* * If there are no qTDs in this qset, remove it from the PZL. */ if (qset->remove && qset->ntds == 0) { pzl_qset_remove(whc, qset); update |= WHC_UPDATE_REMOVED; } return update; } /** * pzl_start - start the periodic schedule * @whc: the WHCI host controller * * The PZL must be valid (e.g., all entries in the list should have * the T bit set). 
*/ void pzl_start(struct whc *whc) { le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN); whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED, 1000, "start PZL"); } /** * pzl_stop - stop the periodic schedule * @whc: the WHCI host controller */ void pzl_stop(struct whc *whc) { whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0); whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, WUSBSTS_PERIODIC_SCHED, 0, 1000, "stop PZL"); } /** * pzl_update - request a PZL update and wait for the hardware to be synced * @whc: the WHCI HC * @wusbcmd: WUSBCMD value to start the update. * * If the WUSB HC is inactive (i.e., the PZL is stopped) then the * update must be skipped as the hardware may not respond to update * requests. */ void pzl_update(struct whc *whc, uint32_t wusbcmd) { struct wusbhc *wusbhc = &whc->wusbhc; long t; mutex_lock(&wusbhc->mutex); if (wusbhc->active) { whc_write_wusbcmd(whc, wusbcmd, wusbcmd); t = wait_event_timeout( whc->periodic_list_wq, (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0, msecs_to_jiffies(1000)); if (t == 0) whc_hw_error(whc, "PZL update timeout"); } mutex_unlock(&wusbhc->mutex); } static void update_pzl_hw_view(struct whc *whc) { struct whc_qset *qset, *t; int period; u64 tmp_qh = 0; for (period = 0; period < 5; period++) { list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { whc_qset_set_link_ptr(&qset->qh.link, tmp_qh); tmp_qh = qset->qset_dma; qset->in_hw_list = true; } update_pzl_pointers(whc, period, tmp_qh); } } /** * scan_periodic_work - scan the PZL for qsets to process. * * Process each qset in the PZL in turn and then signal the WHC that * the PZL has been updated. * * Then start, stop or update the periodic schedule as required. 
*/ void scan_periodic_work(struct work_struct *work) { struct whc *whc = container_of(work, struct whc, periodic_work); struct whc_qset *qset, *t; enum whc_update update = 0; int period; spin_lock_irq(&whc->lock); for (period = 4; period >= 0; period--) { list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { if (!qset->in_hw_list) update |= WHC_UPDATE_ADDED; update |= pzl_process_qset(whc, qset); } } if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) update_pzl_hw_view(whc); spin_unlock_irq(&whc->lock); if (update) { uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB; if (update & WHC_UPDATE_REMOVED) wusbcmd |= WUSBCMD_PERIODIC_QSET_RM; pzl_update(whc, wusbcmd); } /* * Now that the PZL is updated, complete the removal of any * removed qsets. * * If the qset was to be reset, do so and reinsert it into the * PZL if it has pending transfers. */ spin_lock_irq(&whc->lock); list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) { qset_remove_complete(whc, qset); if (qset->reset) { qset_reset(whc, qset); if (!list_empty(&qset->stds)) { qset_insert_in_sw_list(whc, qset); queue_work(whc->workqueue, &whc->periodic_work); } } } spin_unlock_irq(&whc->lock); } /** * pzl_urb_enqueue - queue an URB onto the periodic list (PZL) * @whc: the WHCI host controller * @urb: the URB to enqueue * @mem_flags: flags for any memory allocations * * The qset for the endpoint is obtained and the urb queued on to it. * * Work is scheduled to update the hardware's view of the PZL. 
*/ int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) { struct whc_qset *qset; int err; unsigned long flags; spin_lock_irqsave(&whc->lock, flags); err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); if (err < 0) { spin_unlock_irqrestore(&whc->lock, flags); return err; } qset = get_qset(whc, urb, GFP_ATOMIC); if (qset == NULL) err = -ENOMEM; else err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); if (!err) { if (!qset->in_sw_list && !qset->remove) qset_insert_in_sw_list(whc, qset); } else usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); spin_unlock_irqrestore(&whc->lock, flags); if (!err) queue_work(whc->workqueue, &whc->periodic_work); return err; } /** * pzl_urb_dequeue - remove an URB (qset) from the periodic list * @whc: the WHCI host controller * @urb: the URB to dequeue * @status: the current status of the URB * * URBs that do yet have qTDs can simply be removed from the software * queue, otherwise the qset must be removed so the qTDs can be safely * removed. 
*/ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) { struct whc_urb *wurb = urb->hcpriv; struct whc_qset *qset = wurb->qset; struct whc_std *std, *t; bool has_qtd = false; int ret; unsigned long flags; spin_lock_irqsave(&whc->lock, flags); ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); if (ret < 0) goto out; list_for_each_entry_safe(std, t, &qset->stds, list_node) { if (std->urb == urb) { if (std->qtd) has_qtd = true; qset_free_std(whc, std); } else std->qtd = NULL; /* so this std is re-added when the qset is */ } if (has_qtd) { pzl_qset_remove(whc, qset); update_pzl_hw_view(whc); wurb->status = status; wurb->is_async = false; queue_work(whc->workqueue, &wurb->dequeue_work); } else qset_remove_urb(whc, qset, urb, status); out: spin_unlock_irqrestore(&whc->lock, flags); return ret; } /** * pzl_qset_delete - delete a qset from the PZL */ void pzl_qset_delete(struct whc *whc, struct whc_qset *qset) { qset->remove = 1; queue_work(whc->workqueue, &whc->periodic_work); qset_delete(whc, qset); } /** * pzl_init - initialize the periodic zone list * @whc: the WHCI host controller */ int pzl_init(struct whc *whc) { int i; whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16, &whc->pz_list_dma, GFP_KERNEL); if (whc->pz_list == NULL) return -ENOMEM; /* Set T bit on all elements in PZL. */ for (i = 0; i < 16; i++) whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); return 0; } /** * pzl_clean_up - free PZL resources * @whc: the WHCI host controller * * The PZL is stopped and empty. */ void pzl_clean_up(struct whc *whc) { if (whc->pz_list) dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list, whc->pz_list_dma); }
gpl-2.0
p2pjack/Virtuous-Beastmode
net/llc/llc_s_ac.c
14930
6006
/* * llc_s_ac.c - actions performed during sap state transition. * * Description : * Functions in this module are implementation of sap component actions. * Details of actions can be found in IEEE-802.2 standard document. * All functions have one sap and one event as input argument. All of * them return 0 On success and 1 otherwise. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/netdevice.h> #include <net/llc.h> #include <net/llc_pdu.h> #include <net/llc_s_ac.h> #include <net/llc_s_ev.h> #include <net/llc_sap.h> /** * llc_sap_action_unit_data_ind - forward UI PDU to network layer * @sap: SAP * @skb: the event to forward * * Received a UI PDU from MAC layer; forward to network layer as a * UNITDATA INDICATION; verify our event is the kind we expect */ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) { llc_sap_rtn_pdu(sap, skb); return 0; } /** * llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer * @sap: SAP * @skb: the event to send * * Sends a UI PDU to the MAC layer in response to a UNITDATA REQUEST * primitive from the network layer. Verifies event is a primitive type of * event. Verify the primitive is a UNITDATA REQUEST. 
*/ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } /** * llc_sap_action_send_xid_c - send XID PDU as response to XID REQ * @sap: SAP * @skb: the event to send * * Send a XID command PDU to MAC layer in response to a XID REQUEST * primitive from the network layer. Verify event is a primitive type * event. Verify the primitive is a XID REQUEST. */ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } /** * llc_sap_action_send_xid_r - send XID PDU resp to MAC for received XID * @sap: SAP * @skb: the event to send * * Send XID response PDU to MAC in response to an earlier received XID * command PDU. 
Verify event is a PDU type event */ int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb) { u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap; int rc = 1; struct sk_buff *nskb; llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_da(skb, mac_sa); llc_pdu_decode_ssap(skb, &dsap); nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, sizeof(struct llc_xid_info)); if (!nskb) goto out; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, LLC_PDU_RSP); llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); if (likely(!rc)) rc = dev_queue_xmit(nskb); out: return rc; } /** * llc_sap_action_send_test_c - send TEST PDU to MAC in resp to TEST REQ * @sap: SAP * @skb: the event to send * * Send a TEST command PDU to the MAC layer in response to a TEST REQUEST * primitive from the network layer. Verify event is a primitive type * event; verify the primitive is a TEST REQUEST. */ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_test_cmd(skb); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) { u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap; struct sk_buff *nskb; int rc = 1; u32 data_size; llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_da(skb, mac_sa); llc_pdu_decode_ssap(skb, &dsap); /* The test request command is type U (llc_len = 3) */ data_size = ntohs(eth_hdr(skb)->h_proto) - 3; nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); if (!nskb) goto out; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, LLC_PDU_RSP); llc_pdu_init_as_test_rsp(nskb, skb); rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); if (likely(!rc)) rc = dev_queue_xmit(nskb); out: return rc; } /** * 
llc_sap_action_report_status - report data link status to layer mgmt * @sap: SAP * @skb: the event to send * * Report data link status to layer management. Verify our event is the * kind we expect. */ int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb) { return 0; } /** * llc_sap_action_xid_ind - send XID PDU resp to net layer via XID IND * @sap: SAP * @skb: the event to send * * Send a XID response PDU to the network layer via a XID INDICATION * primitive. */ int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb) { llc_sap_rtn_pdu(sap, skb); return 0; } /** * llc_sap_action_test_ind - send TEST PDU to net layer via TEST IND * @sap: SAP * @skb: the event to send * * Send a TEST response PDU to the network layer via a TEST INDICATION * primitive. Verify our event is a PDU type event. */ int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb) { llc_sap_rtn_pdu(sap, skb); return 0; }
gpl-2.0
artcotto/CharizardX_kernel_hammerhead
arch/arm/mach-msm/board-8930-storage.c
339
9517
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/bootmem.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/mmc.h> #include <mach/msm_bus_board.h> #include <mach/board.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include "devices.h" #include "board-8930.h" #include "board-storage-common-a.h" /* MSM8960 has 5 SDCC controllers */ enum sdcc_controllers { SDCC1, SDCC2, SDCC3, SDCC4, SDCC5, MAX_SDCC_CONTROLLER }; /* All SDCC controllers require VDD/VCC voltage */ static struct msm_mmc_reg_data mmc_vdd_reg_data[MAX_SDCC_CONTROLLER] = { /* SDCC1 : eMMC card connected */ [SDCC1] = { .name = "sdc_vdd", .high_vol_level = 2950000, .low_vol_level = 2950000, .always_on = 1, .lpm_sup = 1, .lpm_uA = 9000, .hpm_uA = 200000, /* 200mA */ }, /* SDCC3 : External card slot connected */ [SDCC3] = { .name = "sdc_vdd", .high_vol_level = 2950000, .low_vol_level = 2950000, /* * Normally this is not an always ON regulator. On this * platform, unfortunately the sd detect line is connected * to this via esd circuit and so turn this off/on while card * is not present causes the sd detect line to toggle * continuously. This is expected to be fixed in the newer * hardware revisions - maybe once that is done, this can be * reverted. 
*/ .lpm_sup = 1, .hpm_uA = 800000, /* 800mA */ .lpm_uA = 9000, }, }; /* All SDCC controllers may require voting for VDD PAD voltage */ static struct msm_mmc_reg_data mmc_vdd_io_reg_data[MAX_SDCC_CONTROLLER] = { /* SDCC1 : eMMC card connected */ [SDCC1] = { .name = "sdc_vdd_io", .always_on = 1, .high_vol_level = 1800000, .low_vol_level = 1800000, .hpm_uA = 200000, /* 200mA */ }, /* SDCC3 : External card slot connected */ [SDCC3] = { .name = "sdc_vdd_io", .high_vol_level = 2950000, .low_vol_level = 1850000, .always_on = 1, .lpm_sup = 1, /* Max. Active current required is 16 mA */ .hpm_uA = 16000, /* * Sleep current required is ~300 uA. But min. vote can be * in terms of mA (min. 1 mA). So let's vote for 2 mA * during sleep. */ .lpm_uA = 2000, } }; static struct msm_mmc_slot_reg_data mmc_slot_vreg_data[MAX_SDCC_CONTROLLER] = { /* SDCC1 : eMMC card connected */ [SDCC1] = { .vdd_data = &mmc_vdd_reg_data[SDCC1], .vdd_io_data = &mmc_vdd_io_reg_data[SDCC1], }, /* SDCC3 : External card slot connected */ [SDCC3] = { .vdd_data = &mmc_vdd_reg_data[SDCC3], .vdd_io_data = &mmc_vdd_io_reg_data[SDCC3], } }; /* SDC1 pad data */ static struct msm_mmc_pad_drv sdc1_pad_drv_on_cfg[] = { {TLMM_HDRV_SDC1_CLK, GPIO_CFG_16MA}, {TLMM_HDRV_SDC1_CMD, GPIO_CFG_10MA}, {TLMM_HDRV_SDC1_DATA, GPIO_CFG_10MA} }; static struct msm_mmc_pad_drv sdc1_pad_drv_off_cfg[] = { {TLMM_HDRV_SDC1_CLK, GPIO_CFG_2MA}, {TLMM_HDRV_SDC1_CMD, GPIO_CFG_2MA}, {TLMM_HDRV_SDC1_DATA, GPIO_CFG_2MA} }; static struct msm_mmc_pad_pull sdc1_pad_pull_on_cfg[] = { {TLMM_PULL_SDC1_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC1_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC1_DATA, GPIO_CFG_PULL_UP} }; static struct msm_mmc_pad_pull sdc1_pad_pull_off_cfg[] = { {TLMM_PULL_SDC1_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC1_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC1_DATA, GPIO_CFG_PULL_UP} }; /* SDC3 pad data */ static struct msm_mmc_pad_drv sdc3_pad_drv_on_cfg[] = { {TLMM_HDRV_SDC3_CLK, GPIO_CFG_8MA}, {TLMM_HDRV_SDC3_CMD, GPIO_CFG_8MA}, {TLMM_HDRV_SDC3_DATA, 
GPIO_CFG_8MA} }; static struct msm_mmc_pad_drv sdc3_pad_drv_off_cfg[] = { {TLMM_HDRV_SDC3_CLK, GPIO_CFG_2MA}, {TLMM_HDRV_SDC3_CMD, GPIO_CFG_2MA}, {TLMM_HDRV_SDC3_DATA, GPIO_CFG_2MA} }; static struct msm_mmc_pad_pull sdc3_pad_pull_on_cfg[] = { {TLMM_PULL_SDC3_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC3_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC3_DATA, GPIO_CFG_PULL_UP} }; static struct msm_mmc_pad_pull sdc3_pad_pull_off_cfg[] = { {TLMM_PULL_SDC3_CLK, GPIO_CFG_NO_PULL}, /* * SDC3 CMD line should be PULLed UP otherwise fluid platform will * see transitions (1 -> 0 and 0 -> 1) on card detection line, * which would result in false card detection interrupts. */ {TLMM_PULL_SDC3_CMD, GPIO_CFG_PULL_UP}, /* * Keeping DATA lines status to PULL UP will make sure that * there is no current leak during sleep if external pull up * is connected to DATA lines. */ {TLMM_PULL_SDC3_DATA, GPIO_CFG_PULL_UP} }; static struct msm_mmc_pad_pull_data mmc_pad_pull_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .on = sdc1_pad_pull_on_cfg, .off = sdc1_pad_pull_off_cfg, .size = ARRAY_SIZE(sdc1_pad_pull_on_cfg) }, [SDCC3] = { .on = sdc3_pad_pull_on_cfg, .off = sdc3_pad_pull_off_cfg, .size = ARRAY_SIZE(sdc3_pad_pull_on_cfg) }, }; static struct msm_mmc_pad_drv_data mmc_pad_drv_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .on = sdc1_pad_drv_on_cfg, .off = sdc1_pad_drv_off_cfg, .size = ARRAY_SIZE(sdc1_pad_drv_on_cfg) }, [SDCC3] = { .on = sdc3_pad_drv_on_cfg, .off = sdc3_pad_drv_off_cfg, .size = ARRAY_SIZE(sdc3_pad_drv_on_cfg) }, }; static struct msm_mmc_pad_data mmc_pad_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .pull = &mmc_pad_pull_data[SDCC1], .drv = &mmc_pad_drv_data[SDCC1] }, [SDCC3] = { .pull = &mmc_pad_pull_data[SDCC3], .drv = &mmc_pad_drv_data[SDCC3] }, }; static struct msm_mmc_pin_data mmc_slot_pin_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .pad_data = &mmc_pad_data[SDCC1], }, [SDCC3] = { .pad_data = &mmc_pad_data[SDCC3], }, }; #define MSM_MPM_PIN_SDC1_DAT1 17 #define MSM_MPM_PIN_SDC3_DAT1 21 static unsigned 
int sdc1_sup_clk_rates[] = { 400000, 24000000, 48000000, 96000000 }; #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT static unsigned int sdc3_sup_clk_rates[] = { 400000, 24000000, 48000000, 96000000, 192000000, }; #endif #ifdef CONFIG_MMC_MSM_SDC1_SUPPORT static struct mmc_platform_data msm8960_sdc1_data = { .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, #ifdef CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT .mmc_bus_width = MMC_CAP_8_BIT_DATA, #else .mmc_bus_width = MMC_CAP_4_BIT_DATA, #endif .sup_clk_table = sdc1_sup_clk_rates, .sup_clk_cnt = ARRAY_SIZE(sdc1_sup_clk_rates), .nonremovable = 1, .vreg_data = &mmc_slot_vreg_data[SDCC1], .pin_data = &mmc_slot_pin_data[SDCC1], .mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1, .msm_bus_voting_data = &sps_to_ddr_bus_voting_data, .uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR, }; #endif #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT static struct mmc_platform_data msm8960_sdc3_data = { .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .sup_clk_table = sdc3_sup_clk_rates, .sup_clk_cnt = ARRAY_SIZE(sdc3_sup_clk_rates), #ifdef CONFIG_MMC_MSM_SDC3_WP_SUPPORT /*TODO: Insert right replacement for PM8038 */ #ifndef MSM8930_PHASE_2 .wpswitch_gpio = PM8921_GPIO_PM_TO_SYS(16), #else .wpswitch_gpio = 66, .is_wpswitch_active_low = true, #endif #endif .vreg_data = &mmc_slot_vreg_data[SDCC3], .pin_data = &mmc_slot_pin_data[SDCC3], /*TODO: Insert right replacement for PM8038 */ #ifndef MSM8930_PHASE_2 .status_gpio = PM8921_GPIO_PM_TO_SYS(26), .status_irq = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, 26), #else .status_gpio = 94, .status_irq = MSM_GPIO_TO_INT(94), #endif .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, .is_status_gpio_active_low = true, .xpc_cap = 1, .uhs_caps = (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MAX_CURRENT_800), .mpm_sdiowakeup_int = MSM_MPM_PIN_SDC3_DAT1, .msm_bus_voting_data = &sps_to_ddr_bus_voting_data, }; #endif void __init msm8930_init_mmc(void) { #ifdef 
CONFIG_MMC_MSM_SDC1_SUPPORT /* * When eMMC runs in DDR mode on CDP platform, we have * seen instability due to DATA CRC errors. These errors are * attributed to long physical path between MSM and eMMC on CDP. * So let's not enable the DDR mode on CDP platform but let other * platforms take advantage of eMMC DDR mode. */ if (!machine_is_msm8930_cdp()) msm8960_sdc1_data.uhs_caps |= (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50); /* SDC1 : eMMC card connected */ msm_add_sdcc(1, &msm8960_sdc1_data); #endif #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT /* * All 8930 platform boards using the 1.2 SoC have been reworked so that * the sd card detect line's esd circuit is no longer powered by the sd * card's voltage regulator. So this means we can turn the regulator off * to save power without affecting the sd card detect functionality. * This change to the boards will be true for newer versions of the SoC * as well. */ if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1 && SOCINFO_VERSION_MINOR(socinfo_get_version()) < 2) { msm8960_sdc3_data.vreg_data->vdd_data->always_on = true; msm8960_sdc3_data.vreg_data->vdd_data->reset_at_init = true; } /* SDC3: External card slot */ if (!machine_is_msm8930_cdp()) { msm8960_sdc3_data.wpswitch_gpio = 0; msm8960_sdc3_data.is_wpswitch_active_low = false; } msm_add_sdcc(3, &msm8960_sdc3_data); #endif }
gpl-2.0
Dm47021/Linux-kernel_4.1.15-rt17_MusicOS
drivers/tty/serial/ioc3_serial.c
851
58776
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved. */ /* * This file contains a module version of the ioc3 serial driver. This * includes all the support functions needed (support functions, etc.) * and the serial driver itself. */ #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/circ_buf.h> #include <linux/serial_reg.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/serial_core.h> #include <linux/ioc3.h> #include <linux/slab.h> /* * Interesting things about the ioc3 */ #define LOGICAL_PORTS 2 /* rs232(0) and rs422(1) */ #define PORTS_PER_CARD 2 #define LOGICAL_PORTS_PER_CARD (PORTS_PER_CARD * LOGICAL_PORTS) #define MAX_CARDS 8 #define MAX_LOGICAL_PORTS (LOGICAL_PORTS_PER_CARD * MAX_CARDS) /* determine given the sio_ir what port it applies to */ #define GET_PORT_FROM_SIO_IR(_x) (_x & SIO_IR_SA) ? 0 : 1 /* * we have 2 logical ports (rs232, rs422) for each physical port * evens are rs232, odds are rs422 */ #define GET_PHYSICAL_PORT(_x) ((_x) >> 1) #define GET_LOGICAL_PORT(_x) ((_x) & 1) #define IS_PHYSICAL_PORT(_x) !((_x) & 1) #define IS_RS232(_x) !((_x) & 1) static unsigned int Num_of_ioc3_cards; static unsigned int Submodule_slot; /* defining this will get you LOTS of great debug info */ //#define DEBUG_INTERRUPTS #define DPRINT_CONFIG(_x...) ; //#define DPRINT_CONFIG(_x...) 
printk _x #define NOT_PROGRESS() ; //#define NOT_PROGRESS() printk("%s : fails %d\n", __func__, __LINE__) /* number of characters we want to transmit to the lower level at a time */ #define MAX_CHARS 256 #define FIFO_SIZE (MAX_CHARS-1) /* it's a uchar */ /* Device name we're using */ #define DEVICE_NAME "ttySIOC" #define DEVICE_MAJOR 204 #define DEVICE_MINOR 116 /* flags for next_char_state */ #define NCS_BREAK 0x1 #define NCS_PARITY 0x2 #define NCS_FRAMING 0x4 #define NCS_OVERRUN 0x8 /* cause we need SOME parameters ... */ #define MIN_BAUD_SUPPORTED 1200 #define MAX_BAUD_SUPPORTED 115200 /* protocol types supported */ #define PROTO_RS232 0 #define PROTO_RS422 1 /* Notification types */ #define N_DATA_READY 0x01 #define N_OUTPUT_LOWAT 0x02 #define N_BREAK 0x04 #define N_PARITY_ERROR 0x08 #define N_FRAMING_ERROR 0x10 #define N_OVERRUN_ERROR 0x20 #define N_DDCD 0x40 #define N_DCTS 0x80 #define N_ALL_INPUT (N_DATA_READY | N_BREAK \ | N_PARITY_ERROR | N_FRAMING_ERROR \ | N_OVERRUN_ERROR | N_DDCD | N_DCTS) #define N_ALL_OUTPUT N_OUTPUT_LOWAT #define N_ALL_ERRORS (N_PARITY_ERROR | N_FRAMING_ERROR \ | N_OVERRUN_ERROR) #define N_ALL (N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK \ | N_PARITY_ERROR | N_FRAMING_ERROR \ | N_OVERRUN_ERROR | N_DDCD | N_DCTS) #define SER_CLK_SPEED(prediv) ((22000000 << 1) / prediv) #define SER_DIVISOR(x, clk) (((clk) + (x) * 8) / ((x) * 16)) #define DIVISOR_TO_BAUD(div, clk) ((clk) / 16 / (div)) /* Some masks */ #define LCR_MASK_BITS_CHAR (UART_LCR_WLEN5 | UART_LCR_WLEN6 \ | UART_LCR_WLEN7 | UART_LCR_WLEN8) #define LCR_MASK_STOP_BITS (UART_LCR_STOP) #define PENDING(_a, _p) (readl(&(_p)->vma->sio_ir) & (_a)->ic_enable) #define RING_BUF_SIZE 4096 #define BUF_SIZE_BIT SBBR_L_SIZE #define PROD_CONS_MASK PROD_CONS_PTR_4K #define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4) /* driver specific - one per card */ struct ioc3_card { struct { /* uart ports are allocated here */ struct uart_port icp_uart_port[LOGICAL_PORTS]; /* the ioc3_port used for this port */ 
struct ioc3_port *icp_port; } ic_port[PORTS_PER_CARD]; /* currently enabled interrupts */ uint32_t ic_enable; }; /* Local port info for each IOC3 serial port */ struct ioc3_port { /* handy reference material */ struct uart_port *ip_port; struct ioc3_card *ip_card; struct ioc3_driver_data *ip_idd; struct ioc3_submodule *ip_is; /* pci mem addresses for this port */ struct ioc3_serialregs __iomem *ip_serial_regs; struct ioc3_uartregs __iomem *ip_uart_regs; /* Ring buffer page for this port */ dma_addr_t ip_dma_ringbuf; /* vaddr of ring buffer */ struct ring_buffer *ip_cpu_ringbuf; /* Rings for this port */ struct ring *ip_inring; struct ring *ip_outring; /* Hook to port specific values */ struct port_hooks *ip_hooks; spinlock_t ip_lock; /* Various rx/tx parameters */ int ip_baud; int ip_tx_lowat; int ip_rx_timeout; /* Copy of notification bits */ int ip_notify; /* Shadow copies of various registers so we don't need to PIO * read them constantly */ uint32_t ip_sscr; uint32_t ip_tx_prod; uint32_t ip_rx_cons; unsigned char ip_flags; }; /* tx low water mark. We need to notify the driver whenever tx is getting * close to empty so it can refill the tx buffer and keep things going. * Let's assume that if we interrupt 1 ms before the tx goes idle, we'll * have no trouble getting in more chars in time (I certainly hope so). */ #define TX_LOWAT_LATENCY 1000 #define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY) #define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ) /* Flags per port */ #define INPUT_HIGH 0x01 /* used to signify that we have turned off the rx_high * temporarily - we need to drain the fifo and don't * want to get blasted with interrupts. 
*/ #define DCD_ON 0x02 /* DCD state is on */ #define LOWAT_WRITTEN 0x04 #define READ_ABORTED 0x08 /* the read was aborted - used to avaoid infinate looping * in the interrupt handler */ #define INPUT_ENABLE 0x10 /* Since each port has different register offsets and bitmasks * for everything, we'll store those that we need in tables so we * don't have to be constantly checking the port we are dealing with. */ struct port_hooks { uint32_t intr_delta_dcd; uint32_t intr_delta_cts; uint32_t intr_tx_mt; uint32_t intr_rx_timer; uint32_t intr_rx_high; uint32_t intr_tx_explicit; uint32_t intr_clear; uint32_t intr_all; char rs422_select_pin; }; static struct port_hooks hooks_array[PORTS_PER_CARD] = { /* values for port A */ { .intr_delta_dcd = SIO_IR_SA_DELTA_DCD, .intr_delta_cts = SIO_IR_SA_DELTA_CTS, .intr_tx_mt = SIO_IR_SA_TX_MT, .intr_rx_timer = SIO_IR_SA_RX_TIMER, .intr_rx_high = SIO_IR_SA_RX_HIGH, .intr_tx_explicit = SIO_IR_SA_TX_EXPLICIT, .intr_clear = (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | SIO_IR_SA_INT | SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR), .intr_all = SIO_IR_SA, .rs422_select_pin = GPPR_UARTA_MODESEL_PIN, }, /* values for port B */ { .intr_delta_dcd = SIO_IR_SB_DELTA_DCD, .intr_delta_cts = SIO_IR_SB_DELTA_CTS, .intr_tx_mt = SIO_IR_SB_TX_MT, .intr_rx_timer = SIO_IR_SB_RX_TIMER, .intr_rx_high = SIO_IR_SB_RX_HIGH, .intr_tx_explicit = SIO_IR_SB_TX_EXPLICIT, .intr_clear = (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | SIO_IR_SB_INT | SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR), .intr_all = SIO_IR_SB, .rs422_select_pin = GPPR_UARTB_MODESEL_PIN, } }; struct ring_entry { union { struct { uint32_t alldata; uint32_t allsc; } all; struct { char data[4]; /* data bytes */ char sc[4]; /* status/control */ } s; } u; }; /* Test the valid bits in any of the 4 sc chars using "allsc" member */ #define RING_ANY_VALID \ 
((uint32_t)(RXSB_MODEM_VALID | RXSB_DATA_VALID) * 0x01010101) #define ring_sc u.s.sc #define ring_data u.s.data #define ring_allsc u.all.allsc /* Number of entries per ring buffer. */ #define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry)) /* An individual ring */ struct ring { struct ring_entry entries[ENTRIES_PER_RING]; }; /* The whole enchilada */ struct ring_buffer { struct ring TX_A; struct ring RX_A; struct ring TX_B; struct ring RX_B; }; /* Get a ring from a port struct */ #define RING(_p, _wh) &(((struct ring_buffer *)((_p)->ip_cpu_ringbuf))->_wh) /* for Infinite loop detection */ #define MAXITER 10000000 /** * set_baud - Baud rate setting code * @port: port to set * @baud: baud rate to use */ static int set_baud(struct ioc3_port *port, int baud) { int divisor; int actual_baud; int diff; int lcr, prediv; struct ioc3_uartregs __iomem *uart; for (prediv = 6; prediv < 64; prediv++) { divisor = SER_DIVISOR(baud, SER_CLK_SPEED(prediv)); if (!divisor) continue; /* invalid divisor */ actual_baud = DIVISOR_TO_BAUD(divisor, SER_CLK_SPEED(prediv)); diff = actual_baud - baud; if (diff < 0) diff = -diff; /* if we're within 1% we've found a match */ if (diff * 100 <= actual_baud) break; } /* if the above loop completed, we didn't match * the baud rate. give up. 
*/ if (prediv == 64) { NOT_PROGRESS(); return 1; } uart = port->ip_uart_regs; lcr = readb(&uart->iu_lcr); writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr); writeb((unsigned char)divisor, &uart->iu_dll); writeb((unsigned char)(divisor >> 8), &uart->iu_dlm); writeb((unsigned char)prediv, &uart->iu_scr); writeb((unsigned char)lcr, &uart->iu_lcr); return 0; } /** * get_ioc3_port - given a uart port, return the control structure * @the_port: uart port to find */ static struct ioc3_port *get_ioc3_port(struct uart_port *the_port) { struct ioc3_driver_data *idd = dev_get_drvdata(the_port->dev); struct ioc3_card *card_ptr = idd->data[Submodule_slot]; int ii, jj; if (!card_ptr) { NOT_PROGRESS(); return NULL; } for (ii = 0; ii < PORTS_PER_CARD; ii++) { for (jj = 0; jj < LOGICAL_PORTS; jj++) { if (the_port == &card_ptr->ic_port[ii].icp_uart_port[jj]) return card_ptr->ic_port[ii].icp_port; } } NOT_PROGRESS(); return NULL; } /** * port_init - Initialize the sio and ioc3 hardware for a given port * called per port from attach... 
* @port: port to initialize */ static int inline port_init(struct ioc3_port *port) { uint32_t sio_cr; struct port_hooks *hooks = port->ip_hooks; struct ioc3_uartregs __iomem *uart; int reset_loop_counter = 0xfffff; struct ioc3_driver_data *idd = port->ip_idd; /* Idle the IOC3 serial interface */ writel(SSCR_RESET, &port->ip_serial_regs->sscr); /* Wait until any pending bus activity for this port has ceased */ do { sio_cr = readl(&idd->vma->sio_cr); if (reset_loop_counter-- <= 0) { printk(KERN_WARNING "IOC3 unable to come out of reset" " scr 0x%x\n", sio_cr); return -1; } } while (!(sio_cr & SIO_CR_ARB_DIAG_IDLE) && (((sio_cr &= SIO_CR_ARB_DIAG) == SIO_CR_ARB_DIAG_TXA) || sio_cr == SIO_CR_ARB_DIAG_TXB || sio_cr == SIO_CR_ARB_DIAG_RXA || sio_cr == SIO_CR_ARB_DIAG_RXB)); /* Finish reset sequence */ writel(0, &port->ip_serial_regs->sscr); /* Once RESET is done, reload cached tx_prod and rx_cons values * and set rings to empty by making prod == cons */ port->ip_tx_prod = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK; writel(port->ip_tx_prod, &port->ip_serial_regs->stpir); port->ip_rx_cons = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK; writel(port->ip_rx_cons | SRCIR_ARM, &port->ip_serial_regs->srcir); /* Disable interrupts for this 16550 */ uart = port->ip_uart_regs; writeb(0, &uart->iu_lcr); writeb(0, &uart->iu_ier); /* Set the default baud */ set_baud(port, port->ip_baud); /* Set line control to 8 bits no parity */ writeb(UART_LCR_WLEN8 | 0, &uart->iu_lcr); /* UART_LCR_STOP == 1 stop */ /* Enable the FIFOs */ writeb(UART_FCR_ENABLE_FIFO, &uart->iu_fcr); /* then reset 16550 FIFOs */ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, &uart->iu_fcr); /* Clear modem control register */ writeb(0, &uart->iu_mcr); /* Clear deltas in modem status register */ writel(0, &port->ip_serial_regs->shadow); /* Only do this once per port pair */ if (port->ip_hooks == &hooks_array[0]) { unsigned long ring_pci_addr; uint32_t __iomem *sbbr_l, 
*sbbr_h;

		sbbr_l = &idd->vma->sbbr_l;
		sbbr_h = &idd->vma->sbbr_h;
		ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
		DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n",
			       __func__, (void *)ring_pci_addr));
		/* Program the DMA ring base: high 32 bits first, then the
		 * low 32 bits with the ring-size select bit folded in. */
		writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h);
		writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l);
	}

	/* Set the receive timeout value to 10 msec */
	writel(SRTR_HZ / 100, &port->ip_serial_regs->srtr);

	/* Set rx threshold, enable DMA */
	/* Set high water mark at 3/4 of full ring */
	port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);

	/* uart experiences pauses at high baud rate reducing actual
	 * throughput by 10% or so unless we enable high speed polling
	 * XXX when this hardware bug is resolved we should revert to
	 * normal polling speed */
	port->ip_sscr |= SSCR_HIGH_SPD;

	writel(port->ip_sscr, &port->ip_serial_regs->sscr);

	/* Disable and clear all serial related interrupt bits */
	port->ip_card->ic_enable &= ~hooks->intr_clear;
	ioc3_disable(port->ip_is, idd, hooks->intr_clear);
	ioc3_ack(port->ip_is, idd, hooks->intr_clear);
	return 0;
}

/**
 * enable_intrs - enable interrupts
 * @port: port to enable
 * @mask: mask to use
 *
 * Only touches the hardware when some bit in @mask is not already set
 * in the cached enable word, keeping the cache and hardware in sync.
 */
static void enable_intrs(struct ioc3_port *port, uint32_t mask)
{
	if ((port->ip_card->ic_enable & mask) != mask) {
		port->ip_card->ic_enable |= mask;
		ioc3_enable(port->ip_is, port->ip_idd, mask);
	}
}

/**
 * local_open - local open a port
 * @port: port to open
 *
 * Enables input, flushes the uart rx fifo and restores a default rx
 * threshold.  Returns 0 on success, -1 if the DMA pause handshake
 * never completes.
 */
static inline int local_open(struct ioc3_port *port)
{
	int spiniter = 0;

	port->ip_flags = INPUT_ENABLE;

	/* Pause the DMA interface if necessary */
	if (port->ip_sscr & SSCR_DMA_EN) {
		writel(port->ip_sscr | SSCR_DMA_PAUSE,
		       &port->ip_serial_regs->sscr);
		while ((readl(&port->ip_serial_regs->sscr)
			& SSCR_PAUSE_STATE) == 0) {
			spiniter++;
			if (spiniter > MAXITER) {
				NOT_PROGRESS();
				return -1;
			}
		}
	}

	/* Reset the input fifo.  If the uart received chars while the port
	 * was closed and DMA is not enabled, the uart may have a bunch of
	 * chars hanging around in its rx fifo which will not be discarded
	 * by rclr in the upper layer. We must get rid of them here. */
	writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
	       &port->ip_uart_regs->iu_fcr);

	writeb(UART_LCR_WLEN8, &port->ip_uart_regs->iu_lcr);
					/* UART_LCR_STOP == 1 stop */

	/* Re-enable DMA, set default threshold to intr whenever there is
	 * data available. */
	port->ip_sscr &= ~SSCR_RX_THRESHOLD;
	port->ip_sscr |= 1;	/* default threshold */

	/* Plug in the new sscr.  This implicitly clears the DMA_PAUSE
	 * flag if it was set above */
	writel(port->ip_sscr, &port->ip_serial_regs->sscr);
	port->ip_tx_lowat = 1;
	return 0;
}

/**
 * set_rx_timeout - Set rx timeout and threshold values.
 * @port: port to use
 * @timeout: timeout value in ticks
 *
 * Returns 0 on success, 1 when the derived threshold would exceed the
 * hardware's SSCR_RX_THRESHOLD field.
 */
static inline int set_rx_timeout(struct ioc3_port *port, int timeout)
{
	int threshold;

	port->ip_rx_timeout = timeout;

	/* Timeout is in ticks.  Let's figure out how many chars we
	 * can receive at the current baud rate in that interval
	 * and set the rx threshold to that amount.  There are 4 chars
	 * per ring entry, so we'll divide the number of chars that will
	 * arrive in timeout by 4.
	 * So .... timeout * baud / 10 / HZ / 4, with HZ = 100.
	 */
	threshold = timeout * port->ip_baud / 4000;
	if (threshold == 0)
		threshold = 1;	/* otherwise we'll intr all the time! */

	if ((unsigned)threshold > (unsigned)SSCR_RX_THRESHOLD)
		return 1;

	port->ip_sscr &= ~SSCR_RX_THRESHOLD;
	port->ip_sscr |= threshold;
	writel(port->ip_sscr, &port->ip_serial_regs->sscr);

	/* Now set the rx timeout to the given value
	 * again timeout * SRTR_HZ / HZ
	 */
	timeout = timeout * SRTR_HZ / 100;
	if (timeout > SRTR_CNT)
		timeout = SRTR_CNT;
	writel(timeout, &port->ip_serial_regs->srtr);
	return 0;
}

/**
 * config_port - config the hardware
 * @port: port to config
 * @baud: baud rate for the port
 * @byte_size: data size
 * @stop_bits: number of stop bits
 * @parenb: parity enable ?
 * @parodd: odd parity ?
 *
 * Returns 0 on success, 1 for an unsupported baud/byte size, -1 if the
 * DMA pause handshake times out.
 */
static inline int
config_port(struct ioc3_port *port,
	    int baud, int byte_size, int stop_bits, int parenb, int parodd)
{
	char lcr, sizebits;
	int spiniter = 0;

	DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d "
		       "parodd %d\n",
		       __func__, ((struct uart_port *)port->ip_port)->line,
		       baud, byte_size, stop_bits, parenb, parodd));

	if (set_baud(port, baud))
		return 1;

	switch (byte_size) {
	case 5:
		sizebits = UART_LCR_WLEN5;
		break;
	case 6:
		sizebits = UART_LCR_WLEN6;
		break;
	case 7:
		sizebits = UART_LCR_WLEN7;
		break;
	case 8:
		sizebits = UART_LCR_WLEN8;
		break;
	default:
		return 1;
	}

	/* Pause the DMA interface if necessary */
	if (port->ip_sscr & SSCR_DMA_EN) {
		writel(port->ip_sscr | SSCR_DMA_PAUSE,
		       &port->ip_serial_regs->sscr);
		while ((readl(&port->ip_serial_regs->sscr)
			& SSCR_PAUSE_STATE) == 0) {
			spiniter++;
			if (spiniter > MAXITER)
				return -1;
		}
	}

	/* Clear relevant fields in lcr */
	lcr = readb(&port->ip_uart_regs->iu_lcr);
	lcr &= ~(LCR_MASK_BITS_CHAR | UART_LCR_EPAR |
		 UART_LCR_PARITY | LCR_MASK_STOP_BITS);

	/* Set byte size in lcr */
	lcr |= sizebits;

	/* Set parity */
	if (parenb) {
		lcr |= UART_LCR_PARITY;
		if (!parodd)
			lcr |= UART_LCR_EPAR;
	}

	/* Set stop bits */
	if (stop_bits)
		lcr |= UART_LCR_STOP /* 2 stop bits */ ;

	writeb(lcr, &port->ip_uart_regs->iu_lcr);

	/* Re-enable the DMA interface if necessary */
	if (port->ip_sscr & SSCR_DMA_EN) {
		writel(port->ip_sscr,
&port->ip_serial_regs->sscr);
	}
	port->ip_baud = baud;

	/* When we get within this number of ring entries of filling the
	 * entire ring on tx, place an EXPLICIT intr to generate a lowat
	 * notification when output has drained.
	 */
	port->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
	if (port->ip_tx_lowat == 0)
		port->ip_tx_lowat = 1;

	set_rx_timeout(port, 2);
	return 0;
}

/**
 * do_write - Write bytes to the port.  Returns the number of bytes
 *			actually written. Called from transmit_chars
 * @port: port to use
 * @buf: the stuff to write
 * @len: how many bytes in 'buf'
 *
 * Copies up to @len bytes into the tx DMA ring (4 bytes per ring
 * entry), starts DMA if it was idle, and advances the producer
 * pointer unless tx is currently stopped by the serial core.
 */
static inline int do_write(struct ioc3_port *port, char *buf, int len)
{
	int prod_ptr, cons_ptr, total = 0;
	struct ring *outring;
	struct ring_entry *entry;
	struct port_hooks *hooks = port->ip_hooks;

	BUG_ON(!(len >= 0));

	prod_ptr = port->ip_tx_prod;
	cons_ptr = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
	outring = port->ip_outring;

	/* Maintain a 1-entry red-zone.  The ring buffer is full when
	 * (cons - prod) % ring_size is 1.  Rather than do this subtraction
	 * in the body of the loop, I'll do it now.
	 */
	cons_ptr = (cons_ptr - (int)sizeof(struct ring_entry)) & PROD_CONS_MASK;

	/* Stuff the bytes into the output */
	while ((prod_ptr != cons_ptr) && (len > 0)) {
		int xx;

		/* Get 4 bytes (one ring entry) at a time */
		entry = (struct ring_entry *)((caddr_t) outring + prod_ptr);

		/* Invalidate all entries */
		entry->ring_allsc = 0;

		/* Copy in some bytes */
		for (xx = 0; (xx < 4) && (len > 0); xx++) {
			entry->ring_data[xx] = *buf++;
			entry->ring_sc[xx] = TXCB_VALID;
			len--;
			total++;
		}

		/* If we are within some small threshold of filling up the
		 * entire ring buffer, we must place an EXPLICIT intr here
		 * to generate a lowat interrupt in case we subsequently
		 * really do fill up the ring and the caller goes to sleep.
		 * No need to place more than one though.
		 */
		if (!(port->ip_flags & LOWAT_WRITTEN) &&
		    ((cons_ptr - prod_ptr) & PROD_CONS_MASK)
		    <= port->ip_tx_lowat * (int)sizeof(struct ring_entry)) {
			port->ip_flags |= LOWAT_WRITTEN;
			entry->ring_sc[0] |= TXCB_INT_WHEN_DONE;
		}

		/* Go on to next entry */
		prod_ptr += sizeof(struct ring_entry);
		prod_ptr &= PROD_CONS_MASK;
	}

	/* If we sent something, start DMA if necessary */
	if (total > 0 && !(port->ip_sscr & SSCR_DMA_EN)) {
		port->ip_sscr |= SSCR_DMA_EN;
		writel(port->ip_sscr, &port->ip_serial_regs->sscr);
	}

	/* Store the new producer pointer.  If tx is disabled, we stuff the
	 * data into the ring buffer, but we don't actually start tx.
	 */
	if (!uart_tx_stopped(port->ip_port)) {
		writel(prod_ptr, &port->ip_serial_regs->stpir);

		/* If we are now transmitting, enable tx_mt interrupt so we
		 * can disable DMA if necessary when the tx finishes.
		 */
		if (total > 0)
			enable_intrs(port, hooks->intr_tx_mt);
	}
	port->ip_tx_prod = prod_ptr;
	return total;
}

/**
 * disable_intrs - disable interrupts
 * @port: port to enable
 * @mask: mask to use
 *
 * Mirror of enable_intrs(): only touches the hardware when some bit in
 * @mask is currently enabled, and keeps the cached enable word in sync.
 */
static inline void disable_intrs(struct ioc3_port *port, uint32_t mask)
{
	if (port->ip_card->ic_enable & mask) {
		ioc3_disable(port->ip_is, port->ip_idd, mask);
		port->ip_card->ic_enable &= ~mask;
	}
}

/**
 * set_notification - Modify event notification
 * @port: port to use
 * @mask: events mask (N_* flags)
 * @set_on: set ?
 *
 * Translates the N_* notification mask into the corresponding interrupt
 * and sscr bits, and enables/disables them.  DMA is kept on while data
 * or carrier notifications are wanted, or while a tx is in flight.
 */
static int set_notification(struct ioc3_port *port, int mask, int set_on)
{
	struct port_hooks *hooks = port->ip_hooks;
	uint32_t intrbits, sscrbits;

	BUG_ON(!mask);

	intrbits = sscrbits = 0;

	if (mask & N_DATA_READY)
		intrbits |= (hooks->intr_rx_timer | hooks->intr_rx_high);
	if (mask & N_OUTPUT_LOWAT)
		intrbits |= hooks->intr_tx_explicit;
	if (mask & N_DDCD) {
		intrbits |= hooks->intr_delta_dcd;
		sscrbits |= SSCR_RX_RING_DCD;
	}
	if (mask & N_DCTS)
		intrbits |= hooks->intr_delta_cts;

	if (set_on) {
		enable_intrs(port, intrbits);
		port->ip_notify |= mask;
		port->ip_sscr |= sscrbits;
	} else {
		disable_intrs(port, intrbits);
		port->ip_notify &= ~mask;
		port->ip_sscr &= ~sscrbits;
	}

	/* We require DMA if either DATA_READY or DDCD notification is
	 * currently requested.  If neither of these is requested and
	 * there is currently no tx in progress, DMA may be disabled.
	 */
	if (port->ip_notify & (N_DATA_READY | N_DDCD))
		port->ip_sscr |= SSCR_DMA_EN;
	else if (!(port->ip_card->ic_enable & hooks->intr_tx_mt))
		port->ip_sscr &= ~SSCR_DMA_EN;

	writel(port->ip_sscr, &port->ip_serial_regs->sscr);
	return 0;
}

/**
 * set_mcr - set the master control reg
 * @the_port: port to use
 * @mask1: mcr mask
 * @mask2: shadow mask
 *
 * Pauses DMA, ORs @mask1 into the uart's mcr and @mask2 into the
 * shadow register, then resumes DMA.  Returns 0 on success, -1 if the
 * port is unknown or the pause handshake times out.
 */
static inline int set_mcr(struct uart_port *the_port,
			  int mask1, int mask2)
{
	struct ioc3_port *port = get_ioc3_port(the_port);
	uint32_t shadow;
	int spiniter = 0;
	char mcr;

	if (!port)
		return -1;

	/* Pause the DMA interface if necessary */
	if (port->ip_sscr & SSCR_DMA_EN) {
		writel(port->ip_sscr | SSCR_DMA_PAUSE,
		       &port->ip_serial_regs->sscr);
		while ((readl(&port->ip_serial_regs->sscr)
			& SSCR_PAUSE_STATE) == 0) {
			spiniter++;
			if (spiniter > MAXITER)
				return -1;
		}
	}
	shadow = readl(&port->ip_serial_regs->shadow);
	mcr = (shadow & 0xff000000) >> 24;

	/* Set new value */
	mcr |= mask1;
	shadow |= mask2;
	writeb(mcr, &port->ip_uart_regs->iu_mcr);
	writel(shadow, &port->ip_serial_regs->shadow);

	/* Re-enable the DMA interface if necessary */
	if (port->ip_sscr & SSCR_DMA_EN) {
		writel(port->ip_sscr,
&port->ip_serial_regs->sscr); } return 0; } /** * ioc3_set_proto - set the protocol for the port * @port: port to use * @proto: protocol to use */ static int ioc3_set_proto(struct ioc3_port *port, int proto) { struct port_hooks *hooks = port->ip_hooks; switch (proto) { default: case PROTO_RS232: /* Clear the appropriate GIO pin */ DPRINT_CONFIG(("%s: rs232\n", __func__)); writel(0, (&port->ip_idd->vma->gppr[0] + hooks->rs422_select_pin)); break; case PROTO_RS422: /* Set the appropriate GIO pin */ DPRINT_CONFIG(("%s: rs422\n", __func__)); writel(1, (&port->ip_idd->vma->gppr[0] + hooks->rs422_select_pin)); break; } return 0; } /** * transmit_chars - upper level write, called with the_port->lock * @the_port: port to write */ static void transmit_chars(struct uart_port *the_port) { int xmit_count, tail, head; int result; char *start; struct tty_struct *tty; struct ioc3_port *port = get_ioc3_port(the_port); struct uart_state *state; if (!the_port) return; if (!port) return; state = the_port->state; tty = state->port.tty; if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) { /* Nothing to do or hw stopped */ set_notification(port, N_ALL_OUTPUT, 0); return; } head = state->xmit.head; tail = state->xmit.tail; start = (char *)&state->xmit.buf[tail]; /* write out all the data or until the end of the buffer */ xmit_count = (head < tail) ? 
(UART_XMIT_SIZE - tail) : (head - tail); if (xmit_count > 0) { result = do_write(port, start, xmit_count); if (result > 0) { /* booking */ xmit_count -= result; the_port->icount.tx += result; /* advance the pointers */ tail += result; tail &= UART_XMIT_SIZE - 1; state->xmit.tail = tail; start = (char *)&state->xmit.buf[tail]; } } if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS) uart_write_wakeup(the_port); if (uart_circ_empty(&state->xmit)) { set_notification(port, N_OUTPUT_LOWAT, 0); } else { set_notification(port, N_OUTPUT_LOWAT, 1); } } /** * ioc3_change_speed - change the speed of the port * @the_port: port to change * @new_termios: new termios settings * @old_termios: old termios settings */ static void ioc3_change_speed(struct uart_port *the_port, struct ktermios *new_termios, struct ktermios *old_termios) { struct ioc3_port *port = get_ioc3_port(the_port); unsigned int cflag, iflag; int baud; int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8; struct uart_state *state = the_port->state; cflag = new_termios->c_cflag; iflag = new_termios->c_iflag; switch (cflag & CSIZE) { case CS5: new_data = 5; break; case CS6: new_data = 6; break; case CS7: new_data = 7; break; case CS8: new_data = 8; break; default: /* cuz we always need a default ... 
*/ new_data = 5; break; } if (cflag & CSTOPB) { new_stop = 1; } if (cflag & PARENB) { new_parity_enable = 1; if (cflag & PARODD) new_parity = 1; } baud = uart_get_baud_rate(the_port, new_termios, old_termios, MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __func__, baud, the_port->line)); if (!the_port->fifosize) the_port->fifosize = FIFO_SIZE; uart_update_timeout(the_port, cflag, baud); the_port->ignore_status_mask = N_ALL_INPUT; state->port.low_latency = 1; if (iflag & IGNPAR) the_port->ignore_status_mask &= ~(N_PARITY_ERROR | N_FRAMING_ERROR); if (iflag & IGNBRK) { the_port->ignore_status_mask &= ~N_BREAK; if (iflag & IGNPAR) the_port->ignore_status_mask &= ~N_OVERRUN_ERROR; } if (!(cflag & CREAD)) { /* ignore everything */ the_port->ignore_status_mask &= ~N_DATA_READY; } if (cflag & CRTSCTS) { /* enable hardware flow control */ port->ip_sscr |= SSCR_HFC_EN; } else { /* disable hardware flow control */ port->ip_sscr &= ~SSCR_HFC_EN; } writel(port->ip_sscr, &port->ip_serial_regs->sscr); /* Set the configuration and proper notification call */ DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o " "config_port(baud %d data %d stop %d penable %d " " parity %d), notification 0x%x\n", __func__, (void *)port, the_port->line, cflag, baud, new_data, new_stop, new_parity_enable, new_parity, the_port->ignore_status_mask)); if ((config_port(port, baud, /* baud */ new_data, /* byte size */ new_stop, /* stop bits */ new_parity_enable, /* set parity */ new_parity)) >= 0) { /* parity 1==odd */ set_notification(port, the_port->ignore_status_mask, 1); } } /** * ic3_startup_local - Start up the serial port - returns >= 0 if no errors * @the_port: Port to operate on */ static inline int ic3_startup_local(struct uart_port *the_port) { struct ioc3_port *port; if (!the_port) { NOT_PROGRESS(); return -1; } port = get_ioc3_port(the_port); if (!port) { NOT_PROGRESS(); return -1; } local_open(port); /* set the protocol */ ioc3_set_proto(port, 
IS_RS232(the_port->line) ? PROTO_RS232 :
		       PROTO_RS422);
	return 0;
}

/*
 * ioc3_cb_output_lowat - called when the output low water mark is hit
 * @port: port to output
 *
 * Takes the uart port lock itself - callers must NOT hold it.
 */
static void ioc3_cb_output_lowat(struct ioc3_port *port)
{
	unsigned long pflags;

	/* the_port->lock is set on the call here */
	if (port->ip_port) {
		spin_lock_irqsave(&port->ip_port->lock, pflags);
		transmit_chars(port->ip_port);
		spin_unlock_irqrestore(&port->ip_port->lock, pflags);
	}
}

/*
 * ioc3_cb_post_ncs - called for some basic errors
 * @port: port to use
 * @ncs: event (NCS_* bitmask)
 *
 * Bumps the matching error counters in the uart icount statistics.
 */
static void ioc3_cb_post_ncs(struct uart_port *the_port, int ncs)
{
	struct uart_icount *icount;

	icount = &the_port->icount;

	if (ncs & NCS_BREAK)
		icount->brk++;
	if (ncs & NCS_FRAMING)
		icount->frame++;
	if (ncs & NCS_OVERRUN)
		icount->overrun++;
	if (ncs & NCS_PARITY)
		icount->parity++;
}

/**
 * do_read - Read in bytes from the port.  Return the number of bytes
 *			actually read.
 * @the_port: port to use
 * @buf: place to put the stuff we read
 * @len: how big 'buf' is
 *
 * Pulls bytes out of the rx DMA ring (4 bytes + status per entry),
 * posting error/modem-state events to the upper layer as they are
 * encountered.  Returns early (possibly 0) on a carrier drop so the
 * caller sees the DCD change on its next pass.
 */
static inline int do_read(struct uart_port *the_port, char *buf, int len)
{
	int prod_ptr, cons_ptr, total;
	struct ioc3_port *port = get_ioc3_port(the_port);
	struct ring *inring;
	struct ring_entry *entry;
	struct port_hooks *hooks;
	int byte_num;
	char *sc;
	int loop_counter;

	BUG_ON(!(len >= 0));
	BUG_ON(!port);
	hooks = port->ip_hooks;

	/* There is a nasty timing issue in the IOC3. When the rx_timer
	 * expires or the rx_high condition arises, we take an interrupt.
	 * At some point while servicing the interrupt, we read bytes from
	 * the ring buffer and re-arm the rx_timer.  However the rx_timer is
	 * not started until the first byte is received *after* it is armed,
	 * and any bytes pending in the rx construction buffers are not drained
	 * to memory until either there are 4 bytes available or the rx_timer
	 * expires.  This leads to a potential situation where data is left
	 * in the construction buffers forever - 1 to 3 bytes were received
	 * after the interrupt was generated but before the rx_timer was
	 * re-armed. At that point as long as no subsequent bytes are received
	 * the timer will never be started and the bytes will remain in the
	 * construction buffer forever.  The solution is to execute a DRAIN
	 * command after rearming the timer.  This way any bytes received before
	 * the DRAIN will be drained to memory, and any bytes received after
	 * the DRAIN will start the TIMER and be drained when it expires.
	 * Luckily, this only needs to be done when the DMA buffer is empty
	 * since there is no requirement that this function return all
	 * available data as long as it returns some.
	 */
	/* Re-arm the timer */
	writel(port->ip_rx_cons | SRCIR_ARM, &port->ip_serial_regs->srcir);

	prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
	cons_ptr = port->ip_rx_cons;

	if (prod_ptr == cons_ptr) {
		int reset_dma = 0;

		/* Input buffer appears empty, do a flush. */

		/* DMA must be enabled for this to work. */
		if (!(port->ip_sscr & SSCR_DMA_EN)) {
			port->ip_sscr |= SSCR_DMA_EN;
			reset_dma = 1;
		}

		/* Potential race condition: we must reload the srpir after
		 * issuing the drain command, otherwise we could think the rx
		 * buffer is empty, then take a very long interrupt, and when
		 * we come back it's full and we wait forever for the drain to
		 * complete.
		 */
		writel(port->ip_sscr | SSCR_RX_DRAIN,
		       &port->ip_serial_regs->sscr);
		prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;

		/* We must not wait for the DRAIN to complete unless there are
		 * at least 8 bytes (2 ring entries) available to receive the
		 * data otherwise the DRAIN will never complete and we'll
		 * deadlock here.
		 * In fact, to make things easier, I'll just ignore the flush if
		 * there is any data at all now available.
		 */
		if (prod_ptr == cons_ptr) {
			loop_counter = 0;
			while (readl(&port->ip_serial_regs->sscr) &
			       SSCR_RX_DRAIN) {
				loop_counter++;
				if (loop_counter > MAXITER)
					return -1;
			}

			/* SIGH. We have to reload the prod_ptr *again* since
			 * the drain may have caused it to change
			 */
			prod_ptr = readl(&port->ip_serial_regs->srpir)
			    & PROD_CONS_MASK;
		}
		if (reset_dma) {
			port->ip_sscr &= ~SSCR_DMA_EN;
			writel(port->ip_sscr, &port->ip_serial_regs->sscr);
		}
	}
	inring = port->ip_inring;
	port->ip_flags &= ~READ_ABORTED;

	total = 0;
	loop_counter = 0xfffff;	/* to avoid hangs */

	/* Grab bytes from the hardware */
	while ((prod_ptr != cons_ptr) && (len > 0)) {
		entry = (struct ring_entry *)((caddr_t) inring + cons_ptr);

		if (loop_counter-- <= 0) {
			printk(KERN_WARNING "IOC3 serial: "
			       "possible hang condition/"
			       "port stuck on read (line %d).\n",
			       the_port->line);
			break;
		}

		/* According to the producer pointer, this ring entry
		 * must contain some data.  But if the PIO happened faster
		 * than the DMA, the data may not be available yet, so let's
		 * wait until it arrives.
		 */
		if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
			/* Indicate the read is aborted so we don't disable
			 * the interrupt thinking that the consumer is
			 * congested.
			 */
			port->ip_flags |= READ_ABORTED;
			len = 0;
			break;
		}

		/* Load the bytes/status out of the ring entry */
		for (byte_num = 0; byte_num < 4 && len > 0; byte_num++) {
			sc = &(entry->ring_sc[byte_num]);

			/* Check for change in modem state or overrun */
			if ((*sc & RXSB_MODEM_VALID)
			    && (port->ip_notify & N_DDCD)) {
				/* Notify upper layer if DCD dropped */
				if ((port->ip_flags & DCD_ON)
				    && !(*sc & RXSB_DCD)) {
					/* If we have already copied some data,
					 * return it.  We'll pick up the carrier
					 * drop on the next pass.  That way we
					 * don't throw away the data that has
					 * already been copied back to
					 * the caller's buffer.
					 */
					if (total > 0) {
						len = 0;
						break;
					}
					port->ip_flags &= ~DCD_ON;

					/* Turn off this notification so the
					 * carrier drop protocol won't see it
					 * again when it does a read.
					 */
					*sc &= ~RXSB_MODEM_VALID;

					/* To keep things consistent, we need
					 * to update the consumer pointer so
					 * the next reader won't come in and
					 * try to read the same ring entries
					 * again. This must be done here before
					 * the dcd change.
					 */
					if ((entry->ring_allsc & RING_ANY_VALID)
					    == 0) {
						cons_ptr += (int)sizeof
						    (struct ring_entry);
						cons_ptr &= PROD_CONS_MASK;
					}
					writel(cons_ptr,
					       &port->ip_serial_regs->srcir);
					port->ip_rx_cons = cons_ptr;

					/* Notify upper layer of carrier drop */
					if ((port->ip_notify & N_DDCD)
					    && port->ip_port) {
						uart_handle_dcd_change
						    (port->ip_port, 0);
						wake_up_interruptible
						    (&the_port->state->
						     port.delta_msr_wait);
					}

					/* If we had any data to return, we
					 * would have returned it above.
					 */
					return 0;
				}
			}
			if (*sc & RXSB_MODEM_VALID) {
				/* Notify that an input overrun occurred */
				if ((*sc & RXSB_OVERRUN)
				    && (port->ip_notify & N_OVERRUN_ERROR)) {
					ioc3_cb_post_ncs(the_port, NCS_OVERRUN);
				}
				/* Don't look at this byte again */
				*sc &= ~RXSB_MODEM_VALID;
			}

			/* Check for valid data or RX errors */
			if ((*sc & RXSB_DATA_VALID) &&
			    ((*sc & (RXSB_PAR_ERR | RXSB_FRAME_ERR |
				     RXSB_BREAK))
			     && (port->ip_notify & (N_PARITY_ERROR |
						    N_FRAMING_ERROR |
						    N_BREAK)))) {
				/* There is an error condition on the next byte.
				 * If we have already transferred some bytes,
				 * we'll stop here.  Otherwise if this is the
				 * first byte to be read, we'll just transfer
				 * it alone after notifying the
				 * upper layer of its status.
				 */
				if (total > 0) {
					len = 0;
					break;
				} else {
					if ((*sc & RXSB_PAR_ERR) &&
					    (port->
					     ip_notify & N_PARITY_ERROR)) {
						ioc3_cb_post_ncs(the_port,
								 NCS_PARITY);
					}
					if ((*sc & RXSB_FRAME_ERR) &&
					    (port->
					     ip_notify & N_FRAMING_ERROR)) {
						ioc3_cb_post_ncs(the_port,
								 NCS_FRAMING);
					}
					if ((*sc & RXSB_BREAK)
					    && (port->ip_notify & N_BREAK)) {
						ioc3_cb_post_ncs
						    (the_port, NCS_BREAK);
					}
					len = 1;
				}
			}
			if (*sc & RXSB_DATA_VALID) {
				*sc &= ~RXSB_DATA_VALID;
				*buf = entry->ring_data[byte_num];
				buf++;
				len--;
				total++;
			}
		}

		/* If we used up this entry entirely, go on to the next one,
		 * otherwise we must have run out of buffer space, so
		 * leave the consumer pointer here for the next read in case
		 * there are still unread bytes in this entry.
		 */
		if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
			cons_ptr += (int)sizeof(struct ring_entry);
			cons_ptr &= PROD_CONS_MASK;
		}
	}

	/* Update consumer pointer and re-arm rx timer interrupt */
	writel(cons_ptr, &port->ip_serial_regs->srcir);
	port->ip_rx_cons = cons_ptr;

	/* If we have now dipped below the rx high water mark and we have
	 * rx_high interrupt turned off, we can now turn it back on again.
	 */
	if ((port->ip_flags & INPUT_HIGH) &&
	    (((prod_ptr - cons_ptr) & PROD_CONS_MASK) <
	     ((port->
	       ip_sscr & SSCR_RX_THRESHOLD) << PROD_CONS_PTR_OFF))) {
		port->ip_flags &= ~INPUT_HIGH;
		enable_intrs(port, hooks->intr_rx_high);
	}
	return total;
}

/**
 * receive_chars - upper level read.
 * @the_port: port to read from
 *
 * Reads up to MAX_CHARS bytes via do_read() under the port lock and
 * pushes them into the tty flip buffer.  Returns the byte count read.
 * NOTE(review): read_room captures tty_insert_flip_string()'s return
 * but is never examined - bytes dropped by a full flip buffer are
 * still counted in icount.rx; confirm whether that is intentional.
 */
static int receive_chars(struct uart_port *the_port)
{
	unsigned char ch[MAX_CHARS];
	int read_count = 0, read_room, flip = 0;
	struct uart_state *state = the_port->state;
	struct ioc3_port *port = get_ioc3_port(the_port);
	unsigned long pflags;

	/* Make sure all the pointers are "good" ones */
	if (!state)
		return 0;

	if (!(port->ip_flags & INPUT_ENABLE))
		return 0;

	spin_lock_irqsave(&the_port->lock, pflags);

	read_count = do_read(the_port, ch, MAX_CHARS);
	if (read_count > 0) {
		flip = 1;
		read_room = tty_insert_flip_string(&state->port, ch,
						   read_count);
		the_port->icount.rx += read_count;
	}
	spin_unlock_irqrestore(&the_port->lock, pflags);

	if (flip)
		tty_flip_buffer_push(&state->port);

	return read_count;
}

/**
 * ioc3uart_intr_one - lowest level (per port) interrupt handler.
 * @is : submodule
 * @idd: driver data
 * @pending: interrupts to handle
 *
 * Services all asserted conditions (DCD/CTS deltas, rx timer/high,
 * tx lowat/empty) in a loop until no interrupt of interest remains
 * pending, then re-writes the (possibly trimmed) enable mask.
 */
static int inline ioc3uart_intr_one(struct ioc3_submodule *is,
				    struct ioc3_driver_data *idd,
				    unsigned int pending)
{
	int port_num = GET_PORT_FROM_SIO_IR(pending);
	struct port_hooks *hooks;
	unsigned int rx_high_rd_aborted = 0;
	unsigned long flags;
	struct uart_port *the_port;
	struct ioc3_port *port;
	int loop_counter;
	struct ioc3_card *card_ptr;
	unsigned int sio_ir;

	card_ptr = idd->data[is->id];
	port = card_ptr->ic_port[port_num].icp_port;
	hooks = port->ip_hooks;

	/* Possible race condition here: The tx_mt interrupt bit may be
	 * cleared without the intervention of the interrupt handler,
	 * e.g. by a write.  If the top level interrupt handler reads a
	 * tx_mt, then some other processor does a write, starting up
	 * output, then we come in here, see the tx_mt and stop DMA, the
	 * output started by the other processor will hang.  Thus we can
	 * only rely on tx_mt being legitimate if it is read while the
	 * port lock is held.  Therefore this bit must be ignored in the
	 * passed in interrupt mask which was read by the top level
	 * interrupt handler since the port lock was not held at the time
	 * it was read.  We can only rely on this bit being accurate if it
	 * is read while the port lock is held.  So we'll clear it for now,
	 * and reload it later once we have the port lock.
	 */
	sio_ir = pending & ~(hooks->intr_tx_mt);
	spin_lock_irqsave(&port->ip_lock, flags);

	loop_counter = MAXITER;	/* to avoid hangs */

	do {
		uint32_t shadow;

		if (loop_counter-- <= 0) {
			printk(KERN_WARNING "IOC3 serial: "
			       "possible hang condition/"
			       "port stuck on interrupt (line %d).\n",
			       ((struct uart_port *)port->ip_port)->line);
			break;
		}

		/* Handle a DCD change */
		if (sio_ir & hooks->intr_delta_dcd) {
			ioc3_ack(is, idd, hooks->intr_delta_dcd);
			shadow = readl(&port->ip_serial_regs->shadow);

			if ((port->ip_notify & N_DDCD)
			    && (shadow & SHADOW_DCD)
			    && (port->ip_port)) {
				the_port = port->ip_port;
				uart_handle_dcd_change(the_port,
						       shadow & SHADOW_DCD);
				wake_up_interruptible
				    (&the_port->state->port.delta_msr_wait);
			} else if ((port->ip_notify & N_DDCD)
				   && !(shadow & SHADOW_DCD)) {
				/* Flag delta DCD/no DCD */
				uart_handle_dcd_change(port->ip_port,
						       shadow & SHADOW_DCD);
				port->ip_flags |= DCD_ON;
			}
		}

		/* Handle a CTS change */
		if (sio_ir & hooks->intr_delta_cts) {
			ioc3_ack(is, idd, hooks->intr_delta_cts);
			shadow = readl(&port->ip_serial_regs->shadow);

			if ((port->ip_notify & N_DCTS) && (port->ip_port)) {
				the_port = port->ip_port;
				uart_handle_cts_change(the_port,
						       shadow & SHADOW_CTS);
				wake_up_interruptible
				    (&the_port->state->port.delta_msr_wait);
			}
		}

		/* rx timeout interrupt.  Must be some data available.  Put this
		 * before the check for rx_high since servicing this condition
		 * may cause that condition to clear.
		 */
		if (sio_ir & hooks->intr_rx_timer) {
			ioc3_ack(is, idd, hooks->intr_rx_timer);
			if ((port->ip_notify & N_DATA_READY)
			    && (port->ip_port)) {
				receive_chars(port->ip_port);
			}
		}

		/* rx high interrupt. Must be after rx_timer. */
		else if (sio_ir & hooks->intr_rx_high) {
			/* Data available, notify upper layer */
			if ((port->ip_notify & N_DATA_READY) && port->ip_port) {
				receive_chars(port->ip_port);
			}

			/* We can't ACK this interrupt.  If receive_chars didn't
			 * cause the condition to clear, we'll have to disable
			 * the interrupt until the data is drained.
			 * If the read was aborted, don't disable the interrupt
			 * as this may cause us to hang indefinitely.  An
			 * aborted read generally means that this interrupt
			 * hasn't been delivered to the cpu yet anyway, even
			 * though we see it as asserted when we read the sio_ir.
			 */
			if ((sio_ir = PENDING(card_ptr, idd))
			    & hooks->intr_rx_high) {
				if (port->ip_flags & READ_ABORTED) {
					rx_high_rd_aborted++;
				} else {
					card_ptr->ic_enable &=
					    ~hooks->intr_rx_high;
					port->ip_flags |= INPUT_HIGH;
				}
			}
		}

		/* We got a low water interrupt: notify upper layer to
		 * send more data.  Must come before tx_mt since servicing
		 * this condition may cause that condition to clear.
		 */
		if (sio_ir & hooks->intr_tx_explicit) {
			port->ip_flags &= ~LOWAT_WRITTEN;
			ioc3_ack(is, idd, hooks->intr_tx_explicit);
			if (port->ip_notify & N_OUTPUT_LOWAT)
				ioc3_cb_output_lowat(port);
		}

		/* Handle tx_mt.  Must come after tx_explicit. */
		else if (sio_ir & hooks->intr_tx_mt) {
			/* If we are expecting a lowat notification
			 * and we get to this point it probably means that for
			 * some reason the tx_explicit didn't work as expected
			 * (that can legitimately happen if the output buffer is
			 * filled up in just the right way).
			 * So send the notification now.
			 */
			if (port->ip_notify & N_OUTPUT_LOWAT) {
				ioc3_cb_output_lowat(port);

				/* We need to reload the sio_ir since the lowat
				 * call may have caused another write to occur,
				 * clearing the tx_mt condition.
				 */
				sio_ir = PENDING(card_ptr, idd);
			}

			/* If the tx_mt condition still persists even after the
			 * lowat call, we've got some work to do.
			 */
			if (sio_ir & hooks->intr_tx_mt) {
				/* If we are not currently expecting DMA input,
				 * and the transmitter has just gone idle,
				 * there is no longer any reason for DMA, so
				 * disable it.
				 */
				if (!(port->ip_notify
				      & (N_DATA_READY | N_DDCD))) {
					BUG_ON(!(port->ip_sscr
						 & SSCR_DMA_EN));
					port->ip_sscr &= ~SSCR_DMA_EN;
					writel(port->ip_sscr,
					       &port->ip_serial_regs->sscr);
				}

				/* Prevent infinite tx_mt interrupt */
				card_ptr->ic_enable &= ~hooks->intr_tx_mt;
			}
		}
		sio_ir = PENDING(card_ptr, idd);

		/* if the read was aborted and only hooks->intr_rx_high,
		 * clear hooks->intr_rx_high, so we do not loop forever.
		 */
		if (rx_high_rd_aborted && (sio_ir == hooks->intr_rx_high)) {
			sio_ir &= ~hooks->intr_rx_high;
		}
	} while (sio_ir & hooks->intr_all);

	spin_unlock_irqrestore(&port->ip_lock, flags);
	ioc3_enable(is, idd, card_ptr->ic_enable);
	return 0;
}

/**
 * ioc3uart_intr - field all serial interrupts
 * @is : submodule
 * @idd: driver data
 * @pending: interrupts to handle
 *
 * The upper level interrupt handler sends interrupts for both ports
 * here.  So we need to call for each port with its interrupts.
 */
static int ioc3uart_intr(struct ioc3_submodule *is,
			 struct ioc3_driver_data *idd,
			 unsigned int pending)
{
	int ret = 0;

	if (pending & SIO_IR_SA)
		ret |= ioc3uart_intr_one(is, idd, pending & SIO_IR_SA);
	if (pending & SIO_IR_SB)
		ret |= ioc3uart_intr_one(is, idd, pending & SIO_IR_SB);

	return ret;
}

/**
 * ic3_type - uart_ops type() hook
 * @port: Port to operate with (we ignore since we only have one port)
 *
 * Returns a human-readable port description based on the line's
 * configured protocol.
 */
static const char *ic3_type(struct uart_port *the_port)
{
	if (IS_RS232(the_port->line))
		return "SGI IOC3 Serial [rs232]";
	else
		return "SGI IOC3 Serial [rs422]";
}

/**
 * ic3_tx_empty - Is the transmitter empty?
 * @port: Port to operate on
 *
 * Reports TIOCSER_TEMT when the shadow register says the transmitter
 * is empty, 0 otherwise.
 */
static unsigned int ic3_tx_empty(struct uart_port *the_port)
{
	unsigned int ret = 0;
	struct ioc3_port *port = get_ioc3_port(the_port);

	if (readl(&port->ip_serial_regs->shadow) & SHADOW_TEMT)
		ret = TIOCSER_TEMT;
	return ret;
}

/**
 * ic3_stop_tx - stop the transmitter
 * @port: Port to operate on
 *
 * Simply drops the lowat notification; queued DMA data is not flushed.
 */
static void ic3_stop_tx(struct uart_port *the_port)
{
	struct ioc3_port *port = get_ioc3_port(the_port);

	if (port)
		set_notification(port, N_OUTPUT_LOWAT, 0);
}

/**
 * ic3_stop_rx - stop the receiver
 * @port: Port to operate on
 *
 * Clears INPUT_ENABLE so receive_chars() becomes a no-op for this port.
 */
static void ic3_stop_rx(struct uart_port *the_port)
{
	struct ioc3_port *port = get_ioc3_port(the_port);

	if (port)
		port->ip_flags &= ~INPUT_ENABLE;
}

/**
 * null_void_function - placeholder for uart_ops hooks we don't need
 * @port: Port to operate on
 */
static void null_void_function(struct uart_port *the_port)
{
}

/**
 * ic3_shutdown - shut down the port - free irq and disable
 * @port: port to shut down
 *
 * Wakes any modem-status waiters and turns off all notifications under
 * the port lock.
 */
static void ic3_shutdown(struct uart_port *the_port)
{
	unsigned long port_flags;
	struct ioc3_port *port;
	struct uart_state *state;

	port = get_ioc3_port(the_port);
	if (!port)
		return;

	state = the_port->state;
	wake_up_interruptible(&state->port.delta_msr_wait);

	spin_lock_irqsave(&the_port->lock, port_flags);
	set_notification(port, N_ALL, 0);
	spin_unlock_irqrestore(&the_port->lock, port_flags);
}

/**
 * ic3_set_mctrl - set control lines (dtr, rts, etc)
 * @port: Port to operate on
 * @mctrl: Lines to set/unset
 *
 * Translates TIOCM_* bits into UART_MCR_* bits and pushes them via
 * set_mcr() (the shadow mask is always SHADOW_DTR here).
 */
static void ic3_set_mctrl(struct uart_port *the_port, unsigned int mctrl)
{
	unsigned char mcr = 0;

	if (mctrl & TIOCM_RTS)
		mcr |= UART_MCR_RTS;
	if (mctrl & TIOCM_DTR)
		mcr |= UART_MCR_DTR;
	if (mctrl & TIOCM_OUT1)
		mcr |= UART_MCR_OUT1;
	if (mctrl & TIOCM_OUT2)
		mcr |= UART_MCR_OUT2;
	if (mctrl & TIOCM_LOOP)
		mcr |= UART_MCR_LOOP;

	set_mcr(the_port, mcr, SHADOW_DTR);
}

/**
 * ic3_get_mctrl - get control line info
 * @port: port to operate on
 *
 * Reads the shadow register and reports DCD/DSR/CTS as TIOCM_* bits.
 */
static unsigned int ic3_get_mctrl(struct uart_port *the_port)
{
	struct ioc3_port *port = get_ioc3_port(the_port);
	uint32_t shadow;
	unsigned int ret = 0;

	if (!port)
		return 0;

	shadow = readl(&port->ip_serial_regs->shadow);
	if (shadow & SHADOW_DCD)
		ret |= TIOCM_CD;
	if (shadow & SHADOW_DR)
		ret |= TIOCM_DSR;
	if (shadow & SHADOW_CTS)
		ret |= TIOCM_CTS;
	return ret;
}

/**
 * ic3_start_tx - Start transmitter. Called with the_port->lock
 * @port: Port to operate on
 *
 * Arms the lowat notification and the tx-empty interrupt so output
 * drains and DMA can be shut off once it does.
 */
static void ic3_start_tx(struct uart_port *the_port)
{
	struct ioc3_port *port = get_ioc3_port(the_port);

	if (port) {
		set_notification(port, N_OUTPUT_LOWAT, 1);
		enable_intrs(port, port->ip_hooks->intr_tx_mt);
	}
}

/**
 * ic3_break_ctl - handle breaks
 * @port: Port to operate on
 * @break_state: Break state
 *
 * Intentionally a no-op for this hardware.
 */
static void ic3_break_ctl(struct uart_port *the_port, int break_state)
{
}

/**
 * ic3_startup - Start up the serial port - always return 0 (We're always on)
 * @port: Port to operate on
 *
 * Validates port/card pointers, links the uart_port into the ioc3_port
 * and performs the locked local startup.
 */
static int ic3_startup(struct uart_port *the_port)
{
	int retval;
	struct ioc3_port *port;
	struct ioc3_card *card_ptr;
	unsigned long port_flags;

	if (!the_port) {
		NOT_PROGRESS();
		return -ENODEV;
	}
	port = get_ioc3_port(the_port);
	if (!port) {
		NOT_PROGRESS();
		return -ENODEV;
	}
	card_ptr = port->ip_card;
	port->ip_port = the_port;

	if (!card_ptr) {
		NOT_PROGRESS();
		return -ENODEV;
	}

	/* Start up the serial port */
	spin_lock_irqsave(&the_port->lock, port_flags);
	retval = ic3_startup_local(the_port);
	spin_unlock_irqrestore(&the_port->lock, port_flags);
	return retval;
}

/**
 * ic3_set_termios - set termios stuff
 * @port: port to operate on
 * @termios: New settings
 * @termios: Old
 *
 * Thin locked wrapper around ioc3_change_speed().
 */
static void
ic3_set_termios(struct uart_port *the_port,
		struct ktermios *termios, struct ktermios *old_termios)
{
	unsigned long port_flags;

	spin_lock_irqsave(&the_port->lock, port_flags);
	ioc3_change_speed(the_port, termios, old_termios);
	spin_unlock_irqrestore(&the_port->lock, port_flags);
}

/**
 * ic3_request_port - allocate resources for port - no op....
 * @port: port to operate on
 *
 */
static int ic3_request_port(struct uart_port *port)
{
	return 0;
}

/* Associate the uart functions above - given to serial core */
static struct uart_ops ioc3_ops = {
	.tx_empty = ic3_tx_empty,
	.set_mctrl = ic3_set_mctrl,
	.get_mctrl = ic3_get_mctrl,
	.stop_tx = ic3_stop_tx,
	.start_tx = ic3_start_tx,
	.stop_rx = ic3_stop_rx,
	.break_ctl = ic3_break_ctl,
	.startup = ic3_startup,
	.shutdown = ic3_shutdown,
	.set_termios = ic3_set_termios,
	.type = ic3_type,
	.release_port = null_void_function,
	.request_port = ic3_request_port,
};

/*
 * Boot-time initialization code
 */

static struct uart_driver ioc3_uart = {
	.owner = THIS_MODULE,
	.driver_name = "ioc3_serial",
	.dev_name = DEVICE_NAME,
	.major = DEVICE_MAJOR,
	.minor = DEVICE_MINOR,
	.nr = MAX_LOGICAL_PORTS
};

/**
 * ioc3_serial_core_attach - register with serial core
 *		This is done during pci probing
 * @is: submodule struct for this
 * @idd: handle for this card
 *
 * Fills in each logical port's uart_port (membase/iobase/mapbase only
 * need to be non-zero for the serial core) and registers it.
 */
static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
				struct ioc3_driver_data *idd)
{
	struct ioc3_port *port;
	struct uart_port *the_port;
	struct ioc3_card *card_ptr = idd->data[is->id];
	int ii, phys_port;
	struct pci_dev *pdev = idd->pdev;

	DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n",
		       __func__, pdev, (void *)card_ptr));

	if (!card_ptr)
		return -ENODEV;

	/* once around for each logical port on this card */
	for (ii = 0; ii < LOGICAL_PORTS_PER_CARD; ii++) {
		phys_port = GET_PHYSICAL_PORT(ii);
		the_port = &card_ptr->ic_port[phys_port].
icp_uart_port[GET_LOGICAL_PORT(ii)]; port = card_ptr->ic_port[phys_port].icp_port; port->ip_port = the_port; DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n", __func__, (void *)the_port, (void *)port, phys_port, ii)); /* membase, iobase and mapbase just need to be non-0 */ the_port->membase = (unsigned char __iomem *)1; the_port->iobase = (pdev->bus->number << 16) | ii; the_port->line = (Num_of_ioc3_cards << 2) | ii; the_port->mapbase = 1; the_port->type = PORT_16550A; the_port->fifosize = FIFO_SIZE; the_port->ops = &ioc3_ops; the_port->irq = idd->irq_io; the_port->dev = &pdev->dev; if (uart_add_one_port(&ioc3_uart, the_port) < 0) { printk(KERN_WARNING "%s: unable to add port %d bus %d\n", __func__, the_port->line, pdev->bus->number); } else { DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n", the_port->line, the_port->irq, pdev->bus->number)); } /* all ports are rs232 for now */ if (IS_PHYSICAL_PORT(ii)) ioc3_set_proto(port, PROTO_RS232); } return 0; } /** * ioc3uart_remove - register detach function * @is: submodule struct for this submodule * @idd: ioc3 driver data for this submodule */ static int ioc3uart_remove(struct ioc3_submodule *is, struct ioc3_driver_data *idd) { struct ioc3_card *card_ptr = idd->data[is->id]; struct uart_port *the_port; struct ioc3_port *port; int ii; if (card_ptr) { for (ii = 0; ii < LOGICAL_PORTS_PER_CARD; ii++) { the_port = &card_ptr->ic_port[GET_PHYSICAL_PORT(ii)]. icp_uart_port[GET_LOGICAL_PORT(ii)]; if (the_port) uart_remove_one_port(&ioc3_uart, the_port); port = card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].icp_port; if (port && IS_PHYSICAL_PORT(ii) && (GET_PHYSICAL_PORT(ii) == 0)) { pci_free_consistent(port->ip_idd->pdev, TOTAL_RING_BUF_SIZE, (void *)port->ip_cpu_ringbuf, port->ip_dma_ringbuf); kfree(port); card_ptr->ic_port[GET_PHYSICAL_PORT(ii)]. 
icp_port = NULL; } } kfree(card_ptr); idd->data[is->id] = NULL; } return 0; } /** * ioc3uart_probe - card probe function called from shim driver * @is: submodule struct for this submodule * @idd: ioc3 driver data for this card */ static int ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) { struct pci_dev *pdev = idd->pdev; struct ioc3_card *card_ptr; int ret = 0; struct ioc3_port *port; struct ioc3_port *ports[PORTS_PER_CARD]; int phys_port; int cnt; DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL); if (!card_ptr) { printk(KERN_WARNING "ioc3_attach_one" ": unable to get memory for the IOC3\n"); return -ENOMEM; } idd->data[is->id] = card_ptr; Submodule_slot = is->id; writel(((UARTA_BASE >> 3) << SIO_CR_SER_A_BASE_SHIFT) | ((UARTB_BASE >> 3) << SIO_CR_SER_B_BASE_SHIFT) | (0xf << SIO_CR_CMD_PULSE_SHIFT), &idd->vma->sio_cr); pci_write_config_dword(pdev, PCI_LAT, 0xff00); /* Enable serial port mode select generic PIO pins as outputs */ ioc3_gpcr_set(idd, GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL); /* Create port structures for each port */ for (phys_port = 0; phys_port < PORTS_PER_CARD; phys_port++) { port = kzalloc(sizeof(struct ioc3_port), GFP_KERNEL); if (!port) { printk(KERN_WARNING "IOC3 serial memory not available for port\n"); ret = -ENOMEM; goto out4; } spin_lock_init(&port->ip_lock); /* we need to remember the previous ones, to point back to * them farther down - setting up the ring buffers. 
*/ ports[phys_port] = port; /* init to something useful */ card_ptr->ic_port[phys_port].icp_port = port; port->ip_is = is; port->ip_idd = idd; port->ip_baud = 9600; port->ip_card = card_ptr; port->ip_hooks = &hooks_array[phys_port]; /* Setup each port */ if (phys_port == 0) { port->ip_serial_regs = &idd->vma->port_a; port->ip_uart_regs = &idd->vma->sregs.uarta; DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p " "ip_uart_regs 0x%p\n", __func__, (void *)port->ip_serial_regs, (void *)port->ip_uart_regs)); /* setup ring buffers */ port->ip_cpu_ringbuf = pci_alloc_consistent(pdev, TOTAL_RING_BUF_SIZE, &port->ip_dma_ringbuf); BUG_ON(!((((int64_t) port->ip_dma_ringbuf) & (TOTAL_RING_BUF_SIZE - 1)) == 0)); port->ip_inring = RING(port, RX_A); port->ip_outring = RING(port, TX_A); DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p " "ip_dma_ringbuf 0x%p, ip_inring 0x%p " "ip_outring 0x%p\n", __func__, (void *)port->ip_cpu_ringbuf, (void *)port->ip_dma_ringbuf, (void *)port->ip_inring, (void *)port->ip_outring)); } else { port->ip_serial_regs = &idd->vma->port_b; port->ip_uart_regs = &idd->vma->sregs.uartb; DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p " "ip_uart_regs 0x%p\n", __func__, (void *)port->ip_serial_regs, (void *)port->ip_uart_regs)); /* share the ring buffers */ port->ip_dma_ringbuf = ports[phys_port - 1]->ip_dma_ringbuf; port->ip_cpu_ringbuf = ports[phys_port - 1]->ip_cpu_ringbuf; port->ip_inring = RING(port, RX_B); port->ip_outring = RING(port, TX_B); DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p " "ip_dma_ringbuf 0x%p, ip_inring 0x%p " "ip_outring 0x%p\n", __func__, (void *)port->ip_cpu_ringbuf, (void *)port->ip_dma_ringbuf, (void *)port->ip_inring, (void *)port->ip_outring)); } DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p", __func__, phys_port, (void *)port, (void *)card_ptr)); DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", (void *)port->ip_serial_regs, (void *)port->ip_uart_regs)); /* Initialize the hardware for IOC3 */ port_init(port); 
DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p " "outring 0x%p\n", __func__, phys_port, (void *)port, (void *)port->ip_inring, (void *)port->ip_outring)); } /* register port with the serial core */ if ((ret = ioc3_serial_core_attach(is, idd))) goto out4; Num_of_ioc3_cards++; return ret; /* error exits that give back resources */ out4: for (cnt = 0; cnt < phys_port; cnt++) kfree(ports[cnt]); kfree(card_ptr); return ret; } static struct ioc3_submodule ioc3uart_ops = { .name = "IOC3uart", .probe = ioc3uart_probe, .remove = ioc3uart_remove, /* call .intr for both ports initially */ .irq_mask = SIO_IR_SA | SIO_IR_SB, .intr = ioc3uart_intr, .owner = THIS_MODULE, }; /** * ioc3_detect - module init called, */ static int __init ioc3uart_init(void) { int ret; /* register with serial core */ if ((ret = uart_register_driver(&ioc3_uart)) < 0) { printk(KERN_WARNING "%s: Couldn't register IOC3 uart serial driver\n", __func__); return ret; } ret = ioc3_register_submodule(&ioc3uart_ops); if (ret) uart_unregister_driver(&ioc3_uart); return ret; } static void __exit ioc3uart_exit(void) { ioc3_unregister_submodule(&ioc3uart_ops); uart_unregister_driver(&ioc3_uart); } module_init(ioc3uart_init); module_exit(ioc3uart_exit); MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>"); MODULE_DESCRIPTION("Serial PCI driver module for SGI IOC3 card"); MODULE_LICENSE("GPL");
gpl-2.0
libos-nuse/net-next-nuse
arch/powerpc/platforms/cell/smp.c
1107
3983
/* * SMP support for BPA machines. * * Dave Engebretsen, Peter Bergner, and * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com * * Plus various changes from other IBM teams... * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/cache.h> #include <linux/err.h> #include <linux/device.h> #include <linux/cpu.h> #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/smp.h> #include <asm/paca.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/cputhreads.h> #include <asm/code-patching.h> #include "interrupt.h" #include <asm/udbg.h> #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif /* * The Primary thread of each non-boot processor was started from the OF client * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. */ static cpumask_t of_spin_map; /** * smp_startup_cpu() - start the given cpu * * At boot time, there is nothing to do for primary threads which were * started from Open Firmware. For anything else, call RTAS with the * appropriate start location. 
* * Returns: * 0 - failure * 1 - success */ static inline int smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; if (cpumask_test_cpu(lcpu, &of_spin_map)) /* Already started by OF and sitting in spin loop */ return 1; pcpu = get_hard_smp_processor_id(lcpu); /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. */ start_cpu = rtas_token("start-cpu"); if (start_cpu == RTAS_UNKNOWN_SERVICE) return 1; status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); if (status != 0) { printk(KERN_ERR "start-cpu failed: %i\n", status); return 0; } return 1; } static void smp_cell_setup_cpu(int cpu) { if (cpu != boot_cpuid) iic_setup_cpu(); /* * change default DABRX to allow user watchpoints */ mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } static int smp_cell_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) return -ENOENT; /* * The processor is currently spinning, waiting for the * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; return 0; } static struct smp_ops_t bpa_iic_smp_ops = { .message_pass = iic_message_pass, .probe = iic_request_IPIs, .kick_cpu = smp_cell_kick_cpu, .setup_cpu = smp_cell_setup_cpu, .cpu_bootable = smp_generic_cpu_bootable, }; /* This is called very early */ void __init smp_init_cell(void) { int i; DBG(" -> smp_init_cell()\n"); smp_ops = &bpa_iic_smp_ops; /* Mark threads which are still spinning in hold loops. 
*/ if (cpu_has_feature(CPU_FTR_SMT)) { for_each_present_cpu(i) { if (cpu_thread_in_core(i) == 0) cpumask_set_cpu(i, &of_spin_map); } } else cpumask_copy(&of_spin_map, cpu_present_mask); cpumask_clear_cpu(boot_cpuid, &of_spin_map); /* Non-lpar has additional take/give timebase */ if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { smp_ops->give_timebase = rtas_give_timebase; smp_ops->take_timebase = rtas_take_timebase; } DBG(" <- smp_init_cell()\n"); }
gpl-2.0
arjen75/lg_p700_kernel
drivers/net/bna/bna_txrx.c
2387
103208
/* * Linux network driver for Brocade Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com */ #include "bna.h" #include "bfa_sm.h" #include "bfi.h" /** * IB */ #define bna_ib_find_free_ibidx(_mask, _pos)\ do {\ (_pos) = 0;\ while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\ ((1 << (_pos)) & (_mask)))\ (_pos)++;\ } while (0) #define bna_ib_count_ibidx(_mask, _count)\ do {\ int pos = 0;\ (_count) = 0;\ while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\ if ((1 << pos) & (_mask))\ (_count) = pos + 1;\ pos++;\ } \ } while (0) #define bna_ib_select_segpool(_count, _q_idx)\ do {\ int i;\ (_q_idx) = -1;\ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\ if ((_count <= ibidx_pool[i].pool_entry_size)) {\ (_q_idx) = i;\ break;\ } \ } \ } while (0) struct bna_ibidx_pool { int pool_size; int pool_entry_size; }; init_ibidx_pool(ibidx_pool); static struct bna_intr * bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type, int vector) { struct bna_intr *intr; struct list_head *qe; list_for_each(qe, &ib_mod->intr_active_q) { intr = (struct bna_intr *)qe; if ((intr->intr_type == intr_type) && (intr->vector == vector)) { intr->ref_count++; return intr; } } if (list_empty(&ib_mod->intr_free_q)) return NULL; bfa_q_deq(&ib_mod->intr_free_q, &intr); bfa_q_qe_init(&intr->qe); intr->ref_count = 1; intr->intr_type = intr_type; intr->vector = vector; list_add_tail(&intr->qe, &ib_mod->intr_active_q); return intr; } static void bna_intr_put(struct bna_ib_mod *ib_mod, struct bna_intr *intr) { 
intr->ref_count--; if (intr->ref_count == 0) { intr->ib = NULL; list_del(&intr->qe); bfa_q_qe_init(&intr->qe); list_add_tail(&intr->qe, &ib_mod->intr_free_q); } } void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna, struct bna_res_info *res_info) { int i; int j; int count; u8 offset; struct bna_doorbell_qset *qset; unsigned long off; ib_mod->bna = bna; ib_mod->ib = (struct bna_ib *) res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva; ib_mod->intr = (struct bna_intr *) res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva; ib_mod->idx_seg = (struct bna_ibidx_seg *) res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva; INIT_LIST_HEAD(&ib_mod->ib_free_q); INIT_LIST_HEAD(&ib_mod->intr_free_q); INIT_LIST_HEAD(&ib_mod->intr_active_q); for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]); for (i = 0; i < BFI_MAX_IB; i++) { ib_mod->ib[i].ib_id = i; ib_mod->ib[i].ib_seg_host_addr_kva = res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; ib_mod->ib[i].ib_seg_host_addr.lsb = res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; ib_mod->ib[i].ib_seg_host_addr.msb = res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; qset = (struct bna_doorbell_qset *)0; off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1) * (0x20 >> 2)]); ib_mod->ib[i].door_bell.doorbell_addr = off + BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva); bfa_q_qe_init(&ib_mod->ib[i].qe); list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q); bfa_q_qe_init(&ib_mod->intr[i].qe); list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q); } count = 0; offset = 0; for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) { for (j = 0; j < ibidx_pool[i].pool_size; j++) { bfa_q_qe_init(&ib_mod->idx_seg[count]); ib_mod->idx_seg[count].ib_seg_size = ibidx_pool[i].pool_entry_size; ib_mod->idx_seg[count].ib_idx_tbl_offset = offset; list_add_tail(&ib_mod->idx_seg[count].qe, &ib_mod->ibidx_seg_pool[i]); count++; offset += ibidx_pool[i].pool_entry_size; 
} } } void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod) { int i; int j; struct list_head *qe; i = 0; list_for_each(qe, &ib_mod->ib_free_q) i++; i = 0; list_for_each(qe, &ib_mod->intr_free_q) i++; for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) { j = 0; list_for_each(qe, &ib_mod->ibidx_seg_pool[i]) j++; } ib_mod->bna = NULL; } static struct bna_ib * bna_ib_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type, int vector) { struct bna_ib *ib; struct bna_intr *intr; if (intr_type == BNA_INTR_T_INTX) vector = (1 << vector); intr = bna_intr_get(ib_mod, intr_type, vector); if (intr == NULL) return NULL; if (intr->ib) { if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) { bna_intr_put(ib_mod, intr); return NULL; } intr->ib->ref_count++; return intr->ib; } if (list_empty(&ib_mod->ib_free_q)) { bna_intr_put(ib_mod, intr); return NULL; } bfa_q_deq(&ib_mod->ib_free_q, &ib); bfa_q_qe_init(&ib->qe); ib->ref_count = 1; ib->start_count = 0; ib->idx_mask = 0; ib->intr = intr; ib->idx_seg = NULL; intr->ib = ib; ib->bna = ib_mod->bna; return ib; } static void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib) { bna_intr_put(ib_mod, ib->intr); ib->ref_count--; if (ib->ref_count == 0) { ib->intr = NULL; ib->bna = NULL; list_add_tail(&ib->qe, &ib_mod->ib_free_q); } } /* Returns index offset - starting from 0 */ static int bna_ib_reserve_idx(struct bna_ib *ib) { struct bna_ib_mod *ib_mod = &ib->bna->ib_mod; struct bna_ibidx_seg *idx_seg; int idx; int num_idx; int q_idx; /* Find the first free index position */ bna_ib_find_free_ibidx(ib->idx_mask, idx); if (idx == BFI_IBIDX_MAX_SEGSIZE) return -1; /* * Calculate the total number of indexes held by this IB, * including the index newly reserved above. 
*/ bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx); /* See if there is a free space in the index segment held by this IB */ if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) { ib->idx_mask |= (1 << idx); return idx; } if (ib->start_count) return -1; /* Allocate a new segment */ bna_ib_select_segpool(num_idx, q_idx); while (1) { if (q_idx == BFI_IBIDX_TOTAL_POOLS) return -1; if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx])) break; q_idx++; } bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg); bfa_q_qe_init(&idx_seg->qe); /* Free the old segment */ if (ib->idx_seg) { bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx); list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]); } ib->idx_seg = idx_seg; ib->idx_mask |= (1 << idx); return idx; } static void bna_ib_release_idx(struct bna_ib *ib, int idx) { struct bna_ib_mod *ib_mod = &ib->bna->ib_mod; struct bna_ibidx_seg *idx_seg; int num_idx; int cur_q_idx; int new_q_idx; ib->idx_mask &= ~(1 << idx); if (ib->start_count) return; bna_ib_count_ibidx(ib->idx_mask, num_idx); /* * Free the segment, if there are no more indexes in the segment * held by this IB */ if (!num_idx) { bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx); list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[cur_q_idx]); ib->idx_seg = NULL; return; } /* See if we can move to a smaller segment */ bna_ib_select_segpool(num_idx, new_q_idx); bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx); while (new_q_idx < cur_q_idx) { if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx])) break; new_q_idx++; } if (new_q_idx < cur_q_idx) { /* Select the new smaller segment */ bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg); bfa_q_qe_init(&idx_seg->qe); /* Free the old segment */ list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[cur_q_idx]); ib->idx_seg = idx_seg; } } static int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config) { if (ib->start_count) return -1; ib->ib_config.coalescing_timeo = 
ib_config->coalescing_timeo; ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo; ib->ib_config.interpkt_count = ib_config->interpkt_count; ib->ib_config.ctrl_flags = ib_config->ctrl_flags; ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE; if (ib->intr->intr_type == BNA_INTR_T_MSIX) ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE; return 0; } static void bna_ib_start(struct bna_ib *ib) { struct bna_ib_blk_mem ib_cfg; struct bna_ib_blk_mem *ib_mem; u32 pg_num; u32 intx_mask; int i; void __iomem *base_addr; unsigned long off; ib->start_count++; if (ib->start_count > 1) return; ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb); ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb); ib_cfg.clsc_n_ctrl_n_msix = (((u32) ib->ib_config.coalescing_timeo << 16) | ((u32)ib->ib_config.ctrl_flags << 8) | (ib->intr->vector)); ib_cfg.ipkt_n_ent_n_idxof = ((u32) (ib->ib_config.interpkt_timeo & 0xf) << 16) | ((u32)ib->idx_seg->ib_seg_size << 8) | (ib->idx_seg->ib_idx_tbl_offset); ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32) ib->ib_config.interpkt_count << 24); pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num, HQM_IB_RAM_BASE_OFFSET); writel(pg_num, ib->bna->regs.page_addr); base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva, HQM_IB_RAM_BASE_OFFSET); ib_mem = (struct bna_ib_blk_mem *)0; off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo; writel(htonl(ib_cfg.host_addr_lo), base_addr + off); off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi; writel(htonl(ib_cfg.host_addr_hi), base_addr + off); off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix; writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off); off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof; writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off); off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked; writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off); ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( (u32)ib->ib_config.coalescing_timeo, 0); pg_num = 
BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num, HQM_INDX_TBL_RAM_BASE_OFFSET); writel(pg_num, ib->bna->regs.page_addr); base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva, HQM_INDX_TBL_RAM_BASE_OFFSET); for (i = 0; i < ib->idx_seg->ib_seg_size; i++) { off = (unsigned long) ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE); writel(0, base_addr + off); } if (ib->intr->intr_type == BNA_INTR_T_INTX) { bna_intx_disable(ib->bna, intx_mask); intx_mask &= ~(ib->intr->vector); bna_intx_enable(ib->bna, intx_mask); } } static void bna_ib_stop(struct bna_ib *ib) { u32 intx_mask; ib->start_count--; if (ib->start_count == 0) { writel(BNA_DOORBELL_IB_INT_DISABLE, ib->door_bell.doorbell_addr); if (ib->intr->intr_type == BNA_INTR_T_INTX) { bna_intx_disable(ib->bna, intx_mask); intx_mask |= (ib->intr->vector); bna_intx_enable(ib->bna, intx_mask); } } } static void bna_ib_fail(struct bna_ib *ib) { ib->start_count = 0; } /** * RXF */ static void rxf_enable(struct bna_rxf *rxf); static void rxf_disable(struct bna_rxf *rxf); static void __rxf_config_set(struct bna_rxf *rxf); static void __rxf_rit_set(struct bna_rxf *rxf); static void __bna_rxf_stat_clr(struct bna_rxf *rxf); static int rxf_process_packet_filter(struct bna_rxf *rxf); static int rxf_clear_packet_filter(struct bna_rxf *rxf); static void rxf_reset_packet_filter(struct bna_rxf *rxf); static void rxf_cb_enabled(void *arg, int status); static void rxf_cb_disabled(void *arg, int status); static void bna_rxf_cb_stats_cleared(void *arg, int status); static void __rxf_enable(struct bna_rxf *rxf); static void __rxf_disable(struct bna_rxf *rxf); bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf, 
enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf, enum bna_rxf_event); bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf, enum bna_rxf_event); static struct bfa_sm_table rxf_sm_table[] = { {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED}, {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT}, {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT}, {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED}, {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT}, {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT}, {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT}, {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT}, {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT} }; static void bna_rxf_sm_stopped_entry(struct bna_rxf *rxf) { call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS); } static void bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_START: bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait); break; case RXF_E_STOP: bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_FAIL: /* No-op */ break; case RXF_E_CAM_FLTR_MOD: call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS); break; case RXF_E_STARTED: case RXF_E_STOPPED: case RXF_E_CAM_FLTR_RESP: /** * These events are received due to flushing of mbox * when device fails */ /* No-op */ break; case RXF_E_PAUSE: rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED; call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS); break; case RXF_E_RESUME: rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING; call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS); break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf) { __rxf_config_set(rxf); __rxf_rit_set(rxf); rxf_enable(rxf); } static void bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch 
(event) { case RXF_E_STOP: /** * STOP is originated from bnad. When this happens, * it can not be waiting for filter update */ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT); bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait); break; case RXF_E_FAIL: call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS); call_rxf_start_cbfn(rxf, BNA_CB_FAIL); bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_CAM_FLTR_MOD: /* No-op */ break; case RXF_E_STARTED: /** * Force rxf_process_filter() to go through initial * config */ if ((rxf->ucast_active_mac != NULL) && (rxf->ucast_pending_set == 0)) rxf->ucast_pending_set = 1; if (rxf->rss_status == BNA_STATUS_T_ENABLED) rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING; rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING; bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait); break; case RXF_E_PAUSE: case RXF_E_RESUME: rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED; break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf) { if (!rxf_process_packet_filter(rxf)) { /* No more pending CAM entries to update */ bfa_fsm_set_state(rxf, bna_rxf_sm_started); } } static void bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_STOP: /** * STOP is originated from bnad. 
When this happens, * it can not be waiting for filter update */ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT); bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait); break; case RXF_E_FAIL: rxf_reset_packet_filter(rxf); call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS); call_rxf_start_cbfn(rxf, BNA_CB_FAIL); bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_CAM_FLTR_MOD: /* No-op */ break; case RXF_E_CAM_FLTR_RESP: if (!rxf_process_packet_filter(rxf)) { /* No more pending CAM entries to update */ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS); bfa_fsm_set_state(rxf, bna_rxf_sm_started); } break; case RXF_E_PAUSE: case RXF_E_RESUME: rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED; break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_started_entry(struct bna_rxf *rxf) { call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS); if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) { if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) bfa_fsm_send_event(rxf, RXF_E_PAUSE); else bfa_fsm_send_event(rxf, RXF_E_RESUME); } } static void bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_STOP: bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait); /* Hack to get FSM start clearing CAM entries */ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP); break; case RXF_E_FAIL: rxf_reset_packet_filter(rxf); bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_CAM_FLTR_MOD: bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait); break; case RXF_E_PAUSE: bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait); break; case RXF_E_RESUME: bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait); break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf) { /** * Note: Do not add rxf_clear_packet_filter here. 
* It will overstep mbox when this transition happens: * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event */ } static void bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_FAIL: /** * FSM was in the process of stopping, initiated by * bnad. When this happens, no one can be waiting for * start or filter update */ rxf_reset_packet_filter(rxf); bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_CAM_FLTR_RESP: if (!rxf_clear_packet_filter(rxf)) { /* No more pending CAM entries to clear */ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait); rxf_disable(rxf); } break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf) { /** * NOTE: Do not add rxf_disable here. * It will overstep mbox when this transition happens: * start_wait -> stop_wait on RXF_E_STOP event */ } static void bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_FAIL: /** * FSM was in the process of stopping, initiated by * bnad. When this happens, no one can be waiting for * start or filter update */ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_STARTED: /** * This event is received due to abrupt transition from * bna_rxf_sm_start_wait state on receiving * RXF_E_STOP event */ rxf_disable(rxf); break; case RXF_E_STOPPED: /** * FSM was in the process of stopping, initiated by * bnad. 
When this happens, no one can be waiting for * start or filter update */ bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait); break; case RXF_E_PAUSE: rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED; break; case RXF_E_RESUME: rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING; break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf) { rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED); __rxf_disable(rxf); } static void bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_FAIL: /** * FSM was in the process of disabling rxf, initiated by * bnad. */ call_rxf_pause_cbfn(rxf, BNA_CB_FAIL); bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_STOPPED: rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED; call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS); bfa_fsm_set_state(rxf, bna_rxf_sm_started); break; /* * Since PAUSE/RESUME can only be sent by bnad, we don't expect * any other event during these states */ default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf) { rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED); rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED; __rxf_enable(rxf); } static void bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_FAIL: /** * FSM was in the process of disabling rxf, initiated by * bnad. 
*/ call_rxf_resume_cbfn(rxf, BNA_CB_FAIL); bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; case RXF_E_STARTED: rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING; call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS); bfa_fsm_set_state(rxf, bna_rxf_sm_started); break; /* * Since PAUSE/RESUME can only be sent by bnad, we don't expect * any other event during these states */ default: bfa_sm_fault(rxf->rx->bna, event); } } static void bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf) { __bna_rxf_stat_clr(rxf); } static void bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event) { switch (event) { case RXF_E_FAIL: case RXF_E_STAT_CLEARED: bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); break; default: bfa_sm_fault(rxf->rx->bna, event); } } static void __rxf_enable(struct bna_rxf *rxf) { struct bfi_ll_rxf_multi_req ll_req; u32 bm[2] = {0, 0}; if (rxf->rxf_id < 32) bm[0] = 1 << rxf->rxf_id; else bm[1] = 1 << (rxf->rxf_id - 32); bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0); ll_req.rxf_id_mask[0] = htonl(bm[0]); ll_req.rxf_id_mask[1] = htonl(bm[1]); ll_req.enable = 1; bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req), rxf_cb_enabled, rxf); bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); } static void __rxf_disable(struct bna_rxf *rxf) { struct bfi_ll_rxf_multi_req ll_req; u32 bm[2] = {0, 0}; if (rxf->rxf_id < 32) bm[0] = 1 << rxf->rxf_id; else bm[1] = 1 << (rxf->rxf_id - 32); bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0); ll_req.rxf_id_mask[0] = htonl(bm[0]); ll_req.rxf_id_mask[1] = htonl(bm[1]); ll_req.enable = 0; bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req), rxf_cb_disabled, rxf); bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); } static void __rxf_config_set(struct bna_rxf *rxf) { u32 i; struct bna_rss_mem *rss_mem; struct bna_rx_fndb_ram *rx_fndb_ram; struct bna *bna = rxf->rx->bna; void __iomem *base_addr; unsigned long off; base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, RSS_TABLE_BASE_OFFSET); rss_mem = (struct bna_rss_mem 
*)0; /* Configure RSS if required */ if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) { /* configure RSS Table */ writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM + bna->port_num, RSS_TABLE_BASE_OFFSET), bna->regs.page_addr); /* temporarily disable RSS, while hash value is written */ off = (unsigned long)&rss_mem[0].type_n_hash; writel(0, base_addr + off); for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) { off = (unsigned long) &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i]; writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]), base_addr + off); } off = (unsigned long)&rss_mem[0].type_n_hash; writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask, base_addr + off); } /* Configure RxF */ writel(BNA_GET_PAGE_NUM( LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2), RX_FNDB_RAM_BASE_OFFSET), bna->regs.page_addr); base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, RX_FNDB_RAM_BASE_OFFSET); rx_fndb_ram = (struct bna_rx_fndb_ram *)0; /* We always use RSS table 0 */ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop; writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE, base_addr + off); /* small large buffer enable/disable */ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props; writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80, base_addr + off); /* RIT offset, HDS forced offset, multicast RxQ Id */ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq; writel((rxf->rit_segment->rit_offset << 16) | (rxf->forced_offset << 8) | (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id, base_addr + off); /* * default vlan tag, default function enable, strip vlan bytes, * HDS type, header size */ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags; writel(((u32)rxf->default_vlan_tag << 16) | (rxf->ctrl_flags & (BNA_RXF_CF_DEFAULT_VLAN | BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE | BNA_RXF_CF_VLAN_STRIP)) | (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) | rxf->hds_cfg.header_size, base_addr + off); } void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum 
bna_status status) { struct bna *bna = rxf->rx->bna; int i; writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2), VLAN_RAM_BASE_OFFSET), bna->regs.page_addr); if (status == BNA_STATUS_T_ENABLED) { /* enable VLAN filtering on this function */ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) { writel(rxf->vlan_filter_table[i], BNA_GET_VLAN_MEM_ENTRY_ADDR (bna->pcidev.pci_bar_kva, rxf->rxf_id, i * 32)); } } else { /* disable VLAN filtering on this function */ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) { writel(0xffffffff, BNA_GET_VLAN_MEM_ENTRY_ADDR (bna->pcidev.pci_bar_kva, rxf->rxf_id, i * 32)); } } } static void __rxf_rit_set(struct bna_rxf *rxf) { struct bna *bna = rxf->rx->bna; struct bna_rit_mem *rit_mem; int i; void __iomem *base_addr; unsigned long off; base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, FUNCTION_TO_RXQ_TRANSLATE); rit_mem = (struct bna_rit_mem *)0; writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num, FUNCTION_TO_RXQ_TRANSLATE), bna->regs.page_addr); for (i = 0; i < rxf->rit_segment->rit_size; i++) { off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset]; writel(rxf->rit_segment->rit[i].large_rxq_id << 6 | rxf->rit_segment->rit[i].small_rxq_id, base_addr + off); } } static void __bna_rxf_stat_clr(struct bna_rxf *rxf) { struct bfi_ll_stats_req ll_req; u32 bm[2] = {0, 0}; if (rxf->rxf_id < 32) bm[0] = 1 << rxf->rxf_id; else bm[1] = 1 << (rxf->rxf_id - 32); bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0); ll_req.stats_mask = 0; ll_req.txf_id_mask[0] = 0; ll_req.txf_id_mask[1] = 0; ll_req.rxf_id_mask[0] = htonl(bm[0]); ll_req.rxf_id_mask[1] = htonl(bm[1]); bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req), bna_rxf_cb_stats_cleared, rxf); bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); } static void rxf_enable(struct bna_rxf *rxf) { if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) bfa_fsm_send_event(rxf, RXF_E_STARTED); else { rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED; __rxf_enable(rxf); } 
} static void rxf_cb_enabled(void *arg, int status) { struct bna_rxf *rxf = (struct bna_rxf *)arg; bfa_q_qe_init(&rxf->mbox_qe.qe); bfa_fsm_send_event(rxf, RXF_E_STARTED); } static void rxf_disable(struct bna_rxf *rxf) { if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) bfa_fsm_send_event(rxf, RXF_E_STOPPED); else rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED; __rxf_disable(rxf); } static void rxf_cb_disabled(void *arg, int status) { struct bna_rxf *rxf = (struct bna_rxf *)arg; bfa_q_qe_init(&rxf->mbox_qe.qe); bfa_fsm_send_event(rxf, RXF_E_STOPPED); } void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status) { struct bna_rxf *rxf = (struct bna_rxf *)arg; bfa_q_qe_init(&rxf->mbox_qe.qe); bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP); } static void bna_rxf_cb_stats_cleared(void *arg, int status) { struct bna_rxf *rxf = (struct bna_rxf *)arg; bfa_q_qe_init(&rxf->mbox_qe.qe); bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED); } void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd, const struct bna_mac *mac_addr) { struct bfi_ll_mac_addr_req req; bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0); req.rxf_id = rxf->rxf_id; memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN); bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req), rxf_cb_cam_fltr_mbox_cmd, rxf); bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); } static int rxf_process_packet_filter_mcast(struct bna_rxf *rxf) { struct bna_mac *mac = NULL; struct list_head *qe; /* Add multicast entries */ if (!list_empty(&rxf->mcast_pending_add_q)) { bfa_q_deq(&rxf->mcast_pending_add_q, &qe); bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac); list_add_tail(&mac->qe, &rxf->mcast_active_q); return 1; } /* Delete multicast entries previousely added */ if (!list_empty(&rxf->mcast_pending_del_q)) { bfa_q_deq(&rxf->mcast_pending_del_q, &qe); bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac); bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); return 1; } 
return 0; } static int rxf_process_packet_filter_vlan(struct bna_rxf *rxf) { /* Apply the VLAN filter */ if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) { rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING; if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC)) __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); } /* Apply RSS configuration */ if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) { rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING; if (rxf->rss_status == BNA_STATUS_T_DISABLED) { /* RSS is being disabled */ rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE; __rxf_rit_set(rxf); __rxf_config_set(rxf); } else { /* RSS is being enabled or reconfigured */ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE; __rxf_rit_set(rxf); __rxf_config_set(rxf); } } return 0; } /** * Processes pending ucast, mcast entry addition/deletion and issues mailbox * command. Also processes pending filter configuration - promiscuous mode, * default mode, allmutli mode and issues mailbox command or directly applies * to h/w */ static int rxf_process_packet_filter(struct bna_rxf *rxf) { /* Set the default MAC first */ if (rxf->ucast_pending_set > 0) { rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ, rxf->ucast_active_mac); rxf->ucast_pending_set--; return 1; } if (rxf_process_packet_filter_ucast(rxf)) return 1; if (rxf_process_packet_filter_mcast(rxf)) return 1; if (rxf_process_packet_filter_promisc(rxf)) return 1; if (rxf_process_packet_filter_allmulti(rxf)) return 1; if (rxf_process_packet_filter_vlan(rxf)) return 1; return 0; } static int rxf_clear_packet_filter_mcast(struct bna_rxf *rxf) { struct bna_mac *mac = NULL; struct list_head *qe; /* 3. delete pending mcast entries */ if (!list_empty(&rxf->mcast_pending_del_q)) { bfa_q_deq(&rxf->mcast_pending_del_q, &qe); bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac); bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); return 1; } /* 4. 
clear active mcast entries; move them to pending_add_q */
	if (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		/* delete from h/w, but keep the entry so it is re-added on restart */
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
		return 1;
	}

	return 0;
}

/**
 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
 * so that they are added to CAM again in the rxf start path. Moves the current
 * filter settings - promiscuous, default, allmulti - to pending filter
 * configuration. Returns 1 while a mailbox step is outstanding, 0 when done.
 */
static int
rxf_clear_packet_filter(struct bna_rxf *rxf)
{
	if (rxf_clear_packet_filter_ucast(rxf))
		return 1;

	if (rxf_clear_packet_filter_mcast(rxf))
		return 1;

	/* 5. clear active default MAC in the CAM */
	if (rxf->ucast_pending_set > 0)
		rxf->ucast_pending_set = 0;

	if (rxf_clear_packet_filter_promisc(rxf))
		return 1;

	if (rxf_clear_packet_filter_allmulti(rxf))
		return 1;

	return 0;
}

/* Multicast part of the rxf fail path: list manipulation only, no
 * mailbox traffic (the h/w is assumed gone/reset on failure).
 */
static void
rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 3. Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
	}

	/* 4. Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}
}

/**
 * In the rxf fail path, throws away the ucast/mcast entries pending for
 * deletion, moves all active ucast/mcast entries to pending queue so that
 * they are added back to CAM in the rxf start path. Also moves the current
 * filter configuration to pending filter configuration.
 */
static void
rxf_reset_packet_filter(struct bna_rxf *rxf)
{
	rxf_reset_packet_filter_ucast(rxf);

	rxf_reset_packet_filter_mcast(rxf);

	/* 5. Turn off ucast set flag */
	rxf->ucast_pending_set = 0;

	rxf_reset_packet_filter_promisc(rxf);

	rxf_reset_packet_filter_allmulti(rxf);
}

/* One-time initialization of an Rx function from its Rx configuration.
 * Sets up the CAM bookkeeping queues, operational state, RIT segment and
 * VLAN filter table, then parks the FSM in the stopped state.
 */
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	struct list_head *qe;
	struct bna_rxp *rxp;

	/* rxf_id is initialized during rx_mod init */
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_active_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;

	rxf->rxf_oper_state = (q_config->paused) ?
		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;

	bna_rxf_adv_init(rxf, rx, q_config);

	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
			q_config->num_paths);

	/* multicast traffic is steered to the first path's RxQ
	 * (the large one for split rxp types)
	 */
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (q_config->rxp_type == BNA_RXP_SINGLE)
			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
		else
			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
		break;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));

	/* Set up VLAN 0 for pure priority tagged packets */
	rxf->vlan_filter_table[0] |= 1;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

/* Tear down an Rx function: release the RIT segment and return every
 * CAM entry (pending and active) to its module pool.
 */
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_mac *mac;

	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
	rxf->rit_segment = NULL;

	rxf->ucast_pending_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}
if (rxf->ucast_active_mac) { bfa_q_qe_init(&rxf->ucast_active_mac->qe); bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, rxf->ucast_active_mac); rxf->ucast_active_mac = NULL; } while (!list_empty(&rxf->mcast_pending_add_q)) { bfa_q_deq(&rxf->mcast_pending_add_q, &mac); bfa_q_qe_init(&mac->qe); bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); } /* Turn off pending promisc mode */ if (is_promisc_enable(rxf->rxmode_pending, rxf->rxmode_pending_bitmask)) { /* system promisc state should be pending */ BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id)); promisc_inactive(rxf->rxmode_pending, rxf->rxmode_pending_bitmask); bna->rxf_promisc_id = BFI_MAX_RXF; } /* Promisc mode should not be active */ BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC); /* Turn off pending all-multi mode */ if (is_allmulti_enable(rxf->rxmode_pending, rxf->rxmode_pending_bitmask)) { allmulti_inactive(rxf->rxmode_pending, rxf->rxmode_pending_bitmask); } /* Allmulti mode should not be active */ BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI); rxf->rx = NULL; } static void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status) { bfa_fsm_send_event(rx, RX_E_RXF_STARTED); if (rx->rxf.rxf_id < 32) rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id); else rx->bna->rx_mod.rxf_bmap[1] |= ((u32) 1 << (rx->rxf.rxf_id - 32)); } static void bna_rxf_start(struct bna_rxf *rxf) { rxf->start_cbfn = bna_rx_cb_rxf_started; rxf->start_cbarg = rxf->rx; rxf->rxf_flags &= ~BNA_RXF_FL_FAILED; bfa_fsm_send_event(rxf, RXF_E_START); } static void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status) { bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); if (rx->rxf.rxf_id < 32) rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id; else rx->bna->rx_mod.rxf_bmap[1] &= ~(u32) 1 << (rx->rxf.rxf_id - 32); } static void bna_rxf_stop(struct bna_rxf *rxf) { rxf->stop_cbfn = bna_rx_cb_rxf_stopped; rxf->stop_cbarg = rxf->rx; bfa_fsm_send_event(rxf, RXF_E_STOP); } static void bna_rxf_fail(struct bna_rxf *rxf) { 
rxf->rxf_flags |= BNA_RXF_FL_FAILED; bfa_fsm_send_event(rxf, RXF_E_FAIL); } int bna_rxf_state_get(struct bna_rxf *rxf) { return bfa_sm_to_state(rxf_sm_table, rxf->fsm); } enum bna_cb_status bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, void (*cbfn)(struct bnad *, struct bna_rx *, enum bna_cb_status)) { struct bna_rxf *rxf = &rx->rxf; if (rxf->ucast_active_mac == NULL) { rxf->ucast_active_mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod); if (rxf->ucast_active_mac == NULL) return BNA_CB_UCAST_CAM_FULL; bfa_q_qe_init(&rxf->ucast_active_mac->qe); } memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN); rxf->ucast_pending_set++; rxf->cam_fltr_cbfn = cbfn; rxf->cam_fltr_cbarg = rx->bna->bnad; bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); return BNA_CB_SUCCESS; } enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, void (*cbfn)(struct bnad *, struct bna_rx *, enum bna_cb_status)) { struct bna_rxf *rxf = &rx->rxf; struct list_head *qe; struct bna_mac *mac; /* Check if already added */ list_for_each(qe, &rxf->mcast_active_q) { mac = (struct bna_mac *)qe; if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { if (cbfn) (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); return BNA_CB_SUCCESS; } } /* Check if pending addition */ list_for_each(qe, &rxf->mcast_pending_add_q) { mac = (struct bna_mac *)qe; if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { if (cbfn) (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); return BNA_CB_SUCCESS; } } mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); if (mac == NULL) return BNA_CB_MCAST_LIST_FULL; bfa_q_qe_init(&mac->qe); memcpy(mac->addr, addr, ETH_ALEN); list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); rxf->cam_fltr_cbfn = cbfn; rxf->cam_fltr_cbarg = rx->bna->bnad; bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); return BNA_CB_SUCCESS; } enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, void (*cbfn)(struct bnad *, struct bna_rx *, enum bna_cb_status)) { struct bna_rxf *rxf = &rx->rxf; struct list_head list_head; struct 
list_head *qe; u8 *mcaddr; struct bna_mac *mac; struct bna_mac *mac1; int skip; int delete; int need_hw_config = 0; int i; /* Allocate nodes */ INIT_LIST_HEAD(&list_head); for (i = 0, mcaddr = mclist; i < count; i++) { mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); if (mac == NULL) goto err_return; bfa_q_qe_init(&mac->qe); memcpy(mac->addr, mcaddr, ETH_ALEN); list_add_tail(&mac->qe, &list_head); mcaddr += ETH_ALEN; } /* Schedule for addition */ while (!list_empty(&list_head)) { bfa_q_deq(&list_head, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); skip = 0; /* Skip if already added */ list_for_each(qe, &rxf->mcast_active_q) { mac1 = (struct bna_mac *)qe; if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) { bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); skip = 1; break; } } if (skip) continue; /* Skip if pending addition */ list_for_each(qe, &rxf->mcast_pending_add_q) { mac1 = (struct bna_mac *)qe; if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) { bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); skip = 1; break; } } if (skip) continue; need_hw_config = 1; list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); } /** * Delete the entries that are in the pending_add_q but not * in the new list */ while (!list_empty(&rxf->mcast_pending_add_q)) { bfa_q_deq(&rxf->mcast_pending_add_q, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) { if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) { delete = 0; break; } mcaddr += ETH_ALEN; } if (delete) bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); else list_add_tail(&mac->qe, &list_head); } while (!list_empty(&list_head)) { bfa_q_deq(&list_head, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); } /** * Schedule entries for deletion that are in the active_q but not * in the new list */ while (!list_empty(&rxf->mcast_active_q)) { bfa_q_deq(&rxf->mcast_active_q, &qe); mac = (struct bna_mac *)qe; 
bfa_q_qe_init(&mac->qe); for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) { if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) { delete = 0; break; } mcaddr += ETH_ALEN; } if (delete) { list_add_tail(&mac->qe, &rxf->mcast_pending_del_q); need_hw_config = 1; } else { list_add_tail(&mac->qe, &list_head); } } while (!list_empty(&list_head)) { bfa_q_deq(&list_head, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); list_add_tail(&mac->qe, &rxf->mcast_active_q); } if (need_hw_config) { rxf->cam_fltr_cbfn = cbfn; rxf->cam_fltr_cbarg = rx->bna->bnad; bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); } else if (cbfn) (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); return BNA_CB_SUCCESS; err_return: while (!list_empty(&list_head)) { bfa_q_deq(&list_head, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); } return BNA_CB_MCAST_LIST_FULL; } void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) { struct bna_rxf *rxf = &rx->rxf; int index = (vlan_id >> 5); int bit = (1 << (vlan_id & 0x1F)); rxf->vlan_filter_table[index] |= bit; if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING; bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); } } void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id) { struct bna_rxf *rxf = &rx->rxf; int index = (vlan_id >> 5); int bit = (1 << (vlan_id & 0x1F)); rxf->vlan_filter_table[index] &= ~bit; if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING; bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); } } /** * RX */ #define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \ struct bna_doorbell_qset *_qset; \ unsigned long off; \ (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \ (q)->rcb->q_depth = (qdepth); \ (q)->rcb->unmap_q = unmapq_mem; \ (q)->rcb->rxq = (q); \ (q)->rcb->cq = &(rxp)->cq; \ (q)->rcb->bnad = (bna)->bnad; \ _qset = (struct bna_doorbell_qset *)0; \ off = (unsigned 
long)&_qset[(q)->rxq_id].rxq[0]; \ (q)->rcb->q_dbell = off + \ BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \ (q)->rcb->id = _id; \ } while (0) #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ (qcfg)->num_paths : ((qcfg)->num_paths * 2)) #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\ (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) #define call_rx_stop_callback(rx, status) \ if ((rx)->stop_cbfn) { \ (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \ (rx)->stop_cbfn = NULL; \ (rx)->stop_cbarg = NULL; \ } /* * Since rx_enable is synchronous callback, there is no start_cbfn required. * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers * for each rxpath. */ #define call_rx_disable_cbfn(rx, status) \ if ((rx)->disable_cbfn) { \ (*(rx)->disable_cbfn)((rx)->disable_cbarg, \ status); \ (rx)->disable_cbfn = NULL; \ (rx)->disable_cbarg = NULL; \ } \ #define rxqs_reqd(type, num_rxqs) \ (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2)) #define rx_ib_fail(rx) \ do { \ struct bna_rxp *rxp; \ struct list_head *qe; \ list_for_each(qe, &(rx)->rxp_q) { \ rxp = (struct bna_rxp *)qe; \ bna_ib_fail(rxp->cq.ib); \ } \ } while (0) static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *); static void __bna_rxq_start(struct bna_rxq *rxq); static void __bna_cq_start(struct bna_cq *cq); static void bna_rit_create(struct bna_rx *rx); static void bna_rx_cb_multi_rxq_stopped(void *arg, int status); static void bna_rx_cb_rxq_stopped_all(void *arg); bfa_fsm_state_decl(bna_rx, stopped, struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, rxf_start_wait, struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, started, struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, rxf_stop_wait, struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, rxq_stop_wait, struct bna_rx, enum bna_rx_event); static const struct bfa_sm_table rx_sm_table[] = { {BFA_SM(bna_rx_sm_stopped), 
BNA_RX_STOPPED}, {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT}, {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED}, {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT}, {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT}, }; static void bna_rx_sm_stopped_entry(struct bna_rx *rx) { struct bna_rxp *rxp; struct list_head *qe_rxp; list_for_each(qe_rxp, &rx->rxp_q) { rxp = (struct bna_rxp *)qe_rxp; rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb); } call_rx_stop_callback(rx, BNA_CB_SUCCESS); } static void bna_rx_sm_stopped(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { case RX_E_START: bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait); break; case RX_E_STOP: call_rx_stop_callback(rx, BNA_CB_SUCCESS); break; case RX_E_FAIL: /* no-op */ break; default: bfa_sm_fault(rx->bna, event); break; } } static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) { struct bna_rxp *rxp; struct list_head *qe_rxp; struct bna_rxq *q0 = NULL, *q1 = NULL; /* Setup the RIT */ bna_rit_create(rx); list_for_each(qe_rxp, &rx->rxp_q) { rxp = (struct bna_rxp *)qe_rxp; bna_ib_start(rxp->cq.ib); GET_RXQS(rxp, q0, q1); q0->buffer_size = bna_port_mtu_get(&rx->bna->port); __bna_rxq_start(q0); rx->rx_post_cbfn(rx->bna->bnad, q0->rcb); if (q1) { __bna_rxq_start(q1); rx->rx_post_cbfn(rx->bna->bnad, q1->rcb); } __bna_cq_start(&rxp->cq); } bna_rxf_start(&rx->rxf); } static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { case RX_E_STOP: bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); break; case RX_E_FAIL: bfa_fsm_set_state(rx, bna_rx_sm_stopped); rx_ib_fail(rx); bna_rxf_fail(&rx->rxf); break; case RX_E_RXF_STARTED: bfa_fsm_set_state(rx, bna_rx_sm_started); break; default: bfa_sm_fault(rx->bna, event); break; } } void bna_rx_sm_started_entry(struct bna_rx *rx) { struct bna_rxp *rxp; struct list_head *qe_rxp; /* Start IB */ list_for_each(qe_rxp, &rx->rxp_q) { rxp = (struct bna_rxp *)qe_rxp; bna_ib_ack(&rxp->cq.ib->door_bell, 0); } 
bna_llport_rx_started(&rx->bna->port.llport); } void bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { case RX_E_FAIL: bna_llport_rx_stopped(&rx->bna->port.llport); bfa_fsm_set_state(rx, bna_rx_sm_stopped); rx_ib_fail(rx); bna_rxf_fail(&rx->rxf); break; case RX_E_STOP: bna_llport_rx_stopped(&rx->bna->port.llport); bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); break; default: bfa_sm_fault(rx->bna, event); break; } } void bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) { bna_rxf_stop(&rx->rxf); } void bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { case RX_E_RXF_STOPPED: bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait); break; case RX_E_RXF_STARTED: /** * RxF was in the process of starting up when * RXF_E_STOP was issued. Ignore this event */ break; case RX_E_FAIL: bfa_fsm_set_state(rx, bna_rx_sm_stopped); rx_ib_fail(rx); bna_rxf_fail(&rx->rxf); break; default: bfa_sm_fault(rx->bna, event); break; } } void bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx) { struct bna_rxp *rxp = NULL; struct bna_rxq *q0 = NULL; struct bna_rxq *q1 = NULL; struct list_head *qe; u32 rxq_mask[2] = {0, 0}; /* Only one call to multi-rxq-stop for all RXPs in this RX */ bfa_wc_up(&rx->rxq_stop_wc); list_for_each(qe, &rx->rxp_q) { rxp = (struct bna_rxp *)qe; GET_RXQS(rxp, q0, q1); if (q0->rxq_id < 32) rxq_mask[0] |= ((u32)1 << q0->rxq_id); else rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32)); if (q1) { if (q1->rxq_id < 32) rxq_mask[0] |= ((u32)1 << q1->rxq_id); else rxq_mask[1] |= ((u32) 1 << (q1->rxq_id - 32)); } } __bna_multi_rxq_stop(rxp, rxq_mask); } void bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event) { struct bna_rxp *rxp = NULL; struct list_head *qe; switch (event) { case RX_E_RXQ_STOPPED: list_for_each(qe, &rx->rxp_q) { rxp = (struct bna_rxp *)qe; bna_ib_stop(rxp->cq.ib); } /* Fall through */ case RX_E_FAIL: bfa_fsm_set_state(rx, bna_rx_sm_stopped); break; default: bfa_sm_fault(rx->bna, event); 
break; } } void __bna_multi_rxq_stop(struct bna_rxp *rxp, u32 * rxq_id_mask) { struct bfi_ll_q_stop_req ll_req; bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0); ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]); ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]); bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req), bna_rx_cb_multi_rxq_stopped, rxp); bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe); } void __bna_rxq_start(struct bna_rxq *rxq) { struct bna_rxtx_q_mem *q_mem; struct bna_rxq_mem rxq_cfg, *rxq_mem; struct bna_dma_addr cur_q_addr; /* struct bna_doorbell_qset *qset; */ struct bna_qpt *qpt; u32 pg_num; struct bna *bna = rxq->rx->bna; void __iomem *base_addr; unsigned long off; qpt = &rxq->qpt; cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr)); rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb; rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb; rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb; rxq_cfg.cur_q_entry_hi = cur_q_addr.msb; rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0; rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2); rxq_cfg.sg_n_cq_n_cns_ptr = ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0; rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) | BNA_Q_IDLE_STATE; rxq_cfg.next_qid = 0x0 | (0x3 << 8); /* Write the page number register */ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num, HQM_RXTX_Q_RAM_BASE_OFFSET); writel(pg_num, bna->regs.page_addr); /* Write to h/w */ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, HQM_RXTX_Q_RAM_BASE_OFFSET); q_mem = (struct bna_rxtx_q_mem *)0; rxq_mem = &q_mem[rxq->rxq_id].rxq; off = (unsigned long)&rxq_mem->pg_tbl_addr_lo; writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off); off = (unsigned long)&rxq_mem->pg_tbl_addr_hi; writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off); off = (unsigned long)&rxq_mem->cur_q_entry_lo; writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off); off = (unsigned long)&rxq_mem->cur_q_entry_hi; 
writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off); off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr; writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off); off = (unsigned long)&rxq_mem->entry_n_pg_size; writel(rxq_cfg.entry_n_pg_size, base_addr + off); off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr; writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off); off = (unsigned long)&rxq_mem->buf_sz_n_q_state; writel(rxq_cfg.buf_sz_n_q_state, base_addr + off); off = (unsigned long)&rxq_mem->next_qid; writel(rxq_cfg.next_qid, base_addr + off); rxq->rcb->producer_index = 0; rxq->rcb->consumer_index = 0; } void __bna_cq_start(struct bna_cq *cq) { struct bna_cq_mem cq_cfg, *cq_mem; const struct bna_qpt *qpt; struct bna_dma_addr cur_q_addr; u32 pg_num; struct bna *bna = cq->rx->bna; void __iomem *base_addr; unsigned long off; qpt = &cq->qpt; cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr)); /* * Fill out structure, to be subsequently written * to hardware */ cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb; cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb; cq_cfg.cur_q_entry_lo = cur_q_addr.lsb; cq_cfg.cur_q_entry_hi = cur_q_addr.msb; cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0; cq_cfg.entry_n_pg_size = ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2); cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) | ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0); cq_cfg.q_state = BNA_Q_IDLE_STATE; /* Write the page number register */ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num, HQM_CQ_RAM_BASE_OFFSET); writel(pg_num, bna->regs.page_addr); /* H/W write */ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, HQM_CQ_RAM_BASE_OFFSET); cq_mem = (struct bna_cq_mem *)0; off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo; writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi; writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo; 
writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi; writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr; writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size; writel(cq_cfg.entry_n_pg_size, base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr; writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off); off = (unsigned long)&cq_mem[cq->cq_id].q_state; writel(cq_cfg.q_state, base_addr + off); cq->ccb->producer_index = 0; *(cq->ccb->hw_producer_index) = 0; } void bna_rit_create(struct bna_rx *rx) { struct list_head *qe_rxp; struct bna_rxp *rxp; struct bna_rxq *q0 = NULL; struct bna_rxq *q1 = NULL; int offset; offset = 0; list_for_each(qe_rxp, &rx->rxp_q) { rxp = (struct bna_rxp *)qe_rxp; GET_RXQS(rxp, q0, q1); rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id; rx->rxf.rit_segment->rit[offset].small_rxq_id = (q1 ? 
q1->rxq_id : 0);
		offset++;
	}
}

/* Check whether the rx module's free pools (rx, rxp, rxq objects and
 * RIT space) can accommodate a new Rx of the given configuration.
 * Returns 1 if creation can proceed, 0 otherwise.
 */
static int
_rx_can_satisfy(struct bna_rx_mod *rx_mod,
		struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		/* non-SINGLE rxp types need two RxQs per path */
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
		return 0;

	return 1;
}

/* Take an RxQ from the free pool; returns NULL when the pool is empty. */
static struct bna_rxq *
_get_free_rxq(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	if (qe) {
		rx_mod->rxq_free_count--;
		rxq = (struct bna_rxq *)qe;
	}
	return rxq;
}

/* Return an RxQ to the free pool. */
static void
_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

/* Take an Rx path object from the free pool; NULL when empty. */
static struct bna_rxp *
_get_free_rxp(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	if (qe) {
		rx_mod->rxp_free_count--;
		rxp = (struct bna_rxp *)qe;
	}
	return rxp;
}

/* Return an Rx path object to the free pool. */
static void
_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

/* Take an Rx object from the free pool and move it to the active list;
 * NULL when the pool is empty.
 */
static struct bna_rx *
_get_free_rx(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	bfa_q_deq(&rx_mod->rx_free_q, &qe);
	if (qe) {
		rx_mod->rx_free_count--;
		rx = (struct bna_rx *)qe;
		bfa_q_qe_init(qe);
		list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	}
	return rx;
}

/* Return an Rx object to the free pool. */
static void
_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	rx_mod->rx_free_count++;
}

/* Reset per-Rx runtime state, including the wait-counter that gathers
 * all RxQ-stopped completions into one RX_E_RXQ_STOPPED event.
 */
static void
_rx_init(struct bna_rx *rx, struct bna *bna)
{
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);

	rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
	rx->rxq_stop_wc.wc_cbarg = rx;
	rx->rxq_stop_wc.wc_count = 0;

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}

/* Attach q0/q1 to the rx path according to its type (q1 unused for
 * SINGLE, small/header queue for SLR/HDS).
 */
static void
_rxp_add_rxqs(struct bna_rxp *rxp,
		struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}

/* Fill an RxQ's queue page table: record the h/w QPT DMA address and
 * populate both the s/w shadow QPT and the DMA-visible page entries.
 */
static void
_rxq_qpt_init(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}

/* Same as _rxq_qpt_init, but for the path's completion queue. */
static void
_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}

/* Append an rx path to the Rx's path list. */
static void
_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
{
	list_add_tail(&rxp->qe, &rx->rxp_q);
}

/* Reset all rx-module free/active lists and counters */
static void
_init_rxmod_queues(struct bna_rx_mod *rx_mod)
{
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	rx_mod->rx_free_count = 0;
	rx_mod->rxq_free_count = 0;
	rx_mod->rxp_free_count = 0;
}

/* One-time constructor for an rx object; @id becomes its rxf id */
static void
_rx_ctor(struct bna_rx *rx, int id)
{
	bfa_q_qe_init(&rx->qe);
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->bna = NULL;

	rx->rxf.rxf_id = id;

	/* FIXME: mbox_qe ctor()?? */
	bfa_q_qe_init(&rx->mbox_qe.qe);

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}

/* Mbox callback: one of several rxq-stop requests acked; drop the counter */
void
bna_rx_cb_multi_rxq_stopped(void *arg, int status)
{
	struct bna_rxp *rxp = (struct bna_rxp *)arg;

	bfa_wc_down(&rxp->rx->rxq_stop_wc);
}

/* Wait-counter resume: all rxqs of @arg's rx are stopped */
void
bna_rx_cb_rxq_stopped_all(void *arg)
{
	struct bna_rx *rx = (struct bna_rx *)arg;

	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
}

/* Per-rx stop completion; decrements the module-wide stop wait-counter */
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
			 enum bna_cb_status status)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

/* All rx objects stopped: notify the port (callback is one-shot) */
static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
	rx_mod->stop_cbfn = NULL;
}

/* Port-level start: kick the rx FSM only if the rx itself is enabled */
static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENABLE)
		bfa_fsm_send_event(rx, RX_E_START);
}

/*
 * Port-level stop. If the rx is already in the stopped state, complete
 * synchronously; otherwise arrange for the module callback and send STOP.
 */
static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate port is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	rx->rx_flags |= BNA_RX_F_PORT_FAILED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}

/* Start every active rx of the given type (regular or loopback) */
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

/* Stop every active rx of the given type; completion via rx_stop_wc */
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	rx_mod->stop_cbfn = bna_port_cb_rx_stopped;

	/**
	 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
	 * as we are going to call bna_rx_stop
	 */
	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bfa_wc_up(&rx_mod->rx_stop_wc);
	}

	/* Nothing to stop: report success immediately */
	if (rx_mod->rx_stop_wc.wc_count == 0) {
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
		rx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_stop(rx);
	}
}

/* Fail every active rx (both types) after a port/IOC failure */
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}

/*
 * Module init: take the rx/rxp/rxq object arrays from @res_info and
 * construct and queue every object onto its free list.
 * NOTE(review): all three pools are sized by BFI_MAX_RXQ -- presumably
 * intentional (worst case one rx/rxp per rxq); confirm against bna_types.
 */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info)
{
	int	index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	_init_rxmod_queues(rx_mod);

	/* Build RX queues */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rx_ptr = &rx_mod->rx[index];
		_rx_ctor(rx_ptr, index);
		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		rxp_ptr->cq.cq_id = index;
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxq_ptr = &rx_mod->rxq[index];
		rxq_ptr->rxq_id = index;
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}

	rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
	rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
	rx_mod->rx_stop_wc.wc_count = 0;
}

/*
 * Module teardown. The free-list walks only count entries into @i and the
 * result is unused -- looks like leftovers from removed debug/trace code.
 */
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head		*qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}

/* Map the rx FSM function pointer to its enum state value */
int
bna_rx_state_get(struct bna_rx *rx)
{
	return bfa_sm_to_state(rx_sm_table, rx->fsm);
}

/*
 * Fill @res_info with the memory/interrupt requirements for an rx built
 * per @q_cfg: CCB/RCB structures, completion/data/header queue page
 * tables (h/w and s/w), their pages, and MSI-X vectors.
 */
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q_depth;
	/* Header queue exists only for non-SINGLE path types */
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ?
0 : q_cfg->q_depth);
	/* CQ must absorb completions from both queues of a path */
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else {
		hpage_count = 0;
	}

	/* CCB structures */
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	/* RCB structures */
	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	/* Completion QPT */
	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Completion s/w QPT */
	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Completion QPT pages */
	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = cpage_count * q_cfg->num_paths;

	/* Data QPTs */
	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Data s/w QPTs */
	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Data QPT pages */
	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = dpage_count * q_cfg->num_paths;

	/* Hdr QPTs */
	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr s/w QPTs */
	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr QPT pages */
	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

	/* RX Interrupts */
	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}

/*
 * Build a complete rx object from the pre-allocated resources in
 * @res_info: claim rx/rxp/rxq objects, attach IBs, set up queue page
 * tables, program CCB/RCB bookkeeping, and initialize the rx-function.
 * Returns NULL if the module pools cannot satisfy @rx_cfg.
 * Ownership: the returned rx is released via bna_rx_destroy().
 */
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;	/* Header/Small Q qpt */
	struct bna_mem_descr *dqpt_mem;	/* Data/Large Q qpt */
	struct bna_mem_descr *hsqpt_mem;	/* s/w qpt for hdr */
	struct bna_mem_descr *dsqpt_mem;	/* s/w qpt for data */
	struct bna_mem_descr *hpage_mem;	/* hdr page mem */
	struct bna_mem_descr *dpage_mem;	/* data page mem */
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
	int dpage_count, hpage_count, rcb_idx;
	struct bna_ib_config ibcfg;

	/* Fail if we don't have enough RXPs, RXQs */
	if (!_rx_can_satisfy(rx_mod, rx_cfg))
		return NULL;

	/* Initialize resource pointers */
	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem =
&res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	/* Compute q depth & page count */
	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	/* Get RX pointer */
	rx = _get_free_rx(rx_mod);
	_rx_init(rx, bna);
	rx->priv = priv;
	rx->type = rx_cfg->rx_type;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	/* Inherit port-enabled only if the port mode matches the rx type */
	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_PORT_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		}
	}

	/* Build each rx-path: rxp + one or two rxqs + IB + CQ */
	for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
		rxp = _get_free_rxp(rx_mod);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		/* Get required RXQs, and queue them to rx-path */
		q0 = _get_free_rxq(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = _get_free_rxq(rx_mod);

		/* Initialize IB */
		if (1 == intr_info->num) {
			/* Single vector: all paths share idl[0] */
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[0].vector);
			rxp->vector = intr_info->idl[0].vector;
		} else {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[i].vector);

			/* Map the MSI-x vector used for this RXP */
			rxp->vector = intr_info->idl[i].vector;
		}

		rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);

		ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
		ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
		ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
		ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;

		bna_ib_config(rxp->cq.ib, &ibcfg);

		/* Link rxqs to rxp */
		_rxp_add_rxqs(rxp, q0, q1);

		/* Link rxp to rx */
		_rx_add_rxp(rx, rxp);

		q0->rx = rx;
		q0->rxp = rxp;

		/* Initialize RCB for the large / data q */
		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
			(void *)unmapq_mem[rcb_idx].kva);
		rcb_idx++;
		(q0)->rx_packets = (q0)->rx_bytes = 0;
		(q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;

		/* Initialize RXQs */
		_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		/* Call bnad to complete rcb setup */
		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			/* Initialize RCB for the small / header q */
			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
				(void *)unmapq_mem[rcb_idx].kva);
			rcb_idx++;
			(q1)->buffer_size = (rx_cfg)->small_buff_size;
			(q1)->rx_packets = (q1)->rx_bytes = 0;
			(q1)->rx_packets_with_error =
				(q1)->rxbuf_alloc_failed = 0;

			_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			/* Call bnad to complete rcb setup */
			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup RXP::CQ */
		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;

		rxp->cq.ccb->producer_index = 0;
		/* CQ depth covers both queues of the path */
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		if (q1)
			rxp->cq.ccb->rcb[1] = q1->rcb;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->bnad = bna->bnad;
		/* h/w producer index lives in the IB's host segment */
		rxp->cq.ccb->hw_producer_index =
			((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
				(rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
		*(rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->intr_type = intr_info->intr_type;
		rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
						intr_info->idl[0].vector :
						intr_info->idl[i].vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib->ib_config.coalescing_timeo;
		rxp->cq.ccb->id = i;

		/* Call bnad to complete CCB setup */
		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);

	}		/* for each rx-path */

	bna_rxf_init(&rx->rxf, rx, rx_cfg);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	return rx;
}

/*
 * Tear down an rx created by bna_rx_create(): release rxqs, IBs and
 * rx-paths back to their pools (notifying bnad so it can free its RCB /
 * CCB state), unlink the rx from the active list and return it too.
 */
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		/* Callback to bnad for destroying RCB */
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		_put_free_rxq(rx_mod, q0);
		if (q1) {
			/* Callback to bnad for destroying RCB */
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			_put_free_rxq(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;
		if (rxp->cq.ib) {
			/* 0xff marks "no IB segment index reserved" --
			 * NOTE(review): bna_tx_free() uses -1 for the same
			 * purpose; verify the sentinel conventions
			 */
			if (rxp->cq.ib_seg_offset != 0xff)
				bna_ib_release_idx(rxp->cq.ib,
						rxp->cq.ib_seg_offset);
			bna_ib_put(ib_mod, rxp->cq.ib);
			rxp->cq.ib = NULL;
		}
		/* Callback to bnad for destroying CCB */
		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		_put_free_rxp(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx->bna = NULL;
	rx->priv = NULL;
	_put_free_rx(rx_mod, rx);
}

/* Driver-level enable: start only if stopped and the port allows it */
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLE;
	if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

/*
 * Driver-level disable. BNA_SOFT_CLEANUP means the h/w must not be
 * touched (e.g. after failure), so completion is reported immediately.
 */
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *,
				enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat we're stopped */
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLE;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

/**
 * TX
 */

/* Fire and clear the one-shot tx stop-complete callback */
#define call_tx_stop_cbfn(tx, status)\
do {\
	if ((tx)->stop_cbfn)\
		(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
	(tx)->stop_cbfn = NULL;\
	(tx)->stop_cbarg = NULL;\
} while (0)

/* Fire and clear the one-shot priority-change callback */
#define call_tx_prio_change_cbfn(tx, status)\
do {\
	if ((tx)->prio_change_cbfn)\
		(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
	(tx)->prio_change_cbfn = NULL;\
} while (0)

static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
					enum bna_cb_status status);
static void bna_tx_cb_txq_stopped(void *arg, int status);
static void bna_tx_cb_stats_cleared(void *arg, int status);
static void __bna_tx_stop(struct bna_tx *tx);
static void __bna_tx_start(struct bna_tx *tx);
static void __bna_txf_stat_clr(struct bna_tx *tx);

/* Events fed into the tx FSM */
enum bna_tx_event {
	TX_E_START		= 1,
	TX_E_STOP		= 2,
	TX_E_FAIL		= 3,
	TX_E_TXQ_STOPPED	= 4,
	TX_E_PRIO_CHANGE	= 5,
	TX_E_STAT_CLEARED	= 6,
};

/* States of the tx FSM (mirrors tx_sm_table below) */
enum bna_tx_state {
	BNA_TX_STOPPED		= 1,
	BNA_TX_STARTED		= 2,
	BNA_TX_TXQ_STOP_WAIT	= 3,
	BNA_TX_PRIO_STOP_WAIT	= 4,
	BNA_TX_STAT_CLR_WAIT	= 5,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
			enum bna_tx_event);

/* FSM-function -> state-enum mapping used by bfa_sm_to_state() */
static struct bfa_sm_table tx_sm_table[] = {
	{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
	{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
	{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
	{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
	{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
};

/* Entering stopped: let bnad clean up every txq, then fire stop cbfn */
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head		 *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
	}

	call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_STOP:
		/* Re-enter stopped so the stop callback fires */
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		break;

	case TX_E_TXQ_STOPPED:
		/**
		 * This event is received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}

/* Entering started: program h/w, then ack each IB to arm interrupts */
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head		 *qe;

	__bna_tx_start(tx);

	/* Start IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_ack(&txq->ib->door_bell, 0);
	}
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head		 *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		__bna_tx_stop(tx);
		break;

	case TX_E_FAIL:
		/* Device failed: no mbox traffic possible -- fail the
		 * IBs, stall bnad, and drop straight to stopped
		 */
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_fail(txq->ib);
			(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_PRIO_CHANGE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}

static void
bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
{
}

/* Waiting for f/w to ack the txq stop request(s) */
static void
bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head		 *qe;

	switch (event) {
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		/* All txqs acked: stop IBs, then clear f/w statistics */
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}

/* Priority change requires quiescing the txqs first */
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	__bna_tx_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head		 *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		break;

	case TX_E_FAIL:
		call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		/* Quiesced: clean up, report, and restart at new prio */
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
			(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
		}
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}

/* Ask f/w to clear the tx-function statistics before final stop */
static void
bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
{
	__bna_txf_stat_clr(tx);
}

static void
bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STAT_CLEARED:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}

/*
 * Program one txq into h/w: build the register image in @txq_cfg, map
 * the queue RAM page, and write the image out. DMA addresses are
 * byte-swapped (htonl) because the h/w reads them little endian.
 */
static void
__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
{
	struct bna_rxtx_q_mem *q_mem;
	struct bna_txq_mem txq_cfg;
	struct bna_txq_mem *txq_mem;
	struct bna_dma_addr cur_q_addr;
	u32 pg_num;
	void __iomem *base_addr;
	unsigned long off;

	/* Fill out structure, to be subsequently written to hardware */
	txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
	txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
	cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
	txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	txq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;

	txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
			(txq->qpt.page_size >> 2);
	txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
			((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);

	txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
	txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
			(txq->priority & 0x7));
	txq_cfg.wvc_n_cquota_n_rquota =
			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
			(BFI_TX_MAX_WRR_QUOTA & 0xfff));

	/* Setup the page and write to H/W */

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
			HQM_RXTX_Q_RAM_BASE_OFFSET);
	writel(pg_num, tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
					HQM_RXTX_Q_RAM_BASE_OFFSET);
	q_mem = (struct bna_rxtx_q_mem *)0;
	txq_mem = &q_mem[txq->txq_id].txq;

	/*
	 * The following 4 lines, is a hack b'cos the H/W needs to read
	 * these DMA addresses as little endian
	 */

	off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
	writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
	writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&txq_mem->cur_q_entry_lo;
	writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&txq_mem->cur_q_entry_hi;
	writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
	writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&txq_mem->entry_n_pg_size;
	writel(txq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
	writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);

	off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
	writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);

	off =
(unsigned long)&txq_mem->nxt_qid_n_fid_n_pri; writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off); off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota; writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off); txq->tcb->producer_index = 0; txq->tcb->consumer_index = 0; *(txq->tcb->hw_consumer_index) = 0; } static void __bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq) { struct bfi_ll_q_stop_req ll_req; u32 bit_mask[2] = {0, 0}; if (txq->txq_id < 32) bit_mask[0] = (u32)1 << txq->txq_id; else bit_mask[1] = (u32)1 << (txq->txq_id - 32); memset(&ll_req, 0, sizeof(ll_req)); ll_req.mh.msg_class = BFI_MC_LL; ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ; ll_req.mh.mtag.h2i.lpu_id = 0; ll_req.q_id_mask[0] = htonl(bit_mask[0]); ll_req.q_id_mask[1] = htonl(bit_mask[1]); bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req), bna_tx_cb_txq_stopped, tx); bna_mbox_send(tx->bna, &tx->mbox_qe); } static void __bna_txf_start(struct bna_tx *tx) { struct bna_tx_fndb_ram *tx_fndb; struct bna_txf *txf = &tx->txf; void __iomem *base_addr; unsigned long off; writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET), tx->bna->regs.page_addr); base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva, TX_FNDB_RAM_BASE_OFFSET); tx_fndb = (struct bna_tx_fndb_ram *)0; off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags; writel(((u32)txf->vlan << 16) | txf->ctrl_flags, base_addr + off); if (tx->txf.txf_id < 32) tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id); else tx->bna->tx_mod.txf_bmap[1] |= ((u32) 1 << (tx->txf.txf_id - 32)); } static void __bna_txf_stop(struct bna_tx *tx) { struct bna_tx_fndb_ram *tx_fndb; u32 page_num; u32 ctl_flags; struct bna_txf *txf = &tx->txf; void __iomem *base_addr; unsigned long off; /* retrieve the running txf_flags & turn off enable bit */ page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET); writel(page_num, tx->bna->regs.page_addr); 
base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva, TX_FNDB_RAM_BASE_OFFSET); tx_fndb = (struct bna_tx_fndb_ram *)0; off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags; ctl_flags = readl(base_addr + off); ctl_flags &= ~BFI_TXF_CF_ENABLE; writel(ctl_flags, base_addr + off); if (tx->txf.txf_id < 32) tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id); else tx->bna->tx_mod.txf_bmap[0] &= ~((u32) 1 << (tx->txf.txf_id - 32)); } static void __bna_txf_stat_clr(struct bna_tx *tx) { struct bfi_ll_stats_req ll_req; u32 txf_bmap[2] = {0, 0}; if (tx->txf.txf_id < 32) txf_bmap[0] = ((u32)1 << tx->txf.txf_id); else txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32)); bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0); ll_req.stats_mask = 0; ll_req.rxf_id_mask[0] = 0; ll_req.rxf_id_mask[1] = 0; ll_req.txf_id_mask[0] = htonl(txf_bmap[0]); ll_req.txf_id_mask[1] = htonl(txf_bmap[1]); bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req), bna_tx_cb_stats_cleared, tx); bna_mbox_send(tx->bna, &tx->mbox_qe); } static void __bna_tx_start(struct bna_tx *tx) { struct bna_txq *txq; struct list_head *qe; list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; bna_ib_start(txq->ib); __bna_txq_start(tx, txq); } __bna_txf_start(tx); list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; txq->tcb->priority = txq->priority; (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb); } } static void __bna_tx_stop(struct bna_tx *tx) { struct bna_txq *txq; struct list_head *qe; list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb); } __bna_txf_stop(tx); list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; bfa_wc_up(&tx->txq_stop_wc); } list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; __bna_txq_stop(tx, txq); } } static void bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, struct bna_mem_descr *qpt_mem, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) { int i; 
txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = page_mem[i].kva;

		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;

	}
}

/*
 * Release all of a tx's resources back to the module pools: IB segment
 * indices and IBs, txq objects, and the tx object itself (removed from
 * the active list first).
 */
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		if (txq->ib) {
			/* -1 marks "no IB segment index reserved" */
			if (txq->ib_seg_offset != -1)
				bna_ib_release_idx(txq->ib,
						txq->ib_seg_offset);
			bna_ib_put(ib_mod, txq->ib);
			txq->ib = NULL;
		}
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;
	list_add_tail(&tx->qe, &tx_mod->tx_free_q);
}

/* Mbox callback: one txq stop acked by f/w; drop the wait-counter */
static void
bna_tx_cb_txq_stopped(void *arg, int status)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_q_qe_init(&tx->mbox_qe.qe);
	bfa_wc_down(&tx->txq_stop_wc);
}

/* Wait-counter resume: every txq of this tx has been stopped */
static void
bna_tx_cb_txq_stopped_all(void *arg)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
}

/* Mbox callback: f/w acked the statistics-clear request */
static void
bna_tx_cb_stats_cleared(void *arg, int status)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
}

/* Port-level start: kick the tx FSM only if the tx itself is enabled */
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_PORT_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

/* Port-level stop; completion is reported to the tx module */
static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_PORT_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

/* Port/IOC failure: drop the started flag and fail the FSM */
static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_PORT_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}

/* Record the new priority on every txq, then let the FSM re-quiesce */
static void
bna_tx_prio_changed(struct bna_tx *tx, int prio)
{
	struct bna_txq *txq;
	struct list_head		 *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = prio;
	}

	bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
}

/* Track CEE link state; PRIO_LOCK flag only (no FSM event here) */
static void
bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
{
	if (cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;
	else
		tx->flags &= ~BNA_TX_F_PRIO_LOCK;
}

/* Per-tx stop completion; decrements the module-wide stop wait-counter */
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
			enum bna_cb_status status)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

/* All tx objects stopped: notify the port (callback is one-shot) */
static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
	tx_mod->stop_cbfn = NULL;
}

/*
 * Fill @res_info with the memory/interrupt requirements for @num_txq
 * transmit queues of depth @txq_depth: TCBs, queue page tables (h/w and
 * s/w), queue pages, and MSI-X vectors.
 */
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = num_txq * page_count;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}

/*
 * Build a tx object from pre-allocated resources: claim a tx and
 * @tx_cfg->num_txq txq objects, attach IBs with reserved segment
 * indices, and initialize callbacks. Returns NULL when pools or IB
 * resources are exhausted (partial claims are unwound via err_return,
 * outside this view). Continues past this chunk.
 */
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	struct bna_ib_mod *ib_mod = &bna->ib_mod;
	struct bna_doorbell_qset *qset;
	struct bna_ib_config ib_config;
	int page_count;
	int page_size;
	int page_idx;
	int i;
	unsigned long off;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/**
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	bfa_q_deq(&tx_mod->tx_free_q, &tx);
	bfa_q_qe_init(&tx->qe);

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->ib = NULL;
		txq->ib_seg_offset = -1;
		txq->tx = tx;
	}

	/* IBs */
	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;

		if (intr_info->num == 1)
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
						intr_info->idl[0].vector);
		else
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
						intr_info->idl[i].vector);

		if (txq->ib == NULL)
			goto err_return;

		txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
		if (txq->ib_seg_offset == -1)
			goto err_return;

		i++;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn =
tx_cbfn->tcb_destroy_cbfn; /* Following callbacks are mandatory */ tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; list_add_tail(&tx->qe, &tx_mod->tx_active_q); tx->bna = bna; tx->priv = priv; tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all; tx->txq_stop_wc.wc_cbarg = tx; tx->txq_stop_wc.wc_count = 0; tx->type = tx_cfg->tx_type; tx->flags = 0; if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) { switch (tx->type) { case BNA_TX_T_REGULAR: if (!(tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)) tx->flags |= BNA_TX_F_PORT_STARTED; break; case BNA_TX_T_LOOPBACK: if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK) tx->flags |= BNA_TX_F_PORT_STARTED; break; } } if (tx->bna->tx_mod.cee_link) tx->flags |= BNA_TX_F_PRIO_LOCK; /* TxQ */ i = 0; page_idx = 0; list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; txq->priority = tx_mod->priority; txq->tcb = (struct bna_tcb *) res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; txq->tx_packets = 0; txq->tx_bytes = 0; /* IB */ ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO; ib_config.interpkt_timeo = 0; /* Not used */ ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT; ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA | BFI_IB_CF_INT_ENABLE | BFI_IB_CF_COALESCING_MODE); bna_ib_config(txq->ib, &ib_config); /* TCB */ txq->tcb->producer_index = 0; txq->tcb->consumer_index = 0; txq->tcb->hw_consumer_index = (volatile u32 *) ((volatile u8 *)txq->ib->ib_seg_host_addr_kva + (txq->ib_seg_offset * BFI_IBIDX_SIZE)); *(txq->tcb->hw_consumer_index) = 0; txq->tcb->q_depth = tx_cfg->txq_depth; txq->tcb->unmap_q = (void *) res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; qset = (struct bna_doorbell_qset *)0; off = (unsigned long)&qset[txq->txq_id].txq[0]; txq->tcb->q_dbell = off + BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva); txq->tcb->i_dbell = &txq->ib->door_bell; txq->tcb->intr_type = 
intr_info->intr_type; txq->tcb->intr_vector = (intr_info->num == 1) ? intr_info->idl[0].vector : intr_info->idl[i].vector; txq->tcb->txq = txq; txq->tcb->bnad = bnad; txq->tcb->id = i; /* QPT, SWQPT, Pages */ bna_txq_qpt_setup(txq, page_count, page_size, &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], &res_info[BNA_TX_RES_MEM_T_PAGE]. res_u.mem_info.mdl[page_idx]); txq->tcb->page_idx = page_idx; txq->tcb->page_count = page_count; page_idx += page_count; /* Callback to bnad for setting up TCB */ if (tx->tcb_setup_cbfn) (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); i++; } /* TxF */ tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED; tx->txf.vlan = 0; /* Mbox element */ bfa_q_qe_init(&tx->mbox_qe.qe); bfa_fsm_set_state(tx, bna_tx_sm_stopped); return tx; err_return: bna_tx_free(tx); return NULL; } void bna_tx_destroy(struct bna_tx *tx) { /* Callback to bnad for destroying TCB */ if (tx->tcb_destroy_cbfn) { struct bna_txq *txq; struct list_head *qe; list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); } } bna_tx_free(tx); } void bna_tx_enable(struct bna_tx *tx) { if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) return; tx->flags |= BNA_TX_F_ENABLED; if (tx->flags & BNA_TX_F_PORT_STARTED) bfa_fsm_send_event(tx, TX_E_START); } void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status)) { if (type == BNA_SOFT_CLEANUP) { (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS); return; } tx->stop_cbfn = cbfn; tx->stop_cbarg = tx->bna->bnad; tx->flags &= ~BNA_TX_F_ENABLED; bfa_fsm_send_event(tx, TX_E_STOP); } int bna_tx_state_get(struct bna_tx *tx) { return bfa_sm_to_state(tx_sm_table, tx->fsm); } void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, struct bna_res_info *res_info) { int i; tx_mod->bna = bna; tx_mod->flags = 0; tx_mod->tx = (struct bna_tx *) 
res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; tx_mod->txq = (struct bna_txq *) res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; INIT_LIST_HEAD(&tx_mod->tx_free_q); INIT_LIST_HEAD(&tx_mod->tx_active_q); INIT_LIST_HEAD(&tx_mod->txq_free_q); for (i = 0; i < BFI_MAX_TXQ; i++) { tx_mod->tx[i].txf.txf_id = i; bfa_q_qe_init(&tx_mod->tx[i].qe); list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); tx_mod->txq[i].txq_id = i; bfa_q_qe_init(&tx_mod->txq[i].qe); list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); } tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all; tx_mod->tx_stop_wc.wc_cbarg = tx_mod; tx_mod->tx_stop_wc.wc_count = 0; } void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) { struct list_head *qe; int i; i = 0; list_for_each(qe, &tx_mod->tx_free_q) i++; i = 0; list_for_each(qe, &tx_mod->txq_free_q) i++; tx_mod->bna = NULL; } void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) { struct bna_tx *tx; struct list_head *qe; tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED; if (type == BNA_TX_T_LOOPBACK) tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK; list_for_each(qe, &tx_mod->tx_active_q) { tx = (struct bna_tx *)qe; if (tx->type == type) bna_tx_start(tx); } } void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) { struct bna_tx *tx; struct list_head *qe; tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED; tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK; tx_mod->stop_cbfn = bna_port_cb_tx_stopped; /** * Before calling bna_tx_stop(), increment tx_stop_wc as many times * as we are going to call bna_tx_stop */ list_for_each(qe, &tx_mod->tx_active_q) { tx = (struct bna_tx *)qe; if (tx->type == type) bfa_wc_up(&tx_mod->tx_stop_wc); } if (tx_mod->tx_stop_wc.wc_count == 0) { tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS); tx_mod->stop_cbfn = NULL; return; } list_for_each(qe, &tx_mod->tx_active_q) { tx = (struct bna_tx *)qe; if (tx->type == type) bna_tx_stop(tx); } } void bna_tx_mod_fail(struct bna_tx_mod *tx_mod) 
{ struct bna_tx *tx; struct list_head *qe; tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED; tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK; list_for_each(qe, &tx_mod->tx_active_q) { tx = (struct bna_tx *)qe; bna_tx_fail(tx); } } void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio) { struct bna_tx *tx; struct list_head *qe; if (prio != tx_mod->priority) { tx_mod->priority = prio; list_for_each(qe, &tx_mod->tx_active_q) { tx = (struct bna_tx *)qe; bna_tx_prio_changed(tx, prio); } } } void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link) { struct bna_tx *tx; struct list_head *qe; tx_mod->cee_link = cee_link; list_for_each(qe, &tx_mod->tx_active_q) { tx = (struct bna_tx *)qe; bna_tx_cee_link_status(tx, cee_link); } }
gpl-2.0
whyberg/MediaPad_10FHD
arch/arm/mm/mmap.c
2899
4259
/* * linux/arch/arm/mm/mmap.c */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/sched.h> #include <linux/io.h> #include <linux/personality.h> #include <linux/random.h> #include <asm/cputype.h> #include <asm/system.h> #define COLOUR_ALIGN(addr,pgoff) \ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) /* * We need to ensure that shared mappings are correctly aligned to * avoid aliasing issues with VIPT caches. We need to ensure that * a specific page of an object is always mapped at a multiple of * SHMLBA bytes. * * We unconditionally provide this function for all cases, however * in the VIVT case, we optimise out the alignment rules. */ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) unsigned int cache_type; int do_align = 0, aliasing = 0; /* * We only need to do colour alignment if either the I or D * caches alias. This is indicated by bits 9 and 21 of the * cache type register. */ cache_type = read_cpuid_cachetype(); if (cache_type != read_cpuid_id()) { aliasing = (cache_type | cache_type >> 12) & (1 << 11); if (aliasing) do_align = filp || flags & MAP_SHARED; } #else #define do_align 0 #define aliasing 0 #endif /* * We enforce the MAP_FIXED case. 
*/ if (flags & MAP_FIXED) { if (aliasing && flags & MAP_SHARED && (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) return -EINVAL; return addr; } if (len > TASK_SIZE) return -ENOMEM; if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } if (len > mm->cached_hole_size) { start_addr = addr = mm->free_area_cache; } else { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; } /* 8 bits of randomness in 20 address space bits */ if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; full_search: if (do_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) { /* * Start a new search - just in case we missed * some holes. */ if (start_addr != TASK_UNMAPPED_BASE) { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } if (!vma || addr + len <= vma->vm_start) { /* * Remember the place where we stopped the search: */ mm->free_area_cache = addr + len; return addr; } if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = vma->vm_end; if (do_align) addr = COLOUR_ALIGN(addr, pgoff); } } /* * You really shouldn't be using read() or write() on /dev/mem. This * might go away in the future. */ int valid_phys_addr_range(unsigned long addr, size_t size) { if (addr < PHYS_OFFSET) return 0; if (addr + size > __pa(high_memory - 1) + 1) return 0; return 1; } /* * We don't use supersection mappings for mmap() on /dev/mem, which * means that we can't map the memory area above the 4G barrier into * userspace. 
*/ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); } #ifdef CONFIG_STRICT_DEVMEM #include <linux/ioport.h> /* * devmem_is_allowed() checks to see if /dev/mem access to a certain * address is valid. The argument is a physical page number. * We mimic x86 here by disallowing access to system RAM as well as * device-exclusive MMIO regions. This effectively disable read()/write() * on /dev/mem. */ int devmem_is_allowed(unsigned long pfn) { if (iomem_is_exclusive(pfn << PAGE_SHIFT)) return 0; if (!page_is_ram(pfn)) return 1; return 0; } #endif
gpl-2.0
OPNay/android_kernel_samsung_jalte
drivers/staging/bcm/hostmibs.c
4947
5968
/* * File Name: hostmibs.c * * Author: Beceem Communications Pvt. Ltd * * Abstract: This file contains the routines to copy the statistics used by * the driver to the Host MIBS structure and giving the same to Application. */ #include "headers.h" INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs) { S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL; S_PHS_RULE *pstPhsRule = NULL; S_CLASSIFIER_TABLE *pstClassifierTable = NULL; S_CLASSIFIER_ENTRY *pstClassifierRule = NULL; PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION) &Adapter->stBCMPhsContext; UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0; if (pDeviceExtension == NULL) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n"); return STATUS_FAILURE; } /* Copy the classifier Table */ for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) { if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE) memcpy((PVOID) & pstHostMibs-> astClassifierTable[nClassifierIndex], (PVOID) & Adapter-> astClassifierTable[nClassifierIndex], sizeof(S_MIBS_CLASSIFIER_RULE)); } /* Copy the SF Table */ for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) { if (Adapter->PackInfo[nSfIndex].bValid) { memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex], (PVOID) & Adapter->PackInfo[nSfIndex], sizeof(S_MIBS_SERVICEFLOW_TABLE)); } else { /* If index in not valid, * don't process this for the PHS table. * Go For the next entry. */ continue; } /* Retrieve the SFID Entry Index for requested Service Flow */ if (PHS_INVALID_TABLE_INDEX == GetServiceFlowEntry(pDeviceExtension-> pstServiceFlowPhsRulesTable, Adapter->PackInfo[nSfIndex]. 
usVCID_Value, &pstServiceFlowEntry)) continue; pstClassifierTable = pstServiceFlowEntry->pstClassifierTable; for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) { pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex]; if (pstClassifierRule->bUsed) { pstPhsRule = pstClassifierRule->pstPhsRule; pstHostMibs->astPhsRulesTable[nPhsTableIndex]. ulSFID = Adapter->PackInfo[nSfIndex].ulSFID; memcpy(&pstHostMibs-> astPhsRulesTable[nPhsTableIndex].u8PHSI, &pstPhsRule->u8PHSI, sizeof(S_PHS_RULE)); nPhsTableIndex++; } } } /* Copy other Host Statistics parameters */ pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets; pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets; pstHostMibs->stHostInfo.CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc); pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize; pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize; pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive; pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD; memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist, Adapter->aTxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES); memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist, Adapter->aRxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES); return STATUS_SUCCESS; } VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang) { memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs), &(pTarang->stDroppedAppCntrlMsgs), sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES)); } VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter, CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex) { S_MIBS_EXTSERVICEFLOW_PARAMETERS *t = &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable; t->wmanIfSfid = psfLocalSet->u32SFID; t->wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate; t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst; t->wmanIfCmnCpsMinReservedRate = 
psfLocalSet->u32MinReservedTrafficRate; t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter; t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency; t->wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator; t->wmanIfCmnCpsFixedVsVariableSduInd = ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd); t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize; t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize); t->wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType; t->wmanIfCmnCpsSfSchedulingType = ntohl(t->wmanIfCmnCpsSfSchedulingType); t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable; t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable); t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize); t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize); t->wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime); t->wmanIfCmnCpsArqBlockLifetime = ntohl(t->wmanIfCmnCpsArqBlockLifetime); t->wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut); t->wmanIfCmnCpsArqSyncLossTimeout = ntohl(t->wmanIfCmnCpsArqSyncLossTimeout); t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder; t->wmanIfCmnCpsArqDeliverInOrder = ntohl(t->wmanIfCmnCpsArqDeliverInOrder); t->wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut); t->wmanIfCmnCpsArqRxPurgeTimeout = ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout); t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize); t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize); t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy; t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy); t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification; t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification); t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID); t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid); }
gpl-2.0
Validus-Lollipop/android_kernel_motorola_msm8960dt-common
drivers/staging/bcm/InterfaceDld.c
4947
10932
#include "headers.h" int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc) { /* unsigned int reg = 0; */ mm_segment_t oldfs = {0}; int errno = 0, len = 0; /* ,is_config_file = 0 */ loff_t pos = 0; PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg; /* PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter; */ char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL); if (!buff) return -ENOMEM; while (1) { oldfs = get_fs(); set_fs(get_ds()); len = vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos); set_fs(oldfs); if (len <= 0) { if (len < 0) { BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0"); errno = len; } else { errno = 0; BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!"); } break; } /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT, * DBG_LVL_ALL, buff, * MAX_TRANSFER_CTRL_BYTE_USB); */ errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len); if (errno) { BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed! status: %d", errno); break; } on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB; } kfree(buff); return errno; } int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_chip_loc) { char *buff, *buff_readback; unsigned int reg = 0; mm_segment_t oldfs = {0}; int errno = 0, len = 0, is_config_file = 0; loff_t pos = 0; static int fw_down; INT Status = STATUS_SUCCESS; PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg; int bytes; buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA); buff_readback = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA); if (!buff || !buff_readback) { kfree(buff); kfree(buff_readback); return -ENOMEM; } is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR) ? 
1 : 0; memset(buff_readback, 0, MAX_TRANSFER_CTRL_BYTE_USB); memset(buff, 0, MAX_TRANSFER_CTRL_BYTE_USB); while (1) { oldfs = get_fs(); set_fs(get_ds()); len = vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos); set_fs(oldfs); fw_down++; if (len <= 0) { if (len < 0) { BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0"); errno = len; } else { errno = 0; BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!"); } break; } bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len); if (bytes < 0) { Status = bytes; BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg); goto exit; } reg++; if ((len-sizeof(unsigned int)) < 4) { if (memcmp(buff_readback, buff, len)) { BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down); BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Length is: %d", len); Status = -EIO; goto exit; } } else { len -= 4; while (len) { if (*(unsigned int *)&buff_readback[len] != *(unsigned int *)&buff[len]) { BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down); BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&buff_readback[len]); BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len); Status = -EIO; goto exit; } len -= 4; } } on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB; } /* End of while(1) */ exit: kfree(buff); kfree(buff_readback); return Status; } static int bcm_download_config_file(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo) { int retval = STATUS_SUCCESS; B_UINT32 value = 0; if (Adapter->pstargetparams == 
NULL) { Adapter->pstargetparams = kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL); if (Adapter->pstargetparams == NULL) return -ENOMEM; } if (psFwInfo->u32FirmwareLength != sizeof(STARGETPARAMS)) return -EIO; retval = copy_from_user(Adapter->pstargetparams, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength); if (retval) { kfree(Adapter->pstargetparams); Adapter->pstargetparams = NULL; return -EFAULT; } /* Parse the structure and then Download the Firmware */ beceem_parse_target_struct(Adapter); /* Initializing the NVM. */ BcmInitNVM(Adapter); retval = InitLedSettings(Adapter); if (retval) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "INIT LED Failed\n"); return retval; } if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { Adapter->LEDInfo.bLedInitDone = FALSE; Adapter->DriverState = DRIVER_INIT; wake_up(&Adapter->LEDInfo.notify_led_event); } if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { Adapter->DriverState = FW_DOWNLOAD; wake_up(&Adapter->LEDInfo.notify_led_event); } /* Initialize the DDR Controller */ retval = ddr_init(Adapter); if (retval) { BCM_DEBUG_PRINT (Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Init Failed\n"); return retval; } value = 0; wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value)); wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value)); if (Adapter->eNVMType == NVM_FLASH) { retval = PropagateCalParamsFromFlashToMemory(Adapter); if (retval) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "propagaion of cal param failed with status :%d", retval); return retval; } } retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(STARGETPARAMS), CONFIG_BEGIN_ADDR); if (retval) BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly"); else Adapter->bCfgDownloaded = TRUE; return retval; } static int bcm_compare_buff_contents(unsigned 
char *readbackbuff, unsigned char *buff, unsigned int len) { int retval = STATUS_SUCCESS; PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev); if ((len-sizeof(unsigned int)) < 4) { if (memcmp(readbackbuff , buff, len)) retval = -EINVAL; } else { len -= 4; while (len) { if (*(unsigned int *)&readbackbuff[len] != *(unsigned int *)&buff[len]) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper"); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&readbackbuff[len]); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len); retval = -EINVAL; break; } len -= 4; } } return retval; } int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo) { int retval = STATUS_SUCCESS; PUCHAR buff = NULL; /* Config File is needed for the Driver to download the Config file and * Firmware. Check for the Config file to be first to be sent from the * Application */ atomic_set(&Adapter->uiMBupdate, FALSE); if (!Adapter->bCfgDownloaded && psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) { /* Can't Download Firmware. 
*/ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Download the config File first\n"); return -EINVAL; } /* If Config File, Finish the DDR Settings and then Download CFG File */ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) { retval = bcm_download_config_file(Adapter, psFwInfo); } else { buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL); if (buff == NULL) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed in allocation memory"); return -ENOMEM; } retval = copy_from_user(buff, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength); if (retval != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copying buffer from user space failed"); retval = -EFAULT; goto error; } retval = buffDnldVerify(Adapter, buff, psFwInfo->u32FirmwareLength, psFwInfo->u32StartingAddress); if (retval != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "f/w download failed status :%d", retval); goto error; } } error: kfree(buff); return retval; } static INT buffDnld(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress) { unsigned int len = 0; int retval = STATUS_SUCCESS; len = u32FirmwareLength; while (u32FirmwareLength) { len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB); retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len); if (retval) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed with status :%d", retval); break; } u32StartingAddress += len; u32FirmwareLength -= len; mappedbuffer += len; } return retval; } static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress) { UINT len = u32FirmwareLength; INT retval = STATUS_SUCCESS; PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL); int bytes; if (NULL == readbackbuff) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY 
ALLOCATION FAILED"); return -ENOMEM; } while (u32FirmwareLength && !retval) { len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB); bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len); if (bytes < 0) { retval = bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval); break; } retval = bcm_compare_buff_contents(readbackbuff, mappedbuffer, len); if (STATUS_SUCCESS != retval) break; u32StartingAddress += len; u32FirmwareLength -= len; mappedbuffer += len; } /* end of while (u32FirmwareLength && !retval) */ kfree(readbackbuff); return retval; } INT buffDnldVerify(PMINI_ADAPTER Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength, unsigned long u32StartingAddress) { INT status = STATUS_SUCCESS; status = buffDnld(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress); if (status != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer download failed"); goto error; } status = buffRdbkVerify(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress); if (status != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer readback verifier failed"); goto error; } error: return status; }
gpl-2.0
zparallax/amplitude_aosp_12_1
drivers/staging/phison/phison.c
4947
2586
/* * Copyright (C) 2006 Red Hat <evan_ko@phison.com> * * May be copied or modified under the terms of the GNU General Public License * * [Modify History] * #0001, Evan, 2008.10.22, V0.00, New release. * #0002, Evan, 2008.11.01, V0.90, Test Work In Ubuntu Linux 8.04. * #0003, Evan, 2008.01.08, V0.91, Change Name "PCIE-SSD" to "E-BOX". */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define PHISON_DEBUG #define DRV_NAME "phison_e-box" /* #0003 */ #define DRV_VERSION "0.91" /* #0003 */ #define PCI_VENDOR_ID_PHISON 0x1987 #define PCI_DEVICE_ID_PS5000 0x5000 static int phison_pre_reset(struct ata_link *link, unsigned long deadline) { int ret; struct ata_port *ap = link->ap; ap->cbl = ATA_CBL_NONE; ret = ata_std_prereset(link, deadline); dev_dbg(ap->dev, "phison_pre_reset(), ret = %x\n", ret); return ret; } static struct scsi_host_template phison_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations phison_ops = { .inherits = &ata_bmdma_port_ops, .prereset = phison_pre_reset, }; static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; struct ata_port_info info = { .flags = ATA_FLAG_NO_ATAPI, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = ATA_UDMA5, .port_ops = &phison_ops, }; const struct ata_port_info *ppi[] = { &info, NULL }; ret = ata_pci_bmdma_init_one(pdev, ppi, &phison_sht, NULL, 0); dev_dbg(&pdev->dev, "phison_init_one(), ret = %x\n", ret); return ret; } static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000), PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, phison_pci_tbl); static struct pci_driver phison_pci_driver = { .name = DRV_NAME, .id_table = phison_pci_tbl, .probe = phison_init_one, .remove = ata_pci_remove_one, 
#ifdef CONFIG_PM /* haven't tested it. */ .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init phison_ide_init(void) { return pci_register_driver(&phison_pci_driver); } static void __exit phison_ide_exit(void) { pci_unregister_driver(&phison_pci_driver); } module_init(phison_ide_init); module_exit(phison_ide_exit); MODULE_AUTHOR("Evan Ko"); MODULE_DESCRIPTION("PCIE driver module for PHISON PS5000 E-BOX"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
TeamExodus/kernel_lge_g3
arch/sh/mm/alignment.c
7507
4763
/* * Alignment access counters and corresponding user-space interfaces. * * Copyright (C) 2009 ST Microelectronics * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <asm/alignment.h> #include <asm/processor.h> static unsigned long se_user; static unsigned long se_sys; static unsigned long se_half; static unsigned long se_word; static unsigned long se_dword; static unsigned long se_multi; /* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not valid! */ static int se_usermode = UM_WARN | UM_FIXUP; /* 0: no warning 1: print a warning message, disabled by default */ static int se_kernmode_warn; core_param(alignment, se_usermode, int, 0600); void inc_unaligned_byte_access(void) { se_half++; } void inc_unaligned_word_access(void) { se_word++; } void inc_unaligned_dword_access(void) { se_dword++; } void inc_unaligned_multi_access(void) { se_multi++; } void inc_unaligned_user_access(void) { se_user++; } void inc_unaligned_kernel_access(void) { se_sys++; } /* * This defaults to the global policy which can be set from the command * line, while processes can overload their preferences via prctl(). 
*/ unsigned int unaligned_user_action(void) { unsigned int action = se_usermode; if (current->thread.flags & SH_THREAD_UAC_SIGBUS) { action &= ~UM_FIXUP; action |= UM_SIGNAL; } if (current->thread.flags & SH_THREAD_UAC_NOPRINT) action &= ~UM_WARN; return action; } int get_unalign_ctl(struct task_struct *tsk, unsigned long addr) { return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK, (unsigned int __user *)addr); } int set_unalign_ctl(struct task_struct *tsk, unsigned int val) { tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) | (val & SH_THREAD_UAC_MASK); return 0; } void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn, struct pt_regs *regs) { if (user_mode(regs) && (se_usermode & UM_WARN)) pr_notice_ratelimited("Fixing up unaligned userspace access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", tsk->comm, task_pid_nr(tsk), (void *)instruction_pointer(regs), insn); else if (se_kernmode_warn) pr_notice_ratelimited("Fixing up unaligned kernel access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", tsk->comm, task_pid_nr(tsk), (void *)instruction_pointer(regs), insn); } static const char *se_usermode_action[] = { "ignored", "warn", "fixup", "fixup+warn", "signal", "signal+warn" }; static int alignment_proc_show(struct seq_file *m, void *v) { seq_printf(m, "User:\t\t%lu\n", se_user); seq_printf(m, "System:\t\t%lu\n", se_sys); seq_printf(m, "Half:\t\t%lu\n", se_half); seq_printf(m, "Word:\t\t%lu\n", se_word); seq_printf(m, "DWord:\t\t%lu\n", se_dword); seq_printf(m, "Multi:\t\t%lu\n", se_multi); seq_printf(m, "User faults:\t%i (%s)\n", se_usermode, se_usermode_action[se_usermode]); seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, se_kernmode_warn ? 
"+warn" : ""); return 0; } static int alignment_proc_open(struct inode *inode, struct file *file) { return single_open(file, alignment_proc_show, NULL); } static ssize_t alignment_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { int *data = PDE(file->f_path.dentry->d_inode)->data; char mode; if (count > 0) { if (get_user(mode, buffer)) return -EFAULT; if (mode >= '0' && mode <= '5') *data = mode - '0'; } return count; } static const struct file_operations alignment_proc_fops = { .owner = THIS_MODULE, .open = alignment_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = alignment_proc_write, }; /* * This needs to be done after sysctl_init, otherwise sys/ will be * overwritten. Actually, this shouldn't be in sys/ at all since * it isn't a sysctl, and it doesn't contain sysctl information. * We now locate it in /proc/cpu/alignment instead. */ static int __init alignment_init(void) { struct proc_dir_entry *dir, *res; dir = proc_mkdir("cpu", NULL); if (!dir) return -ENOMEM; res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir, &alignment_proc_fops, &se_usermode); if (!res) return -ENOMEM; res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir, &alignment_proc_fops, &se_kernmode_warn); if (!res) return -ENOMEM; return 0; } fs_initcall(alignment_init);
gpl-2.0
jejecule/android_kernel_oppo_msm8974
arch/cris/kernel/time.c
7507
3853
/* * linux/arch/cris/kernel/time.c * * Copyright (C) 1991, 1992, 1995 Linus Torvalds * Copyright (C) 1999, 2000, 2001 Axis Communications AB * * 1994-07-02 Alan Modra * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime * 1995-03-26 Markus Kuhn * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 * precision CMOS clock update * 1996-05-03 Ingo Molnar * fixed time warps in do_[slow|fast]_gettimeoffset() * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * * Linux/CRIS specific code: * * Authors: Bjorn Wesen * Johan Adolfsson * */ #include <asm/rtc.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/param.h> #include <linux/jiffies.h> #include <linux/bcd.h> #include <linux/timex.h> #include <linux/init.h> #include <linux/profile.h> #include <linux/sched.h> /* just for sched_clock() - funny that */ int have_rtc; /* used to remember if we have an RTC or not */; #define TICK_SIZE tick extern unsigned long loops_per_jiffy; /* init/main.c */ unsigned long loops_per_usec; #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET extern unsigned long do_slow_gettimeoffset(void); static unsigned long (*do_gettimeoffset)(void) = do_slow_gettimeoffset; u32 arch_gettimeoffset(void) { return do_gettimeoffset() * 1000; } #endif /* * BUG: This routine does not handle hour overflow properly; it just * sets the minutes. Usually you'll only notice that after reboot! */ int set_rtc_mmss(unsigned long nowtime) { int retval = 0; int real_seconds, real_minutes, cmos_minutes; printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime); if(!have_rtc) return 0; cmos_minutes = CMOS_READ(RTC_MINUTES); cmos_minutes = bcd2bin(cmos_minutes); /* * since we're only adjusting minutes and seconds, * don't interfere with hour overflow. 
This avoids * messing with unknown time zones but requires your * RTC not to be off by more than 15 minutes */ real_seconds = nowtime % 60; real_minutes = nowtime / 60; if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; if (abs(real_minutes - cmos_minutes) < 30) { real_seconds = bin2bcd(real_seconds); real_minutes = bin2bcd(real_minutes); CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); } else { printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; } return retval; } /* grab the time from the RTC chip */ unsigned long get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; if(!have_rtc) return 0; sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); day = CMOS_READ(RTC_DAY_OF_MONTH); mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); sec = bcd2bin(sec); min = bcd2bin(min); hour = bcd2bin(hour); day = bcd2bin(day); mon = bcd2bin(mon); year = bcd2bin(year); if ((year += 1900) < 1970) year += 100; return mktime(year, mon, day, hour, min, sec); } int update_persistent_clock(struct timespec now) { return set_rtc_mmss(now.tv_sec); } void read_persistent_clock(struct timespec *ts) { ts->tv_sec = get_cmos_time(); ts->tv_nsec = 0; } extern void cris_profile_sample(struct pt_regs* regs); void cris_do_profile(struct pt_regs* regs) { #ifdef CONFIG_SYSTEM_PROFILER cris_profile_sample(regs); #endif #ifdef CONFIG_PROFILING profile_tick(CPU_PROFILING); #endif } unsigned long long sched_clock(void) { return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ) + get_ns_in_jiffie(); } static int __init init_udelay(void) { loops_per_usec = (loops_per_jiffy * HZ) / 1000000; return 0; } __initcall(init_udelay);
gpl-2.0
davidmueller13/android_kernel_samsung_zeroflteskt
drivers/net/wireless/b43/leds.c
9555
9271
/* Broadcom B43 wireless driver LED control Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2005-2007 Michael Buesch <m@bues.ch> Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "leds.h" #include "rfkill.h" static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index, bool activelow) { u16 ctl; ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); if (activelow) ctl &= ~(1 << led_index); else ctl |= (1 << led_index); b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); } static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index, bool activelow) { u16 ctl; ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); if (activelow) ctl |= (1 << led_index); else ctl &= ~(1 << led_index); b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); } static void b43_led_update(struct b43_wldev *dev, struct b43_led *led) { bool radio_enabled; bool turn_on; if (!led->wl) return; radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable); /* The led->state read is racy, but we don't care. In case we raced * with the brightness_set handler, we will be called again soon * to fixup our state. 
*/ if (radio_enabled) turn_on = atomic_read(&led->state) != LED_OFF; else turn_on = false; if (turn_on == led->hw_state) return; led->hw_state = turn_on; if (turn_on) b43_led_turn_on(dev, led->index, led->activelow); else b43_led_turn_off(dev, led->index, led->activelow); } static void b43_leds_work(struct work_struct *work) { struct b43_leds *leds = container_of(work, struct b43_leds, work); struct b43_wl *wl = container_of(leds, struct b43_wl, leds); struct b43_wldev *dev; mutex_lock(&wl->mutex); dev = wl->current_dev; if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED)) goto out_unlock; b43_led_update(dev, &wl->leds.led_tx); b43_led_update(dev, &wl->leds.led_rx); b43_led_update(dev, &wl->leds.led_radio); b43_led_update(dev, &wl->leds.led_assoc); out_unlock: mutex_unlock(&wl->mutex); } /* Callback from the LED subsystem. */ static void b43_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { struct b43_led *led = container_of(led_dev, struct b43_led, led_dev); struct b43_wl *wl = led->wl; if (likely(!wl->leds.stop)) { atomic_set(&led->state, brightness); ieee80211_queue_work(wl->hw, &wl->leds.work); } } static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, const char *name, const char *default_trigger, u8 led_index, bool activelow) { int err; if (led->wl) return -EEXIST; if (!default_trigger) return -EINVAL; led->wl = dev->wl; led->index = led_index; led->activelow = activelow; strncpy(led->name, name, sizeof(led->name)); atomic_set(&led->state, 0); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; led->led_dev.brightness_set = b43_led_brightness_set; err = led_classdev_register(dev->dev->dev, &led->led_dev); if (err) { b43warn(dev->wl, "LEDs: Failed to register %s\n", name); led->wl = NULL; return err; } return 0; } static void b43_unregister_led(struct b43_led *led) { if (!led->wl) return; led_classdev_unregister(&led->led_dev); led->wl = NULL; } static void b43_map_led(struct 
b43_wldev *dev, u8 led_index, enum b43_led_behaviour behaviour, bool activelow) { struct ieee80211_hw *hw = dev->wl->hw; char name[B43_LED_MAX_NAME_LEN + 1]; /* Map the b43 specific LED behaviour value to the * generic LED triggers. */ switch (behaviour) { case B43_LED_INACTIVE: case B43_LED_OFF: case B43_LED_ON: break; case B43_LED_ACTIVITY: case B43_LED_TRANSFER: case B43_LED_APTRANSFER: snprintf(name, sizeof(name), "b43-%s::tx", wiphy_name(hw->wiphy)); b43_register_led(dev, &dev->wl->leds.led_tx, name, ieee80211_get_tx_led_name(hw), led_index, activelow); snprintf(name, sizeof(name), "b43-%s::rx", wiphy_name(hw->wiphy)); b43_register_led(dev, &dev->wl->leds.led_rx, name, ieee80211_get_rx_led_name(hw), led_index, activelow); break; case B43_LED_RADIO_ALL: case B43_LED_RADIO_A: case B43_LED_RADIO_B: case B43_LED_MODE_BG: snprintf(name, sizeof(name), "b43-%s::radio", wiphy_name(hw->wiphy)); b43_register_led(dev, &dev->wl->leds.led_radio, name, ieee80211_get_radio_led_name(hw), led_index, activelow); break; case B43_LED_WEIRD: case B43_LED_ASSOC: snprintf(name, sizeof(name), "b43-%s::assoc", wiphy_name(hw->wiphy)); b43_register_led(dev, &dev->wl->leds.led_assoc, name, ieee80211_get_assoc_led_name(hw), led_index, activelow); break; default: b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n", behaviour); break; } } static void b43_led_get_sprominfo(struct b43_wldev *dev, unsigned int led_index, enum b43_led_behaviour *behaviour, bool *activelow) { u8 sprom[4]; sprom[0] = dev->dev->bus_sprom->gpio0; sprom[1] = dev->dev->bus_sprom->gpio1; sprom[2] = dev->dev->bus_sprom->gpio2; sprom[3] = dev->dev->bus_sprom->gpio3; if (sprom[led_index] == 0xFF) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. 
*/ *activelow = false; switch (led_index) { case 0: *behaviour = B43_LED_ACTIVITY; *activelow = true; if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ) *behaviour = B43_LED_RADIO_ALL; break; case 1: *behaviour = B43_LED_RADIO_B; if (dev->dev->board_vendor == PCI_VENDOR_ID_ASUSTEK) *behaviour = B43_LED_ASSOC; break; case 2: *behaviour = B43_LED_RADIO_A; break; case 3: *behaviour = B43_LED_OFF; break; default: *behaviour = B43_LED_OFF; B43_WARN_ON(1); return; } } else { *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW); } } void b43_leds_init(struct b43_wldev *dev) { struct b43_led *led; unsigned int i; enum b43_led_behaviour behaviour; bool activelow; /* Sync the RF-kill LED state (if we have one) with radio and switch states. */ led = &dev->wl->leds.led_radio; if (led->wl) { if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) { b43_led_turn_on(dev, led->index, led->activelow); led->hw_state = true; atomic_set(&led->state, 1); } else { b43_led_turn_off(dev, led->index, led->activelow); led->hw_state = false; atomic_set(&led->state, 0); } } /* Initialize TX/RX/ASSOC leds */ led = &dev->wl->leds.led_tx; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); led->hw_state = false; atomic_set(&led->state, 0); } led = &dev->wl->leds.led_rx; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); led->hw_state = false; atomic_set(&led->state, 0); } led = &dev->wl->leds.led_assoc; if (led->wl) { b43_led_turn_off(dev, led->index, led->activelow); led->hw_state = false; atomic_set(&led->state, 0); } /* Initialize other LED states. */ for (i = 0; i < B43_MAX_NR_LEDS; i++) { b43_led_get_sprominfo(dev, i, &behaviour, &activelow); switch (behaviour) { case B43_LED_OFF: b43_led_turn_off(dev, i, activelow); break; case B43_LED_ON: b43_led_turn_on(dev, i, activelow); break; default: /* Leave others as-is. 
*/ break; } } dev->wl->leds.stop = 0; } void b43_leds_exit(struct b43_wldev *dev) { struct b43_leds *leds = &dev->wl->leds; b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow); b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow); b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow); b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow); } void b43_leds_stop(struct b43_wldev *dev) { struct b43_leds *leds = &dev->wl->leds; leds->stop = 1; cancel_work_sync(&leds->work); } void b43_leds_register(struct b43_wldev *dev) { unsigned int i; enum b43_led_behaviour behaviour; bool activelow; INIT_WORK(&dev->wl->leds.work, b43_leds_work); /* Register the LEDs to the LED subsystem. */ for (i = 0; i < B43_MAX_NR_LEDS; i++) { b43_led_get_sprominfo(dev, i, &behaviour, &activelow); b43_map_led(dev, i, behaviour, activelow); } } void b43_leds_unregister(struct b43_wl *wl) { struct b43_leds *leds = &wl->leds; b43_unregister_led(&leds->led_tx); b43_unregister_led(&leds->led_rx); b43_unregister_led(&leds->led_assoc); b43_unregister_led(&leds->led_radio); }
gpl-2.0
Ki113R/android_kernel_samsung_golden
fs/ncpfs/ncpsign_kernel.c
12883
3729
/* * ncpsign_kernel.c * * Arne de Bruijn (arne@knoware.nl), 1997 * */ #ifdef CONFIG_NCPFS_PACKET_SIGNING #include <linux/string.h> #include <linux/ncp.h> #include <linux/bitops.h> #include "ncp_fs.h" #include "ncpsign_kernel.h" /* i386: 32-bit, little endian, handles mis-alignment */ #ifdef __i386__ #define GET_LE32(p) (*(const int *)(p)) #define PUT_LE32(p,v) { *(int *)(p)=v; } #else /* from include/ncplib.h */ #define BVAL(buf,pos) (((const __u8 *)(buf))[pos]) #define PVAL(buf,pos) ((unsigned)BVAL(buf,pos)) #define BSET(buf,pos,val) (((__u8 *)(buf))[pos] = (val)) static inline __u16 WVAL_LH(const __u8 * buf, int pos) { return PVAL(buf, pos) | PVAL(buf, pos + 1) << 8; } static inline __u32 DVAL_LH(const __u8 * buf, int pos) { return WVAL_LH(buf, pos) | WVAL_LH(buf, pos + 2) << 16; } static inline void WSET_LH(__u8 * buf, int pos, __u16 val) { BSET(buf, pos, val & 0xff); BSET(buf, pos + 1, val >> 8); } static inline void DSET_LH(__u8 * buf, int pos, __u32 val) { WSET_LH(buf, pos, val & 0xffff); WSET_LH(buf, pos + 2, val >> 16); } #define GET_LE32(p) DVAL_LH(p,0) #define PUT_LE32(p,v) DSET_LH(p,0,v) #endif static void nwsign(char *r_data1, char *r_data2, char *outdata) { int i; unsigned int w0,w1,w2,w3; static int rbit[4]={0, 2, 1, 3}; #ifdef __i386__ unsigned int *data2=(unsigned int *)r_data2; #else unsigned int data2[16]; for (i=0;i<16;i++) data2[i]=GET_LE32(r_data2+(i<<2)); #endif w0=GET_LE32(r_data1); w1=GET_LE32(r_data1+4); w2=GET_LE32(r_data1+8); w3=GET_LE32(r_data1+12); for (i=0;i<16;i+=4) { w0=rol32(w0 + ((w1 & w2) | ((~w1) & w3)) + data2[i+0],3); w3=rol32(w3 + ((w0 & w1) | ((~w0) & w2)) + data2[i+1],7); w2=rol32(w2 + ((w3 & w0) | ((~w3) & w1)) + data2[i+2],11); w1=rol32(w1 + ((w2 & w3) | ((~w2) & w0)) + data2[i+3],19); } for (i=0;i<4;i++) { w0=rol32(w0 + (((w2 | w3) & w1) | (w2 & w3)) + 0x5a827999 + data2[i+0],3); w3=rol32(w3 + (((w1 | w2) & w0) | (w1 & w2)) + 0x5a827999 + data2[i+4],5); w2=rol32(w2 + (((w0 | w1) & w3) | (w0 & w1)) + 0x5a827999 + 
data2[i+8],9); w1=rol32(w1 + (((w3 | w0) & w2) | (w3 & w0)) + 0x5a827999 + data2[i+12],13); } for (i=0;i<4;i++) { w0=rol32(w0 + ((w1 ^ w2) ^ w3) + 0x6ed9eba1 + data2[rbit[i]+0],3); w3=rol32(w3 + ((w0 ^ w1) ^ w2) + 0x6ed9eba1 + data2[rbit[i]+8],9); w2=rol32(w2 + ((w3 ^ w0) ^ w1) + 0x6ed9eba1 + data2[rbit[i]+4],11); w1=rol32(w1 + ((w2 ^ w3) ^ w0) + 0x6ed9eba1 + data2[rbit[i]+12],15); } PUT_LE32(outdata,(w0+GET_LE32(r_data1)) & 0xffffffff); PUT_LE32(outdata+4,(w1+GET_LE32(r_data1+4)) & 0xffffffff); PUT_LE32(outdata+8,(w2+GET_LE32(r_data1+8)) & 0xffffffff); PUT_LE32(outdata+12,(w3+GET_LE32(r_data1+12)) & 0xffffffff); } /* Make a signature for the current packet and add it at the end of the */ /* packet. */ void __sign_packet(struct ncp_server *server, const char *packet, size_t size, __u32 totalsize, void *sign_buff) { unsigned char data[64]; memcpy(data, server->sign_root, 8); *(__u32*)(data + 8) = totalsize; if (size < 52) { memcpy(data + 12, packet, size); memset(data + 12 + size, 0, 52 - size); } else { memcpy(data + 12, packet, 52); } nwsign(server->sign_last, data, server->sign_last); memcpy(sign_buff, server->sign_last, 8); } int sign_verify_reply(struct ncp_server *server, const char *packet, size_t size, __u32 totalsize, const void *sign_buff) { unsigned char data[64]; unsigned char hash[16]; memcpy(data, server->sign_root, 8); *(__u32*)(data + 8) = totalsize; if (size < 52) { memcpy(data + 12, packet, size); memset(data + 12 + size, 0, 52 - size); } else { memcpy(data + 12, packet, 52); } nwsign(server->sign_last, data, hash); return memcmp(sign_buff, hash, 8); } #endif /* CONFIG_NCPFS_PACKET_SIGNING */
gpl-2.0
zparallax/amplitude_shamu
arch/mips/pci/fixup-capcella.c
13651
1580
/* * fixup-cappcela.c, The ZAO Networks Capcella specific PCI fixups. * * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/pci.h> #include <asm/vr41xx/capcella.h> /* * Shortcuts */ #define INT1 RTL8139_1_IRQ #define INT2 RTL8139_2_IRQ #define INTA PC104PLUS_INTA_IRQ #define INTB PC104PLUS_INTB_IRQ #define INTC PC104PLUS_INTC_IRQ #define INTD PC104PLUS_INTD_IRQ static char irq_tab_capcella[][5] __initdata = { [11] = { -1, INT1, INT1, INT1, INT1 }, [12] = { -1, INT2, INT2, INT2, INT2 }, [14] = { -1, INTA, INTB, INTC, INTD } }; int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_capcella[slot][pin]; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
peteralfonso/platform_kernel_tegra
arch/x86/math-emu/reg_divide.c
14419
4994
/*---------------------------------------------------------------------------+ | reg_divide.c | | | | Divide one FPU_REG by another and put the result in a destination FPU_REG.| | | | Copyright (C) 1996 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@jacobi.maths.monash.edu.au | | | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | The destination may be any FPU_REG, including one of the source FPU_REGs. | +---------------------------------------------------------------------------*/ #include "exception.h" #include "reg_constant.h" #include "fpu_emu.h" #include "fpu_system.h" /* Divide one register by another and put the result into a third register. */ int FPU_div(int flags, int rm, int control_w) { FPU_REG x, y; FPU_REG const *a, *b, *st0_ptr, *st_ptr; FPU_REG *dest; u_char taga, tagb, signa, signb, sign, saved_sign; int tag, deststnr; if (flags & DEST_RM) deststnr = rm; else deststnr = 0; if (flags & REV) { b = &st(0); st0_ptr = b; tagb = FPU_gettag0(); if (flags & LOADED) { a = (FPU_REG *) rm; taga = flags & 0x0f; } else { a = &st(rm); st_ptr = a; taga = FPU_gettagi(rm); } } else { a = &st(0); st0_ptr = a; taga = FPU_gettag0(); if (flags & LOADED) { b = (FPU_REG *) rm; tagb = flags & 0x0f; } else { b = &st(rm); st_ptr = b; tagb = FPU_gettagi(rm); } } signa = getsign(a); signb = getsign(b); sign = signa ^ signb; dest = &st(deststnr); saved_sign = getsign(dest); if (!(taga | tagb)) { /* Both regs Valid, this should be the most common case. 
*/ reg_copy(a, &x); reg_copy(b, &y); setpositive(&x); setpositive(&y); tag = FPU_u_div(&x, &y, dest, control_w, sign); if (tag < 0) return tag; FPU_settagi(deststnr, tag); return tag; } if (taga == TAG_Special) taga = FPU_Special(a); if (tagb == TAG_Special) tagb = FPU_Special(b); if (((taga == TAG_Valid) && (tagb == TW_Denormal)) || ((taga == TW_Denormal) && (tagb == TAG_Valid)) || ((taga == TW_Denormal) && (tagb == TW_Denormal))) { if (denormal_operand() < 0) return FPU_Exception; FPU_to_exp16(a, &x); FPU_to_exp16(b, &y); tag = FPU_u_div(&x, &y, dest, control_w, sign); if (tag < 0) return tag; FPU_settagi(deststnr, tag); return tag; } else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) { if (tagb != TAG_Zero) { /* Want to find Zero/Valid */ if (tagb == TW_Denormal) { if (denormal_operand() < 0) return FPU_Exception; } /* The result is zero. */ FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr); setsign(dest, sign); return TAG_Zero; } /* We have an exception condition, either 0/0 or Valid/Zero. */ if (taga == TAG_Zero) { /* 0/0 */ return arith_invalid(deststnr); } /* Valid/Zero */ return FPU_divide_by_zero(deststnr, sign); } /* Must have infinities, NaNs, etc */ else if ((taga == TW_NaN) || (tagb == TW_NaN)) { if (flags & LOADED) return real_2op_NaN((FPU_REG *) rm, flags & 0x0f, 0, st0_ptr); if (flags & DEST_RM) { int tag; tag = FPU_gettag0(); if (tag == TAG_Special) tag = FPU_Special(st0_ptr); return real_2op_NaN(st0_ptr, tag, rm, (flags & REV) ? st0_ptr : &st(rm)); } else { int tag; tag = FPU_gettagi(rm); if (tag == TAG_Special) tag = FPU_Special(&st(rm)); return real_2op_NaN(&st(rm), tag, 0, (flags & REV) ? 
st0_ptr : &st(rm)); } } else if (taga == TW_Infinity) { if (tagb == TW_Infinity) { /* infinity/infinity */ return arith_invalid(deststnr); } else { /* tagb must be Valid or Zero */ if ((tagb == TW_Denormal) && (denormal_operand() < 0)) return FPU_Exception; /* Infinity divided by Zero or Valid does not raise and exception, but returns Infinity */ FPU_copy_to_regi(a, TAG_Special, deststnr); setsign(dest, sign); return taga; } } else if (tagb == TW_Infinity) { if ((taga == TW_Denormal) && (denormal_operand() < 0)) return FPU_Exception; /* The result is zero. */ FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr); setsign(dest, sign); return TAG_Zero; } #ifdef PARANOID else { EXCEPTION(EX_INTERNAL | 0x102); return FPU_Exception; } #endif /* PARANOID */ return 0; }
gpl-2.0
jokerfr9/DragonsKernel_Kylessopen
arch/arm/mach-msm/clock-9615.c
84
51967
/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <asm/mach-types.h> #include <mach/msm_iomap.h> #include <mach/clk.h> #include <mach/msm_xo.h> #include <mach/rpm-9615.h> #include <mach/rpm-regulator.h> #include "clock-local.h" #include "clock-voter.h" #include "clock-rpm.h" #include "devices.h" #define REG(off) (MSM_CLK_CTL_BASE + (off)) #define REG_LPA(off) (MSM_LPASS_CLK_CTL_BASE + (off)) #define REG_GCC(off) (MSM_APCS_GCC_BASE + (off)) /* Peripheral clock registers. 
*/ #define CE1_HCLK_CTL_REG REG(0x2720) #define CE1_CORE_CLK_CTL_REG REG(0x2724) #define DMA_BAM_HCLK_CTL REG(0x25C0) #define CLK_HALT_CFPB_STATEA_REG REG(0x2FCC) #define CLK_HALT_CFPB_STATEB_REG REG(0x2FD0) #define CLK_HALT_CFPB_STATEC_REG REG(0x2FD4) #define CLK_HALT_DFAB_STATE_REG REG(0x2FC8) #define CLK_HALT_MSS_KPSS_MISC_STATE_REG REG(0x2FDC) #define CLK_HALT_SFPB_MISC_STATE_REG REG(0x2FD8) #define CLK_TEST_REG REG(0x2FA0) #define GPn_MD_REG(n) REG(0x2D00+(0x20*(n))) #define GPn_NS_REG(n) REG(0x2D24+(0x20*(n))) #define GSBIn_HCLK_CTL_REG(n) REG(0x29C0+(0x20*((n)-1))) #define GSBIn_QUP_APPS_MD_REG(n) REG(0x29C8+(0x20*((n)-1))) #define GSBIn_QUP_APPS_NS_REG(n) REG(0x29CC+(0x20*((n)-1))) #define GSBIn_RESET_REG(n) REG(0x29DC+(0x20*((n)-1))) #define GSBIn_UART_APPS_MD_REG(n) REG(0x29D0+(0x20*((n)-1))) #define GSBIn_UART_APPS_NS_REG(n) REG(0x29D4+(0x20*((n)-1))) #define PDM_CLK_NS_REG REG(0x2CC0) #define BB_PLL_ENA_SC0_REG REG(0x34C0) #define BB_PLL0_L_VAL_REG REG(0x30C4) #define BB_PLL0_M_VAL_REG REG(0x30C8) #define BB_PLL0_MODE_REG REG(0x30C0) #define BB_PLL0_N_VAL_REG REG(0x30CC) #define BB_PLL0_STATUS_REG REG(0x30D8) #define BB_PLL0_CONFIG_REG REG(0x30D4) #define BB_PLL0_TEST_CTL_REG REG(0x30D0) #define BB_PLL8_L_VAL_REG REG(0x3144) #define BB_PLL8_M_VAL_REG REG(0x3148) #define BB_PLL8_MODE_REG REG(0x3140) #define BB_PLL8_N_VAL_REG REG(0x314C) #define BB_PLL8_STATUS_REG REG(0x3158) #define BB_PLL8_CONFIG_REG REG(0x3154) #define BB_PLL8_TEST_CTL_REG REG(0x3150) #define BB_PLL14_L_VAL_REG REG(0x31C4) #define BB_PLL14_M_VAL_REG REG(0x31C8) #define BB_PLL14_MODE_REG REG(0x31C0) #define BB_PLL14_N_VAL_REG REG(0x31CC) #define BB_PLL14_STATUS_REG REG(0x31D8) #define BB_PLL14_CONFIG_REG REG(0x31D4) #define BB_PLL14_TEST_CTL_REG REG(0x31D0) #define SC_PLL0_L_VAL_REG REG(0x3208) #define SC_PLL0_M_VAL_REG REG(0x320C) #define SC_PLL0_MODE_REG REG(0x3200) #define SC_PLL0_N_VAL_REG REG(0x3210) #define SC_PLL0_STATUS_REG REG(0x321C) #define SC_PLL0_CONFIG_REG REG(0x3204) 
#define SC_PLL0_TEST_CTL_REG			REG(0x3218)
#define PLLTEST_PAD_CFG_REG			REG(0x2FA4)
#define PMEM_ACLK_CTL_REG			REG(0x25A0)
#define RINGOSC_NS_REG				REG(0x2DC0)
#define RINGOSC_STATUS_REG			REG(0x2DCC)
#define RINGOSC_TCXO_CTL_REG			REG(0x2DC4)
/* Shared HW-vote register: one enable bit per voteable branch/PLL. */
#define SC0_U_CLK_BRANCH_ENA_VOTE_REG		REG(0x3080)
/* SDC controller banks; (n) is 1-based. */
#define SDCn_APPS_CLK_MD_REG(n)			REG(0x2828+(0x20*((n)-1)))
#define SDCn_APPS_CLK_NS_REG(n)			REG(0x282C+(0x20*((n)-1)))
#define SDCn_HCLK_CTL_REG(n)			REG(0x2820+(0x20*((n)-1)))
#define SDCn_RESET_REG(n)			REG(0x2830+(0x20*((n)-1)))
/* USB HS1 and HSIC controller clock registers. */
#define USB_HS1_HCLK_CTL_REG			REG(0x2900)
#define USB_HS1_RESET_REG			REG(0x2910)
#define USB_HS1_XCVR_FS_CLK_MD_REG		REG(0x2908)
#define USB_HS1_XCVR_FS_CLK_NS_REG		REG(0x290C)
#define USB_HS1_SYS_CLK_MD_REG			REG(0x36A0)
#define USB_HS1_SYS_CLK_NS_REG			REG(0x36A4)
#define USB_HSIC_HCLK_CTL_REG			REG(0x2920)
#define USB_HSIC_XCVR_FS_CLK_MD_REG		REG(0x2924)
#define USB_HSIC_XCVR_FS_CLK_NS_REG		REG(0x2928)
#define USB_HSIC_RESET_REG			REG(0x2934)
#define USB_HSIC_HSIO_CAL_CLK_CTL_REG		REG(0x2B48)
#define USB_HSIC_CLK_MD_REG			REG(0x2B4C)
#define USB_HSIC_CLK_NS_REG			REG(0x2B50)
#define USB_HSIC_SYSTEM_CLK_MD_REG		REG(0x2B54)
#define USB_HSIC_SYSTEM_CLK_NS_REG		REG(0x2B58)
#define SLIMBUS_XO_SRC_CLK_CTL_REG		REG(0x2628)

/* Low-power Audio clock registers. */
#define LCC_CLK_HS_DEBUG_CFG_REG		REG_LPA(0x00A4)
#define LCC_CLK_LS_DEBUG_CFG_REG		REG_LPA(0x00A8)
#define LCC_CODEC_I2S_MIC_MD_REG		REG_LPA(0x0064)
#define LCC_CODEC_I2S_MIC_NS_REG		REG_LPA(0x0060)
#define LCC_CODEC_I2S_MIC_STATUS_REG		REG_LPA(0x0068)
#define LCC_CODEC_I2S_SPKR_MD_REG		REG_LPA(0x0070)
#define LCC_CODEC_I2S_SPKR_NS_REG		REG_LPA(0x006C)
#define LCC_CODEC_I2S_SPKR_STATUS_REG		REG_LPA(0x0074)
#define LCC_MI2S_MD_REG				REG_LPA(0x004C)
#define LCC_MI2S_NS_REG				REG_LPA(0x0048)
#define LCC_MI2S_STATUS_REG			REG_LPA(0x0050)
#define LCC_PCM_MD_REG				REG_LPA(0x0058)
#define LCC_PCM_NS_REG				REG_LPA(0x0054)
#define LCC_PCM_STATUS_REG			REG_LPA(0x005C)
#define LCC_PLL0_STATUS_REG			REG_LPA(0x0018)
#define LCC_SPARE_I2S_MIC_MD_REG		REG_LPA(0x007C)
#define LCC_SPARE_I2S_MIC_NS_REG		REG_LPA(0x0078)
#define LCC_SPARE_I2S_MIC_STATUS_REG		REG_LPA(0x0080)
#define LCC_SPARE_I2S_SPKR_MD_REG		REG_LPA(0x0088)
#define LCC_SPARE_I2S_SPKR_NS_REG		REG_LPA(0x0084)
#define LCC_SPARE_I2S_SPKR_STATUS_REG		REG_LPA(0x008C)
#define LCC_SLIMBUS_NS_REG			REG_LPA(0x00CC)
#define LCC_SLIMBUS_MD_REG			REG_LPA(0x00D0)
#define LCC_SLIMBUS_STATUS_REG			REG_LPA(0x00D4)
#define LCC_AHBEX_BRANCH_CTL_REG		REG_LPA(0x00E4)
#define LCC_PRI_PLL_CLK_CTL_REG			REG_LPA(0x00C4)
#define GCC_APCS_CLK_DIAG			REG_GCC(0x001C)

/* MUX source input identifiers.
*/
/* MUX source input identifiers: selector values programmed into the
 * source-select fields of the various NS registers.
 */
#define cxo_to_bb_mux		0
#define pll8_to_bb_mux		3
#define pll14_to_bb_mux		4
#define gnd_to_bb_mux		6
#define cxo_to_xo_mux		0
#define gnd_to_xo_mux		3
#define cxo_to_lpa_mux		1
#define pll4_to_lpa_mux		2
#define gnd_to_lpa_mux		6

/* Test Vector Macros */
#define TEST_TYPE_PER_LS	1
#define TEST_TYPE_PER_HS	2
#define TEST_TYPE_LPA		5
#define TEST_TYPE_LPA_HS	6
#define TEST_TYPE_SHIFT		24
#define TEST_CLK_SEL_MASK	BM(23, 0)
/* Pack a debug-mux selector (s) and test type (t) into one word. */
#define TEST_VECTOR(s, t)	(((t) << TEST_TYPE_SHIFT) | BVAL(23, 0, (s)))
#define TEST_PER_LS(s)		TEST_VECTOR((s), TEST_TYPE_PER_LS)
#define TEST_PER_HS(s)		TEST_VECTOR((s), TEST_TYPE_PER_HS)
#define TEST_LPA(s)		TEST_VECTOR((s), TEST_TYPE_LPA)
#define TEST_LPA_HS(s)		TEST_VECTOR((s), TEST_TYPE_LPA_HS)

#define MN_MODE_DUAL_EDGE 0x2

/* MD Registers: M is written directly, N is written inverted (~n). */
#define MD8(m_lsb, m, n_lsb, n)	\
		(BVAL((m_lsb+7), m_lsb, m) | BVAL((n_lsb+7), n_lsb, ~(n)))
#define MD16(m, n) (BVAL(31, 16, m) | BVAL(15, 0, ~(n)))

/* NS Registers: the N field is programmed as ~(n-m); the dual-edge MND
 * mode bits and the MN counter only apply when n is non-zero (the
 * "* !!(n)" factor drops them for integer-divide-only frequencies).
 */
#define NS(n_msb, n_lsb, n, m, mde_lsb, d_msb, d_lsb, d, s_msb, s_lsb, s) \
		(BVAL(n_msb, n_lsb, ~(n-m)) \
		| (BVAL((mde_lsb+1), mde_lsb, MN_MODE_DUAL_EDGE) * !!(n)) \
		| BVAL(d_msb, d_lsb, (d-1)) | BVAL(s_msb, s_lsb, s))

#define NS_SRC_SEL(s_msb, s_lsb, s) \
		BVAL(s_msb, s_lsb, s)

/* Digital-rail voltage corners used to scale VDD with clock rate. */
enum vdd_dig_levels {
	VDD_DIG_NONE,
	VDD_DIG_LOW,
	VDD_DIG_NOMINAL,
	VDD_DIG_HIGH
};

/*
 * Vote for the digital rail voltage matching the requested corner.
 * Votes go through the RPM regulator driver on PM8018 S1 (voter 3);
 * the max constraint is always the HIGH corner's voltage.
 */
static int set_vdd_dig(struct clk_vdd_class *vdd_class, int level)
{
	static const int vdd_uv[] = {
		[VDD_DIG_NONE]    =       0,
		[VDD_DIG_LOW]     =  945000,
		[VDD_DIG_NOMINAL] = 1050000,
		[VDD_DIG_HIGH]    = 1150000
	};

	return rpm_vreg_set_voltage(RPM_VREG_ID_PM8018_S1, RPM_VREG_VOTER3,
				    vdd_uv[level], vdd_uv[VDD_DIG_HIGH], 1);
}

static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);

/* Attach fmax/corner pairs to a clock's vdd_class at initializer scope. */
#define VDD_DIG_FMAX_MAP1(l1, f1) \
	.vdd_class = &vdd_dig, \
	.fmax[VDD_DIG_##l1] = (f1)
#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
	.vdd_class = &vdd_dig, \
	.fmax[VDD_DIG_##l1] = (f1), \
	.fmax[VDD_DIG_##l2] = (f2)

/*
 * Clock Descriptions
 */

/* XO voter handle; NOTE(review): appears to be set up elsewhere in this
 * file (outside this chunk) before the cxo ops below can run.
 */
static struct msm_xo_voter *xo_cxo;

/* Vote the CXO crystal oscillator on via the msm_xo layer. */
static int cxo_clk_enable(struct clk *clk)
{
	return
msm_xo_mode_vote(xo_cxo, MSM_XO_MODE_ON);
}

/* Release this driver's CXO vote; the XO layer aggregates all voters. */
static void cxo_clk_disable(struct clk *clk)
{
	msm_xo_mode_vote(xo_cxo, MSM_XO_MODE_OFF);
}

static struct clk_ops clk_ops_cxo = {
	.enable = cxo_clk_enable,
	.disable = cxo_clk_disable,
	.get_rate = fixed_clk_get_rate,
	.is_local = local_clk_is_local,
};

/* 19.2 MHz crystal oscillator - root of the baseband clock tree. */
static struct fixed_clk cxo_clk = {
	.rate = 19200000,
	.c = {
		.dbg_name = "cxo_clk",
		.ops = &clk_ops_cxo,
		CLK_INIT(cxo_clk.c),
	},
};

/* Protects the shared soft_vote words used by the acpu-vote PLL ops. */
static DEFINE_SPINLOCK(soft_vote_lock);

/*
 * Enable a PLL that is shared between two software voters (PRIMARY and
 * ACPU) behind a single hardware vote bit: only the first soft voter
 * actually toggles the hardware; later voters just set their mask bit.
 */
static int pll_acpu_vote_clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&soft_vote_lock, flags);

	if (!*pll->soft_vote)
		ret = pll_vote_clk_enable(clk);
	if (ret == 0)
		*pll->soft_vote |= (pll->soft_vote_mask);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
	return ret;
}

/* Drop this voter's bit; hardware is disabled only when no votes remain. */
static void pll_acpu_vote_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&soft_vote_lock, flags);

	*pll->soft_vote &= ~(pll->soft_vote_mask);
	if (!*pll->soft_vote)
		pll_vote_clk_disable(clk);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
}

static struct clk_ops clk_ops_pll_acpu_vote = {
	.enable = pll_acpu_vote_clk_enable,
	.disable = pll_acpu_vote_clk_disable,
	.auto_off = pll_acpu_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_rate = pll_vote_clk_get_rate,
	.get_parent = pll_vote_clk_get_parent,
	.is_local = local_clk_is_local,
};

/* Soft-vote bits: one word per PLL, one bit per software voter. */
#define PLL_SOFT_VOTE_PRIMARY	BIT(0)
#define PLL_SOFT_VOTE_ACPU	BIT(1)

static unsigned int soft_vote_pll0;

/* PLL0 @ 276 MHz - PRIMARY-processor soft-vote handle. */
static struct pll_vote_clk pll0_clk = {
	.rate = 276000000,
	.en_reg = BB_PLL_ENA_SC0_REG,
	.en_mask = BIT(0),
	.status_reg = BB_PLL0_STATUS_REG,
	.parent = &cxo_clk.c,
	.soft_vote = &soft_vote_pll0,
	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
	.c = {
		.dbg_name = "pll0_clk",
		.ops = &clk_ops_pll_acpu_vote,
		CLK_INIT(pll0_clk.c),
	},
};

/* Same PLL0 hardware, but voted through the ACPU soft-vote bit. */
static struct pll_vote_clk pll0_acpu_clk = {
	.rate = 276000000,
	.en_reg = BB_PLL_ENA_SC0_REG,
	.en_mask = BIT(0),
	.status_reg =
BB_PLL0_STATUS_REG,
	.soft_vote = &soft_vote_pll0,
	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
	.c = {
		.dbg_name = "pll0_acpu_clk",
		.ops = &clk_ops_pll_acpu_vote,
		CLK_INIT(pll0_acpu_clk.c),
	},
};

/* PLL4 @ 393.216 MHz - LPASS audio PLL (status lives in LPA space). */
static struct pll_vote_clk pll4_clk = {
	.rate = 393216000,
	.en_reg = BB_PLL_ENA_SC0_REG,
	.en_mask = BIT(4),
	.status_reg = LCC_PLL0_STATUS_REG,
	.parent = &cxo_clk.c,
	.c = {
		.dbg_name = "pll4_clk",
		.ops = &clk_ops_pll_vote,
		CLK_INIT(pll4_clk.c),
	},
};

static unsigned int soft_vote_pll8;

/* PLL8 @ 384 MHz - main peripheral PLL, PRIMARY soft-vote handle. */
static struct pll_vote_clk pll8_clk = {
	.rate = 384000000,
	.en_reg = BB_PLL_ENA_SC0_REG,
	.en_mask = BIT(8),
	.status_reg = BB_PLL8_STATUS_REG,
	.parent = &cxo_clk.c,
	.soft_vote = &soft_vote_pll8,
	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
	.c = {
		.dbg_name = "pll8_clk",
		.ops = &clk_ops_pll_acpu_vote,
		CLK_INIT(pll8_clk.c),
	},
};

/* Same PLL8 hardware, voted through the ACPU soft-vote bit. */
static struct pll_vote_clk pll8_acpu_clk = {
	.rate = 384000000,
	.en_reg = BB_PLL_ENA_SC0_REG,
	.en_mask = BIT(8),
	.status_reg = BB_PLL8_STATUS_REG,
	.soft_vote = &soft_vote_pll8,
	.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
	.c = {
		.dbg_name = "pll8_acpu_clk",
		.ops = &clk_ops_pll_acpu_vote,
		CLK_INIT(pll8_acpu_clk.c),
	},
};

/* PLL9 @ 440 MHz - controlled directly via its MODE register rather
 * than the shared vote register.
 */
static struct pll_clk pll9_acpu_clk = {
	.rate = 440000000,
	.mode_reg = SC_PLL0_MODE_REG,
	.c = {
		.dbg_name = "pll9_acpu_clk",
		.ops = &clk_ops_pll,
		CLK_INIT(pll9_acpu_clk.c),
	},
};

/* PLL14 @ 480 MHz - HSIC PLL. */
static struct pll_vote_clk pll14_clk = {
	.rate = 480000000,
	.en_reg = BB_PLL_ENA_SC0_REG,
	.en_mask = BIT(11),
	.status_reg = BB_PLL14_STATUS_REG,
	.parent = &cxo_clk.c,
	.c = {
		.dbg_name = "pll14_clk",
		.ops = &clk_ops_pll_vote,
		CLK_INIT(pll14_clk.c),
	},
};

/* Ops for root clock generators (RCGs) on this SoC. */
static struct clk_ops clk_ops_rcg_9615 = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.auto_off = rcg_clk_disable,
	.set_rate = rcg_clk_set_rate,
	.get_rate = rcg_clk_get_rate,
	.list_rate = rcg_clk_list_rate,
	.is_enabled = rcg_clk_is_enabled,
	.round_rate = rcg_clk_round_rate,
	.reset = rcg_clk_reset,
	.is_local = local_clk_is_local,
	.get_parent = rcg_clk_get_parent,
};

/* Ops for simple gate (branch) clocks. */
static struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
.disable = branch_clk_disable,
	.auto_off = branch_clk_disable,
	.is_enabled = branch_clk_is_enabled,
	.reset = branch_clk_reset,
	.is_local = local_clk_is_local,
	.get_parent = branch_clk_get_parent,
	.set_parent = branch_clk_set_parent,
};

/*
 * Peripheral Clocks
 */
/* General-purpose output clock generator n (MND-based RCG). */
#define CLK_GP(i, n, h_r, h_b) \
	struct rcg_clk i##_clk = { \
		.b = { \
			.ctl_reg = GPn_NS_REG(n), \
			.en_mask = BIT(9), \
			.halt_reg = h_r, \
			.halt_bit = h_b, \
		}, \
		.ns_reg = GPn_NS_REG(n), \
		.md_reg = GPn_MD_REG(n), \
		.root_en_mask = BIT(11), \
		.ns_mask = (BM(23, 16) | BM(6, 0)), \
		.set_rate = set_rate_mnd, \
		.freq_tbl = clk_tbl_gp, \
		.current_freq = &rcg_dummy_freq, \
		.c = { \
			.dbg_name = #i "_clk", \
			.ops = &clk_ops_rcg_9615, \
			VDD_DIG_FMAX_MAP1(LOW, 27000000), \
			CLK_INIT(i##_clk.c), \
		}, \
	}
/* Frequency-table entry for GP clocks: rate f from source s with
 * pre-divider d and M/N counter values m/n (n == 0 disables the MND).
 */
#define F_GP(f, s, d, m, n) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.md_val = MD8(16, m, 0, n), \
		.ns_val = NS(23, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
		.mnd_en_mask = BIT(8) * !!(n), \
	}

static struct clk_freq_tbl clk_tbl_gp[] = {
	F_GP(       0, gnd,  1, 0, 0),
	F_GP( 9600000, cxo,  2, 0, 0),
	F_GP(19200000, cxo,  1, 0, 0),
	F_END
};

static CLK_GP(gp0, 0, CLK_HALT_SFPB_MISC_STATE_REG, 7);
static CLK_GP(gp1, 1, CLK_HALT_SFPB_MISC_STATE_REG, 6);
static CLK_GP(gp2, 2, CLK_HALT_SFPB_MISC_STATE_REG, 5);

/* GSBI n UART core clock (16-bit M/N counter in bits 31:16). */
#define CLK_GSBI_UART(i, n, h_r, h_b) \
	struct rcg_clk i##_clk = { \
		.b = { \
			.ctl_reg = GSBIn_UART_APPS_NS_REG(n), \
			.en_mask = BIT(9), \
			.reset_reg = GSBIn_RESET_REG(n), \
			.reset_mask = BIT(0), \
			.halt_reg = h_r, \
			.halt_bit = h_b, \
		}, \
		.ns_reg = GSBIn_UART_APPS_NS_REG(n), \
		.md_reg = GSBIn_UART_APPS_MD_REG(n), \
		.root_en_mask = BIT(11), \
		.ns_mask = (BM(31, 16) | BM(6, 0)), \
		.set_rate = set_rate_mnd, \
		.freq_tbl = clk_tbl_gsbi_uart, \
		.current_freq = &rcg_dummy_freq, \
		.c = { \
			.dbg_name = #i "_clk", \
			.ops = &clk_ops_rcg_9615, \
			VDD_DIG_FMAX_MAP2(LOW, 32000000, NOMINAL, 64000000), \
			CLK_INIT(i##_clk.c), \
		}, \
	}
#define F_GSBI_UART(f, s, d, m, n) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.md_val = MD16(m, n), \
		.ns_val = NS(31, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
		.mnd_en_mask = BIT(8) * !!(n), \
	}

static struct clk_freq_tbl clk_tbl_gsbi_uart[] = {
	F_GSBI_UART(       0, gnd,  1,  0,   0),
	F_GSBI_UART( 3686400, pll8, 1,  6, 625),
	F_GSBI_UART( 7372800, pll8, 1, 12, 625),
	F_GSBI_UART(14745600, pll8, 1, 24, 625),
	F_GSBI_UART(16000000, pll8, 4,  1,   6),
	F_GSBI_UART(24000000, pll8, 4,  1,   4),
	F_GSBI_UART(32000000, pll8, 4,  1,   3),
	F_GSBI_UART(40000000, pll8, 1,  5,  48),
	F_GSBI_UART(46400000, pll8, 1, 29, 240),
	F_GSBI_UART(48000000, pll8, 4,  1,   2),
	F_GSBI_UART(51200000, pll8, 1,  2,  15),
	F_GSBI_UART(56000000, pll8, 1,  7,  48),
	F_GSBI_UART(58982400, pll8, 1, 96, 625),
	F_GSBI_UART(64000000, pll8, 2,  1,   3),
	F_END
};

static CLK_GSBI_UART(gsbi1_uart, 1, CLK_HALT_CFPB_STATEA_REG, 10);
static CLK_GSBI_UART(gsbi2_uart, 2, CLK_HALT_CFPB_STATEA_REG,  6);
static CLK_GSBI_UART(gsbi3_uart, 3, CLK_HALT_CFPB_STATEA_REG,  2);
static CLK_GSBI_UART(gsbi4_uart, 4, CLK_HALT_CFPB_STATEB_REG, 26);
static CLK_GSBI_UART(gsbi5_uart, 5, CLK_HALT_CFPB_STATEB_REG, 22);

/* GSBI n QUP (SPI/I2C) core clock (8-bit M/N counter in bits 23:16). */
#define CLK_GSBI_QUP(i, n, h_r, h_b) \
	struct rcg_clk i##_clk = { \
		.b = { \
			.ctl_reg = GSBIn_QUP_APPS_NS_REG(n), \
			.en_mask = BIT(9), \
			.reset_reg = GSBIn_RESET_REG(n), \
			.reset_mask = BIT(0), \
			.halt_reg = h_r, \
			.halt_bit = h_b, \
		}, \
		.ns_reg = GSBIn_QUP_APPS_NS_REG(n), \
		.md_reg = GSBIn_QUP_APPS_MD_REG(n), \
		.root_en_mask = BIT(11), \
		.ns_mask = (BM(23, 16) | BM(6, 0)), \
		.set_rate = set_rate_mnd, \
		.freq_tbl = clk_tbl_gsbi_qup, \
		.current_freq = &rcg_dummy_freq, \
		.c = { \
			.dbg_name = #i "_clk", \
			.ops = &clk_ops_rcg_9615, \
			VDD_DIG_FMAX_MAP2(LOW, 24000000, NOMINAL, 52000000), \
			CLK_INIT(i##_clk.c), \
		}, \
	}
#define F_GSBI_QUP(f, s, d, m, n) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.md_val = MD8(16, m, 0, n), \
		.ns_val = NS(23, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
		.mnd_en_mask = BIT(8) * !!(n), \
	}

static struct clk_freq_tbl clk_tbl_gsbi_qup[] = {
	F_GSBI_QUP(       0, gnd,  1, 0,  0),
	F_GSBI_QUP(  960000, cxo,  4, 1,  5),
	F_GSBI_QUP(
4800000, cxo,  4, 0,  1),
	F_GSBI_QUP( 9600000, cxo,  2, 0,  1),
	F_GSBI_QUP(15058800, pll8, 1, 2, 51),
	F_GSBI_QUP(24000000, pll8, 4, 1,  4),
	F_GSBI_QUP(25600000, pll8, 1, 1, 15),
	F_GSBI_QUP(48000000, pll8, 4, 1,  2),
	F_GSBI_QUP(51200000, pll8, 1, 2, 15),
	F_END
};

static CLK_GSBI_QUP(gsbi1_qup, 1, CLK_HALT_CFPB_STATEA_REG,  9);
static CLK_GSBI_QUP(gsbi2_qup, 2, CLK_HALT_CFPB_STATEA_REG,  4);
static CLK_GSBI_QUP(gsbi3_qup, 3, CLK_HALT_CFPB_STATEA_REG,  0);
static CLK_GSBI_QUP(gsbi4_qup, 4, CLK_HALT_CFPB_STATEB_REG, 24);
static CLK_GSBI_QUP(gsbi5_qup, 5, CLK_HALT_CFPB_STATEB_REG, 20);

/* PDM table entry - source select only, no MND (divider d is unused). */
#define F_PDM(f, s, d) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.ns_val = NS_SRC_SEL(1, 0, s##_to_xo_mux), \
	}

static struct clk_freq_tbl clk_tbl_pdm[] = {
	F_PDM(       0, gnd, 1),
	F_PDM(19200000, cxo, 1),
	F_END
};

/* PDM (pulse-density modulation) clock, fed straight from CXO. */
static struct rcg_clk pdm_clk = {
	.b = {
		.ctl_reg = PDM_CLK_NS_REG,
		.en_mask = BIT(9),
		/* Reset control shares the NS register (bit 12). */
		.reset_reg = PDM_CLK_NS_REG,
		.reset_mask = BIT(12),
		.halt_reg = CLK_HALT_CFPB_STATEC_REG,
		.halt_bit = 3,
	},
	.ns_reg = PDM_CLK_NS_REG,
	.root_en_mask = BIT(11),
	.ns_mask = BM(1, 0),
	.set_rate = set_rate_nop,
	.freq_tbl = clk_tbl_pdm,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "pdm_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP1(LOW, 19200000),
		CLK_INIT(pdm_clk.c),
	},
};

/* PMEM AXI branch clock. */
static struct branch_clk pmem_clk = {
	.b = {
		.ctl_reg = PMEM_ACLK_CTL_REG,
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 20,
	},
	.c = {
		.dbg_name = "pmem_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(pmem_clk.c),
	},
};

#define F_PRNG(f, s) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
	}

static struct clk_freq_tbl clk_tbl_prng[] = {
	F_PRNG(32000000, pll8),
	F_END
};

/* PRNG clock - enabled through the shared HW vote register, so its
 * halt status is checked with HALT_VOTED semantics.
 */
static struct rcg_clk prng_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(10),
		.halt_reg = CLK_HALT_SFPB_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 10,
	},
	.set_rate = set_rate_nop,
	.freq_tbl = clk_tbl_prng,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "prng_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP2(LOW,
32000000, NOMINAL, 65000000),
		CLK_INIT(prng_clk.c),
	},
};

/* SD-card controller core clock n, using frequency table f_table. */
#define CLK_SDC(name, n, h_b, f_table) \
	struct rcg_clk name = { \
		.b = { \
			.ctl_reg = SDCn_APPS_CLK_NS_REG(n), \
			.en_mask = BIT(9), \
			.reset_reg = SDCn_RESET_REG(n), \
			.reset_mask = BIT(0), \
			.halt_reg = CLK_HALT_DFAB_STATE_REG, \
			.halt_bit = h_b, \
		}, \
		.ns_reg = SDCn_APPS_CLK_NS_REG(n), \
		.md_reg = SDCn_APPS_CLK_MD_REG(n), \
		.root_en_mask = BIT(11), \
		.ns_mask = (BM(23, 16) | BM(6, 0)), \
		.set_rate = set_rate_mnd, \
		.freq_tbl = f_table, \
		.current_freq = &rcg_dummy_freq, \
		.c = { \
			.dbg_name = #name, \
			.ops = &clk_ops_rcg_9615, \
			VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000), \
			CLK_INIT(name.c), \
		}, \
	}
#define F_SDC(f, s, d, m, n) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.md_val = MD8(16, m, 0, n), \
		.ns_val = NS(23, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
		.mnd_en_mask = BIT(8) * !!(n), \
	}

static struct clk_freq_tbl clk_tbl_sdc1_2[] = {
	F_SDC(       0, gnd,  1, 0,   0),
	F_SDC(  144300, cxo,  1, 1, 133),
	F_SDC(  400000, pll8, 4, 1, 240),
	F_SDC(16000000, pll8, 4, 1,   6),
	F_SDC(17070000, pll8, 1, 2,  45),
	F_SDC(20210000, pll8, 1, 1,  19),
	F_SDC(24000000, pll8, 4, 1,   4),
	F_SDC(48000000, pll8, 4, 1,   2),
	F_END
};

static CLK_SDC(sdc1_clk, 1, 6, clk_tbl_sdc1_2);
static CLK_SDC(sdc2_clk, 2, 5, clk_tbl_sdc1_2);

/* Shared table-entry shape for the USB XCVR/system clocks below. */
#define F_USB(f, s, d, m, n) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.md_val = MD8(16, m, 0, n), \
		.ns_val = NS(23, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
		.mnd_en_mask = BIT(8) * !!(n), \
	}

static struct clk_freq_tbl clk_tbl_usb[] = {
	F_USB(       0, gnd,  1, 0,  0),
	F_USB(60000000, pll8, 1, 5, 32),
	F_END
};

static struct clk_freq_tbl clk_tbl_usb_hsic_sys[] = {
	F_USB(       0, gnd,  1, 0, 0),
	F_USB(64000000, pll8, 1, 1, 6),
	F_END
};

/* USB HS1 transceiver (XCVR) clock @ 60 MHz from PLL8. */
static struct rcg_clk usb_hs1_xcvr_clk = {
	.b = {
		.ctl_reg = USB_HS1_XCVR_FS_CLK_NS_REG,
		.en_mask = BIT(9),
		.reset_reg = USB_HS1_RESET_REG,
		.reset_mask = BIT(0),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 0,
	},
	.ns_reg = USB_HS1_XCVR_FS_CLK_NS_REG,
	.md_reg =
USB_HS1_XCVR_FS_CLK_MD_REG,
	.root_en_mask = BIT(11),
	.ns_mask = (BM(23, 16) | BM(6, 0)),
	.set_rate = set_rate_mnd,
	.freq_tbl = clk_tbl_usb,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "usb_hs1_xcvr_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP1(NOMINAL, 60000000),
		CLK_INIT(usb_hs1_xcvr_clk.c),
	},
};

/* USB HS1 system clock @ 60 MHz; shares the HS1 reset bit with the
 * transceiver clock above.
 */
static struct rcg_clk usb_hs1_sys_clk = {
	.b = {
		.ctl_reg = USB_HS1_SYS_CLK_NS_REG,
		.en_mask = BIT(9),
		.reset_reg = USB_HS1_RESET_REG,
		.reset_mask = BIT(0),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 4,
	},
	.ns_reg = USB_HS1_SYS_CLK_NS_REG,
	.md_reg = USB_HS1_SYS_CLK_MD_REG,
	.root_en_mask = BIT(11),
	.ns_mask = (BM(23, 16) | BM(6, 0)),
	.set_rate = set_rate_mnd,
	.freq_tbl = clk_tbl_usb,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "usb_hs1_sys_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP1(NOMINAL, 60000000),
		CLK_INIT(usb_hs1_sys_clk.c),
	},
};

/* USB HSIC transceiver clock @ 60 MHz. */
static struct rcg_clk usb_hsic_xcvr_clk = {
	.b = {
		.ctl_reg = USB_HSIC_XCVR_FS_CLK_NS_REG,
		.en_mask = BIT(9),
		.reset_reg = USB_HSIC_RESET_REG,
		.reset_mask = BIT(0),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 9,
	},
	.ns_reg = USB_HSIC_XCVR_FS_CLK_NS_REG,
	.md_reg = USB_HSIC_XCVR_FS_CLK_MD_REG,
	.root_en_mask = BIT(11),
	.ns_mask = (BM(23, 16) | BM(6, 0)),
	.set_rate = set_rate_mnd,
	.freq_tbl = clk_tbl_usb,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "usb_hsic_xcvr_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP1(LOW, 60000000),
		CLK_INIT(usb_hsic_xcvr_clk.c),
	},
};

/* USB HSIC system clock @ 64 MHz (separate table from the 60 MHz one). */
static struct rcg_clk usb_hsic_sys_clk = {
	.b = {
		.ctl_reg = USB_HSIC_SYSTEM_CLK_NS_REG,
		.en_mask = BIT(9),
		.reset_reg = USB_HSIC_RESET_REG,
		.reset_mask = BIT(0),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 7,
	},
	.ns_reg = USB_HSIC_SYSTEM_CLK_NS_REG,
	.md_reg = USB_HSIC_SYSTEM_CLK_MD_REG,
	.root_en_mask = BIT(11),
	.ns_mask = (BM(23, 16) | BM(6, 0)),
	.set_rate = set_rate_mnd,
	.freq_tbl = clk_tbl_usb_hsic_sys,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "usb_hsic_sys_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP1(LOW,
64000000),
		CLK_INIT(usb_hsic_sys_clk.c),
	},
};

static struct clk_freq_tbl clk_tbl_usb_hsic[] = {
	F_USB(        0, gnd,   1, 0, 0),
	F_USB(480000000, pll14, 1, 0, 0),
	F_END
};

/* USB HSIC core clock @ 480 MHz from PLL14; no halt status bit is
 * available, so a fixed delay is used after enable (halt_check = DELAY).
 */
static struct rcg_clk usb_hsic_clk = {
	.b = {
		.ctl_reg = USB_HSIC_CLK_NS_REG,
		.en_mask = BIT(9),
		.reset_reg = USB_HSIC_RESET_REG,
		.reset_mask = BIT(0),
		.halt_check = DELAY,
	},
	.ns_reg = USB_HSIC_CLK_NS_REG,
	.md_reg = USB_HSIC_CLK_MD_REG,
	.root_en_mask = BIT(11),
	.ns_mask = (BM(23, 16) | BM(6, 0)),
	.set_rate = set_rate_mnd,
	.freq_tbl = clk_tbl_usb_hsic,
	.current_freq = &rcg_dummy_freq,
	.c = {
		.dbg_name = "usb_hsic_clk",
		.ops = &clk_ops_rcg_9615,
		VDD_DIG_FMAX_MAP1(LOW, 480000000),
		CLK_INIT(usb_hsic_clk.c),
	},
};

/* HSIC HSIO calibration clock - a CXO-fed branch. */
static struct branch_clk usb_hsic_hsio_cal_clk = {
	.b = {
		.ctl_reg = USB_HSIC_HSIO_CAL_CLK_CTL_REG,
		.en_mask = BIT(0),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 8,
	},
	.parent = &cxo_clk.c,
	.c = {
		.dbg_name = "usb_hsic_hsio_cal_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(usb_hsic_hsio_cal_clk.c),
	},
};

/* Fast Peripheral Bus Clocks */
/* Crypto engine 1 core clock. */
static struct branch_clk ce1_core_clk = {
	.b = {
		.ctl_reg = CE1_CORE_CLK_CTL_REG,
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEC_REG,
		.halt_bit = 27,
	},
	.c = {
		.dbg_name = "ce1_core_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(ce1_core_clk.c),
	},
};

/* Crypto engine 1 AHB (pclk) branch. */
static struct branch_clk ce1_p_clk = {
	.b = {
		.ctl_reg = CE1_HCLK_CTL_REG,
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEC_REG,
		.halt_bit = 1,
	},
	.c = {
		.dbg_name = "ce1_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(ce1_p_clk.c),
	},
};

/* DMA BAM AHB branch. */
static struct branch_clk dma_bam_p_clk = {
	.b = {
		.ctl_reg = DMA_BAM_HCLK_CTL,
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 12,
	},
	.c = {
		.dbg_name = "dma_bam_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(dma_bam_p_clk.c),
	},
};

/* GSBI1 AHB (pclk) branch. */
static struct branch_clk gsbi1_p_clk = {
	.b = {
		.ctl_reg = GSBIn_HCLK_CTL_REG(1),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
		.halt_bit = 11,
	},
	.c = {
		.dbg_name = "gsbi1_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gsbi1_p_clk.c),
	},
};
/* GSBI2-5 AHB (pclk) branches - same shape as gsbi1_p_clk, each with
 * its own HCLK control register and halt status bit.
 */
static struct branch_clk gsbi2_p_clk = {
	.b = {
		.ctl_reg = GSBIn_HCLK_CTL_REG(2),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
		.halt_bit = 7,
	},
	.c = {
		.dbg_name = "gsbi2_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gsbi2_p_clk.c),
	},
};

static struct branch_clk gsbi3_p_clk = {
	.b = {
		.ctl_reg = GSBIn_HCLK_CTL_REG(3),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
		.halt_bit = 3,
	},
	.c = {
		.dbg_name = "gsbi3_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gsbi3_p_clk.c),
	},
};

static struct branch_clk gsbi4_p_clk = {
	.b = {
		.ctl_reg = GSBIn_HCLK_CTL_REG(4),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEB_REG,
		.halt_bit = 27,
	},
	.c = {
		.dbg_name = "gsbi4_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gsbi4_p_clk.c),
	},
};

static struct branch_clk gsbi5_p_clk = {
	.b = {
		.ctl_reg = GSBIn_HCLK_CTL_REG(5),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_CFPB_STATEB_REG,
		.halt_bit = 23,
	},
	.c = {
		.dbg_name = "gsbi5_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gsbi5_p_clk.c),
	},
};

/* USB HS1 AHB branch. */
static struct branch_clk usb_hs1_p_clk = {
	.b = {
		.ctl_reg = USB_HS1_HCLK_CTL_REG,
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 1,
	},
	.c = {
		.dbg_name = "usb_hs1_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(usb_hs1_p_clk.c),
	},
};

/* USB HSIC AHB branch. */
static struct branch_clk usb_hsic_p_clk = {
	.b = {
		.ctl_reg = USB_HSIC_HCLK_CTL_REG,
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 3,
	},
	.c = {
		.dbg_name = "usb_hsic_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(usb_hsic_p_clk.c),
	},
};

/* SDC1/SDC2 AHB branches. */
static struct branch_clk sdc1_p_clk = {
	.b = {
		.ctl_reg = SDCn_HCLK_CTL_REG(1),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 11,
	},
	.c = {
		.dbg_name = "sdc1_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(sdc1_p_clk.c),
	},
};

static struct branch_clk sdc2_p_clk = {
	.b = {
		.ctl_reg = SDCn_HCLK_CTL_REG(2),
		.en_mask = BIT(4),
		.halt_reg = CLK_HALT_DFAB_STATE_REG,
		.halt_bit = 10,
	},
	.c = {
		.dbg_name = "sdc2_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(sdc2_p_clk.c),
	},
};

/* HW-Voteable Clocks */
/* All clocks below are enabled via bits in the shared
 * SC0_U_CLK_BRANCH_ENA_VOTE_REG vote register, so their halt status is
 * checked with HALT_VOTED semantics.
 */
/* ADM (application data mover) core clock. */
static struct branch_clk adm0_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(2),
		.halt_reg = CLK_HALT_MSS_KPSS_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 14,
	},
	.c = {
		.dbg_name = "adm0_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(adm0_clk.c),
	},
};

/* ADM AHB branch. */
static struct branch_clk adm0_p_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(3),
		.halt_reg = CLK_HALT_MSS_KPSS_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 13,
	},
	.c = {
		.dbg_name = "adm0_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(adm0_p_clk.c),
	},
};

/* PMIC arbiter port 0/1 AHB branches. */
static struct branch_clk pmic_arb0_p_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(8),
		.halt_reg = CLK_HALT_SFPB_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 22,
	},
	.c = {
		.dbg_name = "pmic_arb0_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(pmic_arb0_p_clk.c),
	},
};

static struct branch_clk pmic_arb1_p_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(9),
		.halt_reg = CLK_HALT_SFPB_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 21,
	},
	.c = {
		.dbg_name = "pmic_arb1_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(pmic_arb1_p_clk.c),
	},
};

/* PMIC SSBI2 bus clock. */
static struct branch_clk pmic_ssbi2_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(7),
		.halt_reg = CLK_HALT_SFPB_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 23,
	},
	.c = {
		.dbg_name = "pmic_ssbi2_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(pmic_ssbi2_clk.c),
	},
};

/* RPM message RAM AHB branch. */
static struct branch_clk rpm_msg_ram_p_clk = {
	.b = {
		.ctl_reg = SC0_U_CLK_BRANCH_ENA_VOTE_REG,
		.en_mask = BIT(6),
		.halt_reg = CLK_HALT_SFPB_MISC_STATE_REG,
		.halt_check = HALT_VOTED,
		.halt_bit = 12,
	},
	.c = {
		.dbg_name = "rpm_msg_ram_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(rpm_msg_ram_p_clk.c),
	},
};

/*
 * Low Power Audio Clocks
 */
/* Audio-interface OSR table entry: MND clock sourced via the LPA mux. */
#define F_AIF_OSR(f, s, d, m, n) \
	{ \
		.freq_hz = f, \
		.src_clk = &s##_clk.c, \
		.md_val = MD8(8, m, 0, n), \
		.ns_val = NS(31, 24, n, m, 5, 4, 3, d, 2, 0, s##_to_lpa_mux), \
		.mnd_en_mask = BIT(8) * !!(n), \
	}
static struct clk_freq_tbl clk_tbl_aif_osr[] = { F_AIF_OSR( 0, gnd, 1, 0, 0), F_AIF_OSR( 512000, pll4, 4, 1, 192), F_AIF_OSR( 768000, pll4, 4, 1, 128), F_AIF_OSR( 1024000, pll4, 4, 1, 96), F_AIF_OSR( 1536000, pll4, 4, 1, 64), F_AIF_OSR( 2048000, pll4, 4, 1, 48), F_AIF_OSR( 3072000, pll4, 4, 1, 32), F_AIF_OSR( 4096000, pll4, 4, 1, 24), F_AIF_OSR( 6144000, pll4, 4, 1, 16), F_AIF_OSR( 8192000, pll4, 4, 1, 12), F_AIF_OSR(12288000, pll4, 4, 1, 8), F_AIF_OSR(24576000, pll4, 4, 1, 4), F_END }; #define CLK_AIF_OSR(i, ns, md, h_r) \ struct rcg_clk i##_clk = { \ .b = { \ .ctl_reg = ns, \ .en_mask = BIT(17), \ .reset_reg = ns, \ .reset_mask = BIT(19), \ .halt_reg = h_r, \ .halt_check = ENABLE, \ .halt_bit = 1, \ }, \ .ns_reg = ns, \ .md_reg = md, \ .root_en_mask = BIT(9), \ .ns_mask = (BM(31, 24) | BM(6, 0)), \ .set_rate = set_rate_mnd, \ .freq_tbl = clk_tbl_aif_osr, \ .current_freq = &rcg_dummy_freq, \ .c = { \ .dbg_name = #i "_clk", \ .ops = &clk_ops_rcg_9615, \ CLK_INIT(i##_clk.c), \ }, \ } #define CLK_AIF_OSR_DIV(i, ns, md, h_r) \ struct rcg_clk i##_clk = { \ .b = { \ .ctl_reg = ns, \ .en_mask = BIT(21), \ .reset_reg = ns, \ .reset_mask = BIT(23), \ .halt_reg = h_r, \ .halt_check = ENABLE, \ .halt_bit = 1, \ }, \ .ns_reg = ns, \ .md_reg = md, \ .root_en_mask = BIT(9), \ .ns_mask = (BM(31, 24) | BM(6, 0)), \ .set_rate = set_rate_mnd, \ .freq_tbl = clk_tbl_aif_osr, \ .current_freq = &rcg_dummy_freq, \ .c = { \ .dbg_name = #i "_clk", \ .ops = &clk_ops_rcg_9615, \ CLK_INIT(i##_clk.c), \ }, \ } #define CLK_AIF_BIT(i, ns, h_r) \ struct cdiv_clk i##_clk = { \ .b = { \ .ctl_reg = ns, \ .en_mask = BIT(15), \ .halt_reg = h_r, \ .halt_check = DELAY, \ }, \ .ns_reg = ns, \ .ext_mask = BIT(14), \ .div_offset = 10, \ .max_div = 16, \ .c = { \ .dbg_name = #i "_clk", \ .ops = &clk_ops_cdiv, \ CLK_INIT(i##_clk.c), \ }, \ } #define CLK_AIF_BIT_DIV(i, ns, h_r) \ struct cdiv_clk i##_clk = { \ .b = { \ .ctl_reg = ns, \ .en_mask = BIT(19), \ .halt_reg = h_r, \ .halt_check = DELAY, \ }, \ 
.ns_reg = ns, \ .ext_mask = BIT(18), \ .div_offset = 10, \ .max_div = 256, \ .c = { \ .dbg_name = #i "_clk", \ .ops = &clk_ops_cdiv, \ CLK_INIT(i##_clk.c), \ }, \ } static CLK_AIF_OSR(mi2s_osr, LCC_MI2S_NS_REG, LCC_MI2S_MD_REG, LCC_MI2S_STATUS_REG); static CLK_AIF_BIT(mi2s_bit, LCC_MI2S_NS_REG, LCC_MI2S_STATUS_REG); static CLK_AIF_OSR_DIV(codec_i2s_mic_osr, LCC_CODEC_I2S_MIC_NS_REG, LCC_CODEC_I2S_MIC_MD_REG, LCC_CODEC_I2S_MIC_STATUS_REG); static CLK_AIF_BIT_DIV(codec_i2s_mic_bit, LCC_CODEC_I2S_MIC_NS_REG, LCC_CODEC_I2S_MIC_STATUS_REG); static CLK_AIF_OSR_DIV(spare_i2s_mic_osr, LCC_SPARE_I2S_MIC_NS_REG, LCC_SPARE_I2S_MIC_MD_REG, LCC_SPARE_I2S_MIC_STATUS_REG); static CLK_AIF_BIT_DIV(spare_i2s_mic_bit, LCC_SPARE_I2S_MIC_NS_REG, LCC_SPARE_I2S_MIC_STATUS_REG); static CLK_AIF_OSR_DIV(codec_i2s_spkr_osr, LCC_CODEC_I2S_SPKR_NS_REG, LCC_CODEC_I2S_SPKR_MD_REG, LCC_CODEC_I2S_SPKR_STATUS_REG); static CLK_AIF_BIT_DIV(codec_i2s_spkr_bit, LCC_CODEC_I2S_SPKR_NS_REG, LCC_CODEC_I2S_SPKR_STATUS_REG); static CLK_AIF_OSR_DIV(spare_i2s_spkr_osr, LCC_SPARE_I2S_SPKR_NS_REG, LCC_SPARE_I2S_SPKR_MD_REG, LCC_SPARE_I2S_SPKR_STATUS_REG); static CLK_AIF_BIT_DIV(spare_i2s_spkr_bit, LCC_SPARE_I2S_SPKR_NS_REG, LCC_SPARE_I2S_SPKR_STATUS_REG); #define F_PCM(f, s, d, m, n) \ { \ .freq_hz = f, \ .src_clk = &s##_clk.c, \ .md_val = MD16(m, n), \ .ns_val = NS(31, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_lpa_mux), \ .mnd_en_mask = BIT(8) * !!(n), \ } static struct clk_freq_tbl clk_tbl_pcm[] = { { .ns_val = BIT(10) /* external input */ }, F_PCM( 512000, pll4, 4, 1, 192), F_PCM( 768000, pll4, 4, 1, 128), F_PCM( 1024000, pll4, 4, 1, 96), F_PCM( 1536000, pll4, 4, 1, 64), F_PCM( 2048000, pll4, 4, 1, 48), F_PCM( 3072000, pll4, 4, 1, 32), F_PCM( 4096000, pll4, 4, 1, 24), F_PCM( 6144000, pll4, 4, 1, 16), F_PCM( 8192000, pll4, 4, 1, 12), F_PCM(12288000, pll4, 4, 1, 8), F_PCM(24576000, pll4, 4, 1, 4), F_END }; static struct rcg_clk pcm_clk = { .b = { .ctl_reg = LCC_PCM_NS_REG, .en_mask = BIT(11), .reset_reg = 
LCC_PCM_NS_REG, .reset_mask = BIT(13), .halt_reg = LCC_PCM_STATUS_REG, .halt_check = ENABLE, .halt_bit = 0, }, .ns_reg = LCC_PCM_NS_REG, .md_reg = LCC_PCM_MD_REG, .root_en_mask = BIT(9), .ns_mask = BM(31, 16) | BIT(10) | BM(6, 0), .set_rate = set_rate_mnd, .freq_tbl = clk_tbl_pcm, .current_freq = &rcg_dummy_freq, .c = { .dbg_name = "pcm_clk", .ops = &clk_ops_rcg_9615, VDD_DIG_FMAX_MAP1(LOW, 24576000), CLK_INIT(pcm_clk.c), }, }; static struct rcg_clk audio_slimbus_clk = { .b = { .ctl_reg = LCC_SLIMBUS_NS_REG, .en_mask = BIT(10), .reset_reg = LCC_AHBEX_BRANCH_CTL_REG, .reset_mask = BIT(5), .halt_reg = LCC_SLIMBUS_STATUS_REG, .halt_check = ENABLE, .halt_bit = 0, }, .ns_reg = LCC_SLIMBUS_NS_REG, .md_reg = LCC_SLIMBUS_MD_REG, .root_en_mask = BIT(9), .ns_mask = (BM(31, 24) | BM(6, 0)), .set_rate = set_rate_mnd, .freq_tbl = clk_tbl_aif_osr, .current_freq = &rcg_dummy_freq, .c = { .dbg_name = "audio_slimbus_clk", .ops = &clk_ops_rcg_9615, VDD_DIG_FMAX_MAP1(LOW, 24576000), CLK_INIT(audio_slimbus_clk.c), }, }; static struct branch_clk sps_slimbus_clk = { .b = { .ctl_reg = LCC_SLIMBUS_NS_REG, .en_mask = BIT(12), .halt_reg = LCC_SLIMBUS_STATUS_REG, .halt_check = ENABLE, .halt_bit = 1, }, .parent = &audio_slimbus_clk.c, .c = { .dbg_name = "sps_slimbus_clk", .ops = &clk_ops_branch, CLK_INIT(sps_slimbus_clk.c), }, }; static struct branch_clk slimbus_xo_src_clk = { .b = { .ctl_reg = SLIMBUS_XO_SRC_CLK_CTL_REG, .en_mask = BIT(2), .halt_reg = CLK_HALT_DFAB_STATE_REG, .halt_bit = 28, }, .parent = &sps_slimbus_clk.c, .c = { .dbg_name = "slimbus_xo_src_clk", .ops = &clk_ops_branch, CLK_INIT(slimbus_xo_src_clk.c), }, }; DEFINE_CLK_RPM(cfpb_clk, cfpb_a_clk, CFPB, NULL); DEFINE_CLK_RPM(dfab_clk, dfab_a_clk, DAYTONA_FABRIC, NULL); DEFINE_CLK_RPM(ebi1_clk, ebi1_a_clk, EBI1, NULL); DEFINE_CLK_RPM(sfab_clk, sfab_a_clk, SYSTEM_FABRIC, NULL); DEFINE_CLK_RPM(sfpb_clk, sfpb_a_clk, SFPB, NULL); static DEFINE_CLK_VOTER(dfab_usb_hs_clk, &dfab_clk.c); static DEFINE_CLK_VOTER(dfab_sdc1_clk, 
&dfab_clk.c); static DEFINE_CLK_VOTER(dfab_sdc2_clk, &dfab_clk.c); static DEFINE_CLK_VOTER(dfab_sps_clk, &dfab_clk.c); static DEFINE_CLK_VOTER(dfab_bam_dmux_clk, &dfab_clk.c); static DEFINE_CLK_VOTER(ebi1_msmbus_clk, &ebi1_clk.c); /* * TODO: replace dummy_clk below with ebi1_clk.c once the * bus driver starts voting on ebi1 rates. */ static DEFINE_CLK_VOTER(ebi1_adm_clk, &dummy_clk); #ifdef CONFIG_DEBUG_FS struct measure_sel { u32 test_vector; struct clk *clk; }; static DEFINE_CLK_MEASURE(q6sw_clk); static DEFINE_CLK_MEASURE(q6fw_clk); static DEFINE_CLK_MEASURE(q6_func_clk); static struct measure_sel measure_mux[] = { { TEST_PER_LS(0x08), &slimbus_xo_src_clk.c }, { TEST_PER_LS(0x12), &sdc1_p_clk.c }, { TEST_PER_LS(0x13), &sdc1_clk.c }, { TEST_PER_LS(0x14), &sdc2_p_clk.c }, { TEST_PER_LS(0x15), &sdc2_clk.c }, { TEST_PER_LS(0x1F), &gp0_clk.c }, { TEST_PER_LS(0x20), &gp1_clk.c }, { TEST_PER_LS(0x21), &gp2_clk.c }, { TEST_PER_LS(0x26), &pmem_clk.c }, { TEST_PER_LS(0x25), &dfab_clk.c }, { TEST_PER_LS(0x25), &dfab_a_clk.c }, { TEST_PER_LS(0x32), &dma_bam_p_clk.c }, { TEST_PER_LS(0x33), &cfpb_clk.c }, { TEST_PER_LS(0x33), &cfpb_a_clk.c }, { TEST_PER_LS(0x3E), &gsbi1_uart_clk.c }, { TEST_PER_LS(0x3F), &gsbi1_qup_clk.c }, { TEST_PER_LS(0x41), &gsbi2_p_clk.c }, { TEST_PER_LS(0x42), &gsbi2_uart_clk.c }, { TEST_PER_LS(0x44), &gsbi2_qup_clk.c }, { TEST_PER_LS(0x45), &gsbi3_p_clk.c }, { TEST_PER_LS(0x46), &gsbi3_uart_clk.c }, { TEST_PER_LS(0x48), &gsbi3_qup_clk.c }, { TEST_PER_LS(0x49), &gsbi4_p_clk.c }, { TEST_PER_LS(0x4A), &gsbi4_uart_clk.c }, { TEST_PER_LS(0x4C), &gsbi4_qup_clk.c }, { TEST_PER_LS(0x4D), &gsbi5_p_clk.c }, { TEST_PER_LS(0x4E), &gsbi5_uart_clk.c }, { TEST_PER_LS(0x50), &gsbi5_qup_clk.c }, { TEST_PER_LS(0x78), &sfpb_clk.c }, { TEST_PER_LS(0x78), &sfpb_a_clk.c }, { TEST_PER_LS(0x7A), &pmic_ssbi2_clk.c }, { TEST_PER_LS(0x7B), &pmic_arb0_p_clk.c }, { TEST_PER_LS(0x7C), &pmic_arb1_p_clk.c }, { TEST_PER_LS(0x7D), &prng_clk.c }, { TEST_PER_LS(0x7F), 
&rpm_msg_ram_p_clk.c }, { TEST_PER_LS(0x80), &adm0_p_clk.c }, { TEST_PER_LS(0x84), &usb_hs1_p_clk.c }, { TEST_PER_LS(0x85), &usb_hs1_xcvr_clk.c }, { TEST_PER_LS(0x86), &usb_hsic_sys_clk.c }, { TEST_PER_LS(0x87), &usb_hsic_p_clk.c }, { TEST_PER_LS(0x88), &usb_hsic_xcvr_clk.c }, { TEST_PER_LS(0x8B), &usb_hsic_hsio_cal_clk.c }, { TEST_PER_LS(0x8D), &usb_hs1_sys_clk.c }, { TEST_PER_LS(0x92), &ce1_p_clk.c }, { TEST_PER_HS(0x18), &sfab_clk.c }, { TEST_PER_HS(0x18), &sfab_a_clk.c }, { TEST_PER_HS(0x26), &q6sw_clk }, { TEST_PER_HS(0x27), &q6fw_clk }, { TEST_PER_LS(0xA4), &ce1_core_clk.c }, { TEST_PER_HS(0x2A), &adm0_clk.c }, { TEST_PER_HS(0x34), &ebi1_clk.c }, { TEST_PER_HS(0x34), &ebi1_a_clk.c }, { TEST_PER_HS(0x3E), &usb_hsic_clk.c }, { TEST_LPA(0x0F), &mi2s_bit_clk.c }, { TEST_LPA(0x10), &codec_i2s_mic_bit_clk.c }, { TEST_LPA(0x11), &codec_i2s_spkr_bit_clk.c }, { TEST_LPA(0x12), &spare_i2s_mic_bit_clk.c }, { TEST_LPA(0x13), &spare_i2s_spkr_bit_clk.c }, { TEST_LPA(0x14), &pcm_clk.c }, { TEST_LPA(0x1D), &audio_slimbus_clk.c }, { TEST_LPA_HS(0x00), &q6_func_clk }, }; static struct measure_sel *find_measure_sel(struct clk *clk) { int i; for (i = 0; i < ARRAY_SIZE(measure_mux); i++) if (measure_mux[i].clk == clk) return &measure_mux[i]; return NULL; } static int measure_clk_set_parent(struct clk *c, struct clk *parent) { int ret = 0; u32 clk_sel; struct measure_sel *p; struct measure_clk *clk = to_measure_clk(c); unsigned long flags; if (!parent) return -EINVAL; p = find_measure_sel(parent); if (!p) return -EINVAL; spin_lock_irqsave(&local_clock_reg_lock, flags); /* * Program the test vector, measurement period (sample_ticks) * and scaling multiplier. 
*/ clk->sample_ticks = 0x10000; clk_sel = p->test_vector & TEST_CLK_SEL_MASK; clk->multiplier = 1; switch (p->test_vector >> TEST_TYPE_SHIFT) { case TEST_TYPE_PER_LS: writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG); break; case TEST_TYPE_PER_HS: writel_relaxed(0x4020000|BVAL(16, 10, clk_sel), CLK_TEST_REG); break; case TEST_TYPE_LPA: writel_relaxed(0x4030D98, CLK_TEST_REG); writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), LCC_CLK_LS_DEBUG_CFG_REG); break; case TEST_TYPE_LPA_HS: writel_relaxed(0x402BC00, CLK_TEST_REG); writel_relaxed(BVAL(2, 1, clk_sel)|BIT(0), LCC_CLK_HS_DEBUG_CFG_REG); break; default: ret = -EPERM; } /* Make sure test vector is set before starting measurements. */ mb(); spin_unlock_irqrestore(&local_clock_reg_lock, flags); return ret; } /* Sample clock for 'ticks' reference clock ticks. */ static unsigned long run_measurement(unsigned ticks) { /* Stop counters and set the XO4 counter start value. */ writel_relaxed(ticks, RINGOSC_TCXO_CTL_REG); /* Wait for timer to become ready. */ while ((readl_relaxed(RINGOSC_STATUS_REG) & BIT(25)) != 0) cpu_relax(); /* Run measurement and wait for completion. */ writel_relaxed(BIT(28)|ticks, RINGOSC_TCXO_CTL_REG); while ((readl_relaxed(RINGOSC_STATUS_REG) & BIT(25)) == 0) cpu_relax(); /* Stop counters. */ writel_relaxed(0x0, RINGOSC_TCXO_CTL_REG); /* Return measured ticks. */ return readl_relaxed(RINGOSC_STATUS_REG) & BM(24, 0); } /* Perform a hardware rate measurement for a given clock. FOR DEBUG USE ONLY: Measurements take ~15 ms! */ static unsigned long measure_clk_get_rate(struct clk *c) { unsigned long flags; u32 pdm_reg_backup, ringosc_reg_backup; u64 raw_count_short, raw_count_full; struct measure_clk *clk = to_measure_clk(c); unsigned ret; spin_lock_irqsave(&local_clock_reg_lock, flags); /* Enable CXO/4 and RINGOSC branch and root. 
*/ pdm_reg_backup = readl_relaxed(PDM_CLK_NS_REG); ringosc_reg_backup = readl_relaxed(RINGOSC_NS_REG); writel_relaxed(0x2898, PDM_CLK_NS_REG); writel_relaxed(0xA00, RINGOSC_NS_REG); /* * The ring oscillator counter will not reset if the measured clock * is not running. To detect this, run a short measurement before * the full measurement. If the raw results of the two are the same * then the clock must be off. */ /* Run a short measurement. (~1 ms) */ raw_count_short = run_measurement(0x1000); /* Run a full measurement. (~14 ms) */ raw_count_full = run_measurement(clk->sample_ticks); writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG); writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG); /* Return 0 if the clock is off. */ if (raw_count_full == raw_count_short) ret = 0; else { /* Compute rate in Hz. */ raw_count_full = ((raw_count_full * 10) + 15) * 4800000; do_div(raw_count_full, ((clk->sample_ticks * 10) + 35)); ret = (raw_count_full * clk->multiplier); } /* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. 
*/ writel_relaxed(0x38F8, PLLTEST_PAD_CFG_REG); spin_unlock_irqrestore(&local_clock_reg_lock, flags); return ret; } #else /* !CONFIG_DEBUG_FS */ static int measure_clk_set_parent(struct clk *clk, struct clk *parent) { return -EINVAL; } static unsigned long measure_clk_get_rate(struct clk *clk) { return 0; } #endif /* CONFIG_DEBUG_FS */ static struct clk_ops measure_clk_ops = { .set_parent = measure_clk_set_parent, .get_rate = measure_clk_get_rate, .is_local = local_clk_is_local, }; static struct measure_clk measure_clk = { .c = { .dbg_name = "measure_clk", .ops = &measure_clk_ops, CLK_INIT(measure_clk.c), }, .multiplier = 1, }; static struct clk_lookup msm_clocks_9615[] = { CLK_LOOKUP("cxo", cxo_clk.c, NULL), CLK_LOOKUP("xo", cxo_clk.c, "BAM_RMNT"), CLK_LOOKUP("pll0", pll0_clk.c, NULL), CLK_LOOKUP("pll8", pll8_clk.c, NULL), CLK_LOOKUP("pll14", pll14_clk.c, NULL), CLK_LOOKUP("pll0", pll0_acpu_clk.c, "acpu"), CLK_LOOKUP("pll8", pll8_acpu_clk.c, "acpu"), CLK_LOOKUP("pll9", pll9_acpu_clk.c, "acpu"), CLK_LOOKUP("measure", measure_clk.c, "debug"), CLK_LOOKUP("bus_clk", sfab_clk.c, "msm_sys_fab"), CLK_LOOKUP("bus_a_clk", sfab_a_clk.c, "msm_sys_fab"), CLK_LOOKUP("mem_clk", ebi1_msmbus_clk.c, "msm_bus"), CLK_LOOKUP("mem_a_clk", ebi1_a_clk.c, "msm_bus"), CLK_LOOKUP("bus_clk", sfpb_clk.c, NULL), CLK_LOOKUP("bus_a_clk", sfpb_a_clk.c, NULL), CLK_LOOKUP("bus_clk", cfpb_clk.c, NULL), CLK_LOOKUP("bus_a_clk", cfpb_a_clk.c, NULL), CLK_LOOKUP("ebi1_clk", ebi1_clk.c, NULL), CLK_LOOKUP("dfab_clk", dfab_clk.c, NULL), CLK_LOOKUP("dfab_a_clk", dfab_a_clk.c, NULL), CLK_LOOKUP("core_clk", gp0_clk.c, NULL), CLK_LOOKUP("core_clk", gp1_clk.c, NULL), CLK_LOOKUP("core_clk", gp2_clk.c, NULL), CLK_LOOKUP("core_clk", gsbi3_uart_clk.c, NULL), CLK_LOOKUP("core_clk", gsbi4_uart_clk.c, "msm_serial_hsl.0"), CLK_LOOKUP("core_clk", gsbi5_uart_clk.c, NULL), CLK_LOOKUP("core_clk", gsbi3_qup_clk.c, "spi_qsd.0"), CLK_LOOKUP("core_clk", gsbi4_qup_clk.c, NULL), CLK_LOOKUP("core_clk", gsbi5_qup_clk.c, 
"qup_i2c.0"), CLK_LOOKUP("core_clk", pdm_clk.c, NULL), CLK_LOOKUP("mem_clk", pmem_clk.c, "msm_sps"), CLK_LOOKUP("core_clk", prng_clk.c, "msm_rng.0"), CLK_LOOKUP("core_clk", sdc1_clk.c, "msm_sdcc.1"), CLK_LOOKUP("core_clk", sdc2_clk.c, "msm_sdcc.2"), CLK_LOOKUP("iface_clk", ce1_p_clk.c, NULL), CLK_LOOKUP("core_clk", ce1_core_clk.c, NULL), CLK_LOOKUP("dma_bam_pclk", dma_bam_p_clk.c, NULL), CLK_LOOKUP("iface_clk", gsbi3_p_clk.c, "spi_qsd.0"), CLK_LOOKUP("iface_clk", gsbi4_p_clk.c, "msm_serial_hsl.0"), CLK_LOOKUP("iface_clk", gsbi5_p_clk.c, "qup_i2c.0"), CLK_LOOKUP("iface_clk", usb_hs1_p_clk.c, "msm_otg"), CLK_LOOKUP("core_clk", usb_hs1_sys_clk.c, "msm_otg"), CLK_LOOKUP("alt_core_clk", usb_hs1_xcvr_clk.c, "msm_otg"), CLK_LOOKUP("alt_core_clk", usb_hsic_xcvr_clk.c, "msm_hsic_host"), CLK_LOOKUP("cal_clk", usb_hsic_hsio_cal_clk.c, "msm_hsic_host"), CLK_LOOKUP("core_clk", usb_hsic_sys_clk.c, "msm_hsic_host"), CLK_LOOKUP("iface_clk", usb_hsic_p_clk.c, "msm_hsic_host"), CLK_LOOKUP("phy_clk", usb_hsic_clk.c, "msm_hsic_host"), CLK_LOOKUP("iface_clk", sdc1_p_clk.c, "msm_sdcc.1"), CLK_LOOKUP("iface_clk", sdc2_p_clk.c, "msm_sdcc.2"), CLK_LOOKUP("core_clk", adm0_clk.c, "msm_dmov"), CLK_LOOKUP("iface_clk", adm0_p_clk.c, "msm_dmov"), CLK_LOOKUP("iface_clk", pmic_arb0_p_clk.c, NULL), CLK_LOOKUP("iface_clk", pmic_arb1_p_clk.c, NULL), CLK_LOOKUP("core_clk", pmic_ssbi2_clk.c, NULL), CLK_LOOKUP("mem_clk", rpm_msg_ram_p_clk.c, NULL), CLK_LOOKUP("mi2s_bit_clk", mi2s_bit_clk.c, NULL), CLK_LOOKUP("mi2s_osr_clk", mi2s_osr_clk.c, NULL), CLK_LOOKUP("i2s_mic_bit_clk", codec_i2s_mic_bit_clk.c, NULL), CLK_LOOKUP("i2s_mic_osr_clk", codec_i2s_mic_osr_clk.c, NULL), CLK_LOOKUP("i2s_mic_bit_clk", spare_i2s_mic_bit_clk.c, NULL), CLK_LOOKUP("i2s_mic_osr_clk", spare_i2s_mic_osr_clk.c, NULL), CLK_LOOKUP("i2s_spkr_bit_clk", codec_i2s_spkr_bit_clk.c, NULL), CLK_LOOKUP("i2s_spkr_osr_clk", codec_i2s_spkr_osr_clk.c, NULL), CLK_LOOKUP("i2s_spkr_bit_clk", spare_i2s_spkr_bit_clk.c, NULL), 
CLK_LOOKUP("i2s_spkr_osr_clk", spare_i2s_spkr_osr_clk.c, NULL), CLK_LOOKUP("pcm_clk", pcm_clk.c, NULL), CLK_LOOKUP("sps_slimbus_clk", sps_slimbus_clk.c, NULL), CLK_LOOKUP("audio_slimbus_clk", audio_slimbus_clk.c, NULL), CLK_LOOKUP("core_clk", dfab_usb_hs_clk.c, "msm_otg"), CLK_LOOKUP("bus_clk", dfab_sdc1_clk.c, "msm_sdcc.1"), CLK_LOOKUP("bus_clk", dfab_sdc2_clk.c, "msm_sdcc.2"), CLK_LOOKUP("dfab_clk", dfab_sps_clk.c, "msm_sps"), CLK_LOOKUP("bus_clk", dfab_bam_dmux_clk.c, "BAM_RMNT"), CLK_LOOKUP("mem_clk", ebi1_adm_clk.c, "msm_dmov"), CLK_LOOKUP("iface_clk", ce1_p_clk.c, "qce.0"), CLK_LOOKUP("iface_clk", ce1_p_clk.c, "qcrypto.0"), CLK_LOOKUP("core_clk", ce1_core_clk.c, "qce.0"), CLK_LOOKUP("core_clk", ce1_core_clk.c, "qcrypto.0"), CLK_LOOKUP("q6sw_clk", q6sw_clk, NULL), CLK_LOOKUP("q6fw_clk", q6fw_clk, NULL), CLK_LOOKUP("q6_func_clk", q6_func_clk, NULL), /* TODO: Make this real when RPM's ready. */ CLK_DUMMY("ebi1_msmbus_clk", ebi1_msmbus_clk.c, NULL, OFF), CLK_DUMMY("mem_clk", ebi1_adm_clk.c, "msm_dmov", OFF), }; static void set_fsm_mode(void __iomem *mode_reg) { u32 regval = readl_relaxed(mode_reg); /* De-assert reset to FSM */ regval &= ~BIT(21); writel_relaxed(regval, mode_reg); /* Program bias count */ regval &= ~BM(19, 14); regval |= BVAL(19, 14, 0x1); writel_relaxed(regval, mode_reg); /* Program lock count */ regval &= ~BM(13, 8); regval |= BVAL(13, 8, 0x8); writel_relaxed(regval, mode_reg); /* Enable PLL FSM voting */ regval |= BIT(20); writel_relaxed(regval, mode_reg); } /* * Miscellaneous clock register initializations */ static void __init reg_init(void) { u32 regval, is_pll_enabled, pll9_lval; /* Enable PDM CXO source. 
*/ regval = readl_relaxed(PDM_CLK_NS_REG); writel_relaxed(BIT(13) | regval, PDM_CLK_NS_REG); /* Check if PLL0 is active */ is_pll_enabled = readl_relaxed(BB_PLL0_STATUS_REG) & BIT(16); if (!is_pll_enabled) { writel_relaxed(0xE, BB_PLL0_L_VAL_REG); writel_relaxed(0x3, BB_PLL0_M_VAL_REG); writel_relaxed(0x8, BB_PLL0_N_VAL_REG); regval = readl_relaxed(BB_PLL0_CONFIG_REG); /* Enable the main output and the MN accumulator */ regval |= BIT(23) | BIT(22); /* Set pre-divider and post-divider values to 1 and 1 */ regval &= ~BIT(19); regval &= ~BM(21, 20); /* Set VCO frequency */ regval &= ~BM(17, 16); writel_relaxed(regval, BB_PLL0_CONFIG_REG); /* Enable AUX output */ regval = readl_relaxed(BB_PLL0_TEST_CTL_REG); regval |= BIT(12); writel_relaxed(regval, BB_PLL0_TEST_CTL_REG); set_fsm_mode(BB_PLL0_MODE_REG); } /* Check if PLL14 is enabled in FSM mode */ is_pll_enabled = readl_relaxed(BB_PLL14_STATUS_REG) & BIT(16); if (!is_pll_enabled) { writel_relaxed(0x19, BB_PLL14_L_VAL_REG); writel_relaxed(0x0, BB_PLL14_M_VAL_REG); writel_relaxed(0x1, BB_PLL14_N_VAL_REG); regval = readl_relaxed(BB_PLL14_CONFIG_REG); /* Enable main output and the MN accumulator */ regval |= BIT(23) | BIT(22); /* Set pre-divider and post-divider values to 1 and 1 */ regval &= ~BIT(19); regval &= ~BM(21, 20); /* Set VCO frequency */ regval &= ~BM(17, 16); writel_relaxed(regval, BB_PLL14_CONFIG_REG); set_fsm_mode(BB_PLL14_MODE_REG); } else if (!(readl_relaxed(BB_PLL14_MODE_REG) & BIT(20))) WARN(1, "PLL14 enabled in non-FSM mode!\n"); /* Detect PLL9 rate and fixup structure accordingly */ pll9_lval = readl_relaxed(SC_PLL0_L_VAL_REG); if (pll9_lval == 0x1C) pll9_acpu_clk.rate = 550000000; /* Enable PLL4 source on the LPASS Primary PLL Mux */ regval = readl_relaxed(LCC_PRI_PLL_CLK_CTL_REG); writel_relaxed(regval | BIT(0), LCC_PRI_PLL_CLK_CTL_REG); /* Disable hardware clock gating on certain clocks */ regval = readl_relaxed(USB_HSIC_HCLK_CTL_REG); regval &= ~BIT(6); writel_relaxed(regval, 
USB_HSIC_HCLK_CTL_REG); regval = readl_relaxed(CE1_CORE_CLK_CTL_REG); regval &= ~BIT(6); writel_relaxed(regval, CE1_CORE_CLK_CTL_REG); regval = readl_relaxed(USB_HS1_HCLK_CTL_REG); regval &= ~BIT(6); writel_relaxed(regval, USB_HS1_HCLK_CTL_REG); regval = readl_relaxed(DMA_BAM_HCLK_CTL); regval &= ~BIT(6); writel_relaxed(regval, DMA_BAM_HCLK_CTL); } /* Local clock driver initialization. */ static void __init msm9615_clock_init(void) { xo_cxo = msm_xo_get(MSM_XO_CXO, "clock-9615"); if (IS_ERR(xo_cxo)) { pr_err("%s: msm_xo_get(CXO) failed.\n", __func__); BUG(); } vote_vdd_level(&vdd_dig, VDD_DIG_HIGH); clk_ops_pll.enable = sr_pll_clk_enable; /* Initialize clock registers. */ reg_init(); /* Initialize rates for clocks that only support one. */ clk_set_rate(&pdm_clk.c, 19200000); clk_set_rate(&prng_clk.c, 32000000); clk_set_rate(&usb_hs1_xcvr_clk.c, 60000000); clk_set_rate(&usb_hs1_sys_clk.c, 60000000); clk_set_rate(&usb_hsic_xcvr_clk.c, 60000000); clk_set_rate(&usb_hsic_sys_clk.c, 64000000); clk_set_rate(&usb_hsic_clk.c, 480000000); /* * The halt status bits for PDM may be incorrect at boot. * Toggle these clocks on and off to refresh them. */ rcg_clk_enable(&pdm_clk.c); rcg_clk_disable(&pdm_clk.c); } static int __init msm9615_clock_late_init(void) { return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH); } struct clock_init_data msm9615_clock_init_data __initdata = { .table = msm_clocks_9615, .size = ARRAY_SIZE(msm_clocks_9615), .init = msm9615_clock_init, .late_init = msm9615_clock_late_init, };
gpl-2.0
Grommerin/kernel_imx
drivers/net/can/janz-ican3.c
596
44625
/* * Janz MODULbus VMOD-ICAN3 CAN Interface Driver * * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/mfd/janz.h> #include <asm/io.h> /* the DPM has 64k of memory, organized into 256x 256 byte pages */ #define DPM_NUM_PAGES 256 #define DPM_PAGE_SIZE 256 #define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE) /* JANZ ICAN3 "old-style" host interface queue page numbers */ #define QUEUE_OLD_CONTROL 0 #define QUEUE_OLD_RB0 1 #define QUEUE_OLD_RB1 2 #define QUEUE_OLD_WB0 3 #define QUEUE_OLD_WB1 4 /* Janz ICAN3 "old-style" host interface control registers */ #define MSYNC_PEER 0x00 /* ICAN only */ #define MSYNC_LOCL 0x01 /* host only */ #define TARGET_RUNNING 0x02 #define MSYNC_RB0 0x01 #define MSYNC_RB1 0x02 #define MSYNC_RBLW 0x04 #define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1) #define MSYNC_WB0 0x10 #define MSYNC_WB1 0x20 #define MSYNC_WBLW 0x40 #define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1) /* Janz ICAN3 "new-style" host interface queue page numbers */ #define QUEUE_TOHOST 5 #define QUEUE_FROMHOST_MID 6 #define QUEUE_FROMHOST_HIGH 7 #define QUEUE_FROMHOST_LOW 8 /* The first free page in the DPM is #9 */ #define DPM_FREE_START 9 /* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */ #define DESC_VALID 0x80 #define DESC_WRAP 0x40 #define DESC_INTERRUPT 0x20 #define DESC_IVALID 0x10 #define DESC_LEN(len) (len) /* Janz ICAN3 Firmware Messages */ #define MSG_CONNECTI 0x02 #define MSG_DISCONNECT 0x03 #define MSG_IDVERS 0x04 
#define MSG_MSGLOST 0x05 #define MSG_NEWHOSTIF 0x08 #define MSG_INQUIRY 0x0a #define MSG_SETAFILMASK 0x10 #define MSG_INITFDPMQUEUE 0x11 #define MSG_HWCONF 0x12 #define MSG_FMSGLOST 0x15 #define MSG_CEVTIND 0x37 #define MSG_CBTRREQ 0x41 #define MSG_COFFREQ 0x42 #define MSG_CONREQ 0x43 #define MSG_CCONFREQ 0x47 /* * Janz ICAN3 CAN Inquiry Message Types * * NOTE: there appears to be a firmware bug here. You must send * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED * NOTE: response. The controller never responds to a message with * NOTE: the INQUIRY_EXTENDED subspec :( */ #define INQUIRY_STATUS 0x00 #define INQUIRY_TERMINATION 0x01 #define INQUIRY_EXTENDED 0x04 /* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */ #define SETAFILMASK_REJECT 0x00 #define SETAFILMASK_FASTIF 0x02 /* Janz ICAN3 CAN Hardware Configuration Message Types */ #define HWCONF_TERMINATE_ON 0x01 #define HWCONF_TERMINATE_OFF 0x00 /* Janz ICAN3 CAN Event Indication Message Types */ #define CEVTIND_EI 0x01 #define CEVTIND_DOI 0x02 #define CEVTIND_LOST 0x04 #define CEVTIND_FULL 0x08 #define CEVTIND_BEI 0x10 #define CEVTIND_CHIP_SJA1000 0x02 #define ICAN3_BUSERR_QUOTA_MAX 255 /* Janz ICAN3 CAN Frame Conversion */ #define ICAN3_ECHO 0x10 #define ICAN3_EFF_RTR 0x40 #define ICAN3_SFF_RTR 0x10 #define ICAN3_EFF 0x80 #define ICAN3_CAN_TYPE_MASK 0x0f #define ICAN3_CAN_TYPE_SFF 0x00 #define ICAN3_CAN_TYPE_EFF 0x01 #define ICAN3_CAN_DLC_MASK 0x0f /* * SJA1000 Status and Error Register Definitions * * Copied from drivers/net/can/sja1000/sja1000.h */ /* status register content */ #define SR_BS 0x80 #define SR_ES 0x40 #define SR_TS 0x20 #define SR_RS 0x10 #define SR_TCS 0x08 #define SR_TBS 0x04 #define SR_DOS 0x02 #define SR_RBS 0x01 #define SR_CRIT (SR_BS|SR_ES) /* ECC register */ #define ECC_SEG 0x1F #define ECC_DIR 0x20 #define ECC_ERR 6 #define ECC_BIT 0x00 #define ECC_FORM 0x40 #define ECC_STUFF 0x80 #define ECC_MASK 0xc0 /* Number of buffers for use in the "new-style" host interface 
*/ #define ICAN3_NEW_BUFFERS 16 /* Number of buffers for use in the "fast" host interface */ #define ICAN3_TX_BUFFERS 512 #define ICAN3_RX_BUFFERS 1024 /* SJA1000 Clock Input */ #define ICAN3_CAN_CLOCK 8000000 /* Driver Name */ #define DRV_NAME "janz-ican3" /* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */ struct ican3_dpm_control { /* window address register */ u8 window_address; u8 unused1; /* * Read access: clear interrupt from microcontroller * Write access: send interrupt to microcontroller */ u8 interrupt; u8 unused2; /* write-only: reset all hardware on the module */ u8 hwreset; u8 unused3; /* write-only: generate an interrupt to the TPU */ u8 tpuinterrupt; }; struct ican3_dev { /* must be the first member */ struct can_priv can; /* CAN network device */ struct net_device *ndev; struct napi_struct napi; /* Device for printing */ struct device *dev; /* module number */ unsigned int num; /* base address of registers and IRQ */ struct janz_cmodio_onboard_regs __iomem *ctrl; struct ican3_dpm_control __iomem *dpmctrl; void __iomem *dpm; int irq; /* CAN bus termination status */ struct completion termination_comp; bool termination_enabled; /* CAN bus error status registers */ struct completion buserror_comp; struct can_berr_counter bec; /* old and new style host interface */ unsigned int iftype; /* * Any function which changes the current DPM page must hold this * lock while it is performing data accesses. This ensures that the * function will not be preempted and end up reading data from a * different DPM page than it expects. 
*/ spinlock_t lock; /* new host interface */ unsigned int rx_int; unsigned int rx_num; unsigned int tx_num; /* fast host interface */ unsigned int fastrx_start; unsigned int fastrx_int; unsigned int fastrx_num; unsigned int fasttx_start; unsigned int fasttx_num; /* first free DPM page */ unsigned int free_page; }; struct ican3_msg { u8 control; u8 spec; __le16 len; u8 data[252]; }; struct ican3_new_desc { u8 control; u8 pointer; }; struct ican3_fast_desc { u8 control; u8 command; u8 data[14]; }; /* write to the window basic address register */ static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page) { BUG_ON(page >= DPM_NUM_PAGES); iowrite8(page, &mod->dpmctrl->window_address); } /* * ICAN3 "old-style" host interface */ /* * Receive a message from the ICAN3 "old-style" firmware interface * * LOCKING: must hold mod->lock * * returns 0 on success, -ENOMEM when no message exists */ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned int mbox, mbox_page; u8 locl, peer, xord; /* get the MSYNC registers */ ican3_set_page(mod, QUEUE_OLD_CONTROL); peer = ioread8(mod->dpm + MSYNC_PEER); locl = ioread8(mod->dpm + MSYNC_LOCL); xord = locl ^ peer; if ((xord & MSYNC_RB_MASK) == 0x00) { dev_dbg(mod->dev, "no mbox for reading\n"); return -ENOMEM; } /* find the first free mbox to read */ if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK) mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1; else mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1; /* copy the message */ mbox_page = (mbox == MSYNC_RB0) ? 
QUEUE_OLD_RB0 : QUEUE_OLD_RB1; ican3_set_page(mod, mbox_page); memcpy_fromio(msg, mod->dpm, sizeof(*msg)); /* * notify the firmware that the read buffer is available * for it to fill again */ locl ^= mbox; ican3_set_page(mod, QUEUE_OLD_CONTROL); iowrite8(locl, mod->dpm + MSYNC_LOCL); return 0; } /* * Send a message through the "old-style" firmware interface * * LOCKING: must hold mod->lock * * returns 0 on success, -ENOMEM when no free space exists */ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned int mbox, mbox_page; u8 locl, peer, xord; /* get the MSYNC registers */ ican3_set_page(mod, QUEUE_OLD_CONTROL); peer = ioread8(mod->dpm + MSYNC_PEER); locl = ioread8(mod->dpm + MSYNC_LOCL); xord = locl ^ peer; if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) { dev_err(mod->dev, "no mbox for writing\n"); return -ENOMEM; } /* calculate a free mbox to use */ mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0; /* copy the message to the DPM */ mbox_page = (mbox == MSYNC_WB0) ? 
QUEUE_OLD_WB0 : QUEUE_OLD_WB1; ican3_set_page(mod, mbox_page); memcpy_toio(mod->dpm, msg, sizeof(*msg)); locl ^= mbox; if (mbox == MSYNC_WB1) locl |= MSYNC_WBLW; ican3_set_page(mod, QUEUE_OLD_CONTROL); iowrite8(locl, mod->dpm + MSYNC_LOCL); return 0; } /* * ICAN3 "new-style" Host Interface Setup */ static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod) { struct ican3_new_desc desc; unsigned long flags; void __iomem *dst; int i; spin_lock_irqsave(&mod->lock, flags); /* setup the internal datastructures for RX */ mod->rx_num = 0; mod->rx_int = 0; /* tohost queue descriptors are in page 5 */ ican3_set_page(mod, QUEUE_TOHOST); dst = mod->dpm; /* initialize the tohost (rx) queue descriptors: pages 9-24 */ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */ desc.pointer = mod->free_page; /* set wrap flag on last buffer */ if (i == ICAN3_NEW_BUFFERS - 1) desc.control |= DESC_WRAP; memcpy_toio(dst, &desc, sizeof(desc)); dst += sizeof(desc); mod->free_page++; } /* fromhost (tx) mid queue descriptors are in page 6 */ ican3_set_page(mod, QUEUE_FROMHOST_MID); dst = mod->dpm; /* setup the internal datastructures for TX */ mod->tx_num = 0; /* initialize the fromhost mid queue descriptors: pages 25-40 */ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */ desc.pointer = mod->free_page; /* set wrap flag on last buffer */ if (i == ICAN3_NEW_BUFFERS - 1) desc.control |= DESC_WRAP; memcpy_toio(dst, &desc, sizeof(desc)); dst += sizeof(desc); mod->free_page++; } /* fromhost hi queue descriptors are in page 7 */ ican3_set_page(mod, QUEUE_FROMHOST_HIGH); dst = mod->dpm; /* initialize only a single buffer in the fromhost hi queue (unused) */ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ desc.pointer = mod->free_page; memcpy_toio(dst, &desc, sizeof(desc)); mod->free_page++; /* fromhost low queue descriptors are in page 8 */ ican3_set_page(mod, 
QUEUE_FROMHOST_LOW); dst = mod->dpm; /* initialize only a single buffer in the fromhost low queue (unused) */ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ desc.pointer = mod->free_page; memcpy_toio(dst, &desc, sizeof(desc)); mod->free_page++; spin_unlock_irqrestore(&mod->lock, flags); } /* * ICAN3 Fast Host Interface Setup */ static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod) { struct ican3_fast_desc desc; unsigned long flags; unsigned int addr; void __iomem *dst; int i; spin_lock_irqsave(&mod->lock, flags); /* save the start recv page */ mod->fastrx_start = mod->free_page; mod->fastrx_num = 0; mod->fastrx_int = 0; /* build a single fast tohost queue descriptor */ memset(&desc, 0, sizeof(desc)); desc.control = 0x00; desc.command = 1; /* build the tohost queue descriptor ring in memory */ addr = 0; for (i = 0; i < ICAN3_RX_BUFFERS; i++) { /* set the wrap bit on the last buffer */ if (i == ICAN3_RX_BUFFERS - 1) desc.control |= DESC_WRAP; /* switch to the correct page */ ican3_set_page(mod, mod->free_page); /* copy the descriptor to the DPM */ dst = mod->dpm + addr; memcpy_toio(dst, &desc, sizeof(desc)); addr += sizeof(desc); /* move to the next page if necessary */ if (addr >= DPM_PAGE_SIZE) { addr = 0; mod->free_page++; } } /* make sure we page-align the next queue */ if (addr != 0) mod->free_page++; /* save the start xmit page */ mod->fasttx_start = mod->free_page; mod->fasttx_num = 0; /* build a single fast fromhost queue descriptor */ memset(&desc, 0, sizeof(desc)); desc.control = DESC_VALID; desc.command = 1; /* build the fromhost queue descriptor ring in memory */ addr = 0; for (i = 0; i < ICAN3_TX_BUFFERS; i++) { /* set the wrap bit on the last buffer */ if (i == ICAN3_TX_BUFFERS - 1) desc.control |= DESC_WRAP; /* switch to the correct page */ ican3_set_page(mod, mod->free_page); /* copy the descriptor to the DPM */ dst = mod->dpm + addr; memcpy_toio(dst, &desc, sizeof(desc)); addr += sizeof(desc); /* move to the 
next page if necessary */ if (addr >= DPM_PAGE_SIZE) { addr = 0; mod->free_page++; } } spin_unlock_irqrestore(&mod->lock, flags); } /* * ICAN3 "new-style" Host Interface Message Helpers */ /* * LOCKING: must hold mod->lock */ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { struct ican3_new_desc desc; void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc)); /* switch to the fromhost mid queue, and read the buffer descriptor */ ican3_set_page(mod, QUEUE_FROMHOST_MID); memcpy_fromio(&desc, desc_addr, sizeof(desc)); if (!(desc.control & DESC_VALID)) { dev_dbg(mod->dev, "%s: no free buffers\n", __func__); return -ENOMEM; } /* switch to the data page, copy the data */ ican3_set_page(mod, desc.pointer); memcpy_toio(mod->dpm, msg, sizeof(*msg)); /* switch back to the descriptor, set the valid bit, write it back */ ican3_set_page(mod, QUEUE_FROMHOST_MID); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the tx number */ mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1); return 0; } /* * LOCKING: must hold mod->lock */ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { struct ican3_new_desc desc; void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc)); /* switch to the tohost queue, and read the buffer descriptor */ ican3_set_page(mod, QUEUE_TOHOST); memcpy_fromio(&desc, desc_addr, sizeof(desc)); if (!(desc.control & DESC_VALID)) { dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__); return -ENOMEM; } /* switch to the data page, copy the data */ ican3_set_page(mod, desc.pointer); memcpy_fromio(msg, mod->dpm, sizeof(*msg)); /* switch back to the descriptor, toggle the valid bit, write it back */ ican3_set_page(mod, QUEUE_TOHOST); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the rx number */ mod->rx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->rx_num + 1); return 0; } /* * Message Send / Recv Helpers */ static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned long flags; int ret; spin_lock_irqsave(&mod->lock, flags); if (mod->iftype == 0) ret = ican3_old_send_msg(mod, msg); else ret = ican3_new_send_msg(mod, msg); spin_unlock_irqrestore(&mod->lock, flags); return ret; } static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned long flags; int ret; spin_lock_irqsave(&mod->lock, flags); if (mod->iftype == 0) ret = ican3_old_recv_msg(mod, msg); else ret = ican3_new_recv_msg(mod, msg); spin_unlock_irqrestore(&mod->lock, flags); return ret; } /* * Quick Pre-constructed Messages */ static int __devinit ican3_msg_connect(struct ican3_dev *mod) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CONNECTI; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int __devexit ican3_msg_disconnect(struct ican3_dev *mod) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_DISCONNECT; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int __devinit ican3_msg_newhostif(struct ican3_dev *mod) { struct ican3_msg msg; int ret; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_NEWHOSTIF; msg.len = cpu_to_le16(0); /* If we're not using the old interface, switching seems bogus */ WARN_ON(mod->iftype != 0); ret = ican3_send_msg(mod, &msg); if (ret) return ret; /* mark the module as using the new host interface */ mod->iftype = 1; return 0; } static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod) { struct ican3_msg msg; unsigned int addr; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_INITFDPMQUEUE; msg.len = cpu_to_le16(8); /* write the tohost queue start address */ addr = DPM_PAGE_ADDR(mod->fastrx_start); msg.data[0] = addr & 0xff; msg.data[1] = (addr >> 8) & 0xff; msg.data[2] = (addr >> 16) & 0xff; msg.data[3] = (addr >> 24) & 0xff; /* write the fromhost queue start address */ addr = 
DPM_PAGE_ADDR(mod->fasttx_start); msg.data[4] = addr & 0xff; msg.data[5] = (addr >> 8) & 0xff; msg.data[6] = (addr >> 16) & 0xff; msg.data[7] = (addr >> 24) & 0xff; /* If we're not using the new interface yet, we cannot do this */ WARN_ON(mod->iftype != 1); return ican3_send_msg(mod, &msg); } /* * Setup the CAN filter to either accept or reject all * messages from the CAN bus. */ static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept) { struct ican3_msg msg; int ret; /* Standard Frame Format */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_SETAFILMASK; msg.len = cpu_to_le16(5); msg.data[0] = 0x00; /* IDLo LSB */ msg.data[1] = 0x00; /* IDLo MSB */ msg.data[2] = 0xff; /* IDHi LSB */ msg.data[3] = 0x07; /* IDHi MSB */ /* accept all frames for fast host if, or reject all frames */ msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; ret = ican3_send_msg(mod, &msg); if (ret) return ret; /* Extended Frame Format */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_SETAFILMASK; msg.len = cpu_to_le16(13); msg.data[0] = 0; /* MUX = 0 */ msg.data[1] = 0x00; /* IDLo LSB */ msg.data[2] = 0x00; msg.data[3] = 0x00; msg.data[4] = 0x20; /* IDLo MSB */ msg.data[5] = 0xff; /* IDHi LSB */ msg.data[6] = 0xff; msg.data[7] = 0xff; msg.data[8] = 0x3f; /* IDHi MSB */ /* accept all frames for fast host if, or reject all frames */ msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; return ican3_send_msg(mod, &msg); } /* * Bring the CAN bus online or offline */ static int ican3_set_bus_state(struct ican3_dev *mod, bool on) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = on ? MSG_CONREQ : MSG_COFFREQ; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int ican3_set_termination(struct ican3_dev *mod, bool on) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_HWCONF; msg.len = cpu_to_le16(2); msg.data[0] = 0x00; msg.data[1] = on ? 
HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF; return ican3_send_msg(mod, &msg); } static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_INQUIRY; msg.len = cpu_to_le16(2); msg.data[0] = subspec; msg.data[1] = 0x00; return ican3_send_msg(mod, &msg); } static int ican3_set_buserror(struct ican3_dev *mod, u8 quota) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CCONFREQ; msg.len = cpu_to_le16(2); msg.data[0] = 0x00; msg.data[1] = quota; return ican3_send_msg(mod, &msg); } /* * ICAN3 to Linux CAN Frame Conversion */ static void ican3_to_can_frame(struct ican3_dev *mod, struct ican3_fast_desc *desc, struct can_frame *cf) { if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) { if (desc->data[1] & ICAN3_SFF_RTR) cf->can_id |= CAN_RTR_FLAG; cf->can_id |= desc->data[0] << 3; cf->can_id |= (desc->data[1] & 0xe0) >> 5; cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK; memcpy(cf->data, &desc->data[2], sizeof(cf->data)); } else { cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK; if (desc->data[0] & ICAN3_EFF_RTR) cf->can_id |= CAN_RTR_FLAG; if (desc->data[0] & ICAN3_EFF) { cf->can_id |= CAN_EFF_FLAG; cf->can_id |= desc->data[2] << 21; /* 28-21 */ cf->can_id |= desc->data[3] << 13; /* 20-13 */ cf->can_id |= desc->data[4] << 5; /* 12-5 */ cf->can_id |= (desc->data[5] & 0xf8) >> 3; } else { cf->can_id |= desc->data[2] << 3; /* 10-3 */ cf->can_id |= desc->data[3] >> 5; /* 2-0 */ } memcpy(cf->data, &desc->data[6], sizeof(cf->data)); } } static void can_frame_to_ican3(struct ican3_dev *mod, struct can_frame *cf, struct ican3_fast_desc *desc) { /* clear out any stale data in the descriptor */ memset(desc->data, 0, sizeof(desc->data)); /* we always use the extended format, with the ECHO flag set */ desc->command = ICAN3_CAN_TYPE_EFF; desc->data[0] |= cf->can_dlc; desc->data[1] |= ICAN3_ECHO; if (cf->can_id & CAN_RTR_FLAG) desc->data[0] |= ICAN3_EFF_RTR; /* pack the id into 
the correct places */ if (cf->can_id & CAN_EFF_FLAG) { desc->data[0] |= ICAN3_EFF; desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */ desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */ desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */ desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */ } else { desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */ desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */ } /* copy the data bits into the descriptor */ memcpy(&desc->data[6], cf->data, sizeof(cf->data)); } /* * Interrupt Handling */ /* * Handle an ID + Version message response from the firmware. We never generate * this message in production code, but it is very useful when debugging to be * able to display this message. */ static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) { dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data); } static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) { struct net_device *dev = mod->ndev; struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; /* * Report that communication messages with the microcontroller firmware * are being lost. These are never CAN frames, so we do not generate an * error frame for userspace */ if (msg->spec == MSG_MSGLOST) { dev_err(mod->dev, "lost %d control messages\n", msg->data[0]); return; } /* * Oops, this indicates that we have lost messages in the fast queue, * which are exclusively CAN messages. Our driver isn't reading CAN * frames fast enough. * * We'll pretend that the SJA1000 told us that it ran out of buffer * space, because there is not a better message for this. 
*/ skb = alloc_can_err_skb(dev, &cf); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_errors++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); } } /* * Handle CAN Event Indication Messages from the firmware * * The ICAN3 firmware provides the values of some SJA1000 registers when it * generates this message. The code below is largely copied from the * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary */ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) { struct net_device *dev = mod->ndev; struct net_device_stats *stats = &dev->stats; enum can_state state = mod->can.state; u8 status, isrc, rxerr, txerr; struct can_frame *cf; struct sk_buff *skb; /* we can only handle the SJA1000 part */ if (msg->data[1] != CEVTIND_CHIP_SJA1000) { dev_err(mod->dev, "unable to handle errors on non-SJA1000\n"); return -ENODEV; } /* check the message length for sanity */ if (le16_to_cpu(msg->len) < 6) { dev_err(mod->dev, "error message too short\n"); return -EINVAL; } skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; isrc = msg->data[0]; status = msg->data[3]; rxerr = msg->data[4]; txerr = msg->data[5]; /* data overrun interrupt */ if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) { dev_dbg(mod->dev, "data overrun interrupt\n"); cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } /* error warning + passive interrupt */ if (isrc == CEVTIND_EI) { dev_dbg(mod->dev, "error warning + passive interrupt\n"); if (status & SR_BS) { state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; can_bus_off(dev); } else if (status & SR_ES) { if (rxerr >= 128 || txerr >= 128) state = CAN_STATE_ERROR_PASSIVE; else state = CAN_STATE_ERROR_WARNING; } else { state = CAN_STATE_ERROR_ACTIVE; } } /* bus error interrupt */ if (isrc == CEVTIND_BEI) { u8 ecc = msg->data[2]; dev_dbg(mod->dev, "bus error interrupt\n"); mod->can.can_stats.bus_error++; 
stats->rx_errors++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & ECC_MASK) { case ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & ECC_SEG; break; } if ((ecc & ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; cf->data[6] = txerr; cf->data[7] = rxerr; } if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING || state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL; if (state == CAN_STATE_ERROR_WARNING) { mod->can.can_stats.error_warning++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } else { mod->can.can_stats.error_passive++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } cf->data[6] = txerr; cf->data[7] = rxerr; } mod->can.state = state; stats->rx_errors++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); return 0; } static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg) { switch (msg->data[0]) { case INQUIRY_STATUS: case INQUIRY_EXTENDED: mod->bec.rxerr = msg->data[5]; mod->bec.txerr = msg->data[6]; complete(&mod->buserror_comp); break; case INQUIRY_TERMINATION: mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON; complete(&mod->termination_comp); break; default: dev_err(mod->dev, "received an unknown inquiry response\n"); break; } } static void ican3_handle_unknown_message(struct ican3_dev *mod, struct ican3_msg *msg) { dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n", msg->spec, le16_to_cpu(msg->len)); } /* * Handle a control message from the firmware */ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) { dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__, mod->num, msg->spec, le16_to_cpu(msg->len)); switch (msg->spec) { case MSG_IDVERS: ican3_handle_idvers(mod, msg); break; 
case MSG_MSGLOST: case MSG_FMSGLOST: ican3_handle_msglost(mod, msg); break; case MSG_CEVTIND: ican3_handle_cevtind(mod, msg); break; case MSG_INQUIRY: ican3_handle_inquiry(mod, msg); break; default: ican3_handle_unknown_message(mod, msg); break; } } /* * Check that there is room in the TX ring to transmit another skb * * LOCKING: must hold mod->lock */ static bool ican3_txok(struct ican3_dev *mod) { struct ican3_fast_desc __iomem *desc; u8 control; /* copy the control bits of the descriptor */ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc)); control = ioread8(&desc->control); /* if the control bits are not valid, then we have no more space */ if (!(control & DESC_VALID)) return false; return true; } /* * Receive one CAN frame from the hardware * * CONTEXT: must be called from user context */ static int ican3_recv_skb(struct ican3_dev *mod) { struct net_device *ndev = mod->ndev; struct net_device_stats *stats = &ndev->stats; struct ican3_fast_desc desc; void __iomem *desc_addr; struct can_frame *cf; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&mod->lock, flags); /* copy the whole descriptor */ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc)); memcpy_fromio(&desc, desc_addr, sizeof(desc)); spin_unlock_irqrestore(&mod->lock, flags); /* check that we actually have a CAN frame */ if (!(desc.control & DESC_VALID)) return -ENOBUFS; /* allocate an skb */ skb = alloc_can_skb(ndev, &cf); if (unlikely(skb == NULL)) { stats->rx_dropped++; goto err_noalloc; } /* convert the ICAN3 frame into Linux CAN format */ ican3_to_can_frame(mod, &desc, cf); /* receive the skb, update statistics */ netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; err_noalloc: /* toggle the valid bit and return the descriptor to the ring */ desc.control ^= DESC_VALID; spin_lock_irqsave(&mod->lock, flags); 
ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); memcpy_toio(desc_addr, &desc, 1); /* update the next buffer pointer */ mod->fastrx_num = (desc.control & DESC_WRAP) ? 0 : (mod->fastrx_num + 1); /* there are still more buffers to process */ spin_unlock_irqrestore(&mod->lock, flags); return 0; } static int ican3_napi(struct napi_struct *napi, int budget) { struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi); struct ican3_msg msg; unsigned long flags; int received = 0; int ret; /* process all communication messages */ while (true) { ret = ican3_recv_msg(mod, &msg); if (ret) break; ican3_handle_message(mod, &msg); } /* process all CAN frames from the fast interface */ while (received < budget) { ret = ican3_recv_skb(mod); if (ret) break; received++; } /* We have processed all packets that the adapter had, but it * was less than our budget, stop polling */ if (received < budget) napi_complete(napi); spin_lock_irqsave(&mod->lock, flags); /* Wake up the transmit queue if necessary */ if (netif_queue_stopped(mod->ndev) && ican3_txok(mod)) netif_wake_queue(mod->ndev); spin_unlock_irqrestore(&mod->lock, flags); /* re-enable interrupt generation */ iowrite8(1 << mod->num, &mod->ctrl->int_enable); return received; } static irqreturn_t ican3_irq(int irq, void *dev_id) { struct ican3_dev *mod = dev_id; u8 stat; /* * The interrupt status register on this device reports interrupts * as zeroes instead of using ones like most other devices */ stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num); if (stat == (1 << mod->num)) return IRQ_NONE; /* clear the MODULbus interrupt from the microcontroller */ ioread8(&mod->dpmctrl->interrupt); /* disable interrupt generation, schedule the NAPI poller */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); napi_schedule(&mod->napi); return IRQ_HANDLED; } /* * Firmware reset, startup, and shutdown */ /* * Reset an ICAN module to its power-on state * * CONTEXT: no network device registered */ static int 
ican3_reset_module(struct ican3_dev *mod) { u8 val = 1 << mod->num; unsigned long start; u8 runold, runnew; /* disable interrupts so no more work is scheduled */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); /* the first unallocated page in the DPM is #9 */ mod->free_page = DPM_FREE_START; ican3_set_page(mod, QUEUE_OLD_CONTROL); runold = ioread8(mod->dpm + TARGET_RUNNING); /* reset the module */ iowrite8(val, &mod->ctrl->reset_assert); iowrite8(val, &mod->ctrl->reset_deassert); /* wait until the module has finished resetting and is running */ start = jiffies; do { ican3_set_page(mod, QUEUE_OLD_CONTROL); runnew = ioread8(mod->dpm + TARGET_RUNNING); if (runnew == (runold ^ 0xff)) return 0; msleep(10); } while (time_before(jiffies, start + HZ / 4)); dev_err(mod->dev, "failed to reset CAN module\n"); return -ETIMEDOUT; } static void __devexit ican3_shutdown_module(struct ican3_dev *mod) { ican3_msg_disconnect(mod); ican3_reset_module(mod); } /* * Startup an ICAN module, bringing it into fast mode */ static int __devinit ican3_startup_module(struct ican3_dev *mod) { int ret; ret = ican3_reset_module(mod); if (ret) { dev_err(mod->dev, "unable to reset module\n"); return ret; } /* re-enable interrupts so we can send messages */ iowrite8(1 << mod->num, &mod->ctrl->int_enable); ret = ican3_msg_connect(mod); if (ret) { dev_err(mod->dev, "unable to connect to module\n"); return ret; } ican3_init_new_host_interface(mod); ret = ican3_msg_newhostif(mod); if (ret) { dev_err(mod->dev, "unable to switch to new-style interface\n"); return ret; } /* default to "termination on" */ ret = ican3_set_termination(mod, true); if (ret) { dev_err(mod->dev, "unable to enable termination\n"); return ret; } /* default to "bus errors enabled" */ ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX); if (ret) { dev_err(mod->dev, "unable to set bus-error\n"); return ret; } ican3_init_fast_host_interface(mod); ret = ican3_msg_fasthostif(mod); if (ret) { dev_err(mod->dev, "unable to switch to 
fast host interface\n"); return ret; } ret = ican3_set_id_filter(mod, true); if (ret) { dev_err(mod->dev, "unable to set acceptance filter\n"); return ret; } return 0; } /* * CAN Network Device */ static int ican3_open(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); u8 quota; int ret; /* open the CAN layer */ ret = open_candev(ndev); if (ret) { dev_err(mod->dev, "unable to start CAN layer\n"); return ret; } /* set the bus error generation state appropriately */ if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) quota = ICAN3_BUSERR_QUOTA_MAX; else quota = 0; ret = ican3_set_buserror(mod, quota); if (ret) { dev_err(mod->dev, "unable to set bus-error\n"); close_candev(ndev); return ret; } /* bring the bus online */ ret = ican3_set_bus_state(mod, true); if (ret) { dev_err(mod->dev, "unable to set bus-on\n"); close_candev(ndev); return ret; } /* start up the network device */ mod->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(ndev); return 0; } static int ican3_stop(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); int ret; /* stop the network device xmit routine */ netif_stop_queue(ndev); mod->can.state = CAN_STATE_STOPPED; /* bring the bus offline, stop receiving packets */ ret = ican3_set_bus_state(mod, false); if (ret) { dev_err(mod->dev, "unable to set bus-off\n"); return ret; } /* close the CAN layer */ close_candev(ndev); return 0; } static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; struct ican3_fast_desc desc; void __iomem *desc_addr; unsigned long flags; spin_lock_irqsave(&mod->lock, flags); /* check that we can actually transmit */ if (!ican3_txok(mod)) { dev_err(mod->dev, "no free descriptors, stopping queue\n"); netif_stop_queue(ndev); spin_unlock_irqrestore(&mod->lock, flags); return NETDEV_TX_BUSY; } /* copy the control bits of the descriptor 
*/ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc)); memset(&desc, 0, sizeof(desc)); memcpy_fromio(&desc, desc_addr, 1); /* convert the Linux CAN frame into ICAN3 format */ can_frame_to_ican3(mod, cf, &desc); /* * the programming manual says that you must set the IVALID bit, then * interrupt, then set the valid bit. Quite weird, but it seems to be * required for this to work */ desc.control |= DESC_IVALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* generate a MODULbus interrupt to the microcontroller */ iowrite8(0x01, &mod->dpmctrl->interrupt); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the next buffer pointer */ mod->fasttx_num = (desc.control & DESC_WRAP) ? 0 : (mod->fasttx_num + 1); /* update statistics */ stats->tx_packets++; stats->tx_bytes += cf->can_dlc; kfree_skb(skb); /* * This hardware doesn't have TX-done notifications, so we'll try and * emulate it the best we can using ECHO skbs. Get the next TX * descriptor, and see if we have room to send. If not, stop the queue. * It will be woken when the ECHO skb for the current packet is recv'd. 
*/ /* copy the control bits of the descriptor */ if (!ican3_txok(mod)) netif_stop_queue(ndev); spin_unlock_irqrestore(&mod->lock, flags); return NETDEV_TX_OK; } static const struct net_device_ops ican3_netdev_ops = { .ndo_open = ican3_open, .ndo_stop = ican3_stop, .ndo_start_xmit = ican3_xmit, }; /* * Low-level CAN Device */ /* This structure was stolen from drivers/net/can/sja1000/sja1000.c */ static struct can_bittiming_const ican3_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; /* * This routine was stolen from drivers/net/can/sja1000/sja1000.c * * The bittiming register command for the ICAN3 just sets the bit timing * registers on the SJA1000 chip directly */ static int ican3_set_bittiming(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct can_bittiming *bt = &mod->can.bittiming; struct ican3_msg msg; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CBTRREQ; msg.len = cpu_to_le16(4); msg.data[0] = 0x00; msg.data[1] = 0x00; msg.data[2] = btr0; msg.data[3] = btr1; return ican3_send_msg(mod, &msg); } static int ican3_set_mode(struct net_device *ndev, enum can_mode mode) { struct ican3_dev *mod = netdev_priv(ndev); int ret; if (mode != CAN_MODE_START) return -ENOTSUPP; /* bring the bus online */ ret = ican3_set_bus_state(mod, true); if (ret) { dev_err(mod->dev, "unable to set bus-on\n"); return ret; } /* start up the network device */ mod->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); return 0; } static int ican3_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct ican3_dev *mod = netdev_priv(ndev); int ret; ret = 
ican3_send_inquiry(mod, INQUIRY_STATUS); if (ret) return ret; ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); if (ret <= 0) { dev_info(mod->dev, "%s timed out\n", __func__); return -ETIMEDOUT; } bec->rxerr = mod->bec.rxerr; bec->txerr = mod->bec.txerr; return 0; } /* * Sysfs Attributes */ static ssize_t ican3_sysfs_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); int ret; ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION); if (ret) return ret; ret = wait_for_completion_timeout(&mod->termination_comp, HZ); if (ret <= 0) { dev_info(mod->dev, "%s timed out\n", __func__); return -ETIMEDOUT; } return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); } static ssize_t ican3_sysfs_set_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); unsigned long enable; int ret; if (strict_strtoul(buf, 0, &enable)) return -EINVAL; ret = ican3_set_termination(mod, enable); if (ret) return ret; return count; } static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term, ican3_sysfs_set_term); static struct attribute *ican3_sysfs_attrs[] = { &dev_attr_termination.attr, NULL, }; static struct attribute_group ican3_sysfs_attr_group = { .attrs = ican3_sysfs_attrs, }; /* * PCI Subsystem */ static int __devinit ican3_probe(struct platform_device *pdev) { struct janz_platform_data *pdata; struct net_device *ndev; struct ican3_dev *mod; struct resource *res; struct device *dev; int ret; pdata = pdev->dev.platform_data; if (!pdata) return -ENXIO; dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno); /* save the struct device for printing */ dev = &pdev->dev; /* allocate the CAN device and private data */ ndev = alloc_candev(sizeof(*mod), 0); if (!ndev) { dev_err(dev, "unable to allocate CANdev\n"); ret = -ENOMEM; goto out_return; } platform_set_drvdata(pdev, ndev); mod = 
netdev_priv(ndev); mod->ndev = ndev; mod->dev = &pdev->dev; mod->num = pdata->modno; netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); spin_lock_init(&mod->lock); init_completion(&mod->termination_comp); init_completion(&mod->buserror_comp); /* setup device-specific sysfs attributes */ ndev->sysfs_groups[0] = &ican3_sysfs_attr_group; /* the first unallocated page in the DPM is 9 */ mod->free_page = DPM_FREE_START; ndev->netdev_ops = &ican3_netdev_ops; ndev->flags |= IFF_ECHO; SET_NETDEV_DEV(ndev, &pdev->dev); mod->can.clock.freq = ICAN3_CAN_CLOCK; mod->can.bittiming_const = &ican3_bittiming_const; mod->can.do_set_bittiming = ican3_set_bittiming; mod->can.do_set_mode = ican3_set_mode; mod->can.do_get_berr_counter = ican3_get_berr_counter; mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_BERR_REPORTING; /* find our IRQ number */ mod->irq = platform_get_irq(pdev, 0); if (mod->irq < 0) { dev_err(dev, "IRQ line not found\n"); ret = -ENODEV; goto out_free_ndev; } ndev->irq = mod->irq; /* get access to the MODULbus registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "MODULbus registers not found\n"); ret = -ENODEV; goto out_free_ndev; } mod->dpm = ioremap(res->start, resource_size(res)); if (!mod->dpm) { dev_err(dev, "MODULbus registers not ioremap\n"); ret = -ENOMEM; goto out_free_ndev; } mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE; /* get access to the control registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(dev, "CONTROL registers not found\n"); ret = -ENODEV; goto out_iounmap_dpm; } mod->ctrl = ioremap(res->start, resource_size(res)); if (!mod->ctrl) { dev_err(dev, "CONTROL registers not ioremap\n"); ret = -ENOMEM; goto out_iounmap_dpm; } /* disable our IRQ, then hookup the IRQ handler */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod); if (ret) { dev_err(dev, 
"unable to request IRQ\n"); goto out_iounmap_ctrl; } /* reset and initialize the CAN controller into fast mode */ napi_enable(&mod->napi); ret = ican3_startup_module(mod); if (ret) { dev_err(dev, "%s: unable to start CANdev\n", __func__); goto out_free_irq; } /* register with the Linux CAN layer */ ret = register_candev(ndev); if (ret) { dev_err(dev, "%s: unable to register CANdev\n", __func__); goto out_free_irq; } dev_info(dev, "module %d: registered CAN device\n", pdata->modno); return 0; out_free_irq: napi_disable(&mod->napi); iowrite8(1 << mod->num, &mod->ctrl->int_disable); free_irq(mod->irq, mod); out_iounmap_ctrl: iounmap(mod->ctrl); out_iounmap_dpm: iounmap(mod->dpm); out_free_ndev: free_candev(ndev); out_return: return ret; } static int __devexit ican3_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ican3_dev *mod = netdev_priv(ndev); /* unregister the netdevice, stop interrupts */ unregister_netdev(ndev); napi_disable(&mod->napi); iowrite8(1 << mod->num, &mod->ctrl->int_disable); free_irq(mod->irq, mod); /* put the module into reset */ ican3_shutdown_module(mod); /* unmap all registers */ iounmap(mod->ctrl); iounmap(mod->dpm); free_candev(ndev); return 0; } static struct platform_driver ican3_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ican3_probe, .remove = __devexit_p(ican3_remove), }; static int __init ican3_init(void) { return platform_driver_register(&ican3_driver); } static void __exit ican3_exit(void) { platform_driver_unregister(&ican3_driver); } MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:janz-ican3"); module_init(ican3_init); module_exit(ican3_exit);
gpl-2.0
chrisw957/gumstix-linux
arch/arm/mm/fault-armv.c
852
6860
/* * linux/arch/arm/mm/fault-armv.c * * Copyright (C) 1995 Linus Torvalds * Modifications for ARM processor (c) 1995-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/gfp.h> #include <asm/bugs.h> #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "mm.h" static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE; #if __LINUX_ARM_ARCH__ < 6 /* * We take the easy way out of this problem - we make the * PTE uncacheable. However, we leave the write buffer on. * * Note that the pte lock held when calling update_mmu_cache must also * guard the pte (somewhere else in the same mm) that we modify here. * Therefore those configurations which might call adjust_pte (those * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn, pte_t *ptep) { pte_t entry = *ptep; int ret; /* * If this page is present, it's actually being shared. */ ret = pte_present(entry); /* * If this page isn't present, or is already setup to * fault (ie, is old), we can safely ignore any issues. */ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { flush_cache_page(vma, address, pfn); outer_flush_range((pfn << PAGE_SHIFT), (pfn << PAGE_SHIFT) + PAGE_SIZE); pte_val(entry) &= ~L_PTE_MT_MASK; pte_val(entry) |= shared_pte_mask; set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_page(vma, address); } return ret; } #if USE_SPLIT_PTE_PTLOCKS /* * If we are using split PTE locks, then we need to take the page * lock here. 
Otherwise we are using shared mm->page_table_lock * which is already locked, thus cannot take it. */ static inline void do_pte_lock(spinlock_t *ptl) { /* * Use nested version here to indicate that we are already * holding one similar spinlock. */ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); } static inline void do_pte_unlock(spinlock_t *ptl) { spin_unlock(ptl); } #else /* !USE_SPLIT_PTE_PTLOCKS */ static inline void do_pte_lock(spinlock_t *ptl) {} static inline void do_pte_unlock(spinlock_t *ptl) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) { spinlock_t *ptl; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret; pgd = pgd_offset(vma->vm_mm, address); if (pgd_none_or_clear_bad(pgd)) return 0; pud = pud_offset(pgd, address); if (pud_none_or_clear_bad(pud)) return 0; pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) return 0; /* * This is called while another page table is mapped, so we * must use the nested version. This also means we need to * open-code the spin-locking. */ ptl = pte_lockptr(vma->vm_mm, pmd); pte = pte_offset_map(pmd, address); do_pte_lock(ptl); ret = do_adjust_pte(vma, address, pfn, pte); do_pte_unlock(ptl); pte_unmap(pte); return ret; } static void make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned long pfn) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; unsigned long offset; pgoff_t pgoff; int aliases = 0; pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); /* * If we have any shared mappings that are in the same mm * space, then we need to handle them specially to maintain * cache coherency. */ flush_dcache_mmap_lock(mapping); vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { /* * If this VMA is not in our MM, we can ignore it. * Note that we intentionally mask out the VMA * that we are fixing up. 
*/ if (mpnt->vm_mm != mm || mpnt == vma) continue; if (!(mpnt->vm_flags & VM_MAYSHARE)) continue; offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); } flush_dcache_mmap_unlock(mapping); if (aliases) do_adjust_pte(vma, addr, pfn, ptep); } /* * Take care of architecture specific things when placing a new PTE into * a page table, or changing an existing PTE. Basically, there are two * things that we need to take care of: * * 1. If PG_dcache_clean is not set for the page, we need to ensure * that any cache entries for the kernels virtual memory * range are written back to the page. * 2. If we have multiple shared mappings of the same space in * an object, we need to deal with the cache aliasing issues. * * Note that the pte lock will be held. */ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; if (!pfn_valid(pfn)) return; /* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed. */ page = pfn_to_page(pfn); if (page == ZERO_PAGE(0)) return; mapping = page_mapping(page); if (!test_and_set_bit(PG_dcache_clean, &page->flags)) __flush_dcache_page(mapping, page); if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, ptep, pfn); else if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } } #endif /* __LINUX_ARM_ARCH__ < 6 */ /* * Check whether the write buffer has physical address aliasing * issues. If it has, we need to avoid them for the case where * we have several shared mappings of the same object in user * space. 
*/ static int __init check_writebuffer(unsigned long *p1, unsigned long *p2) { register unsigned long zero = 0, one = 1, val; local_irq_disable(); mb(); *p1 = one; mb(); *p2 = zero; mb(); val = *p1; mb(); local_irq_enable(); return val != zero; } void __init check_writebuffer_bugs(void) { struct page *page; const char *reason; unsigned long v = 1; printk(KERN_INFO "CPU: Testing write buffer coherency: "); page = alloc_page(GFP_KERNEL); if (page) { unsigned long *p1, *p2; pgprot_t prot = __pgprot_modify(PAGE_KERNEL, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE); p1 = vmap(&page, 1, VM_IOREMAP, prot); p2 = vmap(&page, 1, VM_IOREMAP, prot); if (p1 && p2) { v = check_writebuffer(p1, p2); reason = "enabling work-around"; } else { reason = "unable to map memory\n"; } vunmap(p1); vunmap(p2); put_page(page); } else { reason = "unable to grab page\n"; } if (v) { printk("failed, %s\n", reason); shared_pte_mask = L_PTE_MT_UNCACHED; } else { printk("ok\n"); } }
gpl-2.0
underdarkonsole/huawei_vision_kernel_JB
net/rds/af_rds.c
852
14354
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/in.h> #include <linux/poll.h> #include <net/sock.h> #include "rds.h" #include "rdma.h" /* this is just used for stats gathering :/ */ static DEFINE_SPINLOCK(rds_sock_lock); static unsigned long rds_sock_count; static LIST_HEAD(rds_sock_list); DECLARE_WAIT_QUEUE_HEAD(rds_poll_waitq); /* * This is called as the final descriptor referencing this socket is closed. * We have to unbind the socket so that another socket can be bound to the * address it was using. * * We have to be careful about racing with the incoming path. 
sock_orphan() * sets SOCK_DEAD and we use that as an indicator to the rx path that new * messages shouldn't be queued. */ static int rds_release(struct socket *sock) { struct sock *sk = sock->sk; struct rds_sock *rs; unsigned long flags; if (sk == NULL) goto out; rs = rds_sk_to_rs(sk); sock_orphan(sk); /* Note - rds_clear_recv_queue grabs rs_recv_lock, so * that ensures the recv path has completed messing * with the socket. */ rds_clear_recv_queue(rs); rds_cong_remove_socket(rs); rds_remove_bound(rs); rds_send_drop_to(rs, NULL); rds_rdma_drop_keys(rs); rds_notify_queue_get(rs, NULL); spin_lock_irqsave(&rds_sock_lock, flags); list_del_init(&rs->rs_item); rds_sock_count--; spin_unlock_irqrestore(&rds_sock_lock, flags); sock->sk = NULL; sock_put(sk); out: return 0; } /* * Careful not to race with rds_release -> sock_orphan which clears sk_sleep. * _bh() isn't OK here, we're called from interrupt handlers. It's probably OK * to wake the waitqueue after sk_sleep is clear as we hold a sock ref, but * this seems more conservative. * NB - normally, one would use sk_callback_lock for this, but we can * get here from interrupts, whereas the network code grabs sk_callback_lock * with _lock_bh only - so relying on sk_callback_lock introduces livelocks. 
*/ void rds_wake_sk_sleep(struct rds_sock *rs) { unsigned long flags; read_lock_irqsave(&rs->rs_recv_lock, flags); __rds_wake_sk_sleep(rds_rs_to_sk(rs)); read_unlock_irqrestore(&rs->rs_recv_lock, flags); } static int rds_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; struct rds_sock *rs = rds_sk_to_rs(sock->sk); memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); /* racey, don't care */ if (peer) { if (!rs->rs_conn_addr) return -ENOTCONN; sin->sin_port = rs->rs_conn_port; sin->sin_addr.s_addr = rs->rs_conn_addr; } else { sin->sin_port = rs->rs_bound_port; sin->sin_addr.s_addr = rs->rs_bound_addr; } sin->sin_family = AF_INET; *uaddr_len = sizeof(*sin); return 0; } /* * RDS' poll is without a doubt the least intuitive part of the interface, * as POLLIN and POLLOUT do not behave entirely as you would expect from * a network protocol. * * POLLIN is asserted if * - there is data on the receive queue. * - to signal that a previously congested destination may have become * uncongested * - A notification has been queued to the socket (this can be a congestion * update, or a RDMA completion). * * POLLOUT is asserted if there is room on the send queue. This does not mean * however, that the next sendmsg() call will succeed. If the application tries * to send to a congested destination, the system call may still fail (and * return ENOBUFS). */ static unsigned int rds_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct rds_sock *rs = rds_sk_to_rs(sk); unsigned int mask = 0; unsigned long flags; poll_wait(file, sk_sleep(sk), wait); if (rs->rs_seen_congestion) poll_wait(file, &rds_poll_waitq, wait); read_lock_irqsave(&rs->rs_recv_lock, flags); if (!rs->rs_cong_monitor) { /* When a congestion map was updated, we signal POLLIN for * "historical" reasons. Applications can also poll for * WRBAND instead. 
*/ if (rds_cong_updated_since(&rs->rs_cong_track)) mask |= (POLLIN | POLLRDNORM | POLLWRBAND); } else { spin_lock(&rs->rs_lock); if (rs->rs_cong_notify) mask |= (POLLIN | POLLRDNORM); spin_unlock(&rs->rs_lock); } if (!list_empty(&rs->rs_recv_queue) || !list_empty(&rs->rs_notify_queue)) mask |= (POLLIN | POLLRDNORM); if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) mask |= (POLLOUT | POLLWRNORM); read_unlock_irqrestore(&rs->rs_recv_lock, flags); /* clear state any time we wake a seen-congested socket */ if (mask) rs->rs_seen_congestion = 0; return mask; } static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, int len) { struct sockaddr_in sin; int ret = 0; /* racing with another thread binding seems ok here */ if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (len < sizeof(struct sockaddr_in)) { ret = -EINVAL; goto out; } if (copy_from_user(&sin, optval, sizeof(sin))) { ret = -EFAULT; goto out; } rds_send_drop_to(rs, &sin); out: return ret; } static int rds_set_bool_option(unsigned char *optvar, char __user *optval, int optlen) { int value; if (optlen < sizeof(int)) return -EINVAL; if (get_user(value, (int __user *) optval)) return -EFAULT; *optvar = !!value; return 0; } static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, int optlen) { int ret; ret = rds_set_bool_option(&rs->rs_cong_monitor, optval, optlen); if (ret == 0) { if (rs->rs_cong_monitor) { rds_cong_add_socket(rs); } else { rds_cong_remove_socket(rs); rs->rs_cong_mask = 0; rs->rs_cong_notify = 0; } } return ret; } static int rds_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct rds_sock *rs = rds_sk_to_rs(sock->sk); int ret; if (level != SOL_RDS) { ret = -ENOPROTOOPT; goto out; } switch (optname) { case RDS_CANCEL_SENT_TO: ret = rds_cancel_sent_to(rs, optval, optlen); break; case 
RDS_GET_MR: ret = rds_get_mr(rs, optval, optlen); break; case RDS_GET_MR_FOR_DEST: ret = rds_get_mr_for_dest(rs, optval, optlen); break; case RDS_FREE_MR: ret = rds_free_mr(rs, optval, optlen); break; case RDS_RECVERR: ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen); break; case RDS_CONG_MONITOR: ret = rds_cong_monitor(rs, optval, optlen); break; default: ret = -ENOPROTOOPT; } out: return ret; } static int rds_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct rds_sock *rs = rds_sk_to_rs(sock->sk); int ret = -ENOPROTOOPT, len; if (level != SOL_RDS) goto out; if (get_user(len, optlen)) { ret = -EFAULT; goto out; } switch (optname) { case RDS_INFO_FIRST ... RDS_INFO_LAST: ret = rds_info_getsockopt(sock, optname, optval, optlen); break; case RDS_RECVERR: if (len < sizeof(int)) ret = -EINVAL; else if (put_user(rs->rs_recverr, (int __user *) optval) || put_user(sizeof(int), optlen)) ret = -EFAULT; else ret = 0; break; default: break; } out: return ret; } static int rds_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; struct rds_sock *rs = rds_sk_to_rs(sk); int ret = 0; lock_sock(sk); if (addr_len != sizeof(struct sockaddr_in)) { ret = -EINVAL; goto out; } if (sin->sin_family != AF_INET) { ret = -EAFNOSUPPORT; goto out; } if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) { ret = -EDESTADDRREQ; goto out; } rs->rs_conn_addr = sin->sin_addr.s_addr; rs->rs_conn_port = sin->sin_port; out: release_sock(sk); return ret; } static struct proto rds_proto = { .name = "RDS", .owner = THIS_MODULE, .obj_size = sizeof(struct rds_sock), }; static const struct proto_ops rds_proto_ops = { .family = AF_RDS, .owner = THIS_MODULE, .release = rds_release, .bind = rds_bind, .connect = rds_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = rds_getname, .poll = rds_poll, .ioctl = rds_ioctl, 
.listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = rds_setsockopt, .getsockopt = rds_getsockopt, .sendmsg = rds_sendmsg, .recvmsg = rds_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static int __rds_create(struct socket *sock, struct sock *sk, int protocol) { unsigned long flags; struct rds_sock *rs; sock_init_data(sock, sk); sock->ops = &rds_proto_ops; sk->sk_protocol = protocol; rs = rds_sk_to_rs(sk); spin_lock_init(&rs->rs_lock); rwlock_init(&rs->rs_recv_lock); INIT_LIST_HEAD(&rs->rs_send_queue); INIT_LIST_HEAD(&rs->rs_recv_queue); INIT_LIST_HEAD(&rs->rs_notify_queue); INIT_LIST_HEAD(&rs->rs_cong_list); spin_lock_init(&rs->rs_rdma_lock); rs->rs_rdma_keys = RB_ROOT; spin_lock_irqsave(&rds_sock_lock, flags); list_add_tail(&rs->rs_item, &rds_sock_list); rds_sock_count++; spin_unlock_irqrestore(&rds_sock_lock, flags); return 0; } static int rds_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (sock->type != SOCK_SEQPACKET || protocol) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto); if (!sk) return -ENOMEM; return __rds_create(sock, sk, protocol); } void rds_sock_addref(struct rds_sock *rs) { sock_hold(rds_rs_to_sk(rs)); } void rds_sock_put(struct rds_sock *rs) { sock_put(rds_rs_to_sk(rs)); } static const struct net_proto_family rds_family_ops = { .family = AF_RDS, .create = rds_create, .owner = THIS_MODULE, }; static void rds_sock_inc_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds_sock *rs; struct rds_incoming *inc; unsigned long flags; unsigned int total = 0; len /= sizeof(struct rds_info_message); spin_lock_irqsave(&rds_sock_lock, flags); list_for_each_entry(rs, &rds_sock_list, rs_item) { read_lock(&rs->rs_recv_lock); /* XXX too lazy to maintain counts.. 
*/ list_for_each_entry(inc, &rs->rs_recv_queue, i_item) { total++; if (total <= len) rds_inc_info_copy(inc, iter, inc->i_saddr, rs->rs_bound_addr, 1); } read_unlock(&rs->rs_recv_lock); } spin_unlock_irqrestore(&rds_sock_lock, flags); lens->nr = total; lens->each = sizeof(struct rds_info_message); } static void rds_sock_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds_info_socket sinfo; struct rds_sock *rs; unsigned long flags; len /= sizeof(struct rds_info_socket); spin_lock_irqsave(&rds_sock_lock, flags); if (len < rds_sock_count) goto out; list_for_each_entry(rs, &rds_sock_list, rs_item) { sinfo.sndbuf = rds_sk_sndbuf(rs); sinfo.rcvbuf = rds_sk_rcvbuf(rs); sinfo.bound_addr = rs->rs_bound_addr; sinfo.connected_addr = rs->rs_conn_addr; sinfo.bound_port = rs->rs_bound_port; sinfo.connected_port = rs->rs_conn_port; sinfo.inum = sock_i_ino(rds_rs_to_sk(rs)); rds_info_copy(iter, &sinfo, sizeof(sinfo)); } out: lens->nr = rds_sock_count; lens->each = sizeof(struct rds_info_socket); spin_unlock_irqrestore(&rds_sock_lock, flags); } static void __exit rds_exit(void) { sock_unregister(rds_family_ops.family); proto_unregister(&rds_proto); rds_conn_exit(); rds_cong_exit(); rds_sysctl_exit(); rds_threads_exit(); rds_stats_exit(); rds_page_exit(); rds_info_deregister_func(RDS_INFO_SOCKETS, rds_sock_info); rds_info_deregister_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info); } module_exit(rds_exit); static int __init rds_init(void) { int ret; ret = rds_conn_init(); if (ret) goto out; ret = rds_threads_init(); if (ret) goto out_conn; ret = rds_sysctl_init(); if (ret) goto out_threads; ret = rds_stats_init(); if (ret) goto out_sysctl; ret = proto_register(&rds_proto, 1); if (ret) goto out_stats; ret = sock_register(&rds_family_ops); if (ret) goto out_proto; rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info); rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info); goto out; out_proto: 
proto_unregister(&rds_proto); out_stats: rds_stats_exit(); out_sysctl: rds_sysctl_exit(); out_threads: rds_threads_exit(); out_conn: rds_conn_exit(); rds_cong_exit(); rds_page_exit(); out: return ret; } module_init(rds_init); #define DRV_VERSION "4.0" #define DRV_RELDATE "Feb 12, 2009" MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>"); MODULE_DESCRIPTION("RDS: Reliable Datagram Sockets" " v" DRV_VERSION " (" DRV_RELDATE ")"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS_NETPROTO(PF_RDS);
gpl-2.0
EpicCM/SPH-D700-Kernel
drivers/hwmon/via-cputemp.c
852
8135
/* * via-cputemp.c - Driver for VIA CPU core temperature monitoring * Copyright (C) 2009 VIA Technologies, Inc. * * based on existing coretemp.c, which is * * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/hwmon.h> #include <linux/sysfs.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <asm/msr.h> #include <asm/processor.h> #define DRVNAME "via_cputemp" enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW; /* * Functions declaration */ struct via_cputemp_data { struct device *hwmon_dev; const char *name; u32 id; u32 msr; }; /* * Sysfs stuff */ static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { int ret; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct via_cputemp_data *data = dev_get_drvdata(dev); if (attr->index == SHOW_NAME) ret = sprintf(buf, "%s\n", data->name); else /* show label */ ret = sprintf(buf, "Core %d\n", data->id); return ret; } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct via_cputemp_data *data = dev_get_drvdata(dev); 
u32 eax, edx; int err; err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx); if (err) return -EAGAIN; return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, SHOW_TEMP); static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); static struct attribute *via_cputemp_attributes[] = { &sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_temp1_label.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL }; static const struct attribute_group via_cputemp_group = { .attrs = via_cputemp_attributes, }; static int __devinit via_cputemp_probe(struct platform_device *pdev) { struct via_cputemp_data *data; struct cpuinfo_x86 *c = &cpu_data(pdev->id); int err; u32 eax, edx; data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL); if (!data) { err = -ENOMEM; dev_err(&pdev->dev, "Out of memory\n"); goto exit; } data->id = pdev->id; data->name = "via_cputemp"; switch (c->x86_model) { case 0xA: /* C7 A */ case 0xD: /* C7 D */ data->msr = 0x1169; break; case 0xF: /* Nano */ data->msr = 0x1423; break; default: err = -ENODEV; goto exit_free; } /* test if we can access the TEMPERATURE MSR */ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx); if (err) { dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); goto exit_free; } platform_set_drvdata(pdev, data); err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); dev_err(&pdev->dev, "Class registration failed (%d)\n", err); goto exit_remove; } return 0; exit_remove: sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group); exit_free: platform_set_drvdata(pdev, NULL); kfree(data); exit: return err; } static int __devexit via_cputemp_remove(struct platform_device *pdev) { struct 
via_cputemp_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static struct platform_driver via_cputemp_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = via_cputemp_probe, .remove = __devexit_p(via_cputemp_remove), }; struct pdev_entry { struct list_head list; struct platform_device *pdev; unsigned int cpu; }; static LIST_HEAD(pdev_list); static DEFINE_MUTEX(pdev_list_mutex); static int __cpuinit via_cputemp_device_add(unsigned int cpu) { int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; pdev = platform_device_alloc(DRVNAME, cpu); if (!pdev) { err = -ENOMEM; printk(KERN_ERR DRVNAME ": Device allocation failed\n"); goto exit; } pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); if (!pdev_entry) { err = -ENOMEM; goto exit_device_put; } err = platform_device_add(pdev); if (err) { printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", err); goto exit_device_free; } pdev_entry->pdev = pdev; pdev_entry->cpu = cpu; mutex_lock(&pdev_list_mutex); list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); return 0; exit_device_free: kfree(pdev_entry); exit_device_put: platform_device_put(pdev); exit: return err; } #ifdef CONFIG_HOTPLUG_CPU static void via_cputemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { if (p->cpu == cpu) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } } mutex_unlock(&pdev_list_mutex); } static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long) hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: via_cputemp_device_add(cpu); break; case CPU_DOWN_PREPARE: via_cputemp_device_remove(cpu); break; } return NOTIFY_OK; } 
static struct notifier_block via_cputemp_cpu_notifier __refdata = { .notifier_call = via_cputemp_cpu_callback, }; #endif /* !CONFIG_HOTPLUG_CPU */ static int __init via_cputemp_init(void) { int i, err; struct pdev_entry *p, *n; if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) { printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n"); err = -ENODEV; goto exit; } err = platform_driver_register(&via_cputemp_driver); if (err) goto exit; for_each_online_cpu(i) { struct cpuinfo_x86 *c = &cpu_data(i); if (c->x86 != 6) continue; if (c->x86_model < 0x0a) continue; if (c->x86_model > 0x0f) { printk(KERN_WARNING DRVNAME ": Unknown CPU " "model 0x%x\n", c->x86_model); continue; } err = via_cputemp_device_add(i); if (err) goto exit_devices_unreg; } if (list_empty(&pdev_list)) { err = -ENODEV; goto exit_driver_unreg; } #ifdef CONFIG_HOTPLUG_CPU register_hotcpu_notifier(&via_cputemp_cpu_notifier); #endif return 0; exit_devices_unreg: mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); exit_driver_unreg: platform_driver_unregister(&via_cputemp_driver); exit: return err; } static void __exit via_cputemp_exit(void) { struct pdev_entry *p, *n; #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&via_cputemp_cpu_notifier); #endif mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); platform_driver_unregister(&via_cputemp_driver); } MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>"); MODULE_DESCRIPTION("VIA CPU temperature monitor"); MODULE_LICENSE("GPL"); module_init(via_cputemp_init) module_exit(via_cputemp_exit)
gpl-2.0
virtuous/kernel-7x30-gingerbread-v3
arch/mips/dec/kn02-irq.c
1620
1879
/* * DECstation 5000/200 (KN02) Control and Status Register * interrupts. * * Copyright (c) 2002, 2003, 2005 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/types.h> #include <asm/dec/kn02.h> /* * Bits 7:0 of the Control Register are write-only -- the * corresponding bits of the Status Register have a different * meaning. Hence we use a cache. It speeds up things a bit * as well. * * There is no default value -- it has to be initialized. */ u32 cached_kn02_csr; static int kn02_irq_base; static inline void unmask_kn02_irq(unsigned int irq) { volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); cached_kn02_csr |= (1 << (irq - kn02_irq_base + 16)); *csr = cached_kn02_csr; } static inline void mask_kn02_irq(unsigned int irq) { volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); cached_kn02_csr &= ~(1 << (irq - kn02_irq_base + 16)); *csr = cached_kn02_csr; } static void ack_kn02_irq(unsigned int irq) { mask_kn02_irq(irq); iob(); } static struct irq_chip kn02_irq_type = { .name = "KN02-CSR", .ack = ack_kn02_irq, .mask = mask_kn02_irq, .mask_ack = ack_kn02_irq, .unmask = unmask_kn02_irq, }; void __init init_kn02_irqs(int base) { volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); int i; /* Mask interrupts. */ cached_kn02_csr &= ~KN02_CSR_IOINTEN; *csr = cached_kn02_csr; iob(); for (i = base; i < base + KN02_IRQ_LINES; i++) set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq); kn02_irq_base = base; }
gpl-2.0
StefanescuCristian/ubuntu-bfsq
sound/usb/usx2y/usbusx2y.c
1876
13540
/* * usbusy2y.c - ALSA USB US-428 Driver * 2005-04-14 Karsten Wiese Version 0.8.7.2: Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom. Tested ok with kernel 2.6.12-rc2. 2004-12-14 Karsten Wiese Version 0.8.7.1: snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open. 2004-12-02 Karsten Wiese Version 0.8.7: Use macro usb_maxpacket() for portability. 2004-10-26 Karsten Wiese Version 0.8.6: wake_up() process waiting in usX2Y_urbs_start() on error. 2004-10-21 Karsten Wiese Version 0.8.5: nrpacks is runtime or compiletime configurable now with tested values from 1 to 4. 2004-10-03 Karsten Wiese Version 0.8.2: Avoid any possible racing while in prepare callback. 2004-09-30 Karsten Wiese Version 0.8.0: Simplified things and made ohci work again. 2004-09-20 Karsten Wiese Version 0.7.3: Use usb_kill_urb() instead of deprecated (kernel 2.6.9) usb_unlink_urb(). 2004-07-13 Karsten Wiese Version 0.7.1: Don't sleep in START/STOP callbacks anymore. us428 channels C/D not handled just for this version, sorry. 2004-06-21 Karsten Wiese Version 0.6.4: Temporarely suspend midi input to sanely call usb_set_interface() when setting format. 2004-06-12 Karsten Wiese Version 0.6.3: Made it thus the following rule is enforced: "All pcm substreams of one usX2Y have to operate at the same rate & format." 2004-04-06 Karsten Wiese Version 0.6.0: Runs on 2.6.5 kernel without any "--with-debug=" things. us224 reported running. 2004-01-14 Karsten Wiese Version 0.5.1: Runs with 2.6.1 kernel. 2003-12-30 Karsten Wiese Version 0.4.1: Fix 24Bit 4Channel capturing for the us428. 2003-11-27 Karsten Wiese, Martin Langer Version 0.4: us122 support. us224 could be tested by uncommenting the sections containing USB_ID_US224 2003-11-03 Karsten Wiese Version 0.3: 24Bit support. "arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works. 
2003-08-22 Karsten Wiese Version 0.0.8: Removed EZUSB Firmware. First Stage Firmwaredownload is now done by tascam-firmware downloader. See: http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz 2003-06-18 Karsten Wiese Version 0.0.5: changed to compile with kernel 2.4.21 and alsa 0.9.4 2002-10-16 Karsten Wiese Version 0.0.4: compiles again with alsa-current. USB_ISO_ASAP not used anymore (most of the time), instead urb->start_frame is calculated here now, some calls inside usb-driver don't need to happen anymore. To get the best out of this: Disable APM-support in the kernel as APM-BIOS calls (once each second) hard disable interrupt for many precious milliseconds. This helped me much on my slowish PII 400 & PIII 500. ACPI yet untested but might cause the same bad behaviour. Use a kernel with lowlatency and preemptiv patches applied. To autoload snd-usb-midi append a line post-install snd-usb-us428 modprobe snd-usb-midi to /etc/modules.conf. known problems: sliders, knobs, lights not yet handled except MASTER Volume slider. "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does. KDE3: "Enable full duplex operation" deadlocks. 2002-08-31 Karsten Wiese Version 0.0.3: audio also simplex; simplifying: iso urbs only 1 packet, melted structs. ASYNC_UNLINK not used anymore: no more crashes so far..... for alsa 0.9 rc3. 2002-08-09 Karsten Wiese Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex now) with i.e. bristol. The firmware has been sniffed from win2k us-428 driver 3.09. * Copyright (c) 2002 - 2004 Karsten Wiese * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include "usx2y.h" #include "usbusx2y.h" #include "usX2Yhwdep.h" MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604),"NAME_ALLCAPS"(0x8001)(0x8005)(0x8007)}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS"."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS"."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS"."); static int snd_usX2Y_card_used[SNDRV_CARDS]; static void usX2Y_usb_disconnect(struct usb_device* usb_device, void* ptr); static void snd_usX2Y_card_private_free(struct snd_card *card); /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... 
 */
/* Completion handler for the async bulk-out urbs on pipe 4 (lamps, sample
 * rate, volume commands).  Only logs failures when ALSA debug support is
 * compiled in; otherwise it is intentionally a no-op. */
static void i_usX2Y_Out04Int(struct urb *urb)
{
#ifdef CONFIG_SND_DEBUG
	if (urb->status) {
		int i;
		struct usX2Ydev *usX2Y = urb->context;
		/* Find which of the AS04 urbs completed (for the log line only). */
		for (i = 0; i < 10 && usX2Y->AS04.urb[i] != urb; i++);
		snd_printdd("i_usX2Y_Out04Int() urb %i status=%i\n", i, urb->status);
	}
#endif
}

/* Completion handler for the 21-byte interrupt-in urb on pipe 4.
 * Diffs the new control state against the last one, publishes changes into
 * the us428ctls shared-memory ring for userspace, and pushes any pending
 * outgoing command (US04 sequence or p4out entry) to the device.
 * Runs in interrupt context: all submissions use GFP_ATOMIC. */
static void i_usX2Y_In04Int(struct urb *urb)
{
	int err = 0;
	struct usX2Ydev *usX2Y = urb->context;
	struct us428ctls_sharedmem *us428ctls = usX2Y->us428ctls_sharedmem;

	usX2Y->In04IntCalls++;

	if (urb->status) {
		snd_printdd("Interrupt Pipe 4 came back with status=%i\n", urb->status);
		return;
	}

	//	printk("%i:0x%02X ", 8, (int)((unsigned char*)usX2Y->In04Buf)[8]); Master volume shows 0 here if fader is at max during boot ?!?
	if (us428ctls) {
		int diff = -1;
		if (-2 == us428ctls->CtlSnapShotLast) {
			/* First interrupt after sharedmem setup: take a full
			 * snapshot (diff = 0 marks "everything new"). */
			diff = 0;
			memcpy(usX2Y->In04Last, usX2Y->In04Buf, sizeof(usX2Y->In04Last));
			us428ctls->CtlSnapShotLast = -1;
		} else {
			int i;
			/* Compare against the previous 21-byte state and
			 * remember the first byte that changed. */
			for (i = 0; i < 21; i++) {
				if (usX2Y->In04Last[i] != ((char*)usX2Y->In04Buf)[i]) {
					if (diff < 0)
						diff = i;
					usX2Y->In04Last[i] = ((char*)usX2Y->In04Buf)[i];
				}
			}
		}
		if (0 <= diff) {
			/* Publish the changed state into the next ring slot and
			 * wake any reader waiting on control snapshots. */
			int n = us428ctls->CtlSnapShotLast + 1;
			if (n >= N_us428_ctl_BUFS || n < 0)
				n = 0;
			memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf,
			       sizeof(us428ctls->CtlSnapShot[0]));
			us428ctls->CtlSnapShotDiffersAt[n] = diff;
			us428ctls->CtlSnapShotLast = n;
			wake_up(&usX2Y->us428ctls_wait_queue_head);
		}
	}

	if (usX2Y->US04) {
		/* A queued command sequence (US04) is pending: submit its urbs
		 * back to back until done or an error occurs. */
		if (0 == usX2Y->US04->submitted)
			do {
				err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC);
			} while (!err && usX2Y->US04->submitted < usX2Y->US04->len);
	} else if (us428ctls && us428ctls->p4outLast >= 0 && us428ctls->p4outLast < N_us428_p4out_BUFS) {
		if (us428ctls->p4outLast != us428ctls->p4outSent) {
			int j, send = us428ctls->p4outSent + 1;
			if (send >= N_us428_p4out_BUFS)
				send = 0;
			/* Use the first idle async-out urb to ship it. */
			for (j = 0; j < URBS_AsyncSeq && !err; ++j)
				if (0 == usX2Y->AS04.urb[j]->status) {
					struct us428_p4out *p4out = us428ctls->p4out + send;	// FIXME if more than 1 p4out is new, 1 gets lost.
					usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->dev,
							  usb_sndbulkpipe(usX2Y->dev, 0x04), &p4out->val.vol,
							  p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5,
							  i_usX2Y_Out04Int, usX2Y);
					err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC);
					us428ctls->p4outSent = send;
					break;
				}
		}
	}

	if (err)
		snd_printk(KERN_ERR "In04Int() usb_submit_urb err=%i\n", err);

	/* Re-arm the interrupt-in urb so we keep receiving control changes. */
	urb->dev = usX2Y->dev;
	usb_submit_urb(urb, GFP_ATOMIC);
}

/*
 * Prepare some urbs
 *
 * Allocate one shared buffer and URBS_AsyncSeq bulk-out urbs for the
 * asynchronous command sequences on endpoint 0x04.
 * Returns 0 on success or -ENOMEM; partially allocated urbs are left in
 * place for usX2Y_unlinkSeq() to release.
 */
int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y)
{
	int err = 0, i;

	if (NULL == (usX2Y->AS04.buffer = kmalloc(URB_DataLen_AsyncSeq*URBS_AsyncSeq, GFP_KERNEL))) {
		err = -ENOMEM;
	} else
		for (i = 0; i < URBS_AsyncSeq; ++i) {
			if (NULL == (usX2Y->AS04.urb[i] = usb_alloc_urb(0, GFP_KERNEL))) {
				err = -ENOMEM;
				break;
			}
			/* Transfer length 0 here; the real length is filled in
			 * per send by i_usX2Y_In04Int(). */
			usb_fill_bulk_urb(usX2Y->AS04.urb[i], usX2Y->dev,
					  usb_sndbulkpipe(usX2Y->dev, 0x04),
					  usX2Y->AS04.buffer + URB_DataLen_AsyncSeq*i, 0,
					  i_usX2Y_Out04Int, usX2Y);
		}
	return err;
}

/* Allocate and submit the 21-byte interrupt-in urb on endpoint 0x04.
 * Returns 0 or a negative errno. */
int usX2Y_In04_init(struct usX2Ydev *usX2Y)
{
	if (! (usX2Y->In04urb = usb_alloc_urb(0, GFP_KERNEL)))
		return -ENOMEM;

	if (!
	    (usX2Y->In04Buf = kmalloc(21, GFP_KERNEL))) {
		usb_free_urb(usX2Y->In04urb);
		return -ENOMEM;
	}

	init_waitqueue_head(&usX2Y->In04WaitQueue);
	/* Poll interrupt endpoint 4 every 10 ms. */
	usb_fill_int_urb(usX2Y->In04urb, usX2Y->dev, usb_rcvintpipe(usX2Y->dev, 0x4),
			 usX2Y->In04Buf, 21,
			 i_usX2Y_In04Int, usX2Y,
			 10);
	return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
}

/* Kill, free and forget all urbs of an async sequence, then release its
 * shared transfer buffer. */
static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S)
{
	int i;
	for (i = 0; i < URBS_AsyncSeq; ++i) {
		usb_kill_urb(S->urb[i]);
		usb_free_urb(S->urb[i]);
		S->urb[i] = NULL;
	}
	kfree(S->buffer);
}

/* TASCAM devices handled by this driver: US-428, US-122, US-224. */
static struct usb_device_id snd_usX2Y_usb_id_table[] = {
	{
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x1604,
		.idProduct =	USB_ID_US428
	},
	{
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x1604,
		.idProduct =	USB_ID_US122
	},
	{
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x1604,
		.idProduct =	USB_ID_US224
	},
	{ /* terminator */ }
};

/* Create the snd_card for a newly probed device.
 * Picks the first enabled, unused card slot; returns -ENODEV when all
 * slots are taken.  On success *cardp holds the new card and the slot is
 * marked used (released again in snd_usX2Y_card_private_free()). */
static int usX2Y_create_card(struct usb_device *device,
			     struct usb_interface *intf,
			     struct snd_card **cardp)
{
	int dev;
	struct snd_card *card;
	int err;

	for (dev = 0; dev < SNDRV_CARDS; ++dev)
		if (enable[dev] && !snd_usX2Y_card_used[dev])
			break;
	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE,
			   sizeof(struct usX2Ydev), &card);
	if (err < 0)
		return err;
	/* Reserve the slot and remember it for release on private_free. */
	snd_usX2Y_card_used[usX2Y(card)->card_index = dev] = 1;
	card->private_free = snd_usX2Y_card_private_free;
	usX2Y(card)->dev = device;
	init_waitqueue_head(&usX2Y(card)->prepare_wait_queue);
	mutex_init(&usX2Y(card)->pcm_mutex);
	INIT_LIST_HEAD(&usX2Y(card)->midi_list);
	strcpy(card->driver, "USB "NAME_ALLCAPS"");
	sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
	sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)",
		card->shortname,
		le16_to_cpu(device->descriptor.idVendor),
		le16_to_cpu(device->descriptor.idProduct),
		0,//us428(card)->usbmidi.ifnum,
		usX2Y(card)->dev->bus->busnum, usX2Y(card)->dev->devnum
		);
	*cardp = card;
	return 0;
}

/* Validate the vendor/product id, create the card and register it.
 * Continues on the next line with the remaining parameters. */
static int usX2Y_usb_probe(struct usb_device *device, struct usb_interface
			   *intf, const struct usb_device_id *device_id,
			   struct snd_card **cardp)
{
	int err;
	struct snd_card *card;

	*cardp = NULL;
	/* Accept only the known TASCAM vendor/product ids. */
	if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
	    (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
	     le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
	     le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428))
		return -EINVAL;

	err = usX2Y_create_card(device, intf, &card);
	if (err < 0)
		return err;
	/* snd_card_free() also drops the slot via private_free. */
	if ((err = usX2Y_hwdep_new(card, device)) < 0 ||
	    (err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	*cardp = card;
	return 0;
}

/*
 * new 2.5 USB kernel API
 */
static int snd_usX2Y_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	struct snd_card *card;
	int err;

	err = usX2Y_usb_probe(interface_to_usbdev(intf), intf, id, &card);
	if (err < 0)
		return err;
	dev_set_drvdata(&intf->dev, card);
	return 0;
}

static void snd_usX2Y_disconnect(struct usb_interface *intf)
{
	usX2Y_usb_disconnect(interface_to_usbdev(intf),
			     usb_get_intfdata(intf));
}

MODULE_DEVICE_TABLE(usb, snd_usX2Y_usb_id_table);
static struct usb_driver snd_usX2Y_usb_driver = {
	.name =		"snd-usb-usx2y",
	.probe =	snd_usX2Y_probe,
	.disconnect =	snd_usX2Y_disconnect,
	.id_table =	snd_usX2Y_usb_id_table,
};

/* snd_card private_free callback: release the interrupt buffer/urb, the
 * shared control memory, and the card-slot reservation taken in
 * usX2Y_create_card(). */
static void snd_usX2Y_card_private_free(struct snd_card *card)
{
	kfree(usX2Y(card)->In04Buf);
	usb_free_urb(usX2Y(card)->In04urb);
	if (usX2Y(card)->us428ctls_sharedmem)
		snd_free_pages(usX2Y(card)->us428ctls_sharedmem,
			       sizeof(*usX2Y(card)->us428ctls_sharedmem));
	if (usX2Y(card)->card_index >= 0 && usX2Y(card)->card_index < SNDRV_CARDS)
		snd_usX2Y_card_used[usX2Y(card)->card_index] = 0;
}

/*
 * Frees the device.
 */
static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr)
{
	if (ptr) {
		struct snd_card *card = ptr;
		struct usX2Ydev *usX2Y = usX2Y(card);
		struct list_head *p;

		/* Stop new activity first: mark the chip gone, kill the
		 * async-out and interrupt-in urbs, detach the card. */
		usX2Y->chip_status = USX2Y_STAT_CHIP_HUP;
		usX2Y_unlinkSeq(&usX2Y->AS04);
		usb_kill_urb(usX2Y->In04urb);
		snd_card_disconnect(card);
		/* release the midi resources */
		list_for_each(p, &usX2Y->midi_list) {
			snd_usbmidi_disconnect(p);
		}
		/* Wake any sleeper on the control snapshots so it can notice
		 * the disconnect instead of blocking forever. */
		if (usX2Y->us428ctls_sharedmem)
			wake_up(&usX2Y->us428ctls_wait_queue_head);
		snd_card_free(card);
	}
}

module_usb_driver(snd_usX2Y_usb_driver);
gpl-2.0
MoKee/android_kernel_lge_iproj
drivers/media/dvb/frontends/cx24116.c
2388
41138
/* Conexant cx24116/cx24118 - DVBS/S2 Satellite demod/tuner driver Copyright (C) 2006-2008 Steven Toth <stoth@hauppauge.com> Copyright (C) 2006-2007 Georg Acher Copyright (C) 2007-2008 Darron Broad March 2007 Fixed some bugs. Added diseqc support. Added corrected signal strength support. August 2007 Sync with legacy version. Some clean ups. Copyright (C) 2008 Igor Liplianin September, 9th 2008 Fixed locking on high symbol rates (>30000). Implement MPEG initialization parameter. January, 17th 2009 Fill set_voltage with actually control voltage code. Correct set tone to not affect voltage. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/firmware.h> #include "dvb_frontend.h" #include "cx24116.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); #define dprintk(args...) \ do { \ if (debug) \ printk(KERN_INFO "cx24116: " args); \ } while (0) #define CX24116_DEFAULT_FIRMWARE "dvb-fe-cx24116.fw" #define CX24116_SEARCH_RANGE_KHZ 5000 /* known registers */ #define CX24116_REG_COMMAND (0x00) /* command args 0x00..0x1e */ #define CX24116_REG_EXECUTE (0x1f) /* execute command */ #define CX24116_REG_MAILBOX (0x96) /* FW or multipurpose mailbox? 
*/ #define CX24116_REG_RESET (0x20) /* reset status > 0 */ #define CX24116_REG_SIGNAL (0x9e) /* signal low */ #define CX24116_REG_SSTATUS (0x9d) /* signal high / status */ #define CX24116_REG_QUALITY8 (0xa3) #define CX24116_REG_QSTATUS (0xbc) #define CX24116_REG_QUALITY0 (0xd5) #define CX24116_REG_BER0 (0xc9) #define CX24116_REG_BER8 (0xc8) #define CX24116_REG_BER16 (0xc7) #define CX24116_REG_BER24 (0xc6) #define CX24116_REG_UCB0 (0xcb) #define CX24116_REG_UCB8 (0xca) #define CX24116_REG_CLKDIV (0xf3) #define CX24116_REG_RATEDIV (0xf9) /* configured fec (not tuned) or actual FEC (tuned) 1=1/2 2=2/3 etc */ #define CX24116_REG_FECSTATUS (0x9c) /* FECSTATUS bits */ /* mask to determine configured fec (not tuned) or actual fec (tuned) */ #define CX24116_FEC_FECMASK (0x1f) /* Select DVB-S demodulator, else DVB-S2 */ #define CX24116_FEC_DVBS (0x20) #define CX24116_FEC_UNKNOWN (0x40) /* Unknown/unused */ /* Pilot mode requested when tuning else always reset when tuned */ #define CX24116_FEC_PILOT (0x80) /* arg buffer size */ #define CX24116_ARGLEN (0x1e) /* rolloff */ #define CX24116_ROLLOFF_020 (0x00) #define CX24116_ROLLOFF_025 (0x01) #define CX24116_ROLLOFF_035 (0x02) /* pilot bit */ #define CX24116_PILOT_OFF (0x00) #define CX24116_PILOT_ON (0x40) /* signal status */ #define CX24116_HAS_SIGNAL (0x01) #define CX24116_HAS_CARRIER (0x02) #define CX24116_HAS_VITERBI (0x04) #define CX24116_HAS_SYNCLOCK (0x08) #define CX24116_HAS_UNKNOWN1 (0x10) #define CX24116_HAS_UNKNOWN2 (0x20) #define CX24116_STATUS_MASK (0x0f) #define CX24116_SIGNAL_MASK (0xc0) #define CX24116_DISEQC_TONEOFF (0) /* toneburst never sent */ #define CX24116_DISEQC_TONECACHE (1) /* toneburst cached */ #define CX24116_DISEQC_MESGCACHE (2) /* message cached */ /* arg offset for DiSEqC */ #define CX24116_DISEQC_BURST (1) #define CX24116_DISEQC_ARG2_2 (2) /* unknown value=2 */ #define CX24116_DISEQC_ARG3_0 (3) /* unknown value=0 */ #define CX24116_DISEQC_ARG4_0 (4) /* unknown value=0 */ #define 
CX24116_DISEQC_MSGLEN (5) #define CX24116_DISEQC_MSGOFS (6) /* DiSEqC burst */ #define CX24116_DISEQC_MINI_A (0) #define CX24116_DISEQC_MINI_B (1) /* DiSEqC tone burst */ static int toneburst = 1; module_param(toneburst, int, 0644); MODULE_PARM_DESC(toneburst, "DiSEqC toneburst 0=OFF, 1=TONE CACHE, "\ "2=MESSAGE CACHE (default:1)"); /* SNR measurements */ static int esno_snr; module_param(esno_snr, int, 0644); MODULE_PARM_DESC(esno_snr, "SNR return units, 0=PERCENTAGE 0-100, "\ "1=ESNO(db * 10) (default:0)"); enum cmds { CMD_SET_VCO = 0x10, CMD_TUNEREQUEST = 0x11, CMD_MPEGCONFIG = 0x13, CMD_TUNERINIT = 0x14, CMD_BANDWIDTH = 0x15, CMD_GETAGC = 0x19, CMD_LNBCONFIG = 0x20, CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */ CMD_LNBDCLEVEL = 0x22, CMD_SET_TONE = 0x23, CMD_UPDFWVERS = 0x35, CMD_TUNERSLEEP = 0x36, CMD_AGCCONTROL = 0x3b, /* Unknown */ }; /* The Demod/Tuner can't easily provide these, we cache them */ struct cx24116_tuning { u32 frequency; u32 symbol_rate; fe_spectral_inversion_t inversion; fe_code_rate_t fec; fe_delivery_system_t delsys; fe_modulation_t modulation; fe_pilot_t pilot; fe_rolloff_t rolloff; /* Demod values */ u8 fec_val; u8 fec_mask; u8 inversion_val; u8 pilot_val; u8 rolloff_val; }; /* Basic commands that are sent to the firmware */ struct cx24116_cmd { u8 len; u8 args[CX24116_ARGLEN]; }; struct cx24116_state { struct i2c_adapter *i2c; const struct cx24116_config *config; struct dvb_frontend frontend; struct cx24116_tuning dcur; struct cx24116_tuning dnxt; u8 skip_fw_load; u8 burst; struct cx24116_cmd dsec_cmd; }; static int cx24116_writereg(struct cx24116_state *state, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; int err; if (debug > 1) printk("cx24116: %s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data); err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x," " value == 
0x%02x)\n", __func__, err, reg, data); return -EREMOTEIO; } return 0; } /* Bulk byte writes to a single I2C address, for 32k firmware load */ static int cx24116_writeregN(struct cx24116_state *state, int reg, const u8 *data, u16 len) { int ret = -EREMOTEIO; struct i2c_msg msg; u8 *buf; buf = kmalloc(len + 1, GFP_KERNEL); if (buf == NULL) { printk("Unable to kmalloc\n"); ret = -ENOMEM; goto error; } *(buf) = reg; memcpy(buf + 1, data, len); msg.addr = state->config->demod_address; msg.flags = 0; msg.buf = buf; msg.len = len + 1; if (debug > 1) printk(KERN_INFO "cx24116: %s: write regN 0x%02x, len = %d\n", __func__, reg, len); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x\n", __func__, ret, reg); ret = -EREMOTEIO; } error: kfree(buf); return ret; } static int cx24116_readreg(struct cx24116_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_ERR "%s: reg=0x%x (error=%d)\n", __func__, reg, ret); return ret; } if (debug > 1) printk(KERN_INFO "cx24116: read reg 0x%02x, value 0x%02x\n", reg, b1[0]); return b1[0]; } static int cx24116_set_inversion(struct cx24116_state *state, fe_spectral_inversion_t inversion) { dprintk("%s(%d)\n", __func__, inversion); switch (inversion) { case INVERSION_OFF: state->dnxt.inversion_val = 0x00; break; case INVERSION_ON: state->dnxt.inversion_val = 0x04; break; case INVERSION_AUTO: state->dnxt.inversion_val = 0x0C; break; default: return -EINVAL; } state->dnxt.inversion = inversion; return 0; } /* * modfec (modulation and FEC) * =========================== * * MOD FEC mask/val standard * ---- -------- ----------- -------- * QPSK FEC_1_2 0x02 0x02+X DVB-S * QPSK FEC_2_3 0x04 0x02+X DVB-S * QPSK FEC_3_4 0x08 
0x02+X DVB-S * QPSK FEC_4_5 0x10 0x02+X DVB-S (?) * QPSK FEC_5_6 0x20 0x02+X DVB-S * QPSK FEC_6_7 0x40 0x02+X DVB-S * QPSK FEC_7_8 0x80 0x02+X DVB-S * QPSK FEC_8_9 0x01 0x02+X DVB-S (?) (NOT SUPPORTED?) * QPSK AUTO 0xff 0x02+X DVB-S * * For DVB-S high byte probably represents FEC * and low byte selects the modulator. The high * byte is search range mask. Bit 5 may turn * on DVB-S and remaining bits represent some * kind of calibration (how/what i do not know). * * Eg.(2/3) szap "Zone Horror" * * mask/val = 0x04, 0x20 * status 1f | signal c3c0 | snr a333 | ber 00000098 | unc 0 | FE_HAS_LOCK * * mask/val = 0x04, 0x30 * status 1f | signal c3c0 | snr a333 | ber 00000000 | unc 0 | FE_HAS_LOCK * * After tuning FECSTATUS contains actual FEC * in use numbered 1 through to 8 for 1/2 .. 2/3 etc * * NBC=NOT/NON BACKWARD COMPATIBLE WITH DVB-S (DVB-S2 only) * * NBC-QPSK FEC_1_2 0x00, 0x04 DVB-S2 * NBC-QPSK FEC_3_5 0x00, 0x05 DVB-S2 * NBC-QPSK FEC_2_3 0x00, 0x06 DVB-S2 * NBC-QPSK FEC_3_4 0x00, 0x07 DVB-S2 * NBC-QPSK FEC_4_5 0x00, 0x08 DVB-S2 * NBC-QPSK FEC_5_6 0x00, 0x09 DVB-S2 * NBC-QPSK FEC_8_9 0x00, 0x0a DVB-S2 * NBC-QPSK FEC_9_10 0x00, 0x0b DVB-S2 * * NBC-8PSK FEC_3_5 0x00, 0x0c DVB-S2 * NBC-8PSK FEC_2_3 0x00, 0x0d DVB-S2 * NBC-8PSK FEC_3_4 0x00, 0x0e DVB-S2 * NBC-8PSK FEC_5_6 0x00, 0x0f DVB-S2 * NBC-8PSK FEC_8_9 0x00, 0x10 DVB-S2 * NBC-8PSK FEC_9_10 0x00, 0x11 DVB-S2 * * For DVB-S2 low bytes selects both modulator * and FEC. High byte is meaningless here. To * set pilot, bit 6 (0x40) is set. When inspecting * FECSTATUS bit 7 (0x80) represents the pilot * selection whilst not tuned. When tuned, actual FEC * in use is found in FECSTATUS as per above. Pilot * value is reset. */ /* A table of modulation, fec and configuration bytes for the demod. * Not all S2 mmodulation schemes are support and not all rates with * a scheme are support. Especially, no auto detect when in S2 mode. 
*/ static struct cx24116_modfec { fe_delivery_system_t delivery_system; fe_modulation_t modulation; fe_code_rate_t fec; u8 mask; /* In DVBS mode this is used to autodetect */ u8 val; /* Passed to the firmware to indicate mode selection */ } CX24116_MODFEC_MODES[] = { /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */ /*mod fec mask val */ { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 }, { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */ { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */ { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */ { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */ { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */ { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */ { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */ { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */ { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 }, /* NBC-QPSK */ { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 }, { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 }, { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 }, { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 }, { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 }, { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 }, { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a }, { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b }, /* 8PSK */ { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c }, { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d }, { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e }, { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f }, { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 }, { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 }, /* * `val' can be found in the FECSTATUS register when tuning. * FECSTATUS will give the actual FEC in use if tuning was successful. 
 */
};

/* Map (delivery system, modulation, fec) to an index into
 * CX24116_MODFEC_MODES[], or -EOPNOTSUPP when the combination is not in
 * the table. */
static int cx24116_lookup_fecmod(struct cx24116_state *state,
	fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f)
{
	int i, ret = -EOPNOTSUPP;

	dprintk("%s(0x%02x,0x%02x)\n", __func__, m, f);

	for (i = 0; i < ARRAY_SIZE(CX24116_MODFEC_MODES); i++) {
		if ((d == CX24116_MODFEC_MODES[i].delivery_system) &&
		    (m == CX24116_MODFEC_MODES[i].modulation) &&
		    (f == CX24116_MODFEC_MODES[i].fec)) {
			ret = i;
			break;
		}
	}

	return ret;
}

/* Cache the demod mask/val bytes for the requested FEC in dnxt (the
 * "next tuning" state); nothing is written to the hardware here. */
static int cx24116_set_fec(struct cx24116_state *state,
	fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec)
{
	int ret = 0;

	dprintk("%s(0x%02x,0x%02x)\n", __func__, mod, fec);

	ret = cx24116_lookup_fecmod(state, delsys, mod, fec);

	if (ret < 0)
		return ret;

	state->dnxt.fec = fec;
	state->dnxt.fec_val = CX24116_MODFEC_MODES[ret].val;
	state->dnxt.fec_mask = CX24116_MODFEC_MODES[ret].mask;
	dprintk("%s() mask/val = 0x%02x/0x%02x\n", __func__,
		state->dnxt.fec_mask, state->dnxt.fec_val);

	return 0;
}

/* Validate the symbol rate against the frontend limits and cache it in
 * dnxt; -EOPNOTSUPP when out of range. */
static int cx24116_set_symbolrate(struct cx24116_state *state, u32 rate)
{
	dprintk("%s(%d)\n", __func__, rate);

	/*  check if symbol rate is within limits */
	if ((rate > state->frontend.ops.info.symbol_rate_max) ||
	    (rate < state->frontend.ops.info.symbol_rate_min)) {
		dprintk("%s() unsupported symbol_rate = %d\n", __func__, rate);
		return -EOPNOTSUPP;
	}

	state->dnxt.symbol_rate = rate;
	dprintk("%s() symbol_rate = %d\n", __func__, rate);

	return 0;
}

static int cx24116_load_firmware(struct dvb_frontend *fe,
	const struct firmware *fw);

/* Upload the firmware on first use, i.e. when register 0x20
 * (CX24116_REG_RESET) reports the demod still needs it.  skip_fw_load
 * guards against recursion, since the upload itself issues register
 * accesses that route back through here. */
static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
{
	struct cx24116_state *state = fe->demodulator_priv;
	const struct firmware *fw;
	int ret = 0;

	dprintk("%s()\n", __func__);

	if (cx24116_readreg(state, 0x20) > 0) {

		if (state->skip_fw_load)
			return 0;

		/* Load firmware */
		/* request the firmware, this will block until loaded */
		printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n",
			__func__, CX24116_DEFAULT_FIRMWARE);
		ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE,
			state->i2c->dev.parent);
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__); if (ret) { printk(KERN_ERR "%s: No firmware uploaded " "(timeout or file not found?)\n", __func__); return ret; } /* Make sure we don't recurse back through here * during loading */ state->skip_fw_load = 1; ret = cx24116_load_firmware(fe, fw); if (ret) printk(KERN_ERR "%s: Writing firmware to device failed\n", __func__); release_firmware(fw); printk(KERN_INFO "%s: Firmware upload %s\n", __func__, ret == 0 ? "complete" : "failed"); /* Ensure firmware is always loaded if required */ state->skip_fw_load = 0; } return ret; } /* Take a basic firmware command structure, format it * and forward it for processing */ static int cx24116_cmd_execute(struct dvb_frontend *fe, struct cx24116_cmd *cmd) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; dprintk("%s()\n", __func__); /* Load the firmware if required */ ret = cx24116_firmware_ondemand(fe); if (ret != 0) { printk(KERN_ERR "%s(): Unable initialise the firmware\n", __func__); return ret; } /* Write the command */ for (i = 0; i < cmd->len ; i++) { dprintk("%s: 0x%02x == 0x%02x\n", __func__, i, cmd->args[i]); cx24116_writereg(state, i, cmd->args[i]); } /* Start execution and wait for cmd to terminate */ cx24116_writereg(state, CX24116_REG_EXECUTE, 0x01); while (cx24116_readreg(state, CX24116_REG_EXECUTE)) { msleep(10); if (i++ > 64) { /* Avoid looping forever if the firmware does not respond */ printk(KERN_WARNING "%s() Firmware not responding\n", __func__); return -EREMOTEIO; } } return 0; } static int cx24116_load_firmware(struct dvb_frontend *fe, const struct firmware *fw) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int i, ret, len, max, remaining; unsigned char vers[4]; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes (%02x %02x .. 
%02x %02x)\n", fw->size, fw->data[0], fw->data[1], fw->data[fw->size-2], fw->data[fw->size-1]); /* Toggle 88x SRST pin to reset demod */ if (state->config->reset_device) state->config->reset_device(fe); /* Begin the firmware load process */ /* Prepare the demod, load the firmware, cleanup after load */ /* Init PLL */ cx24116_writereg(state, 0xE5, 0x00); cx24116_writereg(state, 0xF1, 0x08); cx24116_writereg(state, 0xF2, 0x13); /* Start PLL */ cx24116_writereg(state, 0xe0, 0x03); cx24116_writereg(state, 0xe0, 0x00); /* Unknown */ cx24116_writereg(state, CX24116_REG_CLKDIV, 0x46); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00); /* Unknown */ cx24116_writereg(state, 0xF0, 0x03); cx24116_writereg(state, 0xF4, 0x81); cx24116_writereg(state, 0xF5, 0x00); cx24116_writereg(state, 0xF6, 0x00); /* Split firmware to the max I2C write len and write. * Writes whole firmware as one write when i2c_wr_max is set to 0. */ if (state->config->i2c_wr_max) max = state->config->i2c_wr_max; else max = INT_MAX; /* enough for 32k firmware */ for (remaining = fw->size; remaining > 0; remaining -= max - 1) { len = remaining; if (len > max - 1) len = max - 1; cx24116_writeregN(state, 0xF7, &fw->data[fw->size - remaining], len); } cx24116_writereg(state, 0xF4, 0x10); cx24116_writereg(state, 0xF0, 0x00); cx24116_writereg(state, 0xF8, 0x06); /* Firmware CMD 10: VCO config */ cmd.args[0x00] = CMD_SET_VCO; cmd.args[0x01] = 0x05; cmd.args[0x02] = 0xdc; cmd.args[0x03] = 0xda; cmd.args[0x04] = 0xae; cmd.args[0x05] = 0xaa; cmd.args[0x06] = 0x04; cmd.args[0x07] = 0x9d; cmd.args[0x08] = 0xfc; cmd.args[0x09] = 0x06; cmd.len = 0x0a; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; cx24116_writereg(state, CX24116_REG_SSTATUS, 0x00); /* Firmware CMD 14: Tuner config */ cmd.args[0x00] = CMD_TUNERINIT; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x00; cmd.len = 0x03; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; cx24116_writereg(state, 0xe5, 0x00); /* Firmware CMD 13: MPEG config 
*/ cmd.args[0x00] = CMD_MPEGCONFIG; cmd.args[0x01] = 0x01; cmd.args[0x02] = 0x75; cmd.args[0x03] = 0x00; if (state->config->mpg_clk_pos_pol) cmd.args[0x04] = state->config->mpg_clk_pos_pol; else cmd.args[0x04] = 0x02; cmd.args[0x05] = 0x00; cmd.len = 0x06; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Firmware CMD 35: Get firmware version */ cmd.args[0x00] = CMD_UPDFWVERS; cmd.len = 0x02; for (i = 0; i < 4; i++) { cmd.args[0x01] = i; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; vers[i] = cx24116_readreg(state, CX24116_REG_MAILBOX); } printk(KERN_INFO "%s: FW version %i.%i.%i.%i\n", __func__, vers[0], vers[1], vers[2], vers[3]); return 0; } static int cx24116_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct cx24116_state *state = fe->demodulator_priv; int lock = cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_STATUS_MASK; dprintk("%s: status = 0x%02x\n", __func__, lock); *status = 0; if (lock & CX24116_HAS_SIGNAL) *status |= FE_HAS_SIGNAL; if (lock & CX24116_HAS_CARRIER) *status |= FE_HAS_CARRIER; if (lock & CX24116_HAS_VITERBI) *status |= FE_HAS_VITERBI; if (lock & CX24116_HAS_SYNCLOCK) *status |= FE_HAS_SYNC | FE_HAS_LOCK; return 0; } static int cx24116_read_ber(struct dvb_frontend *fe, u32 *ber) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); *ber = (cx24116_readreg(state, CX24116_REG_BER24) << 24) | (cx24116_readreg(state, CX24116_REG_BER16) << 16) | (cx24116_readreg(state, CX24116_REG_BER8) << 8) | cx24116_readreg(state, CX24116_REG_BER0); return 0; } /* TODO Determine function and scale appropriately */ static int cx24116_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; u16 sig_reading; dprintk("%s()\n", __func__); /* Firmware CMD 19: Get AGC */ cmd.args[0x00] = CMD_GETAGC; cmd.len = 0x01; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; 
sig_reading = (cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK) | (cx24116_readreg(state, CX24116_REG_SIGNAL) << 6); *signal_strength = 0 - sig_reading; dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__, sig_reading, *signal_strength); return 0; } /* SNR (0..100)% = (sig & 0xf0) * 10 + (sig & 0x0f) * 10 / 16 */ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) { struct cx24116_state *state = fe->demodulator_priv; u8 snr_reading; static const u32 snr_tab[] = { /* 10 x Table (rounded up) */ 0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667, 0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667, 0x10000, 0x1199A, 0x13333, 0x14ccD, 0x16667, 0x18000 }; dprintk("%s()\n", __func__); snr_reading = cx24116_readreg(state, CX24116_REG_QUALITY0); if (snr_reading >= 0xa0 /* 100% */) *snr = 0xffff; else *snr = snr_tab[(snr_reading & 0xf0) >> 4] + (snr_tab[(snr_reading & 0x0f)] >> 4); dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__, snr_reading, *snr); return 0; } /* The reelbox patches show the value in the registers represents * ESNO, from 0->30db (values 0->300). We provide this value by * default. 
 */
static int cx24116_read_snr_esno(struct dvb_frontend *fe, u16 *snr)
{
	struct cx24116_state *state = fe->demodulator_priv;

	dprintk("%s()\n", __func__);

	/* 16-bit ESNO reading straight from the two quality registers. */
	*snr = cx24116_readreg(state, CX24116_REG_QUALITY8) << 8 |
		cx24116_readreg(state, CX24116_REG_QUALITY0);

	dprintk("%s: raw 0x%04x\n", __func__, *snr);

	return 0;
}

/* Dispatch on the esno_snr module parameter: 1 = ESNO (dB * 10),
 * otherwise percentage 0..100. */
static int cx24116_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	if (esno_snr == 1)
		return cx24116_read_snr_esno(fe, snr);
	else
		return cx24116_read_snr_pct(fe, snr);
}

/* Uncorrected-block counter, assembled from the two UCB registers. */
static int cx24116_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct cx24116_state *state = fe->demodulator_priv;

	dprintk("%s()\n", __func__);

	*ucblocks = (cx24116_readreg(state, CX24116_REG_UCB8) << 8) |
		cx24116_readreg(state, CX24116_REG_UCB0);

	return 0;
}

/* Overwrite the current tuning params, we are about to tune */
static void cx24116_clone_params(struct dvb_frontend *fe)
{
	struct cx24116_state *state = fe->demodulator_priv;
	memcpy(&state->dcur, &state->dnxt, sizeof(state->dcur));
}

/* Wait for LNB
 * Polls QSTATUS bit 0x20 for up to 300 ms; 0 when ready, -ETIMEDOUT
 * otherwise. */
static int cx24116_wait_for_lnb(struct dvb_frontend *fe)
{
	struct cx24116_state *state = fe->demodulator_priv;
	int i;

	dprintk("%s() qstatus = 0x%02x\n", __func__,
		cx24116_readreg(state, CX24116_REG_QSTATUS));

	/* Wait for up to 300 ms */
	for (i = 0; i < 30 ; i++) {
		if (cx24116_readreg(state, CX24116_REG_QSTATUS) & 0x20)
			return 0;
		msleep(10);
	}

	dprintk("%s(): LNB not ready\n", __func__);

	return -ETIMEDOUT; /* -EBUSY ? */
}

/* Drive the LNB DC level via firmware CMD 0x22; continues on the next
 * line with the voltage argument and command dispatch. */
static int cx24116_set_voltage(struct dvb_frontend *fe,
	fe_sec_voltage_t voltage)
{
	struct cx24116_cmd cmd;
	int ret;

	dprintk("%s: %s\n", __func__,
		voltage == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
		voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");

	/* Wait for LNB ready */
	ret = cx24116_wait_for_lnb(fe);
	if (ret != 0)
		return ret;

	/* Wait for voltage/min repeat delay */
	msleep(100);

	cmd.args[0x00] = CMD_LNBDCLEVEL;
	cmd.args[0x01] = (voltage == SEC_VOLTAGE_18 ?
0x01 : 0x00); cmd.len = 0x02; /* Min delay time before DiSEqC send */ msleep(15); return cx24116_cmd_execute(fe, &cmd); } static int cx24116_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct cx24116_cmd cmd; int ret; dprintk("%s(%d)\n", __func__, tone); if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) { printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone); return -EINVAL; } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Min delay time after DiSEqC send */ msleep(15); /* XXX determine is FW does this, see send_diseqc/burst */ /* Now we set the tone */ cmd.args[0x00] = CMD_SET_TONE; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x00; switch (tone) { case SEC_TONE_ON: dprintk("%s: setting tone on\n", __func__); cmd.args[0x03] = 0x01; break; case SEC_TONE_OFF: dprintk("%s: setting tone off\n", __func__); cmd.args[0x03] = 0x00; break; } cmd.len = 0x04; /* Min delay time before DiSEqC send */ msleep(15); /* XXX determine is FW does this, see send_diseqc/burst */ return cx24116_cmd_execute(fe, &cmd); } /* Initialise DiSEqC */ static int cx24116_diseqc_init(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; /* Firmware CMD 20: LNB/DiSEqC config */ cmd.args[0x00] = CMD_LNBCONFIG; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x10; cmd.args[0x03] = 0x00; cmd.args[0x04] = 0x8f; cmd.args[0x05] = 0x28; cmd.args[0x06] = (toneburst == CX24116_DISEQC_TONEOFF) ? 0x00 : 0x01; cmd.args[0x07] = 0x01; cmd.len = 0x08; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Prepare a DiSEqC command */ state->dsec_cmd.args[0x00] = CMD_LNBSEND; /* DiSEqC burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_A; /* Unknown */ state->dsec_cmd.args[CX24116_DISEQC_ARG2_2] = 0x02; state->dsec_cmd.args[CX24116_DISEQC_ARG3_0] = 0x00; /* Continuation flag? 
*/ state->dsec_cmd.args[CX24116_DISEQC_ARG4_0] = 0x00; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = 0x00; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS; return 0; } /* Send DiSEqC message with derived burst (hack) || previous burst */ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; /* Dump DiSEqC message */ if (debug) { printk(KERN_INFO "cx24116: %s(", __func__); for (i = 0 ; i < d->msg_len ;) { printk(KERN_INFO "0x%02x", d->msg[i]); if (++i < d->msg_len) printk(KERN_INFO ", "); } printk(") toneburst=%d\n", toneburst); } /* Validate length */ if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS)) return -EINVAL; /* DiSEqC message */ for (i = 0; i < d->msg_len; i++) state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i]; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS + state->dsec_cmd.args[CX24116_DISEQC_MSGLEN]; /* DiSEqC toneburst */ if (toneburst == CX24116_DISEQC_MESGCACHE) /* Message is cached */ return 0; else if (toneburst == CX24116_DISEQC_TONEOFF) /* Message is sent without burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0; else if (toneburst == CX24116_DISEQC_TONECACHE) { /* * Message is sent with derived else cached burst * * WRITE PORT GROUP COMMAND 38 * * 0/A/A: E0 10 38 F0..F3 * 1/B/B: E0 10 38 F4..F7 * 2/C/A: E0 10 38 F8..FB * 3/D/B: E0 10 38 FC..FF * * databyte[3]= 8421:8421 * ABCD:WXYZ * CLR :SET * * WX= PORT SELECT 0..3 (X=TONEBURST) * Y = VOLTAGE (0=13V, 1=18V) * Z = BAND (0=LOW, 1=HIGH(22K)) */ if (d->msg_len >= 4 && d->msg[2] == 0x38) state->dsec_cmd.args[CX24116_DISEQC_BURST] = ((d->msg[3] & 4) >> 2); if (debug) dprintk("%s burst=%d\n", __func__, state->dsec_cmd.args[CX24116_DISEQC_BURST]); } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return 
ret;

    /* Wait for voltage/min repeat delay */
    msleep(100);

    /* Command */
    ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
    if (ret != 0)
        return ret;

    /*
     * Wait for send
     *
     * Eutelsat spec:
     * >15ms delay          + (XXX determine if FW does this, see set_tone)
     *  13.5ms per byte     +
     * >15ms delay          +
     *  12.5ms burst        +
     * >15ms delay            (XXX determine if FW does this, see set_tone)
     */
    msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) +
        ((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60));

    return 0;
}

/* Send DiSEqC burst (mini-DiSEqC A/B tone burst) */
static int cx24116_diseqc_send_burst(struct dvb_frontend *fe,
    fe_sec_mini_cmd_t burst)
{
    struct cx24116_state *state = fe->demodulator_priv;
    int ret;

    dprintk("%s(%d) toneburst=%d\n", __func__, burst, toneburst);

    /* DiSEqC burst: record the requested burst in the cached command */
    if (burst == SEC_MINI_A)
        state->dsec_cmd.args[CX24116_DISEQC_BURST] =
            CX24116_DISEQC_MINI_A;
    else if (burst == SEC_MINI_B)
        state->dsec_cmd.args[CX24116_DISEQC_BURST] =
            CX24116_DISEQC_MINI_B;
    else
        return -EINVAL;

    /* DiSEqC toneburst */
    if (toneburst != CX24116_DISEQC_MESGCACHE)
        /* Burst is cached */
        return 0;

    /* Burst is to be sent with cached message */

    /* Wait for LNB ready */
    ret = cx24116_wait_for_lnb(fe);
    if (ret != 0)
        return ret;

    /* Wait for voltage/min repeat delay */
    msleep(100);

    /* Command */
    ret = cx24116_cmd_execute(fe, &state->dsec_cmd);
    if (ret != 0)
        return ret;

    /*
     * Wait for send
     *
     * Eutelsat spec:
     * >15ms delay          + (XXX determine if FW does this, see set_tone)
     *  13.5ms per byte     +
     * >15ms delay          +
     *  12.5ms burst        +
     * >15ms delay            (XXX determine if FW does this, see set_tone)
     */
    msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + 60);

    return 0;
}

/* Free the demodulator private state on frontend teardown. */
static void cx24116_release(struct dvb_frontend *fe)
{
    struct cx24116_state *state = fe->demodulator_priv;
    dprintk("%s\n", __func__);
    kfree(state);
}

static struct dvb_frontend_ops cx24116_ops;

/*
 * Probe for a CX24116 on the given I2C adapter and allocate a frontend.
 * Returns a pointer to the embedded dvb_frontend on success, NULL on
 * allocation failure or when the chip ID registers do not match.
 */
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
    struct i2c_adapter *i2c)
{
    struct cx24116_state *state = NULL;
    int ret;

    dprintk("%s\n", __func__);

    /* allocate memory for the internal state */
    state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL);
    if (state == NULL)
        goto error1;

    state->config = config;
    state->i2c = i2c;

    /* check if the demod is present */
    ret = (cx24116_readreg(state, 0xFF) << 8) |
        cx24116_readreg(state, 0xFE);
    if (ret != 0x0501) {
        printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n");
        goto error2;
    }

    /* create dvb_frontend */
    memcpy(&state->frontend.ops, &cx24116_ops,
        sizeof(struct dvb_frontend_ops));
    state->frontend.demodulator_priv = state;
    return &state->frontend;

error2: kfree(state);
error1: return NULL;
}
EXPORT_SYMBOL(cx24116_attach);

/*
 * Initialise or wake up device
 *
 * Power config will reset and load initial firmware if required
 */
static int cx24116_initfe(struct dvb_frontend *fe)
{
    struct cx24116_state *state = fe->demodulator_priv;
    struct cx24116_cmd cmd;
    int ret;

    dprintk("%s()\n", __func__);

    /* Power on */
    cx24116_writereg(state, 0xe0, 0);
    cx24116_writereg(state, 0xe1, 0);
    cx24116_writereg(state, 0xea, 0);

    /* Firmware CMD 36: Power config */
    cmd.args[0x00] = CMD_TUNERSLEEP;
    cmd.args[0x01] = 0;
    cmd.len = 0x02;
    ret = cx24116_cmd_execute(fe, &cmd);
    if (ret != 0)
        return ret;

    ret = cx24116_diseqc_init(fe);
    if (ret != 0)
        return ret;

    /* HVR-4000 needs this */
    return cx24116_set_voltage(fe, SEC_VOLTAGE_13);
}

/*
 * Put device to sleep
 */
static int cx24116_sleep(struct dvb_frontend *fe)
{
    struct cx24116_state *state = fe->demodulator_priv;
    struct cx24116_cmd cmd;
    int ret;

    dprintk("%s()\n", __func__);

    /* Firmware CMD 36: Power config */
    cmd.args[0x00] = CMD_TUNERSLEEP;
    cmd.args[0x01] = 1;
    cmd.len = 0x02;
    ret = cx24116_cmd_execute(fe, &cmd);
    if (ret != 0)
        return ret;

    /* Power off (Shutdown clocks) */
    cx24116_writereg(state, 0xea, 0xff);
    cx24116_writereg(state, 0xe1, 1);
    cx24116_writereg(state, 0xe0, 1);

    return 0;
}

/* dtv_property hooks: all tuning state lives in the property cache, so
 * these are intentionally no-ops that just log. */
static int cx24116_set_property(struct dvb_frontend *fe,
    struct dtv_property *tvp)
{
    dprintk("%s(..)\n", __func__);
    return 0;
}

static int cx24116_get_property(struct
dvb_frontend *fe, struct dtv_property *tvp)
{
    dprintk("%s(..)\n", __func__);
    return 0;
}

/* dvb-core told us to tune, the tv property cache will be complete,
 * it's safe for us to pull values and use them for tuning purposes.
 */
static int cx24116_set_frontend(struct dvb_frontend *fe,
    struct dvb_frontend_parameters *p)
{
    struct cx24116_state *state = fe->demodulator_priv;
    struct dtv_frontend_properties *c = &fe->dtv_property_cache;
    struct cx24116_cmd cmd;
    fe_status_t tunerstat;
    int i, status, ret, retune = 1;

    dprintk("%s()\n", __func__);

    /* Validate the requested delivery system / modulation / rolloff and
     * stage the hardware values in state->dnxt. */
    switch (c->delivery_system) {
    case SYS_DVBS:
        dprintk("%s: DVB-S delivery system selected\n", __func__);

        /* Only QPSK is supported for DVB-S */
        if (c->modulation != QPSK) {
            dprintk("%s: unsupported modulation selected (%d)\n",
                __func__, c->modulation);
            return -EOPNOTSUPP;
        }

        /* Pilot doesn't exist in DVB-S, turn bit off */
        state->dnxt.pilot_val = CX24116_PILOT_OFF;

        /* DVB-S only supports 0.35 */
        if (c->rolloff != ROLLOFF_35) {
            dprintk("%s: unsupported rolloff selected (%d)\n",
                __func__, c->rolloff);
            return -EOPNOTSUPP;
        }
        state->dnxt.rolloff_val = CX24116_ROLLOFF_035;
        break;

    case SYS_DVBS2:
        dprintk("%s: DVB-S2 delivery system selected\n", __func__);

        /*
         * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2,
         * but not hardware auto detection
         */
        if (c->modulation != PSK_8 && c->modulation != QPSK) {
            dprintk("%s: unsupported modulation selected (%d)\n",
                __func__, c->modulation);
            return -EOPNOTSUPP;
        }

        switch (c->pilot) {
        case PILOT_AUTO:    /* Not supported but emulated */
            state->dnxt.pilot_val = (c->modulation == QPSK) ?
                CX24116_PILOT_OFF : CX24116_PILOT_ON;
            /* second tune attempt toggles the pilot bit */
            retune++;
            break;
        case PILOT_OFF:
            state->dnxt.pilot_val = CX24116_PILOT_OFF;
            break;
        case PILOT_ON:
            state->dnxt.pilot_val = CX24116_PILOT_ON;
            break;
        default:
            dprintk("%s: unsupported pilot mode selected (%d)\n",
                __func__, c->pilot);
            return -EOPNOTSUPP;
        }

        switch (c->rolloff) {
        case ROLLOFF_20:
            state->dnxt.rolloff_val = CX24116_ROLLOFF_020;
            break;
        case ROLLOFF_25:
            state->dnxt.rolloff_val = CX24116_ROLLOFF_025;
            break;
        case ROLLOFF_35:
            state->dnxt.rolloff_val = CX24116_ROLLOFF_035;
            break;
        case ROLLOFF_AUTO:  /* Rolloff must be explicit */
        default:
            dprintk("%s: unsupported rolloff selected (%d)\n",
                __func__, c->rolloff);
            return -EOPNOTSUPP;
        }
        break;

    default:
        dprintk("%s: unsupported delivery system selected (%d)\n",
            __func__, c->delivery_system);
        return -EOPNOTSUPP;
    }

    state->dnxt.delsys = c->delivery_system;
    state->dnxt.modulation = c->modulation;
    state->dnxt.frequency = c->frequency;
    state->dnxt.pilot = c->pilot;
    state->dnxt.rolloff = c->rolloff;

    ret = cx24116_set_inversion(state, c->inversion);
    if (ret != 0)
        return ret;

    /* FEC_NONE/AUTO for DVB-S2 is not supported and detected here */
    ret = cx24116_set_fec(state, c->delivery_system,
        c->modulation, c->fec_inner);
    if (ret != 0)
        return ret;

    ret = cx24116_set_symbolrate(state, c->symbol_rate);
    if (ret != 0)
        return ret;

    /* discard the 'current' tuning parameters and prepare to tune */
    cx24116_clone_params(fe);

    dprintk("%s:   delsys      = %d\n", __func__, state->dcur.delsys);
    dprintk("%s:   modulation  = %d\n", __func__, state->dcur.modulation);
    dprintk("%s:   frequency   = %d\n", __func__, state->dcur.frequency);
    dprintk("%s:   pilot       = %d (val = 0x%02x)\n", __func__,
        state->dcur.pilot, state->dcur.pilot_val);
    dprintk("%s:   retune      = %d\n", __func__, retune);
    dprintk("%s:   rolloff     = %d (val = 0x%02x)\n", __func__,
        state->dcur.rolloff, state->dcur.rolloff_val);
    dprintk("%s:   symbol_rate = %d\n", __func__, state->dcur.symbol_rate);
    dprintk("%s:   FEC         = %d (mask/val = 0x%02x/0x%02x)\n", __func__,
        state->dcur.fec, state->dcur.fec_mask, state->dcur.fec_val);
    dprintk("%s:   Inversion   = %d (val = 0x%02x)\n", __func__,
        state->dcur.inversion, state->dcur.inversion_val);

    /* This is also done in advise/acquire on HVR4000 but not on LITE */
    if (state->config->set_ts_params)
        state->config->set_ts_params(fe, 0);

    /* Set/Reset B/W */
    cmd.args[0x00] = CMD_BANDWIDTH;
    cmd.args[0x01] = 0x01;
    cmd.len = 0x02;
    ret = cx24116_cmd_execute(fe, &cmd);
    if (ret != 0)
        return ret;

    /* Prepare a tune request */
    cmd.args[0x00] = CMD_TUNEREQUEST;

    /* Frequency */
    cmd.args[0x01] = (state->dcur.frequency & 0xff0000) >> 16;
    cmd.args[0x02] = (state->dcur.frequency & 0x00ff00) >> 8;
    cmd.args[0x03] = (state->dcur.frequency & 0x0000ff);

    /* Symbol Rate */
    cmd.args[0x04] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8;
    cmd.args[0x05] = ((state->dcur.symbol_rate / 1000) & 0x00ff);

    /* Automatic Inversion */
    cmd.args[0x06] = state->dcur.inversion_val;

    /* Modulation / FEC / Pilot */
    cmd.args[0x07] = state->dcur.fec_val | state->dcur.pilot_val;

    cmd.args[0x08] = CX24116_SEARCH_RANGE_KHZ >> 8;
    cmd.args[0x09] = CX24116_SEARCH_RANGE_KHZ & 0xff;
    cmd.args[0x0a] = 0x00;
    cmd.args[0x0b] = 0x00;
    cmd.args[0x0c] = state->dcur.rolloff_val;
    cmd.args[0x0d] = state->dcur.fec_mask;

    /* Clock/rate dividers differ above 30 MS/s */
    if (state->dcur.symbol_rate > 30000000) {
        cmd.args[0x0e] = 0x04;
        cmd.args[0x0f] = 0x00;
        cmd.args[0x10] = 0x01;
        cmd.args[0x11] = 0x77;
        cmd.args[0x12] = 0x36;
        cx24116_writereg(state, CX24116_REG_CLKDIV, 0x44);
        cx24116_writereg(state, CX24116_REG_RATEDIV, 0x01);
    } else {
        cmd.args[0x0e] = 0x06;
        cmd.args[0x0f] = 0x00;
        cmd.args[0x10] = 0x00;
        cmd.args[0x11] = 0xFA;
        cmd.args[0x12] = 0x24;
        cx24116_writereg(state, CX24116_REG_CLKDIV, 0x46);
        cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00);
    }

    cmd.len = 0x13;

    /* We need to support pilot and non-pilot tuning in the
     * driver automatically. This is a workaround because
     * the demod does not support autodetect.
     */
    do {
        /* Reset status register */
        status = cx24116_readreg(state, CX24116_REG_SSTATUS)
            & CX24116_SIGNAL_MASK;
        cx24116_writereg(state, CX24116_REG_SSTATUS, status);

        /* Tune */
        ret = cx24116_cmd_execute(fe, &cmd);
        if (ret != 0)
            break;

        /*
         * Wait for up to 500 ms before retrying
         *
         * If we are able to tune then generally it occurs within 100ms.
         * If it takes longer, try a different toneburst setting.
         */
        for (i = 0; i < 50 ; i++) {
            cx24116_read_status(fe, &tunerstat);
            status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC);
            if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) {
                dprintk("%s: Tuned\n", __func__);
                goto tuned;
            }
            msleep(10);
        }

        dprintk("%s: Not tuned\n", __func__);

        /* Toggle pilot bit when in auto-pilot */
        if (state->dcur.pilot == PILOT_AUTO)
            cmd.args[0x07] ^= CX24116_PILOT_ON;
    } while (--retune);

tuned:  /* Set/Reset B/W */
    cmd.args[0x00] = CMD_BANDWIDTH;
    cmd.args[0x01] = 0x00;
    cmd.len = 0x02;
    ret = cx24116_cmd_execute(fe, &cmd);
    if (ret != 0)
        return ret;

    return ret;
}

/* dvb-core hardware-algorithm tune hook: retune if params given, then
 * report status; asks to be polled again in HZ/5. */
static int cx24116_tune(struct dvb_frontend *fe,
    struct dvb_frontend_parameters *params,
    unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
{
    *delay = HZ / 5;
    if (params) {
        int ret = cx24116_set_frontend(fe, params);
        if (ret)
            return ret;
    }
    return cx24116_read_status(fe, status);
}

/* The firmware performs the zigzag/retry itself */
static int cx24116_get_algo(struct dvb_frontend *fe)
{
    return DVBFE_ALGO_HW;
}

static struct dvb_frontend_ops cx24116_ops = {

    .info = {
        .name = "Conexant CX24116/CX24118",
        .type = FE_QPSK,
        .frequency_min = 950000,
        .frequency_max = 2150000,
        .frequency_stepsize = 1011, /* kHz for QPSK frontends */
        .frequency_tolerance = 5000,
        .symbol_rate_min = 1000000,
        .symbol_rate_max = 45000000,
        .caps = FE_CAN_INVERSION_AUTO |
            FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
            FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
            FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
            FE_CAN_2G_MODULATION |
            FE_CAN_QPSK | FE_CAN_RECOVER
    },

    .release = cx24116_release,

    .init = cx24116_initfe,
    .sleep = cx24116_sleep,
    .read_status = cx24116_read_status,
    .read_ber = cx24116_read_ber,
    .read_signal_strength = cx24116_read_signal_strength,
    .read_snr = cx24116_read_snr,
    .read_ucblocks = cx24116_read_ucblocks,
    .set_tone = cx24116_set_tone,
    .set_voltage = cx24116_set_voltage,
    .diseqc_send_master_cmd = cx24116_send_diseqc_msg,
    .diseqc_send_burst = cx24116_diseqc_send_burst,
    .get_frontend_algo = cx24116_get_algo,
    .tune = cx24116_tune,

    .set_property = cx24116_set_property,
    .get_property = cx24116_get_property,
    .set_frontend = cx24116_set_frontend,
};

MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24116/cx24118 hardware");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
gpl-2.0
e9wifi-dev/android_kernel_lge_e9wifi-test
security/keys/keyring.c
4180
30924
/* Keyring handling
 *
 * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
#include <linux/uaccess.h>
#include "internal.h"

/* Dereference a keyring's key list, asserting that the keyring's
 * semaphore is held (read or write). */
#define rcu_dereference_locked_keyring(keyring)				\
	(rcu_dereference_protected(					\
		(keyring)->payload.subscriptions,			\
		rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))

/* Flag bit OR'd into the preallocation cookie when the quota charge
 * must be undone on abort (see __key_link_begin/__key_link_end). */
#define KEY_LINK_FIXQUOTA 1UL

/*
 * When plumbing the depths of the key tree, this sets a hard limit
 * set on how deep we're willing to go.
 */
#define KEYRING_SEARCH_MAX_DEPTH 6

/*
 * We keep all named keyrings in a hash to speed looking them up.
 */
#define KEYRING_NAME_HASH_SIZE	(1 << 5)

static struct list_head	keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);

/* Hash a keyring description into a keyring_name_hash bucket index. */
static inline unsigned keyring_hash(const char *desc)
{
	unsigned bucket = 0;

	for (; *desc; desc++)
		bucket += (unsigned char)*desc;

	return bucket & (KEYRING_NAME_HASH_SIZE - 1);
}

/*
 * The keyring key type definition.  Keyrings are simply keys of this type and
 * can be treated as ordinary keys in addition to having their own special
 * operations.
 */
static int keyring_instantiate(struct key *keyring,
			       const void *data, size_t datalen);
static int keyring_match(const struct key *keyring, const void *criterion);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
static long keyring_read(const struct key *keyring,
			 char __user *buffer, size_t buflen);

struct key_type key_type_keyring = {
	.name		= "keyring",
	.def_datalen	= sizeof(struct keyring_list),
	.instantiate	= keyring_instantiate,
	.match		= keyring_match,
	.revoke		= keyring_revoke,
	.destroy	= keyring_destroy,
	.describe	= keyring_describe,
	.read		= keyring_read,
};
EXPORT_SYMBOL(key_type_keyring);

/*
 * Semaphore to serialise link/link calls to prevent two link calls in parallel
 * introducing a cycle.
 */
static DECLARE_RWSEM(keyring_serialise_link_sem);

/*
 * Publish the name of a keyring so that it can be found by name (if it has
 * one).
 */
static void keyring_publish_name(struct key *keyring)
{
	int bucket;

	if (keyring->description) {
		bucket = keyring_hash(keyring->description);

		write_lock(&keyring_name_lock);

		/* hash buckets are initialised lazily on first use */
		if (!keyring_name_hash[bucket].next)
			INIT_LIST_HEAD(&keyring_name_hash[bucket]);

		list_add_tail(&keyring->type_data.link,
			      &keyring_name_hash[bucket]);

		write_unlock(&keyring_name_lock);
	}
}

/*
 * Initialise a keyring.
 *
 * Returns 0 on success, -EINVAL if given any data.
 */
static int keyring_instantiate(struct key *keyring,
			       const void *data, size_t datalen)
{
	int ret;

	ret = -EINVAL;
	if (datalen == 0) {
		/* make the keyring available by name if it has one */
		keyring_publish_name(keyring);
		ret = 0;
	}

	return ret;
}

/*
 * Match keyrings on their name
 */
static int keyring_match(const struct key *keyring, const void *description)
{
	return keyring->description &&
		strcmp(keyring->description, description) == 0;
}

/*
 * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
 * and dispose of its data.
 */
static void keyring_destroy(struct key *keyring)
{
	struct keyring_list *klist;
	int loop;

	if (keyring->description) {
		write_lock(&keyring_name_lock);

		/* only unlink if the keyring was actually published */
		if (keyring->type_data.link.next != NULL &&
		    !list_empty(&keyring->type_data.link))
			list_del(&keyring->type_data.link);

		write_unlock(&keyring_name_lock);
	}

	/* usage count is zero here, so no RCU readers can still see klist */
	klist = rcu_dereference_check(keyring->payload.subscriptions,
				      atomic_read(&keyring->usage) == 0);
	if (klist) {
		for (loop = klist->nkeys - 1; loop >= 0; loop--)
			key_put(klist->keys[loop]);
		kfree(klist);
	}
}

/*
 * Describe a keyring for /proc.
 */
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
	struct keyring_list *klist;

	if (keyring->description)
		seq_puts(m, keyring->description);
	else
		seq_puts(m, "[anon]");

	if (key_is_instantiated(keyring)) {
		rcu_read_lock();
		klist = rcu_dereference(keyring->payload.subscriptions);
		if (klist)
			seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
		else
			seq_puts(m, ": empty");
		rcu_read_unlock();
	}
}

/*
 * Read a list of key IDs from the keyring's contents in binary form
 *
 * The keyring's semaphore is read-locked by the caller.
 *
 * Returns the number of bytes the full listing would occupy (even when
 * the supplied buffer is smaller), or -EFAULT on a copy failure.
 */
static long keyring_read(const struct key *keyring,
			 char __user *buffer, size_t buflen)
{
	struct keyring_list *klist;
	struct key *key;
	size_t qty, tmp;
	int loop, ret;

	ret = 0;
	klist = rcu_dereference_locked_keyring(keyring);
	if (klist) {
		/* calculate how much data we could return */
		qty = klist->nkeys * sizeof(key_serial_t);

		if (buffer && buflen > 0) {
			if (buflen > qty)
				buflen = qty;

			/* copy the IDs of the subscribed keys into the
			 * buffer */
			ret = -EFAULT;

			for (loop = 0; loop < klist->nkeys; loop++) {
				key = klist->keys[loop];

				tmp = sizeof(key_serial_t);
				if (tmp > buflen)
					tmp = buflen;

				if (copy_to_user(buffer,
						 &key->serial,
						 tmp) != 0)
					goto error;

				buflen -= tmp;
				if (buflen == 0)
					break;
				buffer += tmp;
			}
		}

		ret = qty;
	}

error:
	return ret;
}

/*
 * Allocate a keyring and link into the destination keyring.
 */
struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
			  const struct cred *cred, unsigned long flags,
			  struct key *dest)
{
	struct key *keyring;
	int ret;

	keyring = key_alloc(&key_type_keyring, description,
			    uid, gid, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
			    flags);

	if (!IS_ERR(keyring)) {
		ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
		if (ret < 0) {
			key_put(keyring);
			keyring = ERR_PTR(ret);
		}
	}

	return keyring;
}

/**
 * keyring_search_aux - Search a keyring tree for a key matching some criteria
 * @keyring_ref: A pointer to the keyring with possession indicator.
 * @cred: The credentials to use for permissions checks.
 * @type: The type of key to search for.
 * @description: Parameter for @match.
 * @match: Function to rule on whether or not a key is the one required.
 * @no_state_check: Don't check if a matching key is bad
 *
 * Search the supplied keyring tree for a key that matches the criteria given.
 * The root keyring and any linked keyrings must grant Search permission to the
 * caller to be searchable and keys can only be found if they too grant Search
 * to the caller.  The possession flag on the root keyring pointer controls use
 * of the possessor bits in permissions checking of the entire tree.  In
 * addition, the LSM gets to forbid keyring searches and key matches.
 *
 * The search is performed as a breadth-then-depth search up to the prescribed
 * limit (KEYRING_SEARCH_MAX_DEPTH).
 *
 * Keys are matched to the type provided and are then filtered by the match
 * function, which is given the description to use in any way it sees fit.  The
 * match function may use any attributes of a key that it wishes to
 * determine the match.  Normally the match function from the key type would be
 * used.
 *
 * RCU is used to prevent the keyring key lists from disappearing without the
 * need to take lots of locks.
 *
 * Returns a pointer to the found key and increments the key usage count if
 * successful; -EAGAIN if no matching keys were found, or if expired or revoked
 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
 * specified keyring wasn't a keyring.
 *
 * In the case of a successful return, the possession attribute from
 * @keyring_ref is propagated to the returned key reference.
 */
key_ref_t keyring_search_aux(key_ref_t keyring_ref,
			     const struct cred *cred,
			     struct key_type *type,
			     const void *description,
			     key_match_func_t match,
			     bool no_state_check)
{
	/* explicit stack for the iterative depth-first descent */
	struct {
		struct keyring_list *keylist;
		int kix;
	} stack[KEYRING_SEARCH_MAX_DEPTH];

	struct keyring_list *keylist;
	struct timespec now;
	unsigned long possessed, kflags;
	struct key *keyring, *key;
	key_ref_t key_ref;
	long err;
	int sp, nkeys, kix;

	keyring = key_ref_to_ptr(keyring_ref);
	possessed = is_key_possessed(keyring_ref);
	key_check(keyring);

	/* top keyring must have search permission to begin the search */
	err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
	if (err < 0) {
		key_ref = ERR_PTR(err);
		goto error;
	}

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error;

	rcu_read_lock();

	now = current_kernel_time();
	err = -EAGAIN;
	sp = 0;

	/* firstly we should check to see if this top-level keyring is what
	 * we are looking for */
	key_ref = ERR_PTR(-EAGAIN);
	kflags = keyring->flags;
	if (keyring->type == type && match(keyring, description)) {
		key = keyring;
		if (no_state_check)
			goto found;

		/* check it isn't negative and hasn't expired or been
		 * revoked */
		if (kflags & (1 << KEY_FLAG_REVOKED))
			goto error_2;
		if (key->expiry && now.tv_sec >= key->expiry)
			goto error_2;
		key_ref = ERR_PTR(key->type_data.reject_error);
		if (kflags & (1 << KEY_FLAG_NEGATIVE))
			goto error_2;
		goto found;
	}

	/* otherwise, the top keyring must not be revoked, expired, or
	 * negatively instantiated if we are to search it */
	key_ref = ERR_PTR(-EAGAIN);
	if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) ||
	    (keyring->expiry && now.tv_sec >= keyring->expiry))
		goto error_2;

	/* start processing a new keyring */
descend:
	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto not_this_keyring;

	keylist = rcu_dereference(keyring->payload.subscriptions);
	if (!keylist)
		goto not_this_keyring;

	/* iterate through the keys in this keyring first */
	nkeys = keylist->nkeys;
	smp_rmb();  /* pairs with the publisher's write barrier on nkeys */
	for (kix = 0; kix < nkeys; kix++) {
		key = keylist->keys[kix];
		kflags = key->flags;

		/* ignore keys not of this type */
		if (key->type != type)
			continue;

		/* skip revoked keys and expired keys */
		if (!no_state_check) {
			if (kflags & (1 << KEY_FLAG_REVOKED))
				continue;

			if (key->expiry && now.tv_sec >= key->expiry)
				continue;
		}

		/* keys that don't match */
		if (!match(key, description))
			continue;

		/* key must have search permissions */
		if (key_task_permission(make_key_ref(key, possessed),
					cred, KEY_SEARCH) < 0)
			continue;

		if (no_state_check)
			goto found;

		/* we set a different error code if we pass a negative key */
		if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
			err = key->type_data.reject_error;
			continue;
		}

		goto found;
	}

	/* search through the keyrings nested in this one */
	kix = 0;
ascend:
	nkeys = keylist->nkeys;
	smp_rmb();
	for (; kix < nkeys; kix++) {
		key = keylist->keys[kix];
		if (key->type != &key_type_keyring)
			continue;

		/* recursively search nested keyrings
		 * - only search keyrings for which we have search permission
		 */
		if (sp >= KEYRING_SEARCH_MAX_DEPTH)
			continue;

		if (key_task_permission(make_key_ref(key, possessed),
					cred, KEY_SEARCH) < 0)
			continue;

		/* stack the current position */
		stack[sp].keylist = keylist;
		stack[sp].kix = kix;
		sp++;

		/* begin again with the new keyring */
		keyring = key;
		goto descend;
	}

	/* the keyring we're looking at was disqualified or didn't contain a
	 * matching key */
not_this_keyring:
	if (sp > 0) {
		/* resume the processing of a keyring higher up in the tree */
		sp--;
		keylist = stack[sp].keylist;
		kix = stack[sp].kix + 1;
		goto ascend;
	}

	key_ref = ERR_PTR(err);
	goto error_2;

	/* we found a viable match */
found:
	atomic_inc(&key->usage);
	key_check(key);
	key_ref = make_key_ref(key, possessed);
error_2:
	rcu_read_unlock();
error:
	return key_ref;
}

/**
 * keyring_search - Search the supplied keyring tree for a matching key
 * @keyring: The root of the keyring tree to be searched.
 * @type: The type of keyring we want to find.
 * @description: The name of the keyring we want to find.
 *
 * As keyring_search_aux() above, but using the current task's credentials and
 * type's default matching function.
 */
key_ref_t keyring_search(key_ref_t keyring,
			 struct key_type *type,
			 const char *description)
{
	if (!type->match)
		return ERR_PTR(-ENOKEY);

	return keyring_search_aux(keyring, current->cred,
				  type, description, type->match, false);
}
EXPORT_SYMBOL(keyring_search);

/*
 * Search the given keyring only (no recursion).
 *
 * The caller must guarantee that the keyring is a keyring and that the
 * permission is granted to search the keyring as no check is made here.
 *
 * RCU is used to make it unnecessary to lock the keyring key list here.
 *
 * Returns a pointer to the found key with usage count incremented if
 * successful and returns -ENOKEY if not found.  Revoked keys and keys not
 * providing the requested permission are skipped over.
 *
 * If successful, the possession indicator is propagated from the keyring ref
 * to the returned key reference.
 */
key_ref_t __keyring_search_one(key_ref_t keyring_ref,
			       const struct key_type *ktype,
			       const char *description,
			       key_perm_t perm)
{
	struct keyring_list *klist;
	unsigned long possessed;
	struct key *keyring, *key;
	int nkeys, loop;

	keyring = key_ref_to_ptr(keyring_ref);
	possessed = is_key_possessed(keyring_ref);

	rcu_read_lock();

	klist = rcu_dereference(keyring->payload.subscriptions);
	if (klist) {
		nkeys = klist->nkeys;
		smp_rmb();  /* read nkeys before the key slots */
		for (loop = 0; loop < nkeys ; loop++) {
			key = klist->keys[loop];

			if (key->type == ktype &&
			    (!key->type->match ||
			     key->type->match(key, description)) &&
			    key_permission(make_key_ref(key, possessed),
					   perm) == 0 &&
			    !test_bit(KEY_FLAG_REVOKED, &key->flags)
			    )
				goto found;
		}
	}

	rcu_read_unlock();
	return ERR_PTR(-ENOKEY);

found:
	atomic_inc(&key->usage);
	rcu_read_unlock();
	return make_key_ref(key, possessed);
}

/*
 * Find a keyring with the specified name.
 *
 * All named keyrings in the current user namespace are searched, provided they
 * grant Search permission directly to the caller (unless this check is
 * skipped).  Keyrings whose usage points have reached zero or who have been
 * revoked are skipped.
 *
 * Returns a pointer to the keyring with the keyring's refcount having being
 * incremented on success.  -ENOKEY is returned if a key could not be found.
 */
struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
{
	struct key *keyring;
	int bucket;

	if (!name)
		return ERR_PTR(-EINVAL);

	bucket = keyring_hash(name);

	read_lock(&keyring_name_lock);

	if (keyring_name_hash[bucket].next) {
		/* search this hash bucket for a keyring with a matching name
		 * that's readable and that hasn't been revoked */
		list_for_each_entry(keyring,
				    &keyring_name_hash[bucket],
				    type_data.link
				    ) {
			if (keyring->user->user_ns != current_user_ns())
				continue;

			if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
				continue;

			if (strcmp(keyring->description, name) != 0)
				continue;

			if (!skip_perm_check &&
			    key_permission(make_key_ref(keyring, 0),
					   KEY_SEARCH) < 0)
				continue;

			/* we've got a match but we might end up racing with
			 * key_cleanup() if the keyring is currently 'dead'
			 * (ie. it has a zero usage count) */
			if (!atomic_inc_not_zero(&keyring->usage))
				continue;
			goto out;
		}
	}

	keyring = ERR_PTR(-ENOKEY);
out:
	read_unlock(&keyring_name_lock);
	return keyring;
}

/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
 * tree A at the topmost level (ie: as a direct child of A).
 *
 * Since we are adding B to A at the top level, checking for cycles should just
 * be a matter of seeing if node A is somewhere in tree B.
 */
static int keyring_detect_cycle(struct key *A, struct key *B)
{
	/* explicit stack for the iterative depth-first walk of tree B */
	struct {
		struct keyring_list *keylist;
		int kix;
	} stack[KEYRING_SEARCH_MAX_DEPTH];

	struct keyring_list *keylist;
	struct key *subtree, *key;
	int sp, nkeys, kix, ret;

	rcu_read_lock();

	ret = -EDEADLK;
	if (A == B)
		goto cycle_detected;

	subtree = B;
	sp = 0;

	/* start processing a new keyring */
descend:
	if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
		goto not_this_keyring;

	keylist = rcu_dereference(subtree->payload.subscriptions);
	if (!keylist)
		goto not_this_keyring;
	kix = 0;

ascend:
	/* iterate through the remaining keys in this keyring */
	nkeys = keylist->nkeys;
	smp_rmb();  /* read nkeys before the key slots */
	for (; kix < nkeys; kix++) {
		key = keylist->keys[kix];

		if (key == A)
			goto cycle_detected;

		/* recursively check nested keyrings */
		if (key->type == &key_type_keyring) {
			if (sp >= KEYRING_SEARCH_MAX_DEPTH)
				goto too_deep;

			/* stack the current position */
			stack[sp].keylist = keylist;
			stack[sp].kix = kix;
			sp++;

			/* begin again with the new keyring */
			subtree = key;
			goto descend;
		}
	}

	/* the keyring we're looking at was disqualified or didn't contain a
	 * matching key */
not_this_keyring:
	if (sp > 0) {
		/* resume the checking of a keyring higher up in the tree */
		sp--;
		keylist = stack[sp].keylist;
		kix = stack[sp].kix + 1;
		goto ascend;
	}

	ret = 0; /* no cycles detected */

error:
	rcu_read_unlock();
	return ret;

too_deep:
	ret = -ELOOP;
	goto error;

cycle_detected:
	ret = -EDEADLK;
	goto error;
}

/*
 * Dispose of a keyring list after the RCU grace period, freeing the unlinked
 * key
 */
static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
{
	struct keyring_list *klist =
		container_of(rcu, struct keyring_list, rcu);

	/* delkey == USHRT_MAX means no key was displaced by this list */
	if (klist->delkey != USHRT_MAX)
		key_put(klist->keys[klist->delkey]);
	kfree(klist);
}

/*
 * Preallocate memory so that a key can be linked into a keyring.
 */
int __key_link_begin(struct key *keyring, const struct key_type *type,
		     const char *description, unsigned long *_prealloc)
	__acquires(&keyring->sem)
{
	struct keyring_list *klist, *nklist;
	unsigned long prealloc;
	unsigned max;
	size_t size;
	int loop, ret;

	kenter("%d,%s,%s,", key_serial(keyring), type->name, description);

	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);

	ret = -EKEYREVOKED;
	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto error_krsem;

	/* serialise link/link calls to prevent parallel calls causing a cycle
	 * when linking two keyring in opposite orders */
	if (type == &key_type_keyring)
		down_write(&keyring_serialise_link_sem);

	klist = rcu_dereference_locked_keyring(keyring);

	/* see if there's a matching key we can displace */
	if (klist && klist->nkeys > 0) {
		for (loop = klist->nkeys - 1; loop >= 0; loop--) {
			if (klist->keys[loop]->type == type &&
			    strcmp(klist->keys[loop]->description,
				   description) == 0
			    ) {
				/* found a match - we'll replace this one with
				 * the new key */
				size = sizeof(struct key *) * klist->maxkeys;
				size += sizeof(*klist);
				BUG_ON(size > PAGE_SIZE);

				ret = -ENOMEM;
				nklist = kmemdup(klist, size, GFP_KERNEL);
				if (!nklist)
					goto error_sem;

				/* note replacement slot */
				klist->delkey = nklist->delkey = loop;
				prealloc = (unsigned long)nklist;
				goto done;
			}
		}
	}

	/* check that we aren't going to overrun the user's quota */
	ret = key_payload_reserve(keyring,
				  keyring->datalen + KEYQUOTA_LINK_BYTES);
	if (ret < 0)
		goto error_sem;

	if (klist && klist->nkeys < klist->maxkeys) {
		/* there's sufficient slack space to append directly */
		nklist = NULL;
		prealloc = KEY_LINK_FIXQUOTA;
	} else {
		/* grow the key list */
		max = 4;
		if (klist)
			max += klist->maxkeys;

		ret = -ENFILE;
		if (max > USHRT_MAX - 1)
			goto error_quota;
		size = sizeof(*klist) + sizeof(struct key *) * max;
		if (size > PAGE_SIZE)
			goto error_quota;

		ret = -ENOMEM;
		nklist = kmalloc(size, GFP_KERNEL);
		if (!nklist)
			goto error_quota;

		nklist->maxkeys = max;
		if (klist) {
			memcpy(nklist->keys, klist->keys,
			       sizeof(struct key *) * klist->nkeys);
			nklist->delkey = klist->nkeys;
			nklist->nkeys = klist->nkeys + 1;
			/* old list displaces nothing when it is disposed of */
			klist->delkey = USHRT_MAX;
		} else {
			nklist->nkeys = 1;
			nklist->delkey = 0;
		}

		/* add the key into the new space */
		nklist->keys[nklist->delkey] = NULL;
	}

	prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
done:
	*_prealloc = prealloc;
	kleave(" = 0");
	return 0;

error_quota:
	/* undo the quota changes */
	key_payload_reserve(keyring,
			    keyring->datalen - KEYQUOTA_LINK_BYTES);
error_sem:
	if (type == &key_type_keyring)
		up_write(&keyring_serialise_link_sem);
error_krsem:
	up_write(&keyring->sem);
	kleave(" = %d", ret);
	return ret;
}

/*
 * Check already instantiated keys aren't going to be a problem.
 *
 * The caller must have called __key_link_begin().  Don't need to call this for
 * keys that were created since __key_link_begin() was called.
 */
int __key_link_check_live_key(struct key *keyring, struct key *key)
{
	if (key->type == &key_type_keyring)
		/* check that we aren't going to create a cycle by linking one
		 * keyring to another */
		return keyring_detect_cycle(keyring, key);
	return 0;
}

/*
 * Link a key into a keyring.
 *
 * Must be called with __key_link_begin() having being called.  Discards any
 * already extant link to matching key if there is one, so that each keyring
 * holds at most one link to any given key of a particular type+description
 * combination.
*/ void __key_link(struct key *keyring, struct key *key, unsigned long *_prealloc) { struct keyring_list *klist, *nklist; nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA); *_prealloc = 0; kenter("%d,%d,%p", keyring->serial, key->serial, nklist); klist = rcu_dereference_locked_keyring(keyring); atomic_inc(&key->usage); /* there's a matching key we can displace or an empty slot in a newly * allocated list we can fill */ if (nklist) { kdebug("replace %hu/%hu/%hu", nklist->delkey, nklist->nkeys, nklist->maxkeys); nklist->keys[nklist->delkey] = key; rcu_assign_pointer(keyring->payload.subscriptions, nklist); /* dispose of the old keyring list and, if there was one, the * displaced key */ if (klist) { kdebug("dispose %hu/%hu/%hu", klist->delkey, klist->nkeys, klist->maxkeys); call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); } } else { /* there's sufficient slack space to append directly */ klist->keys[klist->nkeys] = key; smp_wmb(); klist->nkeys++; } } /* * Finish linking a key into to a keyring. * * Must be called with __key_link_begin() having being called. */ void __key_link_end(struct key *keyring, struct key_type *type, unsigned long prealloc) __releases(&keyring->sem) { BUG_ON(type == NULL); BUG_ON(type->name == NULL); kenter("%d,%s,%lx", keyring->serial, type->name, prealloc); if (type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (prealloc) { if (prealloc & KEY_LINK_FIXQUOTA) key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA)); } up_write(&keyring->sem); } /** * key_link - Link a key to a keyring * @keyring: The keyring to make the link in. * @key: The key to link to. * * Make a link in a keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring. 
* * This function will write-lock the keyring's semaphore and will consume some * of the user's key data quota to hold the link. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is * full, -EDQUOT if there is insufficient key data quota remaining to add * another link or -ENOMEM if there's insufficient memory. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). */ int key_link(struct key *keyring, struct key *key) { unsigned long prealloc; int ret; key_check(keyring); key_check(key); ret = __key_link_begin(keyring, key->type, key->description, &prealloc); if (ret == 0) { ret = __key_link_check_live_key(keyring, key); if (ret == 0) __key_link(keyring, key, &prealloc); __key_link_end(keyring, key->type, prealloc); } return ret; } EXPORT_SYMBOL(key_link); /** * key_unlink - Unlink the first link to a key from a keyring. * @keyring: The keyring to remove the link from. * @key: The key the link is to. * * Remove a link from a keyring to a key. * * This function will write-lock the keyring's semaphore. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if * the key isn't linked to by the keyring or -ENOMEM if there's insufficient * memory. * * It is assumed that the caller has checked that it is permitted for a link to * be removed (the keyring should have Write permission; no permissions are * required on the key). 
*/ int key_unlink(struct key *keyring, struct key *key) { struct keyring_list *klist, *nklist; int loop, ret; key_check(keyring); key_check(key); ret = -ENOTDIR; if (keyring->type != &key_type_keyring) goto error; down_write(&keyring->sem); klist = rcu_dereference_locked_keyring(keyring); if (klist) { /* search the keyring for the key */ for (loop = 0; loop < klist->nkeys; loop++) if (klist->keys[loop] == key) goto key_is_present; } up_write(&keyring->sem); ret = -ENOENT; goto error; key_is_present: /* we need to copy the key list for RCU purposes */ nklist = kmalloc(sizeof(*klist) + sizeof(struct key *) * klist->maxkeys, GFP_KERNEL); if (!nklist) goto nomem; nklist->maxkeys = klist->maxkeys; nklist->nkeys = klist->nkeys - 1; if (loop > 0) memcpy(&nklist->keys[0], &klist->keys[0], loop * sizeof(struct key *)); if (loop < nklist->nkeys) memcpy(&nklist->keys[loop], &klist->keys[loop + 1], (nklist->nkeys - loop) * sizeof(struct key *)); /* adjust the user's quota */ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); rcu_assign_pointer(keyring->payload.subscriptions, nklist); up_write(&keyring->sem); /* schedule for later cleanup */ klist->delkey = loop; call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); ret = 0; error: return ret; nomem: ret = -ENOMEM; up_write(&keyring->sem); goto error; } EXPORT_SYMBOL(key_unlink); /* * Dispose of a keyring list after the RCU grace period, releasing the keys it * links to. */ static void keyring_clear_rcu_disposal(struct rcu_head *rcu) { struct keyring_list *klist; int loop; klist = container_of(rcu, struct keyring_list, rcu); for (loop = klist->nkeys - 1; loop >= 0; loop--) key_put(klist->keys[loop]); kfree(klist); } /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. * * Clear the contents of the specified keyring. * * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. 
*/ int keyring_clear(struct key *keyring) { struct keyring_list *klist; int ret; ret = -ENOTDIR; if (keyring->type == &key_type_keyring) { /* detach the pointer block with the locks held */ down_write(&keyring->sem); klist = rcu_dereference_locked_keyring(keyring); if (klist) { /* adjust the quota */ key_payload_reserve(keyring, sizeof(struct keyring_list)); rcu_assign_pointer(keyring->payload.subscriptions, NULL); } up_write(&keyring->sem); /* free the keys after the locks have been dropped */ if (klist) call_rcu(&klist->rcu, keyring_clear_rcu_disposal); ret = 0; } return ret; } EXPORT_SYMBOL(keyring_clear); /* * Dispose of the links from a revoked keyring. * * This is called with the key sem write-locked. */ static void keyring_revoke(struct key *keyring) { struct keyring_list *klist; klist = rcu_dereference_locked_keyring(keyring); /* adjust the quota */ key_payload_reserve(keyring, 0); if (klist) { rcu_assign_pointer(keyring->payload.subscriptions, NULL); call_rcu(&klist->rcu, keyring_clear_rcu_disposal); } } /* * Determine whether a key is dead. */ static bool key_is_dead(struct key *key, time_t limit) { return test_bit(KEY_FLAG_DEAD, &key->flags) || (key->expiry > 0 && key->expiry <= limit); } /* * Collect garbage from the contents of a keyring, replacing the old list with * a new one with the pointers all shuffled down. * * Dead keys are classed as oned that are flagged as being dead or are revoked, * expired or negative keys that were revoked or expired before the specified * limit. 
*/ void keyring_gc(struct key *keyring, time_t limit) { struct keyring_list *klist, *new; struct key *key; int loop, keep, max; kenter("{%x,%s}", key_serial(keyring), keyring->description); down_write(&keyring->sem); klist = rcu_dereference_locked_keyring(keyring); if (!klist) goto no_klist; /* work out how many subscriptions we're keeping */ keep = 0; for (loop = klist->nkeys - 1; loop >= 0; loop--) if (!key_is_dead(klist->keys[loop], limit)) keep++; if (keep == klist->nkeys) goto just_return; /* allocate a new keyring payload */ max = roundup(keep, 4); new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *), GFP_KERNEL); if (!new) goto nomem; new->maxkeys = max; new->nkeys = 0; new->delkey = 0; /* install the live keys * - must take care as expired keys may be updated back to life */ keep = 0; for (loop = klist->nkeys - 1; loop >= 0; loop--) { key = klist->keys[loop]; if (!key_is_dead(key, limit)) { if (keep >= max) goto discard_new; new->keys[keep++] = key_get(key); } } new->nkeys = keep; /* adjust the quota */ key_payload_reserve(keyring, sizeof(struct keyring_list) + KEYQUOTA_LINK_BYTES * keep); if (keep == 0) { rcu_assign_pointer(keyring->payload.subscriptions, NULL); kfree(new); } else { rcu_assign_pointer(keyring->payload.subscriptions, new); } up_write(&keyring->sem); call_rcu(&klist->rcu, keyring_clear_rcu_disposal); kleave(" [yes]"); return; discard_new: new->nkeys = keep; keyring_clear_rcu_disposal(&new->rcu); up_write(&keyring->sem); kleave(" [discard]"); return; just_return: up_write(&keyring->sem); kleave(" [no dead]"); return; no_klist: up_write(&keyring->sem); kleave(" [no_klist]"); return; nomem: up_write(&keyring->sem); kleave(" [oom]"); }
gpl-2.0
bkcokota/android_kernel_k2_ul
arch/tile/kernel/pci-dma.c
4692
7107
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/tlbflush.h> #include <asm/homecache.h> /* Generic DMA mapping functions: */ /* * Allocate what Linux calls "coherent" memory, which for us just * means uncached. */ void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); int node = dev_to_node(dev); int order = get_order(size); struct page *pg; dma_addr_t addr; gfp |= __GFP_ZERO; /* * By forcing NUMA node 0 for 32-bit masks we ensure that the * high 32 bits of the resulting PA will be zero. If the mask * size is, e.g., 24, we may still not be able to guarantee a * suitable memory address, in which case we will return NULL. * But such devices are uncommon. */ if (dma_mask <= DMA_BIT_MASK(32)) node = 0; pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); if (pg == NULL) return NULL; addr = page_to_phys(pg); if (addr + size > dma_mask) { homecache_free_pages(addr, order); return NULL; } *dma_handle = addr; return page_address(pg); } EXPORT_SYMBOL(dma_alloc_coherent); /* * Free memory that was allocated with dma_alloc_coherent. 
*/ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { homecache_free_pages((unsigned long)vaddr, get_order(size)); } EXPORT_SYMBOL(dma_free_coherent); /* * The map routines "map" the specified address range for DMA * accesses. The memory belongs to the device after this call is * issued, until it is unmapped with dma_unmap_single. * * We don't need to do any mapping, we just flush the address range * out of the cache and return a DMA address. * * The unmap routines do whatever is necessary before the processor * accesses the memory again, and must be called before the driver * touches the memory. We can get away with a cache invalidate if we * can count on nothing having been touched. */ /* Flush a PA range from cache page by page. */ static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size) { struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1)); while ((ssize_t)size > 0) { /* Flush the page. */ homecache_flush_cache(page++, 0); /* Figure out if we need to continue on the next page. */ size -= bytesleft; bytesleft = PAGE_SIZE; } } /* * dma_map_single can be passed any memory address, and there appear * to be no alignment constraints. * * There is a chance that the start of the buffer will share a cache * line with some other data that has been touched in the meantime. 
*/
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	/* no IOMMU: the DMA address is simply the physical address */
	dma_addr_t dma_addr = __pa(ptr);

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	/* push the buffer out of the cache before the device reads it */
	__dma_map_pa_range(dma_addr, size);

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

/* No teardown needed: mapping only flushed the cache. */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);

/* Map each scatterlist entry: record its PA and flush its range. */
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_map_pa_range(sg->dma_address, sg->length);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

/* No teardown needed for scatterlists either. */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);

/* Map a sub-range of a single page: flush it and return its PA + offset. */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);

/* Nothing to do before the CPU touches the buffer again. */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

/* Flush every page the [dma_handle, dma_handle + size) range touches. */
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	/* inclusive last PFN: size - 1 keeps an exact page end in range */
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/* Range variants delegate to the single-buffer sync at an offset. */
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);
gpl-2.0