repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
archfan/xu4-linux | net/netfilter/ipvs/ip_vs_conn.c | 233 | 36081 | /*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Peter Kese <peter.kese@ijs.si>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
* with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
* and others. Much of the code here is taken from the IP MASQ code of kernel 2.2.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h> /* for proc_net_* */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/net_namespace.h>
#include <net/ip_vs.h>
#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS 12
#endif
/*
* Connection hash size. Default is what was selected at compile time.
*/
static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
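/*
 * Worked example (illustrative): with the default conn_tab_bits = 12,
 * ip_vs_conn_init() below computes ip_vs_conn_tab_size = 1 << 12 = 4096
 * buckets and ip_vs_conn_tab_mask = 0xfff.
 */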
/* size and mask values */
int ip_vs_conn_tab_size __read_mostly;
static int ip_vs_conn_tab_mask __read_mostly;
/*
* Connection hash table: for input and output packets lookups of IPVS
*/
static struct hlist_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
/* random value for IPVS connection hash */
static unsigned int ip_vs_conn_rnd __read_mostly;
/*
* Fine locking granularity for big connection hash table
*/
#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
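/*
 * Example (illustrative): with CT_LOCKARRAY_BITS = 5 there are 32
 * spinlocks; a bucket hash selects its lock as hash & CT_LOCKARRAY_MASK,
 * e.g. hash 0x1234 -> lock index 0x14, so unrelated buckets rarely
 * contend on the same lock.
 */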
/* We need an addrstrlen that works with or without v6 */
#ifdef CONFIG_IP_VS_IPV6
#define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
#else
#define IP_VS_ADDRSTRLEN (8+1)
#endif
struct ip_vs_aligned_lock
{
spinlock_t l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
/* lock array for conn table */
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
static inline void ct_write_lock_bh(unsigned int key)
{
spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_unlock_bh(unsigned int key)
{
spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
/*
* Returns hash value for IPVS connection entry
*/
static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
(__force u32)port, proto, ip_vs_conn_rnd) ^
((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
#endif
return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
ip_vs_conn_rnd) ^
((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
}
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
bool inverse)
{
const union nf_inet_addr *addr;
__be16 port;
if (p->pe_data && p->pe->hashkey_raw)
return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
ip_vs_conn_tab_mask;
if (likely(!inverse)) {
addr = p->caddr;
port = p->cport;
} else {
addr = p->vaddr;
port = p->vport;
}
return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
}
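/*
 * Illustrative note: entries are hashed by the client side (caddr,
 * cport). For reply-direction lookups (see ip_vs_conn_out_get) the
 * caller passes inverse == true so the packet's destination, which is
 * the original client, hashes to the same bucket.
 */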
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
&cp->caddr, cp->cport, NULL, 0, &p);
if (cp->pe) {
p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
return ip_vs_conn_hashkey_param(&p, false);
}
/*
* Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
* returns bool success.
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
unsigned int hash;
int ret;
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return 0;
/* Hash by protocol, client address and port */
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock_bh(hash);
spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
cp->flags |= IP_VS_CONN_F_HASHED;
atomic_inc(&cp->refcnt);
hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
ret = 1;
} else {
pr_err("%s(): request for already hashed, called from %pF\n",
__func__, __builtin_return_address(0));
ret = 0;
}
spin_unlock(&cp->lock);
ct_write_unlock_bh(hash);
return ret;
}
/*
* UNhashes ip_vs_conn from ip_vs_conn_tab.
* returns bool success. Caller should hold conn reference.
*/
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
unsigned int hash;
int ret;
/* unhash it and decrease its reference counter */
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock_bh(hash);
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
hlist_del_rcu(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
atomic_dec(&cp->refcnt);
ret = 1;
} else
ret = 0;
spin_unlock(&cp->lock);
ct_write_unlock_bh(hash);
return ret;
}
/* Try to unlink ip_vs_conn from ip_vs_conn_tab.
* returns bool success.
*/
static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
{
unsigned int hash;
bool ret;
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock_bh(hash);
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
ret = false;
/* Decrease refcnt and unlink conn only if we are last user */
if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) {
hlist_del_rcu(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
ret = true;
}
} else
ret = atomic_read(&cp->refcnt) ? false : true;
spin_unlock(&cp->lock);
ct_write_unlock_bh(hash);
return ret;
}
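/*
 * Note (illustrative): the atomic_cmpxchg(&cp->refcnt, 1, 0) above only
 * succeeds when the timer holds the last reference; if a packet in
 * flight still holds a reference the unlink fails and expiry is retried
 * later from ip_vs_conn_expire().
 */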
/*
* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
* Called for pkts coming from OUTside-to-INside.
* p->caddr, p->cport: pkt source address (foreign host)
* p->vaddr, p->vport: pkt dest address (load balancer)
*/
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp;
hash = ip_vs_conn_hashkey_param(p, false);
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (p->cport == cp->cport && p->vport == cp->vport &&
cp->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (!__ip_vs_conn_get(cp))
continue;
/* HIT */
rcu_read_unlock();
return cp;
}
}
rcu_read_unlock();
return NULL;
}
struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
struct ip_vs_conn *cp;
cp = __ip_vs_conn_in_get(p);
if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
struct ip_vs_conn_param cport_zero_p = *p;
cport_zero_p.cport = 0;
cp = __ip_vs_conn_in_get(&cport_zero_p);
}
IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
static int
ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
if (pptr == NULL)
return 1;
if (likely(!ip_vs_iph_inverse(iph)))
ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
pptr[0], &iph->daddr, pptr[1], p);
else
ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
const struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
return NULL;
return ip_vs_conn_in_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp;
hash = ip_vs_conn_hashkey_param(p, false);
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (unlikely(p->pe_data && p->pe->ct_match)) {
if (cp->ipvs != p->ipvs)
continue;
if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
if (__ip_vs_conn_get(cp))
goto out;
}
continue;
}
if (cp->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
/* protocol should only be IPPROTO_IP if
* p->vaddr is a fwmark */
ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
p->af, p->vaddr, &cp->vaddr) &&
p->vport == cp->vport && p->cport == cp->cport &&
cp->flags & IP_VS_CONN_F_TEMPLATE &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (__ip_vs_conn_get(cp))
goto out;
}
}
cp = NULL;
out:
rcu_read_unlock();
IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
/* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
* Called for pkts coming from inside-to-OUTside.
* p->caddr, p->cport: pkt source address (inside host)
* p->vaddr, p->vport: pkt dest address (foreign host) */
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp, *ret = NULL;
/*
* Check for "full" addressed entries
*/
hash = ip_vs_conn_hashkey_param(p, true);
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (p->vport == cp->cport && p->cport == cp->dport &&
cp->af == p->af &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (!__ip_vs_conn_get(cp))
continue;
/* HIT */
ret = cp;
break;
}
}
rcu_read_unlock();
IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
ret ? "hit" : "not hit");
return ret;
}
struct ip_vs_conn *
ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
const struct sk_buff *skb,
const struct ip_vs_iphdr *iph)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
return NULL;
return ip_vs_conn_out_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
/*
* Put back the conn and restart its timer with its timeout
*/
void ip_vs_conn_put(struct ip_vs_conn *cp)
{
unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
0 : cp->timeout;
mod_timer(&cp->timer, jiffies+t);
__ip_vs_conn_put(cp);
}
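/*
 * Typical caller pattern (illustrative sketch, not from this file;
 * real callers live in ip_vs_core.c):
 *
 *	cp = ip_vs_conn_in_get(&p);
 *	if (cp) {
 *		... forward the packet, update state ...
 *		ip_vs_conn_put(cp);	(re-arms cp->timer)
 *	}
 */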
/*
* Fill a no_client_port connection with a client port number
*/
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
if (ip_vs_conn_unhash(cp)) {
spin_lock_bh(&cp->lock);
if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
atomic_dec(&ip_vs_conn_no_cport_cnt);
cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
cp->cport = cport;
}
spin_unlock_bh(&cp->lock);
/* hash on new cport */
ip_vs_conn_hash(cp);
}
}
/*
* Bind a connection entry with the corresponding packet_xmit.
* Called by ip_vs_conn_new.
*/
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit;
break;
case IP_VS_CONN_F_TUNNEL:
#ifdef CONFIG_IP_VS_IPV6
if (cp->daf == AF_INET6)
cp->packet_xmit = ip_vs_tunnel_xmit_v6;
else
#endif
cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit;
break;
}
}
#ifdef CONFIG_IP_VS_IPV6
static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit_v6;
break;
case IP_VS_CONN_F_TUNNEL:
if (cp->daf == AF_INET6)
cp->packet_xmit = ip_vs_tunnel_xmit_v6;
else
cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit_v6;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit_v6;
break;
}
}
#endif
static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
{
return atomic_read(&dest->activeconns)
+ atomic_read(&dest->inactconns);
}
/*
* Bind a connection entry with a virtual service destination
* Called just after a new connection entry is created.
*/
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
unsigned int conn_flags;
__u32 flags;
/* if dest is NULL, then return directly */
if (!dest)
return;
/* Increase the refcnt counter of the dest */
ip_vs_dest_hold(dest);
conn_flags = atomic_read(&dest->conn_flags);
if (cp->protocol != IPPROTO_UDP)
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
flags = cp->flags;
/* Bind with the destination and its corresponding transmitter */
if (flags & IP_VS_CONN_F_SYNC) {
/* if the connection is not template and is created
* by sync, preserve the activity flag.
*/
if (!(flags & IP_VS_CONN_F_TEMPLATE))
conn_flags &= ~IP_VS_CONN_F_INACTIVE;
/* connections inherit forwarding method from dest */
flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
}
flags |= conn_flags;
cp->flags = flags;
cp->dest = dest;
IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
/* Update the connection counters */
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so modify the counters
* according to the flags, later the protocol can
* update them on state change
*/
if (!(flags & IP_VS_CONN_F_INACTIVE))
atomic_inc(&dest->activeconns);
else
atomic_inc(&dest->inactconns);
} else {
/* It is a persistent connection/template, so increase
the persistent connection counter */
atomic_inc(&dest->persistconns);
}
if (dest->u_threshold != 0 &&
ip_vs_dest_totalconns(dest) >= dest->u_threshold)
dest->flags |= IP_VS_DEST_F_OVERLOAD;
}
/*
* Check if there is a destination for the connection, if so
* bind the connection to the destination.
*/
void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest;
rcu_read_lock();
/* This function is only invoked by the synchronization code. We do
* not currently support heterogeneous pools with synchronization,
* so we can make the assumption that the svc_af is the same as the
* dest_af
*/
dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
cp->protocol, cp->fwmark, cp->flags);
if (dest) {
struct ip_vs_proto_data *pd;
spin_lock_bh(&cp->lock);
if (cp->dest) {
spin_unlock_bh(&cp->lock);
rcu_read_unlock();
return;
}
/* Applications work depending on the forwarding method
* but better to reassign them always when binding dest */
if (cp->app)
ip_vs_unbind_app(cp);
ip_vs_bind_dest(cp, dest);
spin_unlock_bh(&cp->lock);
/* Update its packet transmitter */
cp->packet_xmit = NULL;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
ip_vs_bind_xmit_v6(cp);
else
#endif
ip_vs_bind_xmit(cp);
pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
if (pd && atomic_read(&pd->appcnt))
ip_vs_bind_app(cp, pd->pp);
}
rcu_read_unlock();
}
/*
* Unbind a connection entry with its VS destination
* Called by the ip_vs_conn_expire function.
*/
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest = cp->dest;
if (!dest)
return;
IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
/* Update the connection counters */
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so decrease the inactconns
or activeconns counter */
if (cp->flags & IP_VS_CONN_F_INACTIVE) {
atomic_dec(&dest->inactconns);
} else {
atomic_dec(&dest->activeconns);
}
} else {
/* It is a persistent connection/template, so decrease
the persistent connection counter */
atomic_dec(&dest->persistconns);
}
if (dest->l_threshold != 0) {
if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else if (dest->u_threshold != 0) {
if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
}
ip_vs_dest_put(dest);
}
static int expire_quiescent_template(struct netns_ipvs *ipvs,
struct ip_vs_dest *dest)
{
#ifdef CONFIG_SYSCTL
return ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0);
#else
return 0;
#endif
}
/*
* Checking if the destination of a connection template is available.
* If available, return 1, otherwise invalidate this connection
* template and return 0.
*/
int ip_vs_check_template(struct ip_vs_conn *ct)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = ct->ipvs;
/*
* Checking the dest server status.
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
expire_quiescent_template(ipvs, dest)) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
ip_vs_proto_name(ct->protocol),
IP_VS_DBG_ADDR(ct->af, &ct->caddr),
ntohs(ct->cport),
IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
ntohs(ct->vport),
IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
ntohs(ct->dport));
/*
* Invalidate the connection template
*/
if (ct->vport != htons(0xffff)) {
if (ip_vs_conn_unhash(ct)) {
ct->dport = htons(0xffff);
ct->vport = htons(0xffff);
ct->cport = 0;
ip_vs_conn_hash(ct);
}
}
/*
* Simply decrease the refcnt of the template,
* don't restart its timer.
*/
__ip_vs_conn_put(ct);
return 0;
}
return 1;
}
static void ip_vs_conn_rcu_free(struct rcu_head *head)
{
struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
rcu_head);
ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
kmem_cache_free(ip_vs_conn_cachep, cp);
}
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
struct netns_ipvs *ipvs = cp->ipvs;
/*
* do I control anybody?
*/
if (atomic_read(&cp->n_control))
goto expire_later;
/* Unlink conn if not referenced anymore */
if (likely(ip_vs_conn_unlink(cp))) {
/* delete the timer if it is activated by other users */
del_timer(&cp->timer);
/* does anybody control me? */
if (cp->control)
ip_vs_control_del(cp);
if (cp->flags & IP_VS_CONN_F_NFCT) {
/* Do not access conntracks during subsys cleanup
* because nf_conntrack_find_get can not be used after
* conntrack cleanup for the net.
*/
smp_rmb();
if (ipvs->enable)
ip_vs_conn_drop_conntrack(cp);
}
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
atomic_dec(&ipvs->conn_count);
return;
}
expire_later:
IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
atomic_read(&cp->refcnt),
atomic_read(&cp->n_control));
atomic_inc(&cp->refcnt);
cp->timeout = 60*HZ;
if (ipvs->sync_state & IP_VS_STATE_MASTER)
ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
ip_vs_conn_put(cp);
}
/* Modify timer, so that it expires as soon as possible.
* Can be called without reference only if under RCU lock.
*/
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
/* Using mod_timer_pending will ensure the timer is not
* modified after the final del_timer in ip_vs_conn_expire.
*/
if (timer_pending(&cp->timer) &&
time_after(cp->timer.expires, jiffies))
mod_timer_pending(&cp->timer, jiffies);
}
/*
* Create a new connection entry and hash it into the ip_vs_conn_tab
*/
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = p->ipvs;
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
p->protocol);
cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NULL;
}
INIT_HLIST_NODE(&cp->c_list);
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
cp->ipvs = ipvs;
cp->af = p->af;
cp->daf = dest_af;
cp->protocol = p->protocol;
ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
/* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
&cp->vaddr, p->vaddr);
cp->vport = p->vport;
ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
ip_vs_pe_get(p->pe);
cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
} else {
cp->pe = NULL;
cp->pe_data = NULL;
cp->pe_data_len = 0;
}
spin_lock_init(&cp->lock);
/*
* Mark the entry as referenced by the current thread before hashing
* it into the table, so that other threads running
* ip_vs_random_dropentry cannot drop this entry.
*/
atomic_set(&cp->refcnt, 1);
cp->control = NULL;
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
cp->packet_xmit = NULL;
cp->app = NULL;
cp->app_data = NULL;
/* reset struct ip_vs_seq */
cp->in_seq.delta = 0;
cp->out_seq.delta = 0;
atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
/* Bind the connection with a destination server */
cp->dest = NULL;
ip_vs_bind_dest(cp, dest);
/* Set its state and timeout */
cp->state = 0;
cp->old_state = 0;
cp->timeout = 3*HZ;
cp->sync_endtime = jiffies & ~3UL;
/* Bind its packet transmitter */
#ifdef CONFIG_IP_VS_IPV6
if (p->af == AF_INET6)
ip_vs_bind_xmit_v6(cp);
else
#endif
ip_vs_bind_xmit(cp);
if (unlikely(pd && atomic_read(&pd->appcnt)))
ip_vs_bind_app(cp, pd->pp);
/*
* Allow conntrack to be preserved. By default, conntrack
* is created and destroyed for every packet.
* Sometimes keeping conntrack can be useful for
* IP_VS_CONN_F_ONE_PACKET too.
*/
if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
/* Hash it in the ip_vs_conn_tab finally */
ip_vs_conn_hash(cp);
return cp;
}
/*
* /proc/net/ip_vs_conn entries
*/
#ifdef CONFIG_PROC_FS
struct ip_vs_iter_state {
struct seq_net_private p;
struct hlist_head *l;
};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
struct ip_vs_iter_state *iter = seq->private;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
/* __ip_vs_conn_get() is not needed by
* ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
*/
if (pos-- == 0) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
}
cond_resched_rcu();
}
return NULL;
}
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct ip_vs_iter_state *iter = seq->private;
iter->l = NULL;
rcu_read_lock();
return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *e;
struct hlist_head *l = iter->l;
int idx;
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
e = rcu_dereference(hlist_next_rcu(&cp->c_list));
if (e)
return hlist_entry(e, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
while (++idx < ip_vs_conn_tab_size) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
cond_resched_rcu();
}
iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
char dbuf[IP_VS_ADDRSTRLEN];
if (!net_eq(cp->ipvs->net, net))
return 0;
if (cp->pe_data) {
pe_data[0] = ' ';
len = strlen(cp->pe->name);
memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
#ifdef CONFIG_IP_VS_IPV6
if (cp->daf == AF_INET6)
snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
else
#endif
snprintf(dbuf, sizeof(dbuf), "%08X",
ntohl(cp->daddr.ip));
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
"%s %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X"
" %s %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
}
return 0;
}
static const struct seq_operations ip_vs_conn_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_seq_show,
};
static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_fops = {
.owner = THIS_MODULE,
.open = ip_vs_conn_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static const char *ip_vs_origin_name(unsigned int flags)
{
if (flags & IP_VS_CONN_F_SYNC)
return "SYNC";
else
return "LOCAL";
}
static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
char dbuf[IP_VS_ADDRSTRLEN];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
if (!net_eq(cp->ipvs->net, net))
return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->daf == AF_INET6)
snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
else
#endif
snprintf(dbuf, sizeof(dbuf), "%08X",
ntohl(cp->daddr.ip));
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
"%s %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X "
"%s %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
}
return 0;
}
static const struct seq_operations ip_vs_conn_sync_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_sync_seq_show,
};
static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_sync_fops = {
.owner = THIS_MODULE,
.open = ip_vs_conn_sync_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
/*
* Randomly drop connection entries before running out of memory
*/
static inline int todrop_entry(struct ip_vs_conn *cp)
{
/*
* The drop rate array needs tuning for real environments.
* Called from timer bh only => no locking
*/
static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
static char todrop_counter[9] = {0};
int i;
/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
This will leave enough time for normal connections to get
through. */
if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
return 0;
/* Don't drop the entry if its number of incoming packets is not
located in [0, 8] */
i = atomic_read(&cp->in_pkts);
if (i > 8 || i < 0) return 0;
if (!todrop_rate[i]) return 0;
if (--todrop_counter[i] > 0) return 0;
todrop_counter[i] = todrop_rate[i];
return 1;
}
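/*
 * Worked example (illustrative): todrop_rate[i] == i, so an entry with
 * in_pkts == 1 is dropped on every eligible pass, one with in_pkts == 8
 * roughly once per 8 passes (todrop_counter[8] counts down from 8), and
 * one with in_pkts == 0 is never dropped.
 */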
/* Called from keventd and must protect itself from softirqs */
void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
{
int idx;
struct ip_vs_conn *cp, *cp_c;
rcu_read_lock();
/*
* Randomly scan 1/32 of the whole table every second
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
if (cp->ipvs != ipvs)
continue;
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
case IP_VS_TCP_S_SYNACK:
break;
case IP_VS_TCP_S_ESTABLISHED:
if (todrop_entry(cp))
break;
continue;
default:
continue;
}
} else if (cp->protocol == IPPROTO_SCTP) {
switch (cp->state) {
case IP_VS_SCTP_S_INIT1:
case IP_VS_SCTP_S_INIT:
break;
case IP_VS_SCTP_S_ESTABLISHED:
if (todrop_entry(cp))
break;
continue;
default:
continue;
}
} else {
if (!todrop_entry(cp))
continue;
}
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
cp_c = cp->control;
/* cp->control is valid only with reference to cp */
if (cp_c && __ip_vs_conn_get(cp)) {
IP_VS_DBG(4, "del conn template\n");
ip_vs_conn_expire_now(cp_c);
__ip_vs_conn_put(cp);
}
}
cond_resched_rcu();
}
rcu_read_unlock();
}
/*
* Flush all the connection entries in the ip_vs_conn_tab
*/
static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
{
int idx;
struct ip_vs_conn *cp, *cp_c;
flush_again:
rcu_read_lock();
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
if (cp->ipvs != ipvs)
continue;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
cp_c = cp->control;
/* cp->control is valid only with reference to cp */
if (cp_c && __ip_vs_conn_get(cp)) {
IP_VS_DBG(4, "del conn template\n");
ip_vs_conn_expire_now(cp_c);
__ip_vs_conn_put(cp);
}
}
cond_resched_rcu();
}
rcu_read_unlock();
/* the counter may not be zero, because some conn entries may still
be run by slow timer handlers or be unhashed but still referenced */
if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
/*
* per netns init and exit
*/
int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
{
atomic_set(&ipvs->conn_count, 0);
proc_create("ip_vs_conn", 0, ipvs->net->proc_net, &ip_vs_conn_fops);
proc_create("ip_vs_conn_sync", 0, ipvs->net->proc_net,
&ip_vs_conn_sync_fops);
return 0;
}
void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
{
/* flush all the connection entries first */
ip_vs_conn_flush(ipvs);
remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
}
int __init ip_vs_conn_init(void)
{
int idx;
/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
/*
* Allocate the connection hash table and initialize its list heads
*/
ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
if (!ip_vs_conn_tab)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
sizeof(struct ip_vs_conn), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ip_vs_conn_cachep) {
vfree(ip_vs_conn_tab);
return -ENOMEM;
}
pr_info("Connection hash table configured "
"(size=%d, memory=%ldKbytes)\n",
ip_vs_conn_tab_size,
(long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));
for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
return 0;
}
void ip_vs_conn_cleanup(void)
{
/* Wait for all ip_vs_conn_rcu_free() callbacks to complete */
rcu_barrier();
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
vfree(ip_vs_conn_tab);
}
| gpl-2.0 |
Jackeagle/android_kernel_sm_g800h_kk | drivers/sensorhub/stm_hestia/factory/temphumidity_shtc1.c | 489 | 19233 | /*
* Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "../ssp.h"
#include <linux/qpnp/qpnp-adc.h>
/*************************************************************************/
/* factory Sysfs */
/*************************************************************************/
#define VENDOR "SENSIRION"
#define CHIP_ID "SHTC1"
#define DONE_CAL 3
#define SHTC1_IOCTL_MAGIC 0xFB
#define IOCTL_READ_COMPLETE _IOR(SHTC1_IOCTL_MAGIC, 0x01, unsigned short *)
#define IOCTL_READ_ADC_BATT_DATA _IOR(SHTC1_IOCTL_MAGIC, 0x02, unsigned short *)
#define IOCTL_READ_ADC_CHG_DATA _IOR(SHTC1_IOCTL_MAGIC, 0x03, unsigned short *)
#define IOCTL_READ_THM_SHTC1_DATA _IOR(SHTC1_IOCTL_MAGIC, 0x04, short *)
#define IOCTL_READ_HUM_SHTC1_DATA _IOR(SHTC1_IOCTL_MAGIC, 0x05, unsigned short *)
#define IOCTL_READ_THM_BARO_DATA _IOR(SHTC1_IOCTL_MAGIC, 0x06, unsigned short *)
#define IOCTL_READ_THM_GYRO_DATA _IOR(SHTC1_IOCTL_MAGIC, 0x07, unsigned short *)
#if defined(CONFIG_MACH_KLTE_EUR)
#define MODEL_NAME "SM-G900F"
#elif defined(CONFIG_MACH_KLTE_ATT)
#define MODEL_NAME "SM-G900A"
#elif defined(CONFIG_MACH_KLTE_SPR)
#define MODEL_NAME "SM-G900P"
#elif defined(CONFIG_MACH_KLTE_TMO)
#define MODEL_NAME "SM-G900T"
#elif defined(CONFIG_MACH_KLTE_USC)
#define MODEL_NAME "SM-G900R4"
#elif defined(CONFIG_MACH_KLTE_VZW)
#define MODEL_NAME "SM-G900V"
#elif defined(CONFIG_MACH_KLTE_DCM)
#define MODEL_NAME "SM-G900D"
#elif defined(CONFIG_MACH_KLTE_KDI)
#define MODEL_NAME "SM-G900J"
#elif defined(CONFIG_MACH_KLTE_SBM)
#define MODEL_NAME "SM-G900Z"
#elif defined(CONFIG_MACH_KLTE_CMCC)
#define MODEL_NAME "SM-G9008V"
#elif defined(CONFIG_MACH_KLTE_CMCCDUOS)
#define MODEL_NAME "SM-G9008W"
#elif defined(CONFIG_MACH_KLTE_CU)
#define MODEL_NAME "SM-G9006V"
#elif defined(CONFIG_MACH_KLTE_CUDUOS)
#define MODEL_NAME "SM-G9006W"
#elif defined(CONFIG_MACH_KLTE_CTC)
#define MODEL_NAME "SM-G9009W"
#elif defined(CONFIG_MACH_K3GDUOS_CTC)
#define MODEL_NAME "SM-G9009D"
#elif defined(CONFIG_MACH_KLTE_CAN)
#define MODEL_NAME "SM-G900W8"
#elif defined(CONFIG_MACH_KLTE_SKT)
#define MODEL_NAME "SM-G900S"
#elif defined(CONFIG_MACH_KLTE_KTT)
#define MODEL_NAME "SM-G900K"
#elif defined(CONFIG_MACH_KLTE_LGT)
#define MODEL_NAME "SM-G900L"
#else
#define MODEL_NAME "SM-G900"
#endif
static struct cp_thm_adc_table temp_table_batt[] = {
{636, 600}, {659, 590}, {683, 580}, {707, 570}, {730, 560},
{754, 550}, {782, 540}, {810, 530}, {838, 520}, {866, 510},
{894, 500}, {924, 490}, {953, 480}, {982, 470}, {1011, 460},
{1040, 450}, {1077, 440}, {1114, 430}, {1152, 420}, {1189, 410},
{1227, 400}, {1259, 390}, {1293, 380}, {1326, 370}, {1360, 360},
{1394, 350}, {1435, 340}, {1476, 330}, {1516, 320}, {1557, 310},
{1598, 300}, {1642, 290}, {1687, 280}, {1731, 270}, {1776, 260},
{1820, 250}, {1866, 240}, {1913, 230}, {1961, 220}, {2008, 210},
{2055, 200}, {2111, 190}, {2166, 180}, {2222, 170}, {2277, 160},
{2333, 150}, {2382, 140}, {2431, 130}, {2480, 120}, {2529, 110},
{2578, 100}, {2624, 90}, {2670, 80}, {2717, 70}, {2763, 60},
{2810, 50}, {2851, 40}, {2892, 30}, {2933, 20}, {2973, 10},
{3014, 0}, {3056, -10}, {3099, -20}, {3142, -30}, {3185, -40},
{3229, -50}, {3320, -60}, {3336, -70}, {3352, -80}, {3368, -90},
{3385, -100}, {3494, -110}, {3509, -120}, {3524, -130}, {3539, -140},
{3554, -150}, {3614, -160}, {3629, -170}, {3644, -180}, {3659, -190},
{3674, -200}
};
static struct cp_thm_adc_table temp_table_chg[] = {
{636, 600}, {659, 590}, {682, 580}, {705, 570}, {728, 560},
{751, 550}, {779, 540}, {808, 530}, {837, 520}, {866, 510},
{895, 500}, {924, 490}, {953, 480}, {982, 470}, {1011, 460},
{1040, 450}, {1077, 440}, {1115, 430}, {1152, 420}, {1189, 410},
{1227, 400}, {1260, 390}, {1293, 380}, {1326, 370}, {1360, 360},
{1393, 350}, {1435, 340}, {1477, 330}, {1520, 320}, {1562, 310},
{1604, 300}, {1648, 290}, {1691, 280}, {1735, 270}, {1778, 260},
{1822, 250}, {1869, 240}, {1915, 230}, {1962, 220}, {2009, 210},
{2056, 200}, {2110, 190}, {2164, 180}, {2219, 170}, {2273, 160},
{2328, 150}, {2379, 140}, {2430, 130}, {2481, 120}, {2532, 110},
{2584, 100}, {2629, 90}, {2674, 80}, {2719, 70}, {2764, 60},
{2810, 50}, {2852, 40}, {2895, 30}, {2937, 20}, {2980, 10},
{3022, 0}, {3063, -10}, {3103, -20}, {3144, -30}, {3184, -40},
{3225, -50}, {3257, -60}, {3290, -70}, {3322, -80}, {3355, -90},
{3387, -100}, {3523, -110}, {3532, -120}, {3540, -130}, {3549, -140},
{3558, -150}, {3654, -160}, {3658, -170}, {3661, -180}, {3664, -190},
{3667, -200}
};
struct qpnp_vadc_chip *ssp_vadc;
static long ssp_temphumidity_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct ssp_data *data
= container_of(file->private_data,
struct ssp_data, shtc1_device);
void __user *argp = (void __user *)arg;
int retries = 2;
int length = 0;
int ret = 0;
if (data->bulk_buffer == NULL)
return -EINVAL;
length = data->bulk_buffer->len;
mutex_lock(&data->bulk_temp_read_lock);
switch (cmd) {
case IOCTL_READ_COMPLETE: /* free */
if(data->bulk_buffer) {
kfree(data->bulk_buffer);
data->bulk_buffer = NULL;
}
length = 1;
break;
case IOCTL_READ_ADC_BATT_DATA:
while (retries--) {
ret = copy_to_user(argp,
data->bulk_buffer->batt,
data->bulk_buffer->len*2);
if (likely(!ret))
break;
}
if (unlikely(ret)) {
pr_err("[SSP] read bluk adc1 data err(%d)", ret);
goto ioctl_error;
}
break;
case IOCTL_READ_ADC_CHG_DATA:
while (retries--) {
ret = copy_to_user(argp,
data->bulk_buffer->chg,
data->bulk_buffer->len*2);
if (likely(!ret))
break;
}
if (unlikely(ret)) {
pr_err("[SSP] read bluk adc1 data err(%d)", ret);
goto ioctl_error;
}
break;
case IOCTL_READ_THM_SHTC1_DATA:
while (retries--) {
ret = copy_to_user(argp,
data->bulk_buffer->temp,
data->bulk_buffer->len*2);
if (likely(!ret))
break;
}
if (unlikely(ret)) {
pr_err("[SSP] read bluk adc1 data err(%d)", ret);
goto ioctl_error;
}
break;
case IOCTL_READ_HUM_SHTC1_DATA:
while (retries--) {
ret = copy_to_user(argp,
data->bulk_buffer->humidity,
data->bulk_buffer->len*2);
if (likely(!ret))
break;
}
if (unlikely(ret)) {
pr_err("[SSP] read bluk adc1 data err(%d)", ret);
goto ioctl_error;
}
break;
case IOCTL_READ_THM_BARO_DATA:
while (retries--) {
ret = copy_to_user(argp,
data->bulk_buffer->baro,
data->bulk_buffer->len*2);
if (likely(!ret))
break;
}
if (unlikely(ret)) {
pr_err("[SSP] read bluk adc1 data err(%d)", ret);
goto ioctl_error;
}
break;
case IOCTL_READ_THM_GYRO_DATA:
while (retries--) {
ret = copy_to_user(argp,
data->bulk_buffer->gyro,
data->bulk_buffer->len*2);
if (likely(!ret))
break;
}
if (unlikely(ret)) {
pr_err("[SSP] read bluk adc1 data err(%d)", ret);
goto ioctl_error;
}
break;
default:
pr_err("[SSP] temp ioctl cmd err(%d)", cmd);
ret = EINVAL;
goto ioctl_error;
}
mutex_unlock(&data->bulk_temp_read_lock);
return length;
ioctl_error:
mutex_unlock(&data->bulk_temp_read_lock);
return -ret;
}
static struct file_operations ssp_temphumidity_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.unlocked_ioctl = ssp_temphumidity_ioctl,
};
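/*
 * Illustrative userspace usage (the device node name is assumed from
 * the misc_register() call in initialize_temphumidity_factorytest()
 * below, and the buffer size is an assumed upper bound; error handling
 * omitted). On success the ioctl returns the number of buffered samples:
 *
 *	int fd = open("/dev/shtc1_sensor", O_RDONLY);
 *	unsigned short batt[512];
 *	int len = ioctl(fd, IOCTL_READ_ADC_BATT_DATA, batt);
 *	ioctl(fd, IOCTL_READ_COMPLETE, NULL);	(frees the kernel buffer)
 */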
static int get_cp_thm_value(struct ssp_data *data)
{
int err = 0;
struct qpnp_vadc_result results;
mutex_lock(&data->cp_temp_adc_lock);
err = qpnp_vadc_read(ssp_vadc, LR_MUX6_PU1_AMUX_THM3, &results);
mutex_unlock(&data->cp_temp_adc_lock);
if (err) {
pr_err("%s : error reading chn %d, rc = %d\n",
__func__, LR_MUX6_PU1_AMUX_THM3, err);
return err;
}
return results.adc_code;
}
static int get_cp_thm2_value(struct ssp_data *data)
{
int err = 0;
struct qpnp_vadc_result results;
mutex_lock(&data->cp_temp_adc_lock);
err = qpnp_vadc_read(ssp_vadc, LR_MUX8_PU1_AMUX_THM4, &results);
mutex_unlock(&data->cp_temp_adc_lock);
if (err) {
pr_err("%s : error reading chn %d, rc = %d\n",
__func__, LR_MUX8_PU1_AMUX_THM4, err);
return err;
}
return results.adc_code;
}
static int convert_adc_to_temp(struct ssp_data *data, unsigned int adc)
{
int err = 0;
struct qpnp_vadc_result results;
mutex_lock(&data->cp_temp_adc_lock);
err = qpnp_vadc_read(ssp_vadc, LR_MUX6_PU1_AMUX_THM3, &results);
mutex_unlock(&data->cp_temp_adc_lock);
if (err) {
pr_err("%s : error reading chn %d, rc = %d\n",
__func__, LR_MUX6_PU1_AMUX_THM3, err);
return err;
}
return results.physical * 10;
}
static int convert_adc_to_temp2(struct ssp_data *data, unsigned int adc)
{
int err = 0;
struct qpnp_vadc_result results;
mutex_lock(&data->cp_temp_adc_lock);
err = qpnp_vadc_read(ssp_vadc, LR_MUX8_PU1_AMUX_THM4, &results);
mutex_unlock(&data->cp_temp_adc_lock);
if (err) {
pr_err("%s : error reading chn %d, rc = %d\n",
__func__, LR_MUX8_PU1_AMUX_THM4, err);
return err;
}
return results.physical * 10;
}
static ssize_t temphumidity_vendor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", VENDOR);
}
static ssize_t temphumidity_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", CHIP_ID);
}
static ssize_t engine_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
pr_info("[SSP] %s - engine_ver = %s_%s\n",
__func__, MODEL_NAME, data->comp_engine_ver);
return sprintf(buf, "%s_%s\n",
MODEL_NAME, data->comp_engine_ver);
}
static ssize_t engine_version_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct ssp_data *data = dev_get_drvdata(dev);
kfree(data->comp_engine_ver);
data->comp_engine_ver =
kzalloc(((strlen(buf)+1) * sizeof(char)), GFP_KERNEL);
strncpy(data->comp_engine_ver, buf, strlen(buf)+1);
pr_info("[SSP] %s - engine_ver = %s, %s\n",
__func__, data->comp_engine_ver, buf);
return size;
}
static ssize_t engine_version2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
pr_info("[SSP] %s - engine_ver2 = %s_%s\n",
__func__, MODEL_NAME, data->comp_engine_ver2);
return sprintf(buf, "%s_%s\n",
MODEL_NAME, data->comp_engine_ver2);
}
static ssize_t engine_version2_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct ssp_data *data = dev_get_drvdata(dev);
kfree(data->comp_engine_ver2);
data->comp_engine_ver2 =
kzalloc(((strlen(buf)+1) * sizeof(char)), GFP_KERNEL);
strncpy(data->comp_engine_ver2, buf, strlen(buf)+1);
pr_info("[SSP] %s - engine_ver2 = %s, %s\n",
__func__, data->comp_engine_ver2, buf);
return size;
}
static ssize_t pam_adc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
int adc = 0;
if (data->bSspShutdown == true) {
adc = 0;
goto exit;
}
adc = get_cp_thm_value(data);
/* pr_info("[SSP] %s cp_thm = %dmV\n", __func__, adc); */
exit:
return sprintf(buf, "%d\n", adc);
}
static ssize_t pam_adc2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
int adc;
if (data->bSspShutdown == true) {
adc = 0;
goto exit;
}
adc = get_cp_thm2_value(data);
/* pr_info("[SSP] %s cp_thm = %dmV\n", __func__, adc); */
exit:
return sprintf(buf, "%d\n", adc);
}
static ssize_t pam_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
int adc, temp;
adc = get_cp_thm_value(data);
if (adc < 0) {
pr_err("[SSP] %s - reading adc failed.(%d)\n", __func__, adc);
temp = adc;
} else {
temp = convert_adc_to_temp(data, adc);
}
pr_info("[SSP] %s cp_temperature(Celsius * 10) = %d\n",
__func__, temp);
return sprintf(buf, "%d\n", temp);
}
static ssize_t pam_temp2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
int adc, temp;
adc = get_cp_thm2_value(data);
if (adc < 0) {
pr_err("[SSP] %s - reading adc failed.(%d)\n", __func__, adc);
temp = adc;
} else {
temp = convert_adc_to_temp2(data, adc);
}
pr_info("[SSP] %s cp_temperature(Celsius * 10) = %d\n",
__func__, temp);
return sprintf(buf, "%d\n", temp);
}
s16 get_hub_adc(struct ssp_data *data, u32 chan)
{
s16 adc = -1;
int iRet = 0;
struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
msg->cmd = MSG2SSP_AP_GET_THERM;
msg->length = 2;
msg->options = AP2HUB_READ;
msg->data = chan;
msg->buffer = (char *) &adc;
msg->free_buffer = 0;
iRet = ssp_spi_sync(data, msg, 1000);
if (iRet != SUCCESS) {
pr_err("[SSP]: %s - i2c fail %d\n", __func__, iRet);
iRet = ERROR;
}
return adc;
}
static ssize_t hub_batt_adc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
static s16 prev_adc = 1865;
s16 adc;
if (data->bSspShutdown == true){
adc = 0;
goto exit;
}
adc = get_hub_adc(data, ADC_BATT);
if (adc > 0)
prev_adc = adc;
else
adc = prev_adc;
pr_info("[SSP]: %s: adc %d\n", __func__, adc);
exit:
return sprintf(buf, "%d\n", adc);
}
static ssize_t hub_chg_adc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
static s16 prev_adc = 1630;
s16 adc;
if (data->bSspShutdown == true){
adc = 0;
goto exit;
}
adc = get_hub_adc(data, ADC_CHG);
if (adc > 0)
prev_adc = adc;
else
adc = prev_adc;
pr_info("[SSP]: %s: adc %d\n", __func__, adc);
exit:
return sprintf(buf, "%d\n", adc);
}
static ssize_t hub_batt_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
int low = 0;
int high = 0;
int mid = 0;
u8 array_size = ARRAY_SIZE(temp_table_batt);
s16 adc = get_hub_adc(data, ADC_BATT);
high = array_size - 1;
while (low <= high) {
mid = (low + high) / 2;
if (temp_table_batt[mid].adc > adc)
high = mid - 1;
else if (temp_table_batt[mid].adc < adc)
low = mid + 1;
else
break;
}
pr_info("[SSP]: %s: adc %d\n", __func__, temp_table_batt[mid].temperature);
return sprintf(buf, "%d\n", temp_table_batt[mid].temperature);
}
static ssize_t hub_chg_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
int low = 0;
int high = 0;
int mid = 0;
u8 array_size = ARRAY_SIZE(temp_table_chg);
s16 adc = get_hub_adc(data, ADC_CHG);
high = array_size - 1;
while (low <= high) {
mid = (low + high) / 2;
if (temp_table_chg[mid].adc > adc)
high = mid - 1;
else if (temp_table_chg[mid].adc < adc)
low = mid + 1;
else
break;
}
pr_info("[SSP]: %s: adc %d\n", __func__, temp_table_chg[mid].temperature);
return sprintf(buf, "%d\n", temp_table_chg[mid].temperature);
}
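/*
 * Design note: hub_batt_temp_show() and hub_chg_temp_show() above
 * duplicate the same binary search over an ADC table; a shared helper
 * (illustrative sketch, not part of the original driver) could be:
 *
 *	static int adc_to_temp(const struct cp_thm_adc_table *table,
 *			       int size, s16 adc)
 *	{
 *		int low = 0, high = size - 1, mid = 0;
 *
 *		while (low <= high) {
 *			mid = (low + high) / 2;
 *			if (table[mid].adc > adc)
 *				high = mid - 1;
 *			else if (table[mid].adc < adc)
 *				low = mid + 1;
 *			else
 *				break;
 *		}
 *		return table[mid].temperature;
 *	}
 */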
static ssize_t temphumidity_crc_check(struct device *dev,
struct device_attribute *attr, char *buf)
{
char chTempBuf = 0xff;
int iRet = 0;
struct ssp_data *data = dev_get_drvdata(dev);
struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
msg->cmd = TEMPHUMIDITY_CRC_FACTORY;
msg->length = 1;
msg->options = AP2HUB_READ;
msg->buffer = &chTempBuf;
msg->free_buffer = 0;
iRet = ssp_spi_sync(data, msg, 1000);
if (iRet != SUCCESS) {
pr_err("[SSP]: %s - Temphumidity check crc Timeout!! %d\n", __func__,
iRet);
goto exit;
}
pr_info("[SSP] : %s -Check_CRC : %d\n", __func__,
chTempBuf);
exit:
if (chTempBuf == 1)
return sprintf(buf, "%s\n", "OK");
else if (chTempBuf == 2)
return sprintf(buf, "%s\n", "NG_NC");
else
return sprintf(buf, "%s\n", "NG");
}
ssize_t temphumidity_send_accuracy(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct ssp_data *data = dev_get_drvdata(dev);
u8 accuracy;
if (kstrtou8(buf, 10, &accuracy) < 0) {
pr_err("[SSP] %s - read buf is fail(%s)\n", __func__, buf);
return size;
}
if (accuracy == DONE_CAL)
ssp_send_cmd(data, MSG2SSP_AP_TEMPHUMIDITY_CAL_DONE, 0);
pr_info("[SSP] %s - accuracy = %d\n", __func__, accuracy);
return size;
}
static DEVICE_ATTR(name, S_IRUGO, temphumidity_name_show, NULL);
static DEVICE_ATTR(vendor, S_IRUGO, temphumidity_vendor_show, NULL);
static DEVICE_ATTR(engine_ver, S_IRUGO | S_IWUSR | S_IWGRP,
engine_version_show, engine_version_store);
static DEVICE_ATTR(engine_ver2, S_IRUGO | S_IWUSR | S_IWGRP,
engine_version2_show, engine_version2_store);
static DEVICE_ATTR(cp_thm, S_IRUGO, pam_adc_show, NULL);
static DEVICE_ATTR(cp_thm2, S_IRUGO, pam_adc2_show, NULL);
static DEVICE_ATTR(cp_temperature, S_IRUGO, pam_temp_show, NULL);
static DEVICE_ATTR(cp_temperature2, S_IRUGO, pam_temp2_show, NULL);
static DEVICE_ATTR(mcu_batt_adc, S_IRUGO, hub_batt_adc_show, NULL);
static DEVICE_ATTR(mcu_chg_adc, S_IRUGO, hub_chg_adc_show, NULL);
static DEVICE_ATTR(batt_temperature, S_IRUGO, hub_batt_temp_show, NULL);
static DEVICE_ATTR(chg_temperature, S_IRUGO, hub_chg_temp_show, NULL);
static DEVICE_ATTR(crc_check, S_IRUGO,
temphumidity_crc_check, NULL);
static DEVICE_ATTR(send_accuracy, S_IWUSR | S_IWGRP,
NULL, temphumidity_send_accuracy);
static struct device_attribute *temphumidity_attrs[] = {
&dev_attr_name,
&dev_attr_vendor,
&dev_attr_engine_ver,
&dev_attr_engine_ver2,
&dev_attr_cp_thm,
&dev_attr_cp_thm2,
&dev_attr_cp_temperature,
&dev_attr_cp_temperature2,
&dev_attr_mcu_batt_adc,
&dev_attr_mcu_chg_adc,
&dev_attr_batt_temperature,
&dev_attr_chg_temperature,
&dev_attr_crc_check,
&dev_attr_send_accuracy,
NULL,
};
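/*
 * Illustrative sysfs usage (the class path is an assumption based on
 * the "temphumidity_sensor" name passed to sensors_register() below):
 *
 *	cat /sys/class/sensors/temphumidity_sensor/batt_temperature
 */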
void initialize_temphumidity_factorytest(struct ssp_data *data)
{
int ret;
sensors_register(data->temphumidity_device,
data, temphumidity_attrs, "temphumidity_sensor");
data->shtc1_device.minor = MISC_DYNAMIC_MINOR;
data->shtc1_device.name = "shtc1_sensor";
data->shtc1_device.fops = &ssp_temphumidity_fops;
ret = misc_register(&data->shtc1_device);
if (ret < 0) {
pr_err("register temphumidity misc device err(%d)", ret);
}
ssp_vadc = qpnp_get_vadc(&data->spi->dev, "temphumidity_sensor");
if (IS_ERR(ssp_vadc)) {
ret = PTR_ERR(ssp_vadc);
if (ret != -EPROBE_DEFER)
pr_err("%s: Fail to get vadc %d\n", __func__, ret);
}
}
void remove_temphumidity_factorytest(struct ssp_data *data)
{
if (data->comp_engine_ver != NULL)
kfree(data->comp_engine_ver);
if (data->comp_engine_ver2 != NULL)
kfree(data->comp_engine_ver2);
sensors_unregister(data->temphumidity_device, temphumidity_attrs);
ssp_temphumidity_fops.unlocked_ioctl = NULL;
misc_deregister(&data->shtc1_device);
}
| gpl-2.0 |
StefanescuCristian/ubuntu-bfsq | drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 745 | 14456 | /*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include "kfd_priv.h"
#include <linux/mm.h>
#include <linux/mman.h>
#include <asm/processor.h>
/*
* The primary memory I/O features being added for revisions of gfxip
* beyond 7.0 (Kaveri) are:
*
* Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
*
* “Flat” shader memory access – These are new shader vector memory
* operations that do not reference a T#/V# so a “pointer” is what is
* sourced from the vector gprs for direct access to memory.
* This pointer space has the Shared(LDS) and Private(Scratch) memory
* mapped into this pointer space as apertures.
* The hardware then determines how to direct the memory request
* based on what apertures the request falls in.
*
* Unaligned support and alignment check
*
*
* System Unified Address - SUA
*
* The standard usage for GPU virtual addresses are that they are mapped by
* a set of page tables we call GPUVM and these page tables are managed by
* a combination of vidMM/driver software components. The current virtual
* address (VA) range for GPUVM is 40b.
*
* As of gfxip7.1 and beyond we’re adding the ability for compute memory
* clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
* the same page tables used by host x86 processors and that are managed by
* the operating system. This is via a technique and hardware called ATC/IOMMU.
* The GPU has the capability of accessing both the GPUVM and ATC address
* spaces for a given VMID (process) simultaneously and we call this feature
* system unified address (SUA).
*
* There are three fundamental address modes of operation for a given VMID
* (process) on the GPU:
*
* HSA64 – 64b pointers and the default address space is ATC
* HSA32 – 32b pointers and the default address space is ATC
* GPUVM – 64b pointers and the default address space is GPUVM (driver
* model mode)
*
*
* HSA64 - ATC/IOMMU 64b
*
* A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
* by the CPU so an AMD CPU can only access the high area
* (VA[63:47] == 0x1FFFF) and low area (VA[63:47] == 0) of the address space
* so the actual VA carried to translation is 48b. There is a “hole” in
* the middle of the 64b VA space.
*
* The GPU not only has access to all of the CPU accessible address space via
* ATC/IOMMU, but it also has access to the GPUVM address space. The “system
* unified address” feature (SUA) is the mapping of GPUVM and ATC address
* spaces into a unified pointer space. The method we take for 64b mode is
* to map the full 40b GPUVM address space into the hole of the 64b address
* space.
* The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
* direct requests to be translated via GPUVM page tables instead of the
* IOMMU path.
*
*
* 64b to 49b Address conversion
*
* Note that there are still significant portions of unused regions (holes)
* in the 64b address space even for the GPU. There are several places in
* the pipeline (sw and hw), we wish to compress the 64b virtual address
* to a 49b address. This 49b address is constituted of an “ATC” bit
* plus a 48b virtual address. This 49b address is what is passed to the
* translation hardware. ATC==0 means the 48b address is a GPUVM address
* (max of 2^40 - 1) intended to be translated via GPUVM page tables.
* ATC==1 means the 48b address is intended to be translated via IOMMU
* page tables.
*
* A 64b pointer is compared to the apertures that are defined (Base/Limit), in
* this case the GPUVM aperture (red) is defined and if a pointer falls in this
* aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
* as part of the 64b to 49b conversion.
*
* Where this 64b to 49b conversion is done is a function of the usage.
* Most GPU memory access is via memory objects where the driver builds
* a descriptor which consists of a base address and a memory access by
* the GPU usually consists of some kind of an offset or Cartesian coordinate
* that references this memory descriptor. This is the case for shader
* instructions that reference the T# or V# constants, or for specified
* locations of assets (ex. the shader program location). In these cases
* the driver is what handles the 64b to 49b conversion and the base
* address in the descriptor (ex. V# or T# or shader program location)
* is defined as a 48b address w/ an ATC bit. For this usage a given
* memory object cannot straddle multiple apertures in the 64b address
* space. For example a shader program cannot jump in/out between ATC
* and GPUVM space.
*
* In some cases we wish to pass a 64b pointer to the GPU hardware and
* the GPU hw does the 64b to 49b conversion before passing memory
* requests to the cache/memory system. This is the case for the
* S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
* in scalar and vector GPRs respectively.
*
* In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
* hardware sends a 48b address along w/ an ATC bit, to the memory controller
* on the memory request interfaces.
*
* <client>_MC_rdreq_atc // read request ATC bit
*
* 0 : <client>_MC_rdreq_addr is a GPUVM VA
*
* 1 : <client>_MC_rdreq_addr is a ATC VA
*
*
* “Spare” aperture (APE1)
*
* We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
* apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
* config tables for setting cache policies. The “spare” (APE1) aperture is
* motivated by getting a different Mtype from the default.
* The default aperture isn’t an actual base/limit aperture; it is just the
* address space that doesn’t hit any defined base/limit apertures.
* The following diagram is a complete picture of the gfxip7.x SUA apertures.
* The APE1 can be placed either below or above
* the hole (cannot be in the hole).
*
*
* General Aperture definitions and rules
*
* An aperture register definition consists of a Base, Limit, Mtype, and
* usually an ATC bit indicating which translation tables that aperture uses.
* In all cases (for SUA and DUA apertures discussed later), aperture base
* and limit definitions are 64KB aligned.
*
* <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
*
* <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
*
* The base and limit are considered inclusive to an aperture so being
* inside an aperture means (address >= Base) AND (address <= Limit).
*
* In no case is a payload that straddles multiple apertures expected to work.
* For example a load_dword_x4 that starts in one aperture and ends in another,
* does not work. For the vector FLAT_* ops we have detection capability in
* the shader for reporting a “memory violation” back to the
* SQ block for use in traps.
* A memory violation results when an op falls into the hole,
* or a payload straddles multiple apertures. The S_LOAD instruction
* does not have this detection.
*
* Apertures cannot overlap.
*
*
*
* HSA32 - ATC/IOMMU 32b
*
* For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
* instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
* will not fit so there is only partial visibility to the GPUVM
* space (defined by the aperture) for S_LOAD and FLAT_* ops.
* There is no spare (APE1) aperture for HSA32 mode.
*
*
* GPUVM 64b mode (driver model)
*
* This mode is related to HSA64 in that the difference really is that
* the default aperture is GPUVM (ATC==0) and not ATC space.
* We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
* SUA GPUVM mode, but does not support HSA32/HSA64.
*
*
* Device Unified Address - DUA
*
* Device unified address (DUA) is the name of the feature that maps the
* Shared(LDS) memory and Private(Scratch) memory into the overall address
* space for use by the new FLAT_* vector memory ops. The Shared and
* Private memories are mapped as apertures into the address space,
* and the hardware detects when a FLAT_* memory request is to be redirected
* to the LDS or Scratch memory when it falls into one of these apertures.
* Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
* the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
* the Shared/Private apertures are always placed in a limited selection of
* options in the hole of the 64b address space. For HSA32 mode, the
* Shared/Private apertures can be placed anywhere in the 32b space
* except at 0.
*
*
* HSA64 Apertures for FLAT_* vector ops
*
* For HSA64 SUA mode, the Shared and Private apertures are always placed
* in the hole w/ a limited selection of possible locations. The requests
* that fall in the private aperture are expanded as a function of the
* work-item id (tid) and redirected to the location of the
* “hidden private memory”. The hidden private can be placed in either GPUVM
* or ATC space. The addresses that fall in the shared aperture are
* re-directed to the on-chip LDS memory hardware.
*
*
* HSA32 Apertures for FLAT_* vector ops
*
* In HSA32 mode, the Private and Shared apertures can be placed anywhere
* in the 32b space except at 0 (Private or Shared Base at zero disables
* the apertures). If the base address of an aperture is non-zero
* (i.e. the aperture exists), its size is always 64KB.
*
*
* GPUVM Apertures for FLAT_* vector ops
*
* In GPUVM mode, the Shared/Private apertures are specified identically
* to HSA64 mode where they are always in the hole at a limited selection
* of locations.
*
*
* Aperture Definitions for SUA and DUA
*
* The interpretation of the aperture register definitions for a given
* VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
* GPUVM64 discussed in previous sections. The mode is first decoded, and
* then the remaining register decode is a function of the mode.
*
*
* SUA Mode Decode
*
* For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
* the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
* the SH_MEM_CONFIG:PTR32 bits.
*
* COMPUTE_DISPATCH_INITIATOR:DATA_ATC SH_MEM_CONFIG:PTR32 Mode
*
* 1 0 HSA64
*
* 1 1 HSA32
*
* 0 X GPUVM64
*
* In general the hardware will ignore the PTR32 bit and treat
* it as “0” whenever DATA_ATC = “0”, but sw should still set PTR32=0
* when DATA_ATC=0.
*
* The DATA_ATC bit is only set for compute dispatches.
* All “Draw” dispatches are hardcoded to GPUVM64 mode
* for FLAT_* / S_LOAD operations.
*/
#define MAKE_GPUVM_APP_BASE(gpu_num) \
(((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
#define MAKE_GPUVM_APP_LIMIT(base) \
(((uint64_t)(base) & \
0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
#define MAKE_SCRATCH_APP_BASE(gpu_num) \
(((uint64_t)(gpu_num) << 61) + 0x100000000L)
#define MAKE_SCRATCH_APP_LIMIT(base) \
(((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
#define MAKE_LDS_APP_BASE(gpu_num) \
(((uint64_t)(gpu_num) << 61) + 0x0)
#define MAKE_LDS_APP_LIMIT(base) \
(((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
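/*
 * Editor's illustrative sketch (not part of the original driver): for the
 * first GPU (gpu_num == 1, i.e. VA[63:61] == 001) the macros above produce
 * the 64b values noted below. The helper name is hypothetical and exists
 * only to show how the base/limit pairs compose.
 */
static inline void kfd_aperture_values_example(void)
{
	uint64_t lds_base = MAKE_LDS_APP_BASE(1);
	uint64_t lds_limit = MAKE_LDS_APP_LIMIT(lds_base);
	uint64_t gpuvm_base = MAKE_GPUVM_APP_BASE(1);
	uint64_t gpuvm_limit = MAKE_GPUVM_APP_LIMIT(gpuvm_base);

	/*
	 * lds_base   == 0x2000000000000000, lds_limit   == 0x20000000FFFFFFFF
	 * gpuvm_base == 0x2001000000000000, gpuvm_limit == 0x200100FFFFFFFFFF
	 * i.e. a 4GB LDS window and the full 40b GPUVM range, both placed
	 * inside the canonical-address hole.
	 */
	(void)lds_limit;
	(void)gpuvm_limit;
}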
int kfd_init_apertures(struct kfd_process *process)
{
uint8_t id = 0;
struct kfd_dev *dev;
struct kfd_process_device *pdd;
/* Iterate over all devices */
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_create_process_device_data(dev, process);
if (pdd == NULL) {
pr_err("Failed to create process device data\n");
return -1;
}
/*
* For a 64 bit process the aperture will be statically reserved in
* the x86_64 non-canonical process address space;
* amdkfd doesn't currently support apertures for 32 bit processes
*/
if (process->is_32bit_user_mode) {
pdd->lds_base = pdd->lds_limit = 0;
pdd->gpuvm_base = pdd->gpuvm_limit = 0;
pdd->scratch_base = pdd->scratch_limit = 0;
} else {
/*
* node id can't be 0 - the three MSB bits of
* the aperture shouldn't be 0
*/
pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
pdd->gpuvm_limit =
MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
pdd->scratch_limit =
MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
dev_dbg(kfd_device, "node id %u\n", id);
dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
id++;
}
return 0;
}
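/*
 * Editor's sketch of the 64b -> 49b conversion described in the comment
 * block above (hypothetical helper, assuming a single GPUVM aperture given
 * by base and limit): a pointer that falls inside the GPUVM aperture is
 * rebased and gets ATC == 0; everything else goes to the IOMMU path with
 * ATC == 1.
 */
static inline uint64_t make_49b_address_example(uint64_t va,
						uint64_t gpuvm_base,
						uint64_t gpuvm_limit)
{
	const uint64_t va48_mask = (1ULL << 48) - 1;

	if (va >= gpuvm_base && va <= gpuvm_limit)
		return (va - gpuvm_base) & va48_mask;	/* ATC = 0 */
	return (1ULL << 48) | (va & va48_mask);		/* ATC = 1 */
}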
| gpl-2.0 |
merbanan/nm88472 | drivers/staging/rtl8188eu/core/rtw_rf.c | 1769 | 2468 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _RTW_RF_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <recv_osdep.h>
#include <xmit_osdep.h>
struct ch_freq {
u32 channel;
u32 frequency;
};
static struct ch_freq ch_freq_map[] = {
{1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
{6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
{11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
/* UNII */
{36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
{56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
{157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
{171, 5855}, {173, 5865},
/* HiperLAN2 */
{100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
{120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
{140, 5700},
/* Japan MMAC */
{34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
/* Japan */
{184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
{208, 5040},/* Japan, means J08 */
{212, 5060},/* Japan, means J12 */
{216, 5080},/* Japan, means J16 */
};
static int ch_freq_map_num = ARRAY_SIZE(ch_freq_map);
u32 rtw_ch2freq(u32 channel)
{
u8 i;
u32 freq = 0;
for (i = 0; i < ch_freq_map_num; i++) {
if (channel == ch_freq_map[i].channel) {
freq = ch_freq_map[i].frequency;
break;
}
}
if (i == ch_freq_map_num)
freq = 2412;
return freq;
}
u32 rtw_freq2ch(u32 freq)
{
u8 i;
u32 ch = 0;
for (i = 0; i < ch_freq_map_num; i++) {
if (freq == ch_freq_map[i].frequency) {
ch = ch_freq_map[i].channel;
break;
}
}
if (i == ch_freq_map_num)
ch = 1;
return ch;
}
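/*
 * Editor's usage sketch (hypothetical, not part of the driver): the two
 * lookups above are inverses for any channel present in ch_freq_map and
 * fall back to 2412 MHz / channel 1 for unknown inputs.
 */
static void rtw_ch_freq_roundtrip_example(void)
{
	u32 freq = rtw_ch2freq(6);	/* 2437 MHz */
	u32 ch = rtw_freq2ch(freq);	/* back to channel 6 */
	u32 unknown = rtw_ch2freq(200);	/* not mapped -> 2412 MHz */

	(void)ch;
	(void)unknown;
}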
| gpl-2.0 |
TheBootloader/android_kernel_samsung_msm8930-common | fs/proc/inode.c | 3049 | 12271 | /*
* linux/fs/proc/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include "internal.h"
static void proc_evict_inode(struct inode *inode)
{
struct proc_dir_entry *de;
struct ctl_table_header *head;
const struct proc_ns_operations *ns_ops;
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
/* Stop tracking associated processes */
put_pid(PROC_I(inode)->pid);
/* Let go of any associated proc directory entry */
de = PROC_I(inode)->pde;
if (de)
pde_put(de);
head = PROC_I(inode)->sysctl;
if (head) {
rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
sysctl_head_put(head);
}
/* Release any associated namespace */
ns_ops = PROC_I(inode)->ns_ops;
if (ns_ops && ns_ops->put)
ns_ops->put(PROC_I(inode)->ns);
}
static struct kmem_cache * proc_inode_cachep;
static struct inode *proc_alloc_inode(struct super_block *sb)
{
struct proc_inode *ei;
struct inode *inode;
ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
ei->pid = NULL;
ei->fd = 0;
ei->op.proc_get_link = NULL;
ei->pde = NULL;
ei->sysctl = NULL;
ei->sysctl_entry = NULL;
ei->ns = NULL;
ei->ns_ops = NULL;
inode = &ei->vfs_inode;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
return inode;
}
static void proc_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
static void proc_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, proc_i_callback);
}
static void init_once(void *foo)
{
struct proc_inode *ei = (struct proc_inode *) foo;
inode_init_once(&ei->vfs_inode);
}
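/*
 * Editor's note: PROC_I() (from internal.h) is the usual container_of()
 * idiom over the embedded vfs_inode. A hypothetical restatement, assuming
 * struct proc_inode embeds its inode as the vfs_inode member:
 */
static inline struct proc_inode *proc_i_example(struct inode *inode)
{
	return container_of(inode, struct proc_inode, vfs_inode);
}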
void __init proc_init_inodecache(void)
{
proc_inode_cachep = kmem_cache_create("proc_inode_cache",
sizeof(struct proc_inode),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_PANIC),
init_once);
}
static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct pid_namespace *pid = sb->s_fs_info;
if (pid->pid_gid)
seq_printf(seq, ",gid=%lu", (unsigned long)pid->pid_gid);
if (pid->hide_pid != 0)
seq_printf(seq, ",hidepid=%u", pid->hide_pid);
return 0;
}
static const struct super_operations proc_sops = {
.alloc_inode = proc_alloc_inode,
.destroy_inode = proc_destroy_inode,
.drop_inode = generic_delete_inode,
.evict_inode = proc_evict_inode,
.statfs = simple_statfs,
.remount_fs = proc_remount,
.show_options = proc_show_options,
};
static void __pde_users_dec(struct proc_dir_entry *pde)
{
pde->pde_users--;
if (pde->pde_unload_completion && pde->pde_users == 0)
complete(pde->pde_unload_completion);
}
void pde_users_dec(struct proc_dir_entry *pde)
{
spin_lock(&pde->pde_unload_lock);
__pde_users_dec(pde);
spin_unlock(&pde->pde_unload_lock);
}
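/*
 * Editor's sketch: every proc_reg_* wrapper below follows the same
 * guarded-call pattern - take pde_unload_lock, bail out if ->proc_fops has
 * already been cleared by remove_proc_entry(), otherwise bump pde_users so
 * removal waits for us, drop the lock, call the method, and finish with
 * pde_users_dec(). A condensed, hypothetical form of the entry half:
 */
static inline int pde_use_start_example(struct proc_dir_entry *pde)
{
	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {			/* entry is being removed */
		spin_unlock(&pde->pde_unload_lock);
		return 0;
	}
	pde->pde_users++;			/* pin the entry */
	spin_unlock(&pde->pde_unload_lock);
	return 1;				/* caller must pde_users_dec() */
}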
static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
loff_t rv = -EINVAL;
loff_t (*llseek)(struct file *, loff_t, int);
spin_lock(&pde->pde_unload_lock);
/*
* remove_proc_entry() is going to delete PDE (as part of module
* cleanup sequence). No new callers into module allowed.
*/
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
/*
* Bump refcount so that remove_proc_entry will wait for ->llseek to
* complete.
*/
pde->pde_users++;
/*
* Save function pointer under lock, to protect against ->proc_fops
* NULL'ifying right after ->pde_unload_lock is dropped.
*/
llseek = pde->proc_fops->llseek;
spin_unlock(&pde->pde_unload_lock);
if (!llseek)
llseek = default_llseek;
rv = llseek(file, offset, whence);
pde_users_dec(pde);
return rv;
}
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
ssize_t rv = -EIO;
ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
read = pde->proc_fops->read;
spin_unlock(&pde->pde_unload_lock);
if (read)
rv = read(file, buf, count, ppos);
pde_users_dec(pde);
return rv;
}
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
ssize_t rv = -EIO;
ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
write = pde->proc_fops->write;
spin_unlock(&pde->pde_unload_lock);
if (write)
rv = write(file, buf, count, ppos);
pde_users_dec(pde);
return rv;
}
static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
unsigned int rv = DEFAULT_POLLMASK;
unsigned int (*poll)(struct file *, struct poll_table_struct *);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
poll = pde->proc_fops->poll;
spin_unlock(&pde->pde_unload_lock);
if (poll)
rv = poll(file, pts);
pde_users_dec(pde);
return rv;
}
static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
long rv = -ENOTTY;
long (*ioctl)(struct file *, unsigned int, unsigned long);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
ioctl = pde->proc_fops->unlocked_ioctl;
spin_unlock(&pde->pde_unload_lock);
if (ioctl)
rv = ioctl(file, cmd, arg);
pde_users_dec(pde);
return rv;
}
#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
long rv = -ENOTTY;
long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
compat_ioctl = pde->proc_fops->compat_ioctl;
spin_unlock(&pde->pde_unload_lock);
if (compat_ioctl)
rv = compat_ioctl(file, cmd, arg);
pde_users_dec(pde);
return rv;
}
#endif
static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
int rv = -EIO;
int (*mmap)(struct file *, struct vm_area_struct *);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
mmap = pde->proc_fops->mmap;
spin_unlock(&pde->pde_unload_lock);
if (mmap)
rv = mmap(file, vma);
pde_users_dec(pde);
return rv;
}
static int proc_reg_open(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
int (*open)(struct inode *, struct file *);
int (*release)(struct inode *, struct file *);
struct pde_opener *pdeo;
/*
* What for, you ask? Well, we can have open, rmmod, remove_proc_entry
* sequence. ->release won't be called because ->proc_fops will be
* cleared. Depending on complexity of ->release, consequences vary.
*
* We can't wait for mercy when close will be done for real, it's
* deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
* by hand in remove_proc_entry(). For this, save opener's credentials
* for later.
*/
pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
if (!pdeo)
return -ENOMEM;
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
spin_unlock(&pde->pde_unload_lock);
kfree(pdeo);
return -ENOENT;
}
pde->pde_users++;
open = pde->proc_fops->open;
release = pde->proc_fops->release;
spin_unlock(&pde->pde_unload_lock);
if (open)
rv = open(inode, file);
spin_lock(&pde->pde_unload_lock);
if (rv == 0 && release) {
/* To know what to release. */
pdeo->inode = inode;
pdeo->file = file;
/* Strictly for "too late" ->release in proc_reg_release(). */
pdeo->release = release;
list_add(&pdeo->lh, &pde->pde_openers);
} else
kfree(pdeo);
__pde_users_dec(pde);
spin_unlock(&pde->pde_unload_lock);
return rv;
}
static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde,
struct inode *inode, struct file *file)
{
struct pde_opener *pdeo;
list_for_each_entry(pdeo, &pde->pde_openers, lh) {
if (pdeo->inode == inode && pdeo->file == file)
return pdeo;
}
return NULL;
}
static int proc_reg_release(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
int (*release)(struct inode *, struct file *);
struct pde_opener *pdeo;
spin_lock(&pde->pde_unload_lock);
pdeo = find_pde_opener(pde, inode, file);
if (!pde->proc_fops) {
/*
* Can't simply exit, __fput() will think that everything is OK,
* and move on to freeing struct file. remove_proc_entry() will
* find slacker in opener's list and will try to do non-trivial
* things with struct file. Therefore, remove opener from list.
*
* But if opener is removed from list, who will ->release it?
*/
if (pdeo) {
list_del(&pdeo->lh);
spin_unlock(&pde->pde_unload_lock);
rv = pdeo->release(inode, file);
kfree(pdeo);
} else
spin_unlock(&pde->pde_unload_lock);
return rv;
}
pde->pde_users++;
release = pde->proc_fops->release;
if (pdeo) {
list_del(&pdeo->lh);
kfree(pdeo);
}
spin_unlock(&pde->pde_unload_lock);
if (release)
rv = release(inode, file);
pde_users_dec(pde);
return rv;
}
static const struct file_operations proc_reg_file_ops = {
.llseek = proc_reg_llseek,
.read = proc_reg_read,
.write = proc_reg_write,
.poll = proc_reg_poll,
.unlocked_ioctl = proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = proc_reg_compat_ioctl,
#endif
.mmap = proc_reg_mmap,
.open = proc_reg_open,
.release = proc_reg_release,
};
#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
.llseek = proc_reg_llseek,
.read = proc_reg_read,
.write = proc_reg_write,
.poll = proc_reg_poll,
.unlocked_ioctl = proc_reg_unlocked_ioctl,
.mmap = proc_reg_mmap,
.open = proc_reg_open,
.release = proc_reg_release,
};
#endif
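/*
 * Editor's note: proc_get_inode() below installs this no_compat variant
 * whenever a PDE's fops lacks ->compat_ioctl, so a 32-bit ioctl() on a
 * 64-bit kernel is rejected at the VFS layer instead of going through
 * proc_reg_compat_ioctl() only to find nothing to call.
 */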
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
struct inode * inode;
inode = iget_locked(sb, de->low_ino);
if (!inode)
return NULL;
if (inode->i_state & I_NEW) {
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
PROC_I(inode)->fd = 0;
PROC_I(inode)->pde = de;
if (de->mode) {
inode->i_mode = de->mode;
inode->i_uid = de->uid;
inode->i_gid = de->gid;
}
if (de->size)
inode->i_size = de->size;
if (de->nlink)
set_nlink(inode, de->nlink);
if (de->proc_iops)
inode->i_op = de->proc_iops;
if (de->proc_fops) {
if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
if (!de->proc_fops->compat_ioctl)
inode->i_fop =
&proc_reg_file_ops_no_compat;
else
#endif
inode->i_fop = &proc_reg_file_ops;
} else {
inode->i_fop = de->proc_fops;
}
}
unlock_new_inode(inode);
} else
pde_put(de);
return inode;
}
int proc_fill_super(struct super_block *s)
{
s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = PROC_SUPER_MAGIC;
s->s_op = &proc_sops;
s->s_time_gran = 1;
pde_get(&proc_root);
s->s_root = d_make_root(proc_get_inode(s, &proc_root));
if (s->s_root)
return 0;
printk("proc_read_super: get root inode failed\n");
pde_put(&proc_root);
return -ENOMEM;
}
| gpl-2.0 |
hypnos-android/Hypnos | drivers/isdn/i4l/isdnhdlc.c | 4841 | 15718 | /*
* isdnhdlc.c -- General purpose ISDN HDLC decoder.
*
* Copyright (C)
* 2009 Karsten Keil <keil@b1-systems.de>
* 2002 Wolfgang Mües <wolfgang@iksw-muees.de>
* 2001 Frode Isaksen <fisaksen@bewan.com>
* 2001 Kai Germaschewski <kai.germaschewski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crc-ccitt.h>
#include <linux/isdn/hdlc.h>
#include <linux/bitrev.h>
/*-------------------------------------------------------------------*/
MODULE_AUTHOR("Wolfgang Mües <wolfgang@iksw-muees.de>, "
"Frode Isaksen <fisaksen@bewan.com>, "
"Kai Germaschewski <kai.germaschewski@gmx.de>");
MODULE_DESCRIPTION("General purpose ISDN HDLC decoder");
MODULE_LICENSE("GPL");
/*-------------------------------------------------------------------*/
enum {
HDLC_FAST_IDLE, HDLC_GET_FLAG_B0, HDLC_GETFLAG_B1A6, HDLC_GETFLAG_B7,
HDLC_GET_DATA, HDLC_FAST_FLAG
};
enum {
HDLC_SEND_DATA, HDLC_SEND_CRC1, HDLC_SEND_FAST_FLAG,
HDLC_SEND_FIRST_FLAG, HDLC_SEND_CRC2, HDLC_SEND_CLOSING_FLAG,
HDLC_SEND_IDLE1, HDLC_SEND_FAST_IDLE, HDLC_SENDFLAG_B0,
HDLC_SENDFLAG_B1A6, HDLC_SENDFLAG_B7, STOPPED, HDLC_SENDFLAG_ONE
};
void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features)
{
memset(hdlc, 0, sizeof(struct isdnhdlc_vars));
hdlc->state = HDLC_GET_DATA;
if (features & HDLC_56KBIT)
hdlc->do_adapt56 = 1;
if (features & HDLC_BITREVERSE)
hdlc->do_bitreverse = 1;
}
EXPORT_SYMBOL(isdnhdlc_rcv_init);
void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features)
{
memset(hdlc, 0, sizeof(struct isdnhdlc_vars));
if (features & HDLC_DCHANNEL) {
hdlc->dchannel = 1;
hdlc->state = HDLC_SEND_FIRST_FLAG;
} else {
hdlc->dchannel = 0;
hdlc->state = HDLC_SEND_FAST_FLAG;
hdlc->ffvalue = 0x7e;
}
hdlc->cbin = 0x7e;
if (features & HDLC_56KBIT) {
hdlc->do_adapt56 = 1;
hdlc->state = HDLC_SENDFLAG_B0;
} else
hdlc->data_bits = 8;
if (features & HDLC_BITREVERSE)
hdlc->do_bitreverse = 1;
}
EXPORT_SYMBOL(isdnhdlc_out_init);
static int
check_frame(struct isdnhdlc_vars *hdlc)
{
int status;
if (hdlc->dstpos < 2) /* too small - framing error */
status = -HDLC_FRAMING_ERROR;
else if (hdlc->crc != 0xf0b8) /* crc error */
status = -HDLC_CRC_ERROR;
else {
/* remove CRC */
hdlc->dstpos -= 2;
/* good frame */
status = hdlc->dstpos;
}
return status;
}
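/*
 * Editor's note on the 0xf0b8 constant above: when the two FCS bytes are
 * fed through the CRC together with the data, a good HDLC frame leaves the
 * CRC-CCITT register at the fixed residue 0xf0b8. A hypothetical
 * standalone check over a complete frame (FCS included):
 */
static inline int hdlc_fcs_ok_example(const u8 *frame, size_t len)
{
	return crc_ccitt(0xffff, frame, len) == 0xf0b8;
}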
/*
isdnhdlc_decode - decodes HDLC frames from a transparent bit stream.
The source buffer is scanned for valid HDLC frames looking for
flags (01111110) to indicate the start of a frame. If the start of
the frame is found, the bit stuffing is removed (0 after 5 1's).
When a new flag is found, the complete frame has been received
and the CRC is checked.
If a valid frame is found, the function returns the frame length
excluding the CRC.
If only the beginning of a valid frame has been received so far, the
function returns 0 and resumes decoding on the next call.
If a framing error is found (too many 1s and not a flag) the function
returns -HDLC_FRAMING_ERROR.
If a CRC error is found the function returns -HDLC_CRC_ERROR.
If the frame length exceeds the destination buffer size, the function
returns -HDLC_LENGTH_ERROR.
src - source buffer
slen - source buffer length
count - number of bytes removed (decoded) from the source buffer
dst - destination buffer
dsize - destination buffer size
returns - frame length on success, 0 if more data is needed, or a
negative HDLC_*_ERROR code.
*/
int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, int slen,
int *count, u8 *dst, int dsize)
{
int status = 0;
static const unsigned char fast_flag[] = {
0x00, 0x00, 0x00, 0x20, 0x30, 0x38, 0x3c, 0x3e, 0x3f
};
static const unsigned char fast_flag_value[] = {
0x00, 0x7e, 0xfc, 0xf9, 0xf3, 0xe7, 0xcf, 0x9f, 0x3f
};
static const unsigned char fast_abort[] = {
0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff
};
#define handle_fast_flag(h) \
do {\
if (h->cbin == fast_flag[h->bit_shift]) {\
h->ffvalue = fast_flag_value[h->bit_shift];\
h->state = HDLC_FAST_FLAG;\
h->ffbit_shift = h->bit_shift;\
h->bit_shift = 1;\
} else {\
h->state = HDLC_GET_DATA;\
h->data_received = 0;\
} \
} while (0)
#define handle_abort(h) \
do {\
h->shift_reg = fast_abort[h->ffbit_shift - 1];\
h->hdlc_bits1 = h->ffbit_shift - 2;\
if (h->hdlc_bits1 < 0)\
h->hdlc_bits1 = 0;\
h->data_bits = h->ffbit_shift - 1;\
h->state = HDLC_GET_DATA;\
h->data_received = 0;\
} while (0)
*count = slen;
while (slen > 0) {
if (hdlc->bit_shift == 0) {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
hdlc->cbin = bitrev8(*src++);
else
hdlc->cbin = *src++;
slen--;
hdlc->bit_shift = 8;
if (hdlc->do_adapt56)
hdlc->bit_shift--;
}
switch (hdlc->state) {
case STOPPED:
return 0;
case HDLC_FAST_IDLE:
if (hdlc->cbin == 0xff) {
hdlc->bit_shift = 0;
break;
}
hdlc->state = HDLC_GET_FLAG_B0;
hdlc->hdlc_bits1 = 0;
hdlc->bit_shift = 8;
break;
case HDLC_GET_FLAG_B0:
if (!(hdlc->cbin & 0x80)) {
hdlc->state = HDLC_GETFLAG_B1A6;
hdlc->hdlc_bits1 = 0;
} else {
if ((!hdlc->do_adapt56) &&
(++hdlc->hdlc_bits1 >= 8) &&
(hdlc->bit_shift == 1))
hdlc->state = HDLC_FAST_IDLE;
}
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_GETFLAG_B1A6:
if (hdlc->cbin & 0x80) {
hdlc->hdlc_bits1++;
if (hdlc->hdlc_bits1 == 6)
hdlc->state = HDLC_GETFLAG_B7;
} else
hdlc->hdlc_bits1 = 0;
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_GETFLAG_B7:
if (hdlc->cbin & 0x80) {
hdlc->state = HDLC_GET_FLAG_B0;
} else {
hdlc->state = HDLC_GET_DATA;
hdlc->crc = 0xffff;
hdlc->shift_reg = 0;
hdlc->hdlc_bits1 = 0;
hdlc->data_bits = 0;
hdlc->data_received = 0;
}
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_GET_DATA:
if (hdlc->cbin & 0x80) {
hdlc->hdlc_bits1++;
switch (hdlc->hdlc_bits1) {
case 6:
break;
case 7:
if (hdlc->data_received)
/* bad frame */
status = -HDLC_FRAMING_ERROR;
if (!hdlc->do_adapt56) {
if (hdlc->cbin == fast_abort
[hdlc->bit_shift + 1]) {
hdlc->state =
HDLC_FAST_IDLE;
hdlc->bit_shift = 1;
break;
}
} else
hdlc->state = HDLC_GET_FLAG_B0;
break;
default:
hdlc->shift_reg >>= 1;
hdlc->shift_reg |= 0x80;
hdlc->data_bits++;
break;
}
} else {
switch (hdlc->hdlc_bits1) {
case 5:
break;
case 6:
if (hdlc->data_received)
status = check_frame(hdlc);
hdlc->crc = 0xffff;
hdlc->shift_reg = 0;
hdlc->data_bits = 0;
if (!hdlc->do_adapt56)
handle_fast_flag(hdlc);
else {
hdlc->state = HDLC_GET_DATA;
hdlc->data_received = 0;
}
break;
default:
hdlc->shift_reg >>= 1;
hdlc->data_bits++;
break;
}
hdlc->hdlc_bits1 = 0;
}
if (status) {
hdlc->dstpos = 0;
*count -= slen;
hdlc->cbin <<= 1;
hdlc->bit_shift--;
return status;
}
if (hdlc->data_bits == 8) {
hdlc->data_bits = 0;
hdlc->data_received = 1;
hdlc->crc = crc_ccitt_byte(hdlc->crc,
hdlc->shift_reg);
/* good byte received */
if (hdlc->dstpos < dsize)
dst[hdlc->dstpos++] = hdlc->shift_reg;
else {
/* frame too long */
status = -HDLC_LENGTH_ERROR;
hdlc->dstpos = 0;
}
}
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_FAST_FLAG:
if (hdlc->cbin == hdlc->ffvalue) {
hdlc->bit_shift = 0;
break;
} else {
if (hdlc->cbin == 0xff) {
hdlc->state = HDLC_FAST_IDLE;
hdlc->bit_shift = 0;
} else if (hdlc->ffbit_shift == 8) {
hdlc->state = HDLC_GETFLAG_B7;
break;
} else
handle_abort(hdlc);
}
break;
default:
break;
}
}
*count -= slen;
return 0;
}
EXPORT_SYMBOL(isdnhdlc_decode);
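/*
 * Editor's usage sketch for isdnhdlc_decode() (hypothetical buffers and
 * feature flags): feed the raw bit stream in chunks; a positive return is
 * a complete decoded frame, a negative one carries an HDLC_*_ERROR code,
 * and 0 means more input is needed.
 */
static void isdnhdlc_decode_usage_example(const u8 *chunk, int chunk_len)
{
	struct isdnhdlc_vars rcv;
	u8 frame[4096];
	int count, rv;

	isdnhdlc_rcv_init(&rcv, 0);
	while (chunk_len > 0) {
		rv = isdnhdlc_decode(&rcv, chunk, chunk_len, &count,
				     frame, sizeof(frame));
		if (rv > 0) {
			/* rv bytes of payload are now in frame[] */
		}
		chunk += count;		/* count = bytes consumed */
		chunk_len -= count;
	}
}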
/*
isdnhdlc_encode - encodes HDLC frames to a transparent bit stream.
The bit stream starts with a beginning flag (01111110). After
that each byte is added to the bit stream with bit stuffing added
(0 after 5 1's).
When the last byte has been removed from the source buffer, the
CRC (2 bytes) is added and the frame terminates with the ending flag.
For the dchannel, the idle character (all 1's) is also added at the end.
If this function is called with an empty source buffer (slen=0), flags or
idle characters will be generated.
src - source buffer
slen - source buffer length
count - number of bytes removed (encoded) from source buffer
dst - destination buffer
dsize - destination buffer size
returns - number of encoded bytes in the destination buffer
*/
int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, u16 slen,
int *count, u8 *dst, int dsize)
{
static const unsigned char xfast_flag_value[] = {
0x7e, 0x3f, 0x9f, 0xcf, 0xe7, 0xf3, 0xf9, 0xfc, 0x7e
};
int len = 0;
*count = slen;
/* special handling for one byte frames */
if ((slen == 1) && (hdlc->state == HDLC_SEND_FAST_FLAG))
hdlc->state = HDLC_SENDFLAG_ONE;
while (dsize > 0) {
if (hdlc->bit_shift == 0) {
if (slen && !hdlc->do_closing) {
hdlc->shift_reg = *src++;
slen--;
if (slen == 0)
/* closing sequence, CRC + flag(s) */
hdlc->do_closing = 1;
hdlc->bit_shift = 8;
} else {
if (hdlc->state == HDLC_SEND_DATA) {
if (hdlc->data_received) {
hdlc->state = HDLC_SEND_CRC1;
hdlc->crc ^= 0xffff;
hdlc->bit_shift = 8;
hdlc->shift_reg =
hdlc->crc & 0xff;
} else if (!hdlc->do_adapt56)
hdlc->state =
HDLC_SEND_FAST_FLAG;
else
hdlc->state =
HDLC_SENDFLAG_B0;
}
}
}
switch (hdlc->state) {
case STOPPED:
while (dsize--)
*dst++ = 0xff;
return dsize;
case HDLC_SEND_FAST_FLAG:
hdlc->do_closing = 0;
if (slen == 0) {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
*dst++ = bitrev8(hdlc->ffvalue);
else
*dst++ = hdlc->ffvalue;
len++;
dsize--;
break;
}
/* fall through */
case HDLC_SENDFLAG_ONE:
if (hdlc->bit_shift == 8) {
hdlc->cbin = hdlc->ffvalue >>
(8 - hdlc->data_bits);
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
hdlc->data_received = 1;
}
break;
case HDLC_SENDFLAG_B0:
hdlc->do_closing = 0;
hdlc->cbin <<= 1;
hdlc->data_bits++;
hdlc->hdlc_bits1 = 0;
hdlc->state = HDLC_SENDFLAG_B1A6;
break;
case HDLC_SENDFLAG_B1A6:
hdlc->cbin <<= 1;
hdlc->data_bits++;
hdlc->cbin++;
if (++hdlc->hdlc_bits1 == 6)
hdlc->state = HDLC_SENDFLAG_B7;
break;
case HDLC_SENDFLAG_B7:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (slen == 0) {
hdlc->state = HDLC_SENDFLAG_B0;
break;
}
if (hdlc->bit_shift == 8) {
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
hdlc->data_received = 1;
}
break;
case HDLC_SEND_FIRST_FLAG:
hdlc->data_received = 1;
if (hdlc->data_bits == 8) {
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
break;
}
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->shift_reg & 0x01)
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
if (hdlc->bit_shift == 0) {
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
}
break;
case HDLC_SEND_DATA:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->bit_shift == 8)
hdlc->crc = crc_ccitt_byte(hdlc->crc,
hdlc->shift_reg);
if (hdlc->shift_reg & 0x01) {
hdlc->hdlc_bits1++;
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
} else {
hdlc->hdlc_bits1 = 0;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
}
break;
case HDLC_SEND_CRC1:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->shift_reg & 0x01) {
hdlc->hdlc_bits1++;
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
} else {
hdlc->hdlc_bits1 = 0;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
}
if (hdlc->bit_shift == 0) {
hdlc->shift_reg = (hdlc->crc >> 8);
hdlc->state = HDLC_SEND_CRC2;
hdlc->bit_shift = 8;
}
break;
case HDLC_SEND_CRC2:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->shift_reg & 0x01) {
hdlc->hdlc_bits1++;
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
} else {
hdlc->hdlc_bits1 = 0;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
}
if (hdlc->bit_shift == 0) {
hdlc->shift_reg = 0x7e;
hdlc->state = HDLC_SEND_CLOSING_FLAG;
hdlc->bit_shift = 8;
}
break;
case HDLC_SEND_CLOSING_FLAG:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->shift_reg & 0x01)
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
if (hdlc->bit_shift == 0) {
hdlc->ffvalue =
xfast_flag_value[hdlc->data_bits];
if (hdlc->dchannel) {
hdlc->ffvalue = 0x7e;
hdlc->state = HDLC_SEND_IDLE1;
hdlc->bit_shift = 8-hdlc->data_bits;
if (hdlc->bit_shift == 0)
hdlc->state =
HDLC_SEND_FAST_IDLE;
} else {
if (!hdlc->do_adapt56) {
hdlc->state =
HDLC_SEND_FAST_FLAG;
hdlc->data_received = 0;
} else {
hdlc->state = HDLC_SENDFLAG_B0;
hdlc->data_received = 0;
}
/* Finished this frame, send flags */
if (dsize > 1)
dsize = 1;
}
}
break;
case HDLC_SEND_IDLE1:
hdlc->do_closing = 0;
hdlc->cbin <<= 1;
hdlc->cbin++;
hdlc->data_bits++;
hdlc->bit_shift--;
if (hdlc->bit_shift == 0) {
hdlc->state = HDLC_SEND_FAST_IDLE;
hdlc->bit_shift = 0;
}
break;
case HDLC_SEND_FAST_IDLE:
hdlc->do_closing = 0;
hdlc->cbin = 0xff;
hdlc->data_bits = 8;
if (hdlc->bit_shift == 8) {
hdlc->cbin = 0x7e;
hdlc->state = HDLC_SEND_FIRST_FLAG;
} else {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
*dst++ = bitrev8(hdlc->cbin);
else
*dst++ = hdlc->cbin;
hdlc->bit_shift = 0;
hdlc->data_bits = 0;
len++;
dsize = 0;
}
break;
default:
break;
}
if (hdlc->do_adapt56) {
if (hdlc->data_bits == 7) {
hdlc->cbin <<= 1;
hdlc->cbin++;
hdlc->data_bits++;
}
}
if (hdlc->data_bits == 8) {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
*dst++ = bitrev8(hdlc->cbin);
else
*dst++ = hdlc->cbin;
hdlc->data_bits = 0;
len++;
dsize--;
}
}
*count -= slen;
return len;
}
EXPORT_SYMBOL(isdnhdlc_encode);
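/*
 * Editor's usage sketch for isdnhdlc_encode() (hypothetical buffers): the
 * encoder is called repeatedly; each call pulls source bytes as needed,
 * reports how many it consumed via *count, and returns how many encoded
 * bytes were written to dst. With slen == 0 it keeps emitting flag/idle
 * fill for the channel.
 */
static int isdnhdlc_encode_usage_example(const u8 *payload, u16 payload_len,
					 u8 *out, int out_size)
{
	struct isdnhdlc_vars xmit;
	int consumed;

	isdnhdlc_out_init(&xmit, 0);
	/* one call shown; a real caller loops until all of payload is consumed */
	return isdnhdlc_encode(&xmit, payload, payload_len, &consumed,
			       out, out_size);
}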
| gpl-2.0 |
DooMLoRD/android_kernel_sony_msm8960 | drivers/mmc/host/imxmmc.c | 5097 | 31053 | /*
* linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
*
* Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
* Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
*
* derived from pxamci.c by Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>
#include <mach/imx-dma.h>
#include "imxmmc.h"
#define DRIVER_NAME "imx-mmc"
#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
struct imxmci_host {
struct mmc_host *mmc;
spinlock_t lock;
struct resource *res;
void __iomem *base;
int irq;
imx_dmach_t dma;
volatile unsigned int imask;
unsigned int power_mode;
unsigned int present;
struct imxmmc_platform_data *pdata;
struct mmc_request *req;
struct mmc_command *cmd;
struct mmc_data *data;
struct timer_list timer;
struct tasklet_struct tasklet;
unsigned int status_reg;
unsigned long pending_events;
/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
u16 *data_ptr;
unsigned int data_cnt;
atomic_t stuck_timeout;
unsigned int dma_nents;
unsigned int dma_size;
unsigned int dma_dir;
int dma_allocated;
unsigned char actual_bus_width;
int prev_cmd_code;
struct clk *clk;
};
#define IMXMCI_PEND_IRQ_b 0
#define IMXMCI_PEND_DMA_END_b 1
#define IMXMCI_PEND_DMA_ERR_b 2
#define IMXMCI_PEND_WAIT_RESP_b 3
#define IMXMCI_PEND_DMA_DATA_b 4
#define IMXMCI_PEND_CPU_DATA_b 5
#define IMXMCI_PEND_CARD_XCHG_b 6
#define IMXMCI_PEND_SET_INIT_b 7
#define IMXMCI_PEND_STARTED_b 8
#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
static void imxmci_stop_clock(struct imxmci_host *host)
{
int i = 0;
u16 reg;
reg = readw(host->base + MMC_REG_STR_STP_CLK);
writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
while (i < 0x1000) {
if (!(i & 0x7f)) {
reg = readw(host->base + MMC_REG_STR_STP_CLK);
writew(reg | STR_STP_CLK_STOP_CLK,
host->base + MMC_REG_STR_STP_CLK);
}
reg = readw(host->base + MMC_REG_STATUS);
if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
/* Check twice before cut */
reg = readw(host->base + MMC_REG_STATUS);
if (!(reg & STATUS_CARD_BUS_CLK_RUN))
return;
}
i++;
}
dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
}
static int imxmci_start_clock(struct imxmci_host *host)
{
unsigned int trials = 0;
unsigned int delay_limit = 128;
unsigned long flags;
u16 reg;
reg = readw(host->base + MMC_REG_STR_STP_CLK);
writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
/*
* Command the clock to start; this usually succeeds in fewer
* than 6 delay loops, but during card detection (low clock rate)
* it takes up to 5000 delay loops and sometimes fails on the first try
*/
reg = readw(host->base + MMC_REG_STR_STP_CLK);
writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
do {
unsigned int delay = delay_limit;
while (delay--) {
reg = readw(host->base + MMC_REG_STATUS);
if (reg & STATUS_CARD_BUS_CLK_RUN) {
/* Check twice before cut */
reg = readw(host->base + MMC_REG_STATUS);
if (reg & STATUS_CARD_BUS_CLK_RUN)
return 0;
}
if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
return 0;
}
local_irq_save(flags);
/*
* Ensure that the start request is not doubled under any possible
* circumstances. It is possible that the clock-running state is
* missed, because some other IRQ or scheduling delays this function's
* execution and the clock has already been stopped by other means
* (response processing, SDHC HW)
*/
if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
reg = readw(host->base + MMC_REG_STR_STP_CLK);
writew(reg | STR_STP_CLK_START_CLK,
host->base + MMC_REG_STR_STP_CLK);
}
local_irq_restore(flags);
} while (++trials < 256);
dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
return -1;
}
static void imxmci_softreset(struct imxmci_host *host)
{
int i;
/* reset sequence */
writew(0x08, host->base + MMC_REG_STR_STP_CLK);
writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
for (i = 0; i < 8; i++)
writew(0x05, host->base + MMC_REG_STR_STP_CLK);
writew(0xff, host->base + MMC_REG_RES_TO);
writew(512, host->base + MMC_REG_BLK_LEN);
writew(1, host->base + MMC_REG_NOB);
}
static int imxmci_busy_wait_for_status(struct imxmci_host *host,
unsigned int *pstat, unsigned int stat_mask,
int timeout, const char *where)
{
int loops = 0;
while (!(*pstat & stat_mask)) {
loops += 2;
if (loops >= timeout) {
dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
where, *pstat, stat_mask);
return -1;
}
udelay(2);
*pstat |= readw(host->base + MMC_REG_STATUS);
}
if (!loops)
return 0;
/* The busy-wait is expected here for clocks <8MHz due to SDHC hardware flaws */
if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
loops, where, *pstat, stat_mask);
return loops;
}
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned int blksz = data->blksz;
unsigned int datasz = nob * blksz;
int i;
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
host->data = data;
data->bytes_xfered = 0;
writew(nob, host->base + MMC_REG_NOB);
writew(blksz, host->base + MMC_REG_BLK_LEN);
/*
* DMA cannot be used for small block sizes; we have to use CPU driven transfers otherwise.
* We are in big trouble for non-512 byte transfers according to the note in paragraph
* 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least.
* The situation is even more complex in reality. The SDHC is not able to handle
* partial FIFO fills and reads well. The length has to be rounded up to a burst size
* multiple. This is required for the SCR read at least.
*/
if (datasz < 512) {
host->dma_size = datasz;
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
/* Hack to enable read SCR */
writew(1, host->base + MMC_REG_NOB);
writew(512, host->base + MMC_REG_BLK_LEN);
} else {
host->dma_dir = DMA_TO_DEVICE;
}
/* Convert back to virtual address */
host->data_ptr = (u16 *)sg_virt(data->sg);
host->data_cnt = 0;
clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
return;
}
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma_dir);
imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
host->res->start + MMC_REG_BUFFER_ACCESS,
DMA_MODE_READ);
/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
} else {
host->dma_dir = DMA_TO_DEVICE;
host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma_dir);
imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
host->res->start + MMC_REG_BUFFER_ACCESS,
DMA_MODE_WRITE);
/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
}
#if 1 /* This code is here only for consistency checking and can be disabled in the future */
host->dma_size = 0;
for (i = 0; i < host->dma_nents; i++)
host->dma_size += data->sg[i].length;
if (datasz > host->dma_size) {
dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
datasz, host->dma_size);
}
#endif
host->dma_size = datasz;
wmb();
set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
/* start DMA engine for read, write is delayed after initial response */
if (host->dma_dir == DMA_FROM_DEVICE)
imx_dma_enable(host->dma);
}
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
unsigned long flags;
u32 imask;
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
/* Ensure that the clock is stopped, else command programming and start fail */
imxmci_stop_clock(host);
if (cmd->flags & MMC_RSP_BUSY)
cmdat |= CMD_DAT_CONT_BUSY;
switch (mmc_resp_type(cmd)) {
case MMC_RSP_R1: /* short CRC, OPCODE */
case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
break;
case MMC_RSP_R2: /* long 136 bit + CRC */
cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
break;
case MMC_RSP_R3: /* short */
cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
break;
default:
break;
}
if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
if (host->actual_bus_width == MMC_BUS_WIDTH_4)
cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
writew(cmd->opcode, host->base + MMC_REG_CMD);
writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
atomic_set(&host->stuck_timeout, 0);
set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
imask = IMXMCI_INT_MASK_DEFAULT;
imask &= ~INT_MASK_END_CMD_RES;
if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
/* imask &= ~INT_MASK_BUF_READY; */
imask &= ~INT_MASK_DATA_TRAN;
if (cmdat & CMD_DAT_CONT_WRITE)
imask &= ~INT_MASK_WRITE_OP_DONE;
if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
imask &= ~INT_MASK_BUF_READY;
}
spin_lock_irqsave(&host->lock, flags);
host->imask = imask;
writew(host->imask, host->base + MMC_REG_INT_MASK);
spin_unlock_irqrestore(&host->lock, flags);
dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
cmd->opcode, cmd->opcode, imask);
imxmci_start_clock(host);
}
static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
host->imask = IMXMCI_INT_MASK_DEFAULT;
writew(host->imask, host->base + MMC_REG_INT_MASK);
spin_unlock_irqrestore(&host->lock, flags);
if (req && req->cmd)
host->prev_cmd_code = req->cmd->opcode;
host->req = NULL;
host->cmd = NULL;
host->data = NULL;
mmc_request_done(host->mmc, req);
}
static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
int data_error;
if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
imx_dma_disable(host->dma);
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
host->dma_dir);
}
if (stat & STATUS_ERR_MASK) {
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
data->error = -EILSEQ;
else if (stat & STATUS_TIME_OUT_READ)
data->error = -ETIMEDOUT;
else
data->error = -EIO;
} else {
data->bytes_xfered = host->dma_size;
}
data_error = data->error;
host->data = NULL;
return data_error;
}
static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
int i;
u32 a, b, c;
struct mmc_data *data = host->data;
if (!cmd)
return 0;
host->cmd = NULL;
if (stat & STATUS_TIME_OUT_RESP) {
dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
cmd->error = -ETIMEDOUT;
} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
cmd->error = -EILSEQ;
}
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
for (i = 0; i < 4; i++) {
a = readw(host->base + MMC_REG_RES_FIFO);
b = readw(host->base + MMC_REG_RES_FIFO);
cmd->resp[i] = a << 16 | b;
}
} else {
a = readw(host->base + MMC_REG_RES_FIFO);
b = readw(host->base + MMC_REG_RES_FIFO);
c = readw(host->base + MMC_REG_RES_FIFO);
cmd->resp[0] = a << 24 | b << 8 | c >> 8;
}
}
dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
if (host->req->data->flags & MMC_DATA_WRITE) {
/* Wait for FIFO to be empty before starting DMA write */
stat = readw(host->base + MMC_REG_STATUS);
if (imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FE,
40, "imxmci_cmd_done DMA WR") < 0) {
cmd->error = -EIO;
imxmci_finish_data(host, stat);
if (host->req)
imxmci_finish_request(host, host->req);
dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
stat);
return 0;
}
if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
imx_dma_enable(host->dma);
}
} else {
struct mmc_request *req;
imxmci_stop_clock(host);
req = host->req;
if (data)
imxmci_finish_data(host, stat);
if (req)
imxmci_finish_request(host, req);
else
dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
}
return 1;
}
static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
int data_error;
if (!data)
return 0;
data_error = imxmci_finish_data(host, stat);
if (host->req->stop) {
imxmci_stop_clock(host);
imxmci_start_cmd(host, host->req->stop, 0);
} else {
struct mmc_request *req;
req = host->req;
if (req)
imxmci_finish_request(host, req);
else
dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
}
return 1;
}
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
int i;
int burst_len;
int trans_done = 0;
unsigned int stat = *pstat;
if (host->actual_bus_width != MMC_BUS_WIDTH_4)
burst_len = 16;
else
burst_len = 64;
/* This is unfortunately required */
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
stat);
udelay(20); /* required for clocks < 8MHz*/
if (host->dma_dir == DMA_FROM_DEVICE) {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
STATUS_TIME_OUT_READ,
50, "imxmci_cpu_driven_data read");
while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
!(stat & STATUS_TIME_OUT_READ) &&
(host->data_cnt < 512)) {
udelay(20); /* required for clocks < 8MHz*/
for (i = burst_len; i >= 2 ; i -= 2) {
u16 data;
data = readw(host->base + MMC_REG_BUFFER_ACCESS);
udelay(10); /* required for clocks < 8MHz*/
if (host->data_cnt+2 <= host->dma_size) {
*(host->data_ptr++) = data;
} else {
if (host->data_cnt < host->dma_size)
*(u8 *)(host->data_ptr) = data;
}
host->data_cnt += 2;
}
stat = readw(host->base + MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
host->data_cnt, burst_len, stat);
}
if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
trans_done = 1;
if (host->dma_size & 0x1ff)
stat &= ~STATUS_CRC_READ_ERR;
if (stat & STATUS_TIME_OUT_READ) {
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
stat);
trans_done = -1;
}
} else {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FE,
20, "imxmci_cpu_driven_data write");
while ((stat & STATUS_APPL_BUFF_FE) &&
(host->data_cnt < host->dma_size)) {
if (burst_len >= host->dma_size - host->data_cnt) {
burst_len = host->dma_size - host->data_cnt;
host->data_cnt = host->dma_size;
trans_done = 1;
} else {
host->data_cnt += burst_len;
}
for (i = burst_len; i > 0 ; i -= 2)
writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
stat = readw(host->base + MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
burst_len, stat);
}
}
*pstat = stat;
return trans_done;
}
static void imxmci_dma_irq(int dma, void *devid)
{
struct imxmci_host *host = devid;
u32 stat = readw(host->base + MMC_REG_STATUS);
atomic_set(&host->stuck_timeout, 0);
host->status_reg = stat;
set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
static irqreturn_t imxmci_irq(int irq, void *devid)
{
struct imxmci_host *host = devid;
u32 stat = readw(host->base + MMC_REG_STATUS);
int handled = 1;
writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
host->base + MMC_REG_INT_MASK);
atomic_set(&host->stuck_timeout, 0);
host->status_reg = stat;
set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
tasklet_schedule(&host->tasklet);
return IRQ_RETVAL(handled);
}
static void imxmci_tasklet_fnc(unsigned long data)
{
struct imxmci_host *host = (struct imxmci_host *)data;
u32 stat;
unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
int timeout = 0;
if (atomic_read(&host->stuck_timeout) > 4) {
char *what;
timeout = 1;
stat = readw(host->base + MMC_REG_STATUS);
host->status_reg = stat;
if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
what = "RESP+DMA";
else
what = "RESP";
else
if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
what = "DATA";
else
what = "DMA";
else
what = "???";
dev_err(mmc_dev(host->mmc),
"%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
what, stat,
readw(host->base + MMC_REG_INT_MASK));
dev_err(mmc_dev(host->mmc),
"CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
readw(host->base + MMC_REG_CMD_DAT_CONT),
readw(host->base + MMC_REG_BLK_LEN),
readw(host->base + MMC_REG_NOB),
CCR(host->dma));
dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
host->cmd ? host->cmd->opcode : 0,
host->prev_cmd_code,
1 << host->actual_bus_width, host->dma_size);
}
if (!host->present || timeout)
host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
stat = readw(host->base + MMC_REG_STATUS);
/*
* This is not required in theory, but there is a chance of missing a flag
* which clears automatically on the mask write; the original FreeScale
* code keeps the stat from IRQ time, so do we
*/
stat |= host->status_reg;
if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
stat &= ~STATUS_CRC_READ_ERR;
if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
imxmci_busy_wait_for_status(host, &stat,
STATUS_END_CMD_RESP | STATUS_ERR_MASK,
20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
}
if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
imxmci_cmd_done(host, stat);
if (host->data && (stat & STATUS_ERR_MASK))
imxmci_data_done(host, stat);
}
if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
stat |= readw(host->base + MMC_REG_STATUS);
if (imxmci_cpu_driven_data(host, &stat)) {
if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
imxmci_cmd_done(host, stat);
atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
&host->pending_events);
imxmci_data_done(host, stat);
}
}
}
if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
!test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
stat = readw(host->base + MMC_REG_STATUS);
/* Same as above */
stat |= host->status_reg;
if (host->dma_dir == DMA_TO_DEVICE)
data_dir_mask = STATUS_WRITE_OP_DONE;
else
data_dir_mask = STATUS_DATA_TRANS_DONE;
if (stat & data_dir_mask) {
clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
imxmci_data_done(host, stat);
}
}
if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
if (host->cmd)
imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
if (host->data)
imxmci_data_done(host, STATUS_TIME_OUT_READ |
STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
if (host->req)
imxmci_finish_request(host, host->req);
mmc_detect_change(host->mmc, msecs_to_jiffies(100));
}
}
static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct imxmci_host *host = mmc_priv(mmc);
unsigned int cmdat;
WARN_ON(host->req != NULL);
host->req = req;
cmdat = 0;
if (req->data) {
imxmci_setup_data(host, req->data);
cmdat |= CMD_DAT_CONT_DATA_ENABLE;
if (req->data->flags & MMC_DATA_WRITE)
cmdat |= CMD_DAT_CONT_WRITE;
if (req->data->flags & MMC_DATA_STREAM)
cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
}
imxmci_start_cmd(host, req->cmd, cmdat);
}
#define CLK_RATE 19200000
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct imxmci_host *host = mmc_priv(mmc);
int prescaler;
if (ios->bus_width == MMC_BUS_WIDTH_4) {
host->actual_bus_width = MMC_BUS_WIDTH_4;
imx_gpio_mode(PB11_PF_SD_DAT3);
BLR(host->dma) = 0; /* burst 64 byte read/write */
} else {
host->actual_bus_width = MMC_BUS_WIDTH_1;
imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
BLR(host->dma) = 16; /* burst 16 byte read/write */
}
if (host->power_mode != ios->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
break;
case MMC_POWER_UP:
set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
break;
case MMC_POWER_ON:
break;
}
host->power_mode = ios->power_mode;
}
if (ios->clock) {
unsigned int clk;
u16 reg;
/* The prescaler is 5 when PERCLK2 equals 96 MHz,
* because 96 MHz / 5 = 19.2 MHz
*/
clk = clk_get_rate(host->clk);
prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
switch (prescaler) {
case 0:
case 1: prescaler = 0;
break;
case 2: prescaler = 1;
break;
case 3: prescaler = 2;
break;
case 4: prescaler = 4;
break;
default:
case 5: prescaler = 5;
break;
}
dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
clk, prescaler);
for (clk = 0; clk < 8; clk++) {
int x;
x = CLK_RATE / (1 << clk);
if (x <= ios->clock)
break;
}
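/*
* Worked example (illustrative): for a requested ios->clock of
* 400 kHz the loop above stops at clk = 6, because
* 19200000 / (1 << 6) = 300 kHz <= 400 kHz, while clk = 5 would
* still give 600 kHz; the divider picked is thus never faster than
* the rate the core asked for.
*/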
/* enable controller */
reg = readw(host->base + MMC_REG_STR_STP_CLK);
writew(reg | STR_STP_CLK_ENABLE,
host->base + MMC_REG_STR_STP_CLK);
imxmci_stop_clock(host);
writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
/*
* As far as we understand, the clock must not be started here,
* because doing so would kick off the SDHC sequencer and send the
* last (or a random) command to the card
*/
/* imxmci_start_clock(host); */
dev_dbg(mmc_dev(host->mmc),
"MMC_CLK_RATE: 0x%08x\n",
readw(host->base + MMC_REG_CLK_RATE));
} else {
imxmci_stop_clock(host);
}
}
static int imxmci_get_ro(struct mmc_host *mmc)
{
struct imxmci_host *host = mmc_priv(mmc);
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
* Board doesn't support read only detection; let the mmc core
* decide what to do.
*/
return -ENOSYS;
}
static const struct mmc_host_ops imxmci_ops = {
.request = imxmci_request,
.set_ios = imxmci_set_ios,
.get_ro = imxmci_get_ro,
};
static void imxmci_check_status(unsigned long data)
{
struct imxmci_host *host = (struct imxmci_host *)data;
if (host->pdata && host->pdata->card_present &&
host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
host->present ^= 1;
dev_info(mmc_dev(host->mmc), "card %s\n",
host->present ? "inserted" : "removed");
set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
atomic_inc(&host->stuck_timeout);
if (atomic_read(&host->stuck_timeout) > 4)
tasklet_schedule(&host->tasklet);
} else {
atomic_set(&host->stuck_timeout, 0);
}
mod_timer(&host->timer, jiffies + (HZ>>1));
}
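/*
* Note (added for clarity): this timer re-arms itself every HZ/2
* jiffies (500 ms), so the stuck_timeout > 4 test here and in
* imxmci_tasklet_fnc() corresponds to a response or DMA transfer
* that has been pending for more than about two seconds.
*/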
static int __init imxmci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct imxmci_host *host = NULL;
struct resource *r;
int ret = 0, irq;
u16 rev_no;
pr_info("i.MX mmc driver\n");
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq < 0)
return -ENXIO;
r = request_mem_region(r->start, resource_size(r), pdev->name);
if (!r)
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
}
mmc->ops = &imxmci_ops;
mmc->f_min = 150000;
mmc->f_max = CLK_RATE/2;
mmc->ocr_avail = MMC_VDD_32_33;
mmc->caps = MMC_CAP_4_BIT_DATA;
/* MMC core transfer sizes tunable parameters */
mmc->max_segs = 64;
mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
host = mmc_priv(mmc);
host->base = ioremap(r->start, resource_size(r));
if (!host->base) {
ret = -ENOMEM;
goto out;
}
host->mmc = mmc;
host->dma_allocated = 0;
host->pdata = pdev->dev.platform_data;
if (!host->pdata)
dev_warn(&pdev->dev, "No platform data provided!\n");
spin_lock_init(&host->lock);
host->res = r;
host->irq = irq;
host->clk = clk_get(&pdev->dev, "perclk2");
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
goto out;
}
clk_enable(host->clk);
imx_gpio_mode(PB8_PF_SD_DAT0);
imx_gpio_mode(PB9_PF_SD_DAT1);
imx_gpio_mode(PB10_PF_SD_DAT2);
/* Configured as GPIO with pull-up to ensure right MCC card mode */
/* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
/* imx_gpio_mode(PB11_PF_SD_DAT3); */
imx_gpio_mode(PB12_PF_SD_CLK);
imx_gpio_mode(PB13_PF_SD_CMD);
imxmci_softreset(host);
rev_no = readw(host->base + MMC_REG_REV_NO);
if (rev_no != 0x390) {
dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
readw(host->base + MMC_REG_REV_NO));
goto out;
}
/* recommended in data sheet */
writew(0x2db4, host->base + MMC_REG_READ_TO);
host->imask = IMXMCI_INT_MASK_DEFAULT;
writew(host->imask, host->base + MMC_REG_INT_MASK);
host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
if (host->dma < 0) {
dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
ret = -EBUSY;
goto out;
}
host->dma_allocated = 1;
imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
RSSR(host->dma) = DMA_REQ_SDHC;
tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
host->status_reg = 0;
host->pending_events = 0;
ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
if (ret)
goto out;
if (host->pdata && host->pdata->card_present)
host->present = host->pdata->card_present(mmc_dev(mmc));
else /* if there is no way to detect, assume the card is present */
host->present = 1;
init_timer(&host->timer);
host->timer.data = (unsigned long)host;
host->timer.function = imxmci_check_status;
/* mod_timer() also activates an inactive timer, so no separate
* add_timer() call is needed here */
mod_timer(&host->timer, jiffies + (HZ >> 1));
platform_set_drvdata(pdev, mmc);
mmc_add_host(mmc);
return 0;
out:
if (host) {
if (host->dma_allocated) {
imx_dma_free(host->dma);
host->dma_allocated = 0;
}
if (host->clk) {
clk_disable(host->clk);
clk_put(host->clk);
}
if (host->base)
iounmap(host->base);
}
if (mmc)
mmc_free_host(mmc);
release_mem_region(r->start, resource_size(r));
return ret;
}
static int __exit imxmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
if (mmc) {
struct imxmci_host *host = mmc_priv(mmc);
tasklet_disable(&host->tasklet);
del_timer_sync(&host->timer);
mmc_remove_host(mmc);
free_irq(host->irq, host);
iounmap(host->base);
if (host->dma_allocated) {
imx_dma_free(host->dma);
host->dma_allocated = 0;
}
tasklet_kill(&host->tasklet);
clk_disable(host->clk);
clk_put(host->clk);
release_mem_region(host->res->start, resource_size(host->res));
mmc_free_host(mmc);
}
return 0;
}
#ifdef CONFIG_PM
static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
{
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret = 0;
if (mmc)
ret = mmc_suspend_host(mmc);
return ret;
}
static int imxmci_resume(struct platform_device *dev)
{
struct mmc_host *mmc = platform_get_drvdata(dev);
struct imxmci_host *host;
int ret = 0;
if (mmc) {
host = mmc_priv(mmc);
if (host)
set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
ret = mmc_resume_host(mmc);
}
return ret;
}
#else
#define imxmci_suspend NULL
#define imxmci_resume NULL
#endif /* CONFIG_PM */
static struct platform_driver imxmci_driver = {
.remove = __exit_p(imxmci_remove),
.suspend = imxmci_suspend,
.resume = imxmci_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
}
};
static int __init imxmci_init(void)
{
return platform_driver_probe(&imxmci_driver, imxmci_probe);
}
static void __exit imxmci_exit(void)
{
platform_driver_unregister(&imxmci_driver);
}
module_init(imxmci_init);
module_exit(imxmci_exit);
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-mmc");
| gpl-2.0 |
guylamar2006/android_kernel_samsung_smdk4412 | drivers/net/enic/vnic_vic.c | 13289 | 2047 | /*
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include "vnic_vic.h"
struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
const u8 type)
{
struct vic_provinfo *vp;
if (!oui)
return NULL;
vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
if (!vp)
return NULL;
memcpy(vp->oui, oui, sizeof(vp->oui));
vp->type = type;
vp->length = htonl(sizeof(vp->num_tlvs));
return vp;
}
void vic_provinfo_free(struct vic_provinfo *vp)
{
kfree(vp);
}
int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
const void *value)
{
struct vic_provinfo_tlv *tlv;
if (!vp || !value)
return -EINVAL;
if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) +
length > VIC_PROVINFO_MAX_TLV_DATA)
return -ENOMEM;
tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
ntohl(vp->length) - sizeof(vp->num_tlvs));
tlv->type = htons(type);
tlv->length = htons(length);
memcpy(tlv->value, value, length);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
offsetof(struct vic_provinfo_tlv, value) + length);
return 0;
}
size_t vic_provinfo_size(struct vic_provinfo *vp)
{
return vp ? ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0;
}
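/*
* Illustrative usage sketch (hypothetical OUI, type and TLV values;
* not taken from this file):
*
*	static const u8 oui[3] = { 0x00, 0x00, 0x0c };
*	struct vic_provinfo *vp;
*
*	vp = vic_provinfo_alloc(GFP_KERNEL, oui, 1);
*	if (vp) {
*		vic_provinfo_add_tlv(vp, 1, 5, "eth0");
*		// hand vic_provinfo_size(vp) bytes at vp to the firmware
*		vic_provinfo_free(vp);
*	}
*/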
| gpl-2.0 |
skelitonlord/android_kernel_samsung_matissewifi | arch/powerpc/boot/cuboot-pq2.c | 14057 | 7138 | /*
* Old U-boot compatibility for PowerQUICC II
* (a.k.a. 82xx with CPM, not the 8240 family of chips)
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "cuboot.h"
#include "io.h"
#include "fsl-soc.h"
#define TARGET_CPM2
#define TARGET_HAS_ETH1
#include "ppcboot.h"
static bd_t bd;
struct cs_range {
u32 csnum;
u32 base; /* must be zero */
u32 addr;
u32 size;
};
struct pci_range {
u32 flags;
u32 pci_addr[2];
u32 phys_addr;
u32 size[2];
};
struct cs_range cs_ranges_buf[MAX_PROP_LEN / sizeof(struct cs_range)];
struct pci_range pci_ranges_buf[MAX_PROP_LEN / sizeof(struct pci_range)];
/* Different versions of u-boot put the BCSR in different places, and
* some don't set up the PCI PIC at all, so we assume the device tree is
* sane and update the BRx registers appropriately.
*
* For any node defined as compatible with fsl,pq2-localbus,
* #address/#size must be 2/1 for the localbus, and 1/1 for the parent bus.
* Ranges must be for whole chip selects.
*/
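/* For illustration only (addresses and sizes below are made up),
* a matching localbus ranges property could look like:
*
*	ranges = <0 0 0xfe000000 0x00800000	// CS0: boot flash
*		  1 0 0xf4500000 0x00008000>;	// CS1: BCSR
*
* i.e. <csnum 0 host-address size> per chip select, with the base
* cell required to be zero as checked in the loop below.
*/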
static void update_cs_ranges(void)
{
void *bus_node, *parent_node;
u32 *ctrl_addr;
unsigned long ctrl_size;
u32 naddr, nsize;
int len;
int i;
bus_node = finddevice("/localbus");
if (!bus_node || !dt_is_compatible(bus_node, "fsl,pq2-localbus"))
return;
dt_get_reg_format(bus_node, &naddr, &nsize);
if (naddr != 2 || nsize != 1)
goto err;
parent_node = get_parent(bus_node);
if (!parent_node)
goto err;
dt_get_reg_format(parent_node, &naddr, &nsize);
if (naddr != 1 || nsize != 1)
goto err;
if (!dt_xlate_reg(bus_node, 0, (unsigned long *)&ctrl_addr,
&ctrl_size))
goto err;
len = getprop(bus_node, "ranges", cs_ranges_buf, sizeof(cs_ranges_buf));
for (i = 0; i < len / sizeof(struct cs_range); i++) {
u32 base, option;
int cs = cs_ranges_buf[i].csnum;
if (cs >= ctrl_size / 8)
goto err;
if (cs_ranges_buf[i].base != 0)
goto err;
base = in_be32(&ctrl_addr[cs * 2]);
/* If CS is already valid, use the existing flags.
* Otherwise, guess a sane default.
*/
if (base & 1) {
base &= 0x7fff;
option = in_be32(&ctrl_addr[cs * 2 + 1]) & 0x7fff;
} else {
base = 0x1801;
option = 0x10;
}
out_be32(&ctrl_addr[cs * 2], 0);
out_be32(&ctrl_addr[cs * 2 + 1],
option | ~(cs_ranges_buf[i].size - 1));
out_be32(&ctrl_addr[cs * 2], base | cs_ranges_buf[i].addr);
}
return;
err:
printf("Bad /localbus node\r\n");
}
/* Older u-boots don't set PCI up properly. Update the hardware to match
* the device tree. The prefetch mem region and non-prefetch mem region
* must be contiguous in the host bus. As required by the PCI binding,
* PCI #addr/#size must be 3/2. The parent bus must be 1/1. Only
* 32-bit PCI is supported. All three region types (prefetchable mem,
* non-prefetchable mem, and I/O) must be present.
*/
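/* For illustration only (values are assumptions, not from this file),
* a PCI ranges property matched by the flag tests below could be:
*
*	ranges = <0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
*		  0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
*		  0x01000000 0x0 0x00000000 0xf6000000 0x0 0x02000000>;
*
* 0x42000000 marks prefetchable memory, 0x02000000 non-prefetchable
* memory and 0x01000000 I/O space; note the two memory regions are
* contiguous in the host bus and power-of-two sized, as required.
*/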
static void fixup_pci(void)
{
struct pci_range *mem = NULL, *mmio = NULL,
*io = NULL, *mem_base = NULL;
u32 *pci_regs[3];
u8 *soc_regs;
int i, len;
void *node, *parent_node;
u32 naddr, nsize, mem_pow2, mem_mask;
node = finddevice("/pci");
if (!node || !dt_is_compatible(node, "fsl,pq2-pci"))
return;
for (i = 0; i < 3; i++)
if (!dt_xlate_reg(node, i,
(unsigned long *)&pci_regs[i], NULL))
goto err;
soc_regs = (u8 *)fsl_get_immr();
if (!soc_regs)
goto unhandled;
dt_get_reg_format(node, &naddr, &nsize);
if (naddr != 3 || nsize != 2)
goto err;
parent_node = get_parent(node);
if (!parent_node)
goto err;
dt_get_reg_format(parent_node, &naddr, &nsize);
if (naddr != 1 || nsize != 1)
goto unhandled;
len = getprop(node, "ranges", pci_ranges_buf,
sizeof(pci_ranges_buf));
for (i = 0; i < len / sizeof(struct pci_range); i++) {
u32 flags = pci_ranges_buf[i].flags & 0x43000000;
if (flags == 0x42000000)
mem = &pci_ranges_buf[i];
else if (flags == 0x02000000)
mmio = &pci_ranges_buf[i];
else if (flags == 0x01000000)
io = &pci_ranges_buf[i];
}
if (!mem || !mmio || !io)
goto unhandled;
if (mem->size[1] != mmio->size[1])
goto unhandled;
if (mem->size[1] & (mem->size[1] - 1))
goto unhandled;
if (io->size[1] & (io->size[1] - 1))
goto unhandled;
if (mem->phys_addr + mem->size[1] == mmio->phys_addr)
mem_base = mem;
else if (mmio->phys_addr + mmio->size[1] == mem->phys_addr)
mem_base = mmio;
else
goto unhandled;
out_be32(&pci_regs[1][0], mem_base->phys_addr | 1);
out_be32(&pci_regs[2][0], ~(mem->size[1] + mmio->size[1] - 1));
out_be32(&pci_regs[1][1], io->phys_addr | 1);
out_be32(&pci_regs[2][1], ~(io->size[1] - 1));
out_le32(&pci_regs[0][0], mem->pci_addr[1] >> 12);
out_le32(&pci_regs[0][2], mem->phys_addr >> 12);
out_le32(&pci_regs[0][4], (~(mem->size[1] - 1) >> 12) | 0xa0000000);
out_le32(&pci_regs[0][6], mmio->pci_addr[1] >> 12);
out_le32(&pci_regs[0][8], mmio->phys_addr >> 12);
out_le32(&pci_regs[0][10], (~(mmio->size[1] - 1) >> 12) | 0x80000000);
out_le32(&pci_regs[0][12], io->pci_addr[1] >> 12);
out_le32(&pci_regs[0][14], io->phys_addr >> 12);
out_le32(&pci_regs[0][16], (~(io->size[1] - 1) >> 12) | 0xc0000000);
/* Inbound translation */
out_le32(&pci_regs[0][58], 0);
out_le32(&pci_regs[0][60], 0);
mem_pow2 = 1 << (__ilog2_u32(bd.bi_memsize - 1) + 1);
mem_mask = ~(mem_pow2 - 1) >> 12;
out_le32(&pci_regs[0][62], 0xa0000000 | mem_mask);
/* If PCI is disabled, drive RST high to enable. */
if (!(in_le32(&pci_regs[0][32]) & 1)) {
/* Tpvrh (Power valid to RST# high) 100 ms */
udelay(100000);
out_le32(&pci_regs[0][32], 1);
/* Trhfa (RST# high to first cfg access) 2^25 clocks */
udelay(1020000);
}
/* Enable bus master and memory access */
out_le32(&pci_regs[0][64], 0x80000004);
out_le32(&pci_regs[0][65], in_le32(&pci_regs[0][65]) | 6);
/* Park the bus on PCI, and elevate PCI's arbitration priority,
* as required by section 9.6 of the user's manual.
*/
out_8(&soc_regs[0x10028], 3);
out_be32((u32 *)&soc_regs[0x1002c], 0x01236745);
return;
err:
printf("Bad PCI node -- using existing firmware setup.\r\n");
return;
unhandled:
printf("Unsupported PCI node -- using existing firmware setup.\r\n");
}
static void pq2_platform_fixups(void)
{
void *node;
dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr);
dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);
node = finddevice("/soc/cpm");
if (node)
setprop(node, "clock-frequency", &bd.bi_cpmfreq, 4);
node = finddevice("/soc/cpm/brg");
if (node)
setprop(node, "clock-frequency", &bd.bi_brgfreq, 4);
update_cs_ranges();
fixup_pci();
}
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
CUBOOT_INIT();
fdt_init(_dtb_start);
serial_console_init();
platform_ops.fixups = pq2_platform_fixups;
}
| gpl-2.0 |
sivasankariit/linux-rl | drivers/gpu/drm/nouveau/nouveau_ttm.c | 234 | 10995 | /*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
/* nothing to do */
return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
/* nothing to do */
return 0;
}
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
if (node->vma[0].node) {
nouveau_vm_unmap(&node->vma[0]);
nouveau_vm_put(&node->vma[0]);
}
if (node->vma[1].node) {
nouveau_vm_unmap(&node->vma[1]);
nouveau_vm_put(&node->vma[1]);
}
}
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
nouveau_mem_node_cleanup(mem->mm_node);
pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
}
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_mem *node;
u32 size_nc = 0;
int ret;
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
mem->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
}
node->page_shift = nvbo->page_shift;
mem->mm_node = node;
mem->start = node->offset >> PAGE_SHIFT;
return 0;
}
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
u32 total = 0, free = 0;
mutex_lock(&mm->mutex);
list_for_each_entry(r, &mm->nodes, nl_entry) {
printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
prefix, r->type, ((u64)r->offset << 12),
(((u64)r->offset + r->length) << 12));
total += r->length;
if (!r->type)
free += r->length;
}
mutex_unlock(&mm->mutex);
printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
prefix, (u64)total << 12, (u64)free << 12);
printk(KERN_DEBUG "%s block: 0x%08x\n",
prefix, mm->block_size << 12);
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_init,
nouveau_vram_manager_fini,
nouveau_vram_manager_new,
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
return 0;
}
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
return 0;
}
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
nouveau_mem_node_cleanup(mem->mm_node);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node;
if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
return -ENOMEM;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->page_shift = 12;
mem->mm_node = node;
mem->start = 0;
return 0;
}
static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
nouveau_gart_manager_init,
nouveau_gart_manager_fini,
nouveau_gart_manager_new,
nouveau_gart_manager_del,
nouveau_gart_manager_debug
};
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
struct nv04_vmmgr_priv *priv = (void *)vmm;
struct nouveau_vm *vm = NULL;
nouveau_vm_ref(priv->vm, &vm, NULL);
man->priv = vm;
return 0;
}
static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
struct nouveau_vm *vm = man->priv;
nouveau_vm_ref(NULL, &vm, NULL);
man->priv = NULL;
return 0;
}
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
struct nouveau_mem *node = mem->mm_node;
if (node->vma[0].node)
nouveau_vm_put(&node->vma[0]);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node;
int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->page_shift = 12;
ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
NV_MEM_ACCESS_RW, &node->vma[0]);
if (ret) {
kfree(node);
return ret;
}
mem->mm_node = node;
mem->start = node->vma[0].offset >> PAGE_SHIFT;
return 0;
}
static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
const struct ttm_mem_type_manager_func nv04_gart_manager = {
nv04_gart_manager_init,
nv04_gart_manager_fini,
nv04_gart_manager_new,
nv04_gart_manager_del,
nv04_gart_manager_debug
};
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
struct drm_global_reference *global_ref;
int ret;
global_ref = &drm->ttm.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &nouveau_ttm_mem_global_init;
global_ref->release = &nouveau_ttm_mem_global_release;
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting\n");
drm->ttm.mem_global_ref.release = NULL;
return ret;
}
drm->ttm.bo_global_ref.mem_glob = global_ref->object;
global_ref = &drm->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM BO subsystem\n");
drm_global_item_unref(&drm->ttm.mem_global_ref);
drm->ttm.mem_global_ref.release = NULL;
return ret;
}
return 0;
}
void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
if (drm->ttm.mem_global_ref.release == NULL)
return;
drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
drm_global_item_unref(&drm->ttm.mem_global_ref);
drm->ttm.mem_global_ref.release = NULL;
}
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
u32 bits;
int ret;
bits = nouveau_vmmgr(drm->device)->dma_bits;
if (drm->agp.stat == ENABLED ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
if (ret)
return ret;
ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
if (ret)
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
ret = nouveau_ttm_global_init(drm);
if (ret)
return ret;
ret = ttm_bo_device_init(&drm->ttm.bdev,
drm->ttm.bo_global_ref.ref.object,
&nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
bits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
}
/* VRAM init */
drm->gem.vram_available = nouveau_fb(drm->device)->ram.size;
drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
return ret;
}
drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
pci_resource_len(dev->pdev, 1),
DRM_MTRR_WC);
/* GART init */
if (drm->agp.stat != ENABLED) {
drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
if (drm->gem.gart_available > 512 * 1024 * 1024)
drm->gem.gart_available = 512 * 1024 * 1024;
} else {
drm->gem.gart_available = drm->agp.size;
}
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
drm->gem.gart_available >> PAGE_SHIFT);
if (ret) {
NV_ERROR(drm, "GART mm init failed, %d\n", ret);
return ret;
}
NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
return 0;
}
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
mutex_lock(&drm->dev->struct_mutex);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
mutex_unlock(&drm->dev->struct_mutex);
ttm_bo_device_release(&drm->ttm.bdev);
nouveau_ttm_global_release(drm);
if (drm->ttm.mtrr >= 0) {
drm_mtrr_del(drm->ttm.mtrr,
pci_resource_start(drm->dev->pdev, 1),
pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
drm->ttm.mtrr = -1;
}
}
| gpl-2.0 |
kierank/p2-kernel | drivers/hwmon/fscher.c | 234 | 20211 | /*
* fscher.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2003, 2004 Reinhard Nissl <rnissl@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* fujitsu siemens hermes chip,
* module based on fscpos.c
* Copyright (C) 2000 Hermann Jung <hej@odn.de>
* Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
* and Philip Edelbrock <phil@netroedge.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
/*
* Addresses to scan
*/
static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
/*
* Insmod parameters
*/
I2C_CLIENT_INSMOD_1(fscher);
/*
* The FSCHER registers
*/
/* chip identification */
#define FSCHER_REG_IDENT_0 0x00
#define FSCHER_REG_IDENT_1 0x01
#define FSCHER_REG_IDENT_2 0x02
#define FSCHER_REG_REVISION 0x03
/* global control and status */
#define FSCHER_REG_EVENT_STATE 0x04
#define FSCHER_REG_CONTROL 0x05
/* watchdog */
#define FSCHER_REG_WDOG_PRESET 0x28
#define FSCHER_REG_WDOG_STATE 0x23
#define FSCHER_REG_WDOG_CONTROL 0x21
/* fan 0 */
#define FSCHER_REG_FAN0_MIN 0x55
#define FSCHER_REG_FAN0_ACT 0x0e
#define FSCHER_REG_FAN0_STATE 0x0d
#define FSCHER_REG_FAN0_RIPPLE 0x0f
/* fan 1 */
#define FSCHER_REG_FAN1_MIN 0x65
#define FSCHER_REG_FAN1_ACT 0x6b
#define FSCHER_REG_FAN1_STATE 0x62
#define FSCHER_REG_FAN1_RIPPLE 0x6f
/* fan 2 */
#define FSCHER_REG_FAN2_MIN 0xb5
#define FSCHER_REG_FAN2_ACT 0xbb
#define FSCHER_REG_FAN2_STATE 0xb2
#define FSCHER_REG_FAN2_RIPPLE 0xbf
/* voltage supervision */
#define FSCHER_REG_VOLT_12 0x45
#define FSCHER_REG_VOLT_5 0x42
#define FSCHER_REG_VOLT_BATT 0x48
/* temperature 0 */
#define FSCHER_REG_TEMP0_ACT 0x64
#define FSCHER_REG_TEMP0_STATE 0x71
/* temperature 1 */
#define FSCHER_REG_TEMP1_ACT 0x32
#define FSCHER_REG_TEMP1_STATE 0x81
/* temperature 2 */
#define FSCHER_REG_TEMP2_ACT 0x35
#define FSCHER_REG_TEMP2_STATE 0x91
/*
* Functions declaration
*/
static int fscher_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int fscher_detect(struct i2c_client *client, int kind,
struct i2c_board_info *info);
static int fscher_remove(struct i2c_client *client);
static struct fscher_data *fscher_update_device(struct device *dev);
static void fscher_init_client(struct i2c_client *client);
static int fscher_read_value(struct i2c_client *client, u8 reg);
static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value);
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id fscher_id[] = {
{ "fscher", fscher },
{ }
};
static struct i2c_driver fscher_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "fscher",
},
.probe = fscher_probe,
.remove = fscher_remove,
.id_table = fscher_id,
.detect = fscher_detect,
.address_data = &addr_data,
};
/*
* Client data (each client gets its own)
*/
struct fscher_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
u8 revision; /* revision of chip */
u8 global_event; /* global event status */
u8 global_control; /* global control register */
u8 watchdog[3]; /* watchdog */
u8 volt[3]; /* 12, 5, battery voltage */
u8 temp_act[3]; /* temperature */
u8 temp_status[3]; /* status of sensor */
u8 fan_act[3]; /* fans revolutions per second */
u8 fan_status[3]; /* fan status */
u8 fan_min[3]; /* fan min value for rps */
u8 fan_ripple[3]; /* divider for rps */
};
/*
* Sysfs stuff
*/
#define sysfs_r(kind, sub, offset, reg) \
static ssize_t show_##kind##sub (struct fscher_data *, char *, int); \
static ssize_t show_##kind##offset##sub (struct device *, struct device_attribute *attr, char *); \
static ssize_t show_##kind##offset##sub (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct fscher_data *data = fscher_update_device(dev); \
return show_##kind##sub(data, buf, (offset)); \
}
#define sysfs_w(kind, sub, offset, reg) \
static ssize_t set_##kind##sub (struct i2c_client *, struct fscher_data *, const char *, size_t, int, int); \
static ssize_t set_##kind##offset##sub (struct device *, struct device_attribute *attr, const char *, size_t); \
static ssize_t set_##kind##offset##sub (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct fscher_data *data = i2c_get_clientdata(client); \
return set_##kind##sub(client, data, buf, count, (offset), reg); \
}
#define sysfs_rw_n(kind, sub, offset, reg) \
sysfs_r(kind, sub, offset, reg) \
sysfs_w(kind, sub, offset, reg) \
static DEVICE_ATTR(kind##offset##sub, S_IRUGO | S_IWUSR, show_##kind##offset##sub, set_##kind##offset##sub);
#define sysfs_rw(kind, sub, reg) \
sysfs_r(kind, sub, 0, reg) \
sysfs_w(kind, sub, 0, reg) \
static DEVICE_ATTR(kind##sub, S_IRUGO | S_IWUSR, show_##kind##0##sub, set_##kind##0##sub);
#define sysfs_ro_n(kind, sub, offset, reg) \
sysfs_r(kind, sub, offset, reg) \
static DEVICE_ATTR(kind##offset##sub, S_IRUGO, show_##kind##offset##sub, NULL);
#define sysfs_ro(kind, sub, reg) \
sysfs_r(kind, sub, 0, reg) \
static DEVICE_ATTR(kind, S_IRUGO, show_##kind##0##sub, NULL);
#define sysfs_fan(offset, reg_status, reg_min, reg_ripple, reg_act) \
sysfs_rw_n(pwm, , offset, reg_min) \
sysfs_rw_n(fan, _status, offset, reg_status) \
sysfs_rw_n(fan, _div , offset, reg_ripple) \
sysfs_ro_n(fan, _input , offset, reg_act)
#define sysfs_temp(offset, reg_status, reg_act) \
sysfs_rw_n(temp, _status, offset, reg_status) \
sysfs_ro_n(temp, _input , offset, reg_act)
#define sysfs_in(offset, reg_act) \
sysfs_ro_n(in, _input, offset, reg_act)
#define sysfs_revision(reg_revision) \
sysfs_ro(revision, , reg_revision)
#define sysfs_alarms(reg_events) \
sysfs_ro(alarms, , reg_events)
#define sysfs_control(reg_control) \
sysfs_rw(control, , reg_control)
#define sysfs_watchdog(reg_control, reg_status, reg_preset) \
sysfs_rw(watchdog, _control, reg_control) \
sysfs_rw(watchdog, _status , reg_status) \
sysfs_rw(watchdog, _preset , reg_preset)
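/*
* Illustration (not compiled, added for clarity): the invocation
* sysfs_temp(1, FSCHER_REG_TEMP0_STATE, FSCHER_REG_TEMP0_ACT) below
* expands into show_temp1_status()/set_temp1_status() and
* show_temp1_input() wrappers plus the matching DEVICE_ATTR()
* definitions, each forwarding to the shared show_/set_ helpers
* further down with nr bound to the sensor number.
*/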
sysfs_fan(1, FSCHER_REG_FAN0_STATE, FSCHER_REG_FAN0_MIN,
FSCHER_REG_FAN0_RIPPLE, FSCHER_REG_FAN0_ACT)
sysfs_fan(2, FSCHER_REG_FAN1_STATE, FSCHER_REG_FAN1_MIN,
FSCHER_REG_FAN1_RIPPLE, FSCHER_REG_FAN1_ACT)
sysfs_fan(3, FSCHER_REG_FAN2_STATE, FSCHER_REG_FAN2_MIN,
FSCHER_REG_FAN2_RIPPLE, FSCHER_REG_FAN2_ACT)
sysfs_temp(1, FSCHER_REG_TEMP0_STATE, FSCHER_REG_TEMP0_ACT)
sysfs_temp(2, FSCHER_REG_TEMP1_STATE, FSCHER_REG_TEMP1_ACT)
sysfs_temp(3, FSCHER_REG_TEMP2_STATE, FSCHER_REG_TEMP2_ACT)
sysfs_in(0, FSCHER_REG_VOLT_12)
sysfs_in(1, FSCHER_REG_VOLT_5)
sysfs_in(2, FSCHER_REG_VOLT_BATT)
sysfs_revision(FSCHER_REG_REVISION)
sysfs_alarms(FSCHER_REG_EVENT_STATE)
sysfs_control(FSCHER_REG_CONTROL)
sysfs_watchdog(FSCHER_REG_WDOG_CONTROL, FSCHER_REG_WDOG_STATE, FSCHER_REG_WDOG_PRESET)
static struct attribute *fscher_attributes[] = {
&dev_attr_revision.attr,
&dev_attr_alarms.attr,
&dev_attr_control.attr,
&dev_attr_watchdog_status.attr,
&dev_attr_watchdog_control.attr,
&dev_attr_watchdog_preset.attr,
&dev_attr_in0_input.attr,
&dev_attr_in1_input.attr,
&dev_attr_in2_input.attr,
&dev_attr_fan1_status.attr,
&dev_attr_fan1_div.attr,
&dev_attr_fan1_input.attr,
&dev_attr_pwm1.attr,
&dev_attr_fan2_status.attr,
&dev_attr_fan2_div.attr,
&dev_attr_fan2_input.attr,
&dev_attr_pwm2.attr,
&dev_attr_fan3_status.attr,
&dev_attr_fan3_div.attr,
&dev_attr_fan3_input.attr,
&dev_attr_pwm3.attr,
&dev_attr_temp1_status.attr,
&dev_attr_temp1_input.attr,
&dev_attr_temp2_status.attr,
&dev_attr_temp2_input.attr,
&dev_attr_temp3_status.attr,
&dev_attr_temp3_input.attr,
NULL
};
static const struct attribute_group fscher_group = {
.attrs = fscher_attributes,
};
/*
* Real code
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
static int fscher_detect(struct i2c_client *new_client, int kind,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Do the remaining detection unless force or force_fscher parameter */
if (kind < 0) {
if ((i2c_smbus_read_byte_data(new_client,
FSCHER_REG_IDENT_0) != 0x48) /* 'H' */
|| (i2c_smbus_read_byte_data(new_client,
FSCHER_REG_IDENT_1) != 0x45) /* 'E' */
|| (i2c_smbus_read_byte_data(new_client,
FSCHER_REG_IDENT_2) != 0x52)) /* 'R' */
return -ENODEV;
}
strlcpy(info->type, "fscher", I2C_NAME_SIZE);
return 0;
}
static int fscher_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct fscher_data *data;
int err;
data = kzalloc(sizeof(struct fscher_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
fscher_init_client(new_client);
/* Register sysfs hooks */
if ((err = sysfs_create_group(&new_client->dev.kobj, &fscher_group)))
goto exit_free;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
return 0;
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &fscher_group);
exit_free:
kfree(data);
exit:
return err;
}
static int fscher_remove(struct i2c_client *client)
{
struct fscher_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &fscher_group);
kfree(data);
return 0;
}
static int fscher_read_value(struct i2c_client *client, u8 reg)
{
dev_dbg(&client->dev, "read reg 0x%02x\n", reg);
return i2c_smbus_read_byte_data(client, reg);
}
static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value)
{
dev_dbg(&client->dev, "write reg 0x%02x, val 0x%02x\n",
reg, value);
return i2c_smbus_write_byte_data(client, reg, value);
}
/* Called when we have found a new FSC Hermes. */
static void fscher_init_client(struct i2c_client *client)
{
struct fscher_data *data = i2c_get_clientdata(client);
/* Read revision from chip */
data->revision = fscher_read_value(client, FSCHER_REG_REVISION);
}
static struct fscher_data *fscher_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct fscher_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
dev_dbg(&client->dev, "Starting fscher update\n");
data->temp_act[0] = fscher_read_value(client, FSCHER_REG_TEMP0_ACT);
data->temp_act[1] = fscher_read_value(client, FSCHER_REG_TEMP1_ACT);
data->temp_act[2] = fscher_read_value(client, FSCHER_REG_TEMP2_ACT);
data->temp_status[0] = fscher_read_value(client, FSCHER_REG_TEMP0_STATE);
data->temp_status[1] = fscher_read_value(client, FSCHER_REG_TEMP1_STATE);
data->temp_status[2] = fscher_read_value(client, FSCHER_REG_TEMP2_STATE);
data->volt[0] = fscher_read_value(client, FSCHER_REG_VOLT_12);
data->volt[1] = fscher_read_value(client, FSCHER_REG_VOLT_5);
data->volt[2] = fscher_read_value(client, FSCHER_REG_VOLT_BATT);
data->fan_act[0] = fscher_read_value(client, FSCHER_REG_FAN0_ACT);
data->fan_act[1] = fscher_read_value(client, FSCHER_REG_FAN1_ACT);
data->fan_act[2] = fscher_read_value(client, FSCHER_REG_FAN2_ACT);
data->fan_status[0] = fscher_read_value(client, FSCHER_REG_FAN0_STATE);
data->fan_status[1] = fscher_read_value(client, FSCHER_REG_FAN1_STATE);
data->fan_status[2] = fscher_read_value(client, FSCHER_REG_FAN2_STATE);
data->fan_min[0] = fscher_read_value(client, FSCHER_REG_FAN0_MIN);
data->fan_min[1] = fscher_read_value(client, FSCHER_REG_FAN1_MIN);
data->fan_min[2] = fscher_read_value(client, FSCHER_REG_FAN2_MIN);
data->fan_ripple[0] = fscher_read_value(client, FSCHER_REG_FAN0_RIPPLE);
data->fan_ripple[1] = fscher_read_value(client, FSCHER_REG_FAN1_RIPPLE);
data->fan_ripple[2] = fscher_read_value(client, FSCHER_REG_FAN2_RIPPLE);
data->watchdog[0] = fscher_read_value(client, FSCHER_REG_WDOG_PRESET);
data->watchdog[1] = fscher_read_value(client, FSCHER_REG_WDOG_STATE);
data->watchdog[2] = fscher_read_value(client, FSCHER_REG_WDOG_CONTROL);
data->global_event = fscher_read_value(client, FSCHER_REG_EVENT_STATE);
data->global_control = fscher_read_value(client,
FSCHER_REG_CONTROL);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
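/*
* Note (added for clarity): readings are cached for two seconds
* (2 * HZ jiffies) above, so repeated sysfs reads within that
* window are served from struct fscher_data instead of issuing a
* new burst of SMBus transfers.
*/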
#define FAN_INDEX_FROM_NUM(nr) ((nr) - 1)
static ssize_t set_fan_status(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
/* bits 0..1, 3..7 reserved => mask with 0x04 */
unsigned long v = simple_strtoul(buf, NULL, 10) & 0x04;
mutex_lock(&data->update_lock);
data->fan_status[FAN_INDEX_FROM_NUM(nr)] &= ~v;
fscher_write_value(client, reg, v);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_fan_status(struct fscher_data *data, char *buf, int nr)
{
/* bits 0..1, 3..7 reserved => mask with 0x04 */
return sprintf(buf, "%u\n", data->fan_status[FAN_INDEX_FROM_NUM(nr)] & 0x04);
}
static ssize_t set_pwm(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
unsigned long v = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->fan_min[FAN_INDEX_FROM_NUM(nr)] = v > 0xff ? 0xff : v;
fscher_write_value(client, reg, data->fan_min[FAN_INDEX_FROM_NUM(nr)]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_pwm(struct fscher_data *data, char *buf, int nr)
{
return sprintf(buf, "%u\n", data->fan_min[FAN_INDEX_FROM_NUM(nr)]);
}
static ssize_t set_fan_div(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
/* supported values: 2, 4, 8 */
unsigned long v = simple_strtoul(buf, NULL, 10);
switch (v) {
case 2: v = 1; break;
case 4: v = 2; break;
case 8: v = 3; break;
default:
dev_err(&client->dev, "fan_div value %ld not "
"supported. Choose one of 2, 4 or 8!\n", v);
return -EINVAL;
}
mutex_lock(&data->update_lock);
/* bits 2..7 reserved => mask with 0x03 */
data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] &= ~0x03;
data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] |= v;
fscher_write_value(client, reg, data->fan_ripple[FAN_INDEX_FROM_NUM(nr)]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_fan_div(struct fscher_data *data, char *buf, int nr)
{
/* bits 2..7 reserved => mask with 0x03 */
return sprintf(buf, "%u\n", 1 << (data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] & 0x03));
}
#define RPM_FROM_REG(val) ((val) * 60)
static ssize_t show_fan_input (struct fscher_data *data, char *buf, int nr)
{
return sprintf(buf, "%u\n", RPM_FROM_REG(data->fan_act[FAN_INDEX_FROM_NUM(nr)]));
}
#define TEMP_INDEX_FROM_NUM(nr) ((nr) - 1)
static ssize_t set_temp_status(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
/* bits 2..7 reserved, 0 read only => mask with 0x02 */
unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02;
mutex_lock(&data->update_lock);
data->temp_status[TEMP_INDEX_FROM_NUM(nr)] &= ~v;
fscher_write_value(client, reg, v);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_temp_status(struct fscher_data *data, char *buf, int nr)
{
/* bits 2..7 reserved => mask with 0x03 */
return sprintf(buf, "%u\n", data->temp_status[TEMP_INDEX_FROM_NUM(nr)] & 0x03);
}
#define TEMP_FROM_REG(val) (((val) - 128) * 1000)
static ssize_t show_temp_input(struct fscher_data *data, char *buf, int nr)
{
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_act[TEMP_INDEX_FROM_NUM(nr)]));
}
/*
* The final conversion is specified in sensors.conf, as it depends on
* mainboard specific values. We export the registers contents as
* pseudo-hundredths-of-Volts (range 0V - 2.55V). Not that it makes much
* sense per se, but it minimizes the conversions count and keeps the
* values within a usual range.
*/
#define VOLT_FROM_REG(val) ((val) * 10)
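/*
* Worked example (illustrative): a raw register value of 120 is
* exported as VOLT_FROM_REG(120) = 1200; sensors.conf then rescales
* that board-specifically, e.g. for the 12V rail.
*/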
static ssize_t show_in_input(struct fscher_data *data, char *buf, int nr)
{
return sprintf(buf, "%u\n", VOLT_FROM_REG(data->volt[nr]));
}
static ssize_t show_revision(struct fscher_data *data, char *buf, int nr)
{
return sprintf(buf, "%u\n", data->revision);
}
static ssize_t show_alarms(struct fscher_data *data, char *buf, int nr)
{
/* bits 2, 5..6 reserved => mask with 0x9b */
return sprintf(buf, "%u\n", data->global_event & 0x9b);
}
static ssize_t set_control(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
/* bits 1..7 reserved => mask with 0x01 */
unsigned long v = simple_strtoul(buf, NULL, 10) & 0x01;
mutex_lock(&data->update_lock);
data->global_control = v;
fscher_write_value(client, reg, v);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_control(struct fscher_data *data, char *buf, int nr)
{
/* bits 1..7 reserved => mask with 0x01 */
return sprintf(buf, "%u\n", data->global_control & 0x01);
}
static ssize_t set_watchdog_control(struct i2c_client *client, struct
fscher_data *data, const char *buf, size_t count,
int nr, int reg)
{
/* bits 0..3 reserved => mask with 0xf0 */
unsigned long v = simple_strtoul(buf, NULL, 10) & 0xf0;
mutex_lock(&data->update_lock);
data->watchdog[2] &= ~0xf0;
data->watchdog[2] |= v;
fscher_write_value(client, reg, data->watchdog[2]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_watchdog_control(struct fscher_data *data, char *buf, int nr)
{
/* bits 0..3 reserved, bit 5 write only => mask with 0xd0 */
return sprintf(buf, "%u\n", data->watchdog[2] & 0xd0);
}
static ssize_t set_watchdog_status(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
/* bits 0, 2..7 reserved => mask with 0x02 */
unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02;
mutex_lock(&data->update_lock);
data->watchdog[1] &= ~v;
fscher_write_value(client, reg, v);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_watchdog_status(struct fscher_data *data, char *buf, int nr)
{
/* bits 0, 2..7 reserved => mask with 0x02 */
return sprintf(buf, "%u\n", data->watchdog[1] & 0x02);
}
static ssize_t set_watchdog_preset(struct i2c_client *client, struct fscher_data *data,
const char *buf, size_t count, int nr, int reg)
{
unsigned long v = simple_strtoul(buf, NULL, 10) & 0xff;
mutex_lock(&data->update_lock);
data->watchdog[0] = v;
fscher_write_value(client, reg, data->watchdog[0]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_watchdog_preset(struct fscher_data *data, char *buf, int nr)
{
return sprintf(buf, "%u\n", data->watchdog[0]);
}
static int __init sensors_fscher_init(void)
{
return i2c_add_driver(&fscher_driver);
}
static void __exit sensors_fscher_exit(void)
{
i2c_del_driver(&fscher_driver);
}
MODULE_AUTHOR("Reinhard Nissl <rnissl@gmx.de>");
MODULE_DESCRIPTION("FSC Hermes driver");
MODULE_LICENSE("GPL");
module_init(sensors_fscher_init);
module_exit(sensors_fscher_exit);
| gpl-2.0 |
sid1607/linux-3.14.65-src | drivers/misc/genwqe/card_dev.c | 234 | 35218 | /**
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
* Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
* Author: Michael Jung <mijung@de.ibm.com>
* Author: Michael Ruettger <michael@ibmra.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Character device representation of the GenWQE device. This allows
* user-space applications to communicate with the card.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "card_base.h"
#include "card_ddcb.h"
static int genwqe_open_files(struct genwqe_dev *cd)
{
int rc;
unsigned long flags;
spin_lock_irqsave(&cd->file_lock, flags);
rc = list_empty(&cd->file_list);
spin_unlock_irqrestore(&cd->file_lock, flags);
return !rc;
}
static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
unsigned long flags;
cfile->owner = current;
spin_lock_irqsave(&cd->file_lock, flags);
list_add(&cfile->list, &cd->file_list);
spin_unlock_irqrestore(&cd->file_lock, flags);
}
static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
unsigned long flags;
spin_lock_irqsave(&cd->file_lock, flags);
list_del(&cfile->list);
spin_unlock_irqrestore(&cd->file_lock, flags);
return 0;
}
static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
unsigned long flags;
spin_lock_irqsave(&cfile->pin_lock, flags);
list_add(&m->pin_list, &cfile->pin_list);
spin_unlock_irqrestore(&cfile->pin_lock, flags);
}
static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
unsigned long flags;
spin_lock_irqsave(&cfile->pin_lock, flags);
list_del(&m->pin_list);
spin_unlock_irqrestore(&cfile->pin_lock, flags);
return 0;
}
/**
* genwqe_search_pin() - Search for the mapping for a userspace address
* @cfile: Descriptor of opened file
* @u_addr: User virtual address
* @size: Size of buffer
* @virt_addr: Kernel virtual address to be updated
*
* Return: Pointer to the corresponding mapping, or NULL if not found
*/
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
unsigned long u_addr,
unsigned int size,
void **virt_addr)
{
unsigned long flags;
struct dma_mapping *m;
spin_lock_irqsave(&cfile->pin_lock, flags);
list_for_each_entry(m, &cfile->pin_list, pin_list) {
if ((((u64)m->u_vaddr) <= (u_addr)) &&
(((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
if (virt_addr)
*virt_addr = m->k_vaddr +
(u_addr - (u64)m->u_vaddr);
spin_unlock_irqrestore(&cfile->pin_lock, flags);
return m;
}
}
spin_unlock_irqrestore(&cfile->pin_lock, flags);
return NULL;
}
static void __genwqe_add_mapping(struct genwqe_file *cfile,
struct dma_mapping *dma_map)
{
unsigned long flags;
spin_lock_irqsave(&cfile->map_lock, flags);
list_add(&dma_map->card_list, &cfile->map_list);
spin_unlock_irqrestore(&cfile->map_lock, flags);
}
static void __genwqe_del_mapping(struct genwqe_file *cfile,
struct dma_mapping *dma_map)
{
unsigned long flags;
spin_lock_irqsave(&cfile->map_lock, flags);
list_del(&dma_map->card_list);
spin_unlock_irqrestore(&cfile->map_lock, flags);
}
/**
* __genwqe_search_mapping() - Search for the mapping for a userspace address
* @cfile: descriptor of opened file
* @u_addr: user virtual address
* @size: size of buffer
* @dma_addr: DMA address to be updated
* @virt_addr: kernel virtual address to be updated
*
* Return: Pointer to the corresponding mapping, or NULL if not found
*/
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
unsigned long u_addr,
unsigned int size,
dma_addr_t *dma_addr,
void **virt_addr)
{
unsigned long flags;
struct dma_mapping *m;
struct pci_dev *pci_dev = cfile->cd->pci_dev;
spin_lock_irqsave(&cfile->map_lock, flags);
list_for_each_entry(m, &cfile->map_list, card_list) {
if ((((u64)m->u_vaddr) <= (u_addr)) &&
(((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
/* match found: current is as expected and
addr is in range */
if (dma_addr)
*dma_addr = m->dma_addr +
(u_addr - (u64)m->u_vaddr);
if (virt_addr)
*virt_addr = m->k_vaddr +
(u_addr - (u64)m->u_vaddr);
spin_unlock_irqrestore(&cfile->map_lock, flags);
return m;
}
}
spin_unlock_irqrestore(&cfile->map_lock, flags);
dev_err(&pci_dev->dev,
"[%s] Entry not found: u_addr=%lx, size=%x\n",
__func__, u_addr, size);
return NULL;
}
static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
int i = 0;
struct list_head *node, *next;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cfile->cd->pci_dev;
list_for_each_safe(node, next, &cfile->map_list) {
dma_map = list_entry(node, struct dma_mapping, card_list);
list_del_init(&dma_map->card_list);
/*
* This is really a bug, because those things should
* already have been tidied up.
*
* GENWQE_MAPPING_RAW should have been removed via munmap().
* GENWQE_MAPPING_SGL_TEMP should be removed by the tidy-up code.
*/
dev_err(&pci_dev->dev,
"[%s] %d. cleanup mapping: u_vaddr=%p "
"u_kaddr=%016lx dma_addr=%lx\n", __func__, i++,
dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr,
(unsigned long)dma_map->dma_addr);
if (dma_map->type == GENWQE_MAPPING_RAW) {
/* we allocated this dynamically */
__genwqe_free_consistent(cd, dma_map->size,
dma_map->k_vaddr,
dma_map->dma_addr);
kfree(dma_map);
} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
/* we use dma_map statically from the request */
genwqe_user_vunmap(cd, dma_map, NULL);
}
}
}
static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
struct list_head *node, *next;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = cfile->cd;
list_for_each_safe(node, next, &cfile->pin_list) {
dma_map = list_entry(node, struct dma_mapping, pin_list);
/*
* This is not a bug, because a killed process might
* not call the unpin ioctl, which is supposed to free
* the resources.
*
* Pinnings are dynamically allocated and need to be
* deleted.
*/
list_del_init(&dma_map->pin_list);
genwqe_user_vunmap(cd, dma_map, NULL);
kfree(dma_map);
}
}
/**
* genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
*
* E.g. genwqe_kill_fasync(cd, SIGIO);
*/
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
unsigned int files = 0;
unsigned long flags;
struct genwqe_file *cfile;
spin_lock_irqsave(&cd->file_lock, flags);
list_for_each_entry(cfile, &cd->file_list, list) {
if (cfile->async_queue)
kill_fasync(&cfile->async_queue, sig, POLL_HUP);
files++;
}
spin_unlock_irqrestore(&cd->file_lock, flags);
return files;
}
static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
{
unsigned int files = 0;
unsigned long flags;
struct genwqe_file *cfile;
spin_lock_irqsave(&cd->file_lock, flags);
list_for_each_entry(cfile, &cd->file_list, list) {
force_sig(sig, cfile->owner);
files++;
}
spin_unlock_irqrestore(&cd->file_lock, flags);
return files;
}
/**
* genwqe_open() - file open
* @inode: file system information
* @filp: file handle
*
* This function is executed whenever an application calls
* open("/dev/genwqe",..).
*
* Return: 0 if successful or <0 if errors
*/
static int genwqe_open(struct inode *inode, struct file *filp)
{
struct genwqe_dev *cd;
struct genwqe_file *cfile;
struct pci_dev *pci_dev;
cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
if (cfile == NULL)
return -ENOMEM;
cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
pci_dev = cd->pci_dev;
cfile->cd = cd;
cfile->filp = filp;
cfile->client = NULL;
spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */
INIT_LIST_HEAD(&cfile->map_list);
spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */
INIT_LIST_HEAD(&cfile->pin_list);
filp->private_data = cfile;
genwqe_add_file(cd, cfile);
return 0;
}
/**
* genwqe_fasync() - Setup process to receive SIGIO.
* @fd: file descriptor
* @filp: file handle
* @mode: file mode
*
* Sending a signal is working as following:
*
* if (cdev->async_queue)
* kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
*
* Some devices also implement asynchronous notification to indicate
* when the device can be written; in this case, of course,
* kill_fasync must be called with a mode of POLL_OUT.
*/
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
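/*
 * Illustrative user-space sketch (assumption, not part of the
 * driver): arming the fasync path above so the process receives the
 * SIGIO sent by genwqe_kill_fasync(), e.g. before a card reset. A
 * SIGIO handler is assumed to be installed already; the fcntl()
 * calls are standard POSIX.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
static void arm_sigio(int fd)
{
fcntl(fd, F_SETOWN, getpid()); /* route SIGIO to this process */
fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC); /* ends up in genwqe_fasync() */
}
#endif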
/**
* genwqe_release() - file close
* @inode: file system information
* @filp: file handle
*
* This function is executed whenever an application calls 'close(fd_genwqe)'
*
* Return: always 0
*/
static int genwqe_release(struct inode *inode, struct file *filp)
{
struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
struct genwqe_dev *cd = cfile->cd;
/* there must be no entries in these lists! */
genwqe_remove_mappings(cfile);
genwqe_remove_pinnings(cfile);
/* remove this filp from the asynchronously notified filp's */
genwqe_fasync(-1, filp, 0);
/*
* For this to work we must not release cd when this cfile is
* not yet released, otherwise the list entry is invalid,
* because the list itself gets reinstantiated!
*/
genwqe_del_file(cd, cfile);
kfree(cfile);
return 0;
}
static void genwqe_vma_open(struct vm_area_struct *vma)
{
/* nothing ... */
}
/**
* genwqe_vma_close() - Called each time when vma is unmapped
*
* Free memory which got allocated by GenWQE mmap().
*/
static void genwqe_vma_close(struct vm_area_struct *vma)
{
unsigned long vsize = vma->vm_end - vma->vm_start;
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
cdev_genwqe);
struct pci_dev *pci_dev = cd->pci_dev;
dma_addr_t d_addr = 0;
struct genwqe_file *cfile = vma->vm_private_data;
dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
&d_addr, NULL);
if (dma_map == NULL) {
dev_err(&pci_dev->dev,
" [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
vsize);
return;
}
__genwqe_del_mapping(cfile, dma_map);
__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
dma_map->dma_addr);
kfree(dma_map);
}
static struct vm_operations_struct genwqe_vma_ops = {
.open = genwqe_vma_open,
.close = genwqe_vma_close,
};
/**
* genwqe_mmap() - Provide contiguous buffers to userspace
*
* We use mmap() to allocate contiguous buffers used for DMA
* transfers. After the buffer is allocated we remap it to user-space
* and remember a reference to our dma_mapping data structure, where
* we store the associated DMA address and allocated size.
*
* When we receive a DDCB execution request with the ATS bits set to
* plain buffer, we lookup our dma_mapping list to find the
* corresponding DMA address for the associated user-space address.
*/
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
int rc;
unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
struct genwqe_dev *cd = cfile->cd;
struct dma_mapping *dma_map;
if (vsize == 0)
return -EINVAL;
if (get_order(vsize) > MAX_ORDER)
return -ENOMEM;
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
if (dma_map == NULL)
return -ENOMEM;
genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
dma_map->u_vaddr = (void *)vma->vm_start;
dma_map->size = vsize;
dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
&dma_map->dma_addr);
if (dma_map->k_vaddr == NULL) {
rc = -ENOMEM;
goto free_dma_map;
}
if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
rc = remap_pfn_range(vma,
vma->vm_start,
pfn,
vsize,
vma->vm_page_prot);
if (rc != 0) {
rc = -EFAULT;
goto free_dma_mem;
}
vma->vm_private_data = cfile;
vma->vm_ops = &genwqe_vma_ops;
__genwqe_add_mapping(cfile, dma_map);
return 0;
free_dma_mem:
__genwqe_free_consistent(cd, dma_map->size,
dma_map->k_vaddr,
dma_map->dma_addr);
free_dma_map:
kfree(dma_map);
return rc;
}
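/*
 * Illustrative user-space sketch (assumption, not part of the
 * driver): obtaining a DMA-able buffer through the mmap() path
 * above. Device name and buffer size are made-up example values.
 */
#if 0
int fd = open("/dev/genwqe0_card", O_RDWR);
void *buf = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
if (buf != MAP_FAILED) {
/* ... use buf as plain DDCB data buffer ... */
munmap(buf, 64 * 1024); /* ends up in genwqe_vma_close() */
}
#endif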
/**
* do_flash_update() - Execute flash update (write image or CVPD)
* @cfile: genwqe file handle (carries the genwqe device)
* @load: details about image load
*
* Return: 0 if successful
*/
#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
static int do_flash_update(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
int rc = 0;
int blocks_to_flash;
dma_addr_t dma_addr;
u64 flash = 0;
size_t tocopy = 0;
u8 __user *buf;
u8 *xbuf;
u32 crc;
u8 cmdopts;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cd->pci_dev;
if ((load->size & 0x3) != 0)
return -EINVAL;
if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
return -EINVAL;
/* FIXME Bits have changed for new service layer! */
switch ((char)load->partition) {
case '0':
cmdopts = 0x14;
break; /* download/erase_first/part_0 */
case '1':
cmdopts = 0x1C;
break; /* download/erase_first/part_1 */
case 'v': /* cmdopts = 0x0c (VPD) */
default:
return -EINVAL;
}
buf = (u8 __user *)load->data_addr;
xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
if (xbuf == NULL)
return -ENOMEM;
blocks_to_flash = load->size / FLASH_BLOCK;
while (load->size) {
struct genwqe_ddcb_cmd *req;
/*
* We must be 4 byte aligned. The buffer must be zero-padded
* to have defined values when calculating the CRC.
*/
tocopy = min_t(size_t, load->size, FLASH_BLOCK);
rc = copy_from_user(xbuf, buf, tocopy);
if (rc) {
rc = -EFAULT;
goto free_buffer;
}
crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);
dev_dbg(&pci_dev->dev,
"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
__func__, (unsigned long)dma_addr, crc, tocopy,
blocks_to_flash);
/* prepare DDCB for SLU process */
req = ddcb_requ_alloc();
if (req == NULL) {
rc = -ENOMEM;
goto free_buffer;
}
req->cmd = SLCMD_MOVE_FLASH;
req->cmdopts = cmdopts;
/* prepare invariant values */
if (genwqe_get_slu_id(cd) <= 0x2) {
*(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr);
*(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy);
*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
req->__asiv[24] = load->uid;
*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
/* for simulation only */
*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
req->asiv_length = 32; /* bytes included in crc calc */
} else { /* setup DDCB for ATS architecture */
*(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr);
*(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy);
*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
/* for simulation only */
*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
/* Rd only */
req->ats = 0x4ULL << 44;
req->asiv_length = 40; /* bytes included in crc calc */
}
req->asv_length = 8;
/* For Genwqe5 we get back the calculated CRC */
*(u64 *)&req->asv[0] = 0ULL; /* 0x80 */
rc = __genwqe_execute_raw_ddcb(cd, req);
load->retc = req->retc;
load->attn = req->attn;
load->progress = req->progress;
if (rc < 0) {
ddcb_requ_free(req);
goto free_buffer;
}
if (req->retc != DDCB_RETC_COMPLETE) {
rc = -EIO;
ddcb_requ_free(req);
goto free_buffer;
}
load->size -= tocopy;
flash += tocopy;
buf += tocopy;
blocks_to_flash--;
ddcb_requ_free(req);
}
free_buffer:
__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
return rc;
}
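/*
 * Illustrative arithmetic for the loop above: a 1 MiB image gives
 * blocks_to_flash = 0x100000 / 0x40000 = 4, i.e. four passes, each
 * staging one 256 KiB chunk in the single DMA bounce buffer xbuf.
 */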
static int do_flash_read(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
int rc, blocks_to_flash;
dma_addr_t dma_addr;
u64 flash = 0;
size_t tocopy = 0;
u8 __user *buf;
u8 *xbuf;
u8 cmdopts;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cd->pci_dev;
struct genwqe_ddcb_cmd *cmd;
if ((load->size & 0x3) != 0)
return -EINVAL;
if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
return -EINVAL;
/* FIXME Bits have changed for new service layer! */
switch ((char)load->partition) {
case '0':
cmdopts = 0x12;
break; /* upload/part_0 */
case '1':
cmdopts = 0x1A;
break; /* upload/part_1 */
case 'v':
default:
return -EINVAL;
}
buf = (u8 __user *)load->data_addr;
xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
if (xbuf == NULL)
return -ENOMEM;
blocks_to_flash = load->size / FLASH_BLOCK;
while (load->size) {
/*
* We must be 4 byte aligned. The buffer must be zero-padded
* to have defined values when calculating the CRC.
*/
tocopy = min_t(size_t, load->size, FLASH_BLOCK);
dev_dbg(&pci_dev->dev,
"[%s] DMA: %lx SZ: %ld %d\n",
__func__, (unsigned long)dma_addr, tocopy,
blocks_to_flash);
/* prepare DDCB for SLU process */
cmd = ddcb_requ_alloc();
if (cmd == NULL) {
rc = -ENOMEM;
goto free_buffer;
}
cmd->cmd = SLCMD_MOVE_FLASH;
cmd->cmdopts = cmdopts;
/* prepare invariant values */
if (genwqe_get_slu_id(cd) <= 0x2) {
*(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr);
*(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy);
*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
cmd->__asiv[24] = load->uid;
*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
cmd->asiv_length = 32; /* bytes included in crc calc */
} else { /* setup DDCB for ATS architecture */
*(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr);
*(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy);
*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */
/* rd/wr */
cmd->ats = 0x5ULL << 44;
cmd->asiv_length = 40; /* bytes included in crc calc */
}
cmd->asv_length = 8;
/* we only get back the calculated CRC */
*(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */
rc = __genwqe_execute_raw_ddcb(cd, cmd);
load->retc = cmd->retc;
load->attn = cmd->attn;
load->progress = cmd->progress;
if ((rc < 0) && (rc != -EBADMSG)) {
ddcb_requ_free(cmd);
goto free_buffer;
}
rc = copy_to_user(buf, xbuf, tocopy);
if (rc) {
rc = -EFAULT;
ddcb_requ_free(cmd);
goto free_buffer;
}
/* We know that we can get retc 0x104 with CRC err */
if (((cmd->retc == DDCB_RETC_FAULT) &&
(cmd->attn != 0x02)) || /* Normally ignore CRC error */
((cmd->retc == DDCB_RETC_COMPLETE) &&
(cmd->attn != 0x00))) { /* Everything was fine */
rc = -EIO;
ddcb_requ_free(cmd);
goto free_buffer;
}
load->size -= tocopy;
flash += tocopy;
buf += tocopy;
blocks_to_flash--;
ddcb_requ_free(cmd);
}
rc = 0;
free_buffer:
__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
return rc;
}
static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
int rc;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cfile->cd->pci_dev;
struct dma_mapping *dma_map;
unsigned long map_addr;
unsigned long map_size;
if ((m->addr == 0x0) || (m->size == 0))
return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
if (dma_map == NULL)
return -ENOMEM;
genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
kfree(dma_map);
return rc;
}
genwqe_add_pin(cfile, dma_map);
return 0;
}
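/*
 * Worked example for the rounding above (illustrative values,
 * assuming 4 KiB pages): m->addr = 0x12345, m->size = 0x2000 gives
 * map_addr = 0x12345 & PAGE_MASK = 0x12000 and
 * map_size = round_up(0x2000 + 0x345, PAGE_SIZE) = 0x3000,
 * so the pinning covers all pages touched by the user range.
 */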
static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
struct genwqe_dev *cd = cfile->cd;
struct dma_mapping *dma_map;
unsigned long map_addr;
unsigned long map_size;
if (m->addr == 0x0)
return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
if (dma_map == NULL)
return -ENOENT;
genwqe_del_pin(cfile, dma_map);
genwqe_user_vunmap(cd, dma_map, NULL);
kfree(dma_map);
return 0;
}
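/*
 * Illustrative user-space sketch (assumption): pinning a buffer once
 * so that later DDCBs can reference it without repeated mapping
 * work. Field names follow the code above (addr/size); the exact
 * uapi struct layout lives in the genwqe ioctl header.
 */
#if 0
struct genwqe_mem m = {
.addr = (unsigned long)buf,
.size = len,
};
if (ioctl(fd, GENWQE_PIN_MEM, &m) == 0) {
/* ... execute DDCBs referencing buf ... */
ioctl(fd, GENWQE_UNPIN_MEM, &m);
}
#endif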
/**
* ddcb_cmd_cleanup() - Remove dynamically created fixup entries
*
* Only if there are any. Pinnings are not removed.
*/
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
unsigned int i;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = cfile->cd;
for (i = 0; i < DDCB_FIXUPS; i++) {
dma_map = &req->dma_mappings[i];
if (dma_mapping_used(dma_map)) {
__genwqe_del_mapping(cfile, dma_map);
genwqe_user_vunmap(cd, dma_map, req);
}
if (req->sgl[i] != NULL) {
genwqe_free_sgl(cd, req->sgl[i],
req->sgl_dma_addr[i],
req->sgl_size[i]);
req->sgl[i] = NULL;
req->sgl_dma_addr[i] = 0x0;
req->sgl_size[i] = 0;
}
}
return 0;
}
/**
* ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
*
* Before the DDCB gets executed we need to handle the fixups. We
* replace the user-space addresses with DMA addresses or do
* additional setup work e.g. generating a scatter-gather list which
* is used to describe the memory referred to in the fixup.
*/
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
int rc;
unsigned int asiv_offs, i;
struct genwqe_dev *cd = cfile->cd;
struct genwqe_ddcb_cmd *cmd = &req->cmd;
struct dma_mapping *m;
const char *type = "UNKNOWN";
for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
i++, asiv_offs += 0x08) {
u64 u_addr;
dma_addr_t d_addr;
u32 u_size = 0;
u64 ats_flags;
ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);
switch (ats_flags) {
case ATS_TYPE_DATA:
break; /* nothing to do here */
case ATS_TYPE_FLAT_RDWR:
case ATS_TYPE_FLAT_RD: {
u_addr = be64_to_cpu(*((__be64 *)&cmd->
asiv[asiv_offs]));
u_size = be32_to_cpu(*((__be32 *)&cmd->
asiv[asiv_offs + 0x08]));
/*
* No data available. Ignore u_addr in this
* case and set addr to 0. Hardware must not
* fetch the buffer.
*/
if (u_size == 0x0) {
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(0x0);
break;
}
m = __genwqe_search_mapping(cfile, u_addr, u_size,
&d_addr, NULL);
if (m == NULL) {
rc = -EFAULT;
goto err_out;
}
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(d_addr);
break;
}
case ATS_TYPE_SGL_RDWR:
case ATS_TYPE_SGL_RD: {
int page_offs, nr_pages, offs;
u_addr = be64_to_cpu(*((__be64 *)
&cmd->asiv[asiv_offs]));
u_size = be32_to_cpu(*((__be32 *)
&cmd->asiv[asiv_offs + 0x08]));
/*
* No data available. Ignore u_addr in this
* case and set addr to 0. Hardware must not
* fetch the empty sgl.
*/
if (u_size == 0x0) {
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(0x0);
break;
}
m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
if (m != NULL) {
type = "PINNING";
page_offs = (u_addr -
(u64)m->u_vaddr)/PAGE_SIZE;
} else {
type = "MAPPING";
m = &req->dma_mappings[i];
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size, req);
if (rc != 0)
goto err_out;
__genwqe_add_mapping(cfile, m);
page_offs = 0;
}
offs = offset_in_page(u_addr);
nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
/* create genwqe style scatter gather list */
req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
&req->sgl_dma_addr[i],
&req->sgl_size[i]);
if (req->sgl[i] == NULL) {
rc = -ENOMEM;
goto err_out;
}
genwqe_setup_sgl(cd, offs, u_size,
req->sgl[i],
req->sgl_dma_addr[i],
req->sgl_size[i],
m->dma_list,
page_offs,
nr_pages);
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(req->sgl_dma_addr[i]);
break;
}
default:
rc = -EINVAL;
goto err_out;
}
}
return 0;
err_out:
ddcb_cmd_cleanup(cfile, req);
return rc;
}
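/*
 * Illustrative note (inferred from the flash code above, not a
 * definitive spec): cmd->ats carries one 4-bit type nibble per
 * 8-byte ASIV entry. do_flash_update() sets req->ats = 0x4ULL << 44
 * ("Rd only") for the DMA address it stores at asiv[0]; this loop
 * would decode that entry via ATS_GET_FLAGS(cmd->ats, 0x00) as a
 * read-only flat buffer.
 */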
/**
* genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
*
* The code will build up the translation tables or lookup the
* contiguous memory allocation table to find the right translations
* and DMA addresses.
*/
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
struct genwqe_ddcb_cmd *cmd)
{
int rc;
struct genwqe_dev *cd = cfile->cd;
struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
rc = ddcb_cmd_fixups(cfile, req);
if (rc != 0)
return rc;
rc = __genwqe_execute_raw_ddcb(cd, cmd);
ddcb_cmd_cleanup(cfile, req);
return rc;
}
static int do_execute_ddcb(struct genwqe_file *cfile,
unsigned long arg, int raw)
{
int rc;
struct genwqe_ddcb_cmd *cmd;
struct ddcb_requ *req;
struct genwqe_dev *cd = cfile->cd;
cmd = ddcb_requ_alloc();
if (cmd == NULL)
return -ENOMEM;
req = container_of(cmd, struct ddcb_requ, cmd);
if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
ddcb_requ_free(cmd);
return -EFAULT;
}
if (!raw)
rc = genwqe_execute_ddcb(cfile, cmd);
else
rc = __genwqe_execute_raw_ddcb(cd, cmd);
/* Copy back only the modified fields. Do not copy ASIV
back since the copy got modified by the driver. */
if (copy_to_user((void __user *)arg, cmd,
sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
ddcb_requ_free(cmd);
return -EFAULT;
}
ddcb_requ_free(cmd);
return rc;
}
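/*
 * Illustrative user-space sketch (assumption): submitting a DDCB
 * through the ioctl above. Only fields visible in this file are
 * shown; real commands set cmd/cmdopts/asiv/ats per the uapi header.
 */
#if 0
struct genwqe_ddcb_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
/* fill in cmd.cmd, cmd.cmdopts, cmd.asiv[], cmd.ats ... */
rc = ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd);
/* cmd.retc/attn/progress are copied back by the driver */
#endif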
/**
* genwqe_ioctl() - IO control
* @filp: file handle
* @cmd: command identifier (passed from user)
* @arg: argument (passed from user)
*
* Return: 0 success
*/
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc = 0;
struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
struct genwqe_dev *cd = cfile->cd;
struct genwqe_reg_io __user *io;
u64 val;
u32 reg_offs;
if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
return -EINVAL;
switch (cmd) {
case GENWQE_GET_CARD_STATE:
put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
return 0;
/* Register access */
case GENWQE_READ_REG64: {
io = (struct genwqe_reg_io __user *)arg;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
return -EINVAL;
val = __genwqe_readq(cd, reg_offs);
put_user(val, &io->val64);
return 0;
}
case GENWQE_WRITE_REG64: {
io = (struct genwqe_reg_io __user *)arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
return -EINVAL;
if (get_user(val, &io->val64))
return -EFAULT;
__genwqe_writeq(cd, reg_offs, val);
return 0;
}
case GENWQE_READ_REG32: {
io = (struct genwqe_reg_io __user *)arg;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
return -EINVAL;
val = __genwqe_readl(cd, reg_offs);
put_user(val, &io->val64);
return 0;
}
case GENWQE_WRITE_REG32: {
io = (struct genwqe_reg_io __user *)arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
return -EINVAL;
if (get_user(val, &io->val64))
return -EFAULT;
__genwqe_writel(cd, reg_offs, val);
return 0;
}
/* Flash update/reading */
case GENWQE_SLU_UPDATE: {
struct genwqe_bitstream load;
if (!genwqe_is_privileged(cd))
return -EPERM;
if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (copy_from_user(&load, (void __user *)arg,
sizeof(load)))
return -EFAULT;
rc = do_flash_update(cfile, &load);
if (copy_to_user((void __user *)arg, &load, sizeof(load)))
return -EFAULT;
return rc;
}
case GENWQE_SLU_READ: {
struct genwqe_bitstream load;
if (!genwqe_is_privileged(cd))
return -EPERM;
if (genwqe_flash_readback_fails(cd))
return -ENOSPC; /* known to fail for old versions */
if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
return -EFAULT;
rc = do_flash_read(cfile, &load);
if (copy_to_user((void __user *)arg, &load, sizeof(load)))
return -EFAULT;
return rc;
}
/* memory pinning and unpinning */
case GENWQE_PIN_MEM: {
struct genwqe_mem m;
if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
return -EFAULT;
return genwqe_pin_mem(cfile, &m);
}
case GENWQE_UNPIN_MEM: {
struct genwqe_mem m;
if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
return -EFAULT;
return genwqe_unpin_mem(cfile, &m);
}
/* launch a DDCB and wait for completion */
case GENWQE_EXECUTE_DDCB:
return do_execute_ddcb(cfile, arg, 0);
case GENWQE_EXECUTE_RAW_DDCB: {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return do_execute_ddcb(cfile, arg, 1);
}
default:
return -EINVAL;
}
return rc;
}
#if defined(CONFIG_COMPAT)
/**
* genwqe_compat_ioctl() - Compatibility ioctl
*
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/genwqe<n>_card.
*
* @filp: file pointer.
* @cmd: command.
* @arg: user argument.
* Return: zero on success or negative number on failure.
*/
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */
static const struct file_operations genwqe_fops = {
.owner = THIS_MODULE,
.open = genwqe_open,
.fasync = genwqe_fasync,
.mmap = genwqe_mmap,
.unlocked_ioctl = genwqe_ioctl,
#if defined(CONFIG_COMPAT)
.compat_ioctl = genwqe_compat_ioctl,
#endif
.release = genwqe_release,
};
static int genwqe_device_initialized(struct genwqe_dev *cd)
{
return cd->dev != NULL;
}
/**
* genwqe_device_create() - Create and configure genwqe char device
* @cd: genwqe device descriptor
*
* This function must be called before we create any more genwqe
* character devices, because it is allocating the major and minor
* numbers which are supposed to be used by the client drivers.
*/
int genwqe_device_create(struct genwqe_dev *cd)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
/*
* Here starts the individual setup per client. It must
* initialize its own cdev data structure with its own fops.
* The appropriate devnum needs to be created. The ranges must
* not overlap.
*/
rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
GENWQE_MAX_MINOR, GENWQE_DEVNAME);
if (rc < 0) {
dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
goto err_dev;
}
cdev_init(&cd->cdev_genwqe, &genwqe_fops);
cd->cdev_genwqe.owner = THIS_MODULE;
rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
if (rc < 0) {
dev_err(&pci_dev->dev, "err: cdev_add failed\n");
goto err_add;
}
/*
* Finally the device in /dev/... must be created. The rule is
* to use card%d_clientname for each created device.
*/
cd->dev = device_create_with_groups(cd->class_genwqe,
&cd->pci_dev->dev,
cd->devnum_genwqe, cd,
genwqe_attribute_groups,
GENWQE_DEVNAME "%u_card",
cd->card_idx);
if (IS_ERR(cd->dev)) {
rc = PTR_ERR(cd->dev);
goto err_cdev;
}
rc = genwqe_init_debugfs(cd);
if (rc != 0)
goto err_debugfs;
return 0;
err_debugfs:
device_destroy(cd->class_genwqe, cd->devnum_genwqe);
err_cdev:
cdev_del(&cd->cdev_genwqe);
err_add:
unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
err_dev:
cd->dev = NULL;
return rc;
}
static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
int rc;
unsigned int i;
struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_open_files(cd))
return 0;
dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);
rc = genwqe_kill_fasync(cd, SIGIO);
if (rc > 0) {
/* give kill_timeout seconds to close file descriptors ... */
for (i = 0; (i < genwqe_kill_timeout) &&
genwqe_open_files(cd); i++) {
dev_info(&pci_dev->dev, " %d sec ...", i);
cond_resched();
msleep(1000);
}
/* if no open files we can safely continue, else ... */
if (!genwqe_open_files(cd))
return 0;
dev_warn(&pci_dev->dev,
"[%s] send SIGKILL and wait ...\n", __func__);
rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
if (rc) {
/* Give kill_timeout more seconds to end processes */
for (i = 0; (i < genwqe_kill_timeout) &&
genwqe_open_files(cd); i++) {
dev_warn(&pci_dev->dev, " %d sec ...", i);
cond_resched();
msleep(1000);
}
}
}
return 0;
}
/**
* genwqe_device_remove() - Remove genwqe's char device
*
* This function must be called after the client devices are removed
* because it will free the major/minor number range for the genwqe
* drivers.
*
* This function must be robust enough to be called twice.
*/
int genwqe_device_remove(struct genwqe_dev *cd)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_device_initialized(cd))
return 1;
genwqe_inform_and_stop_processes(cd);
/*
* We currently do wait until all file descriptors are
* closed. This leads to a problem when we abort the
* application which will decrease this reference from
* 1/unused to 0/illegal and not from 2/used 1/empty.
*/
rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
if (rc != 1) {
dev_err(&pci_dev->dev,
"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
panic("Fatal err: cannot free resources with pending references!");
}
genqwe_exit_debugfs(cd);
device_destroy(cd->class_genwqe, cd->devnum_genwqe);
cdev_del(&cd->cdev_genwqe);
unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
cd->dev = NULL;
return 0;
}
| gpl-2.0 |
OPTICM/android_kernel_samsung_msm8660-common | arch/arm/mach-msm/clock-dummy.c | 490 | 1498 | /* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "clock.h"
static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
{
return 0;
}
static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
{
return 0;
}
static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
{
return 0;
}
static int dummy_clk_set_flags(struct clk *clk, unsigned flags)
{
return 0;
}
static unsigned long dummy_clk_get_rate(struct clk *clk)
{
return 0;
}
static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
{
return rate;
}
static bool dummy_clk_is_local(struct clk *clk)
{
return true;
}
static struct clk_ops clk_ops_dummy = {
.reset = dummy_clk_reset,
.set_rate = dummy_clk_set_rate,
.set_max_rate = dummy_clk_set_max_rate,
.set_flags = dummy_clk_set_flags,
.get_rate = dummy_clk_get_rate,
.round_rate = dummy_clk_round_rate,
.is_local = dummy_clk_is_local,
};
struct clk dummy_clk = {
.dbg_name = "dummy_clk",
.ops = &clk_ops_dummy,
CLK_INIT(dummy_clk),
};
| gpl-2.0 |
W4TCH0UT/kernel-msm-ghost | arch/powerpc/platforms/85xx/p1022_ds.c | 1514 | 17018 | /*
* P1022DS board specific routines
*
* Authors: Travis Wheatley <travis.wheatley@freescale.com>
* Dave Liu <daveliu@freescale.com>
* Timur Tabi <timur@freescale.com>
*
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This file is taken from the Freescale P1022DS BSP, with modifications:
* 2) No AMP support
* 3) No PCI endpoint support
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <asm/div64.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <asm/udbg.h>
#include <asm/fsl_guts.h>
#include <asm/fsl_lbc.h>
#include "smp.h"
#include "mpc85xx.h"
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
#define PMUXCR_ELBCDIU_MASK 0xc0000000
#define PMUXCR_ELBCDIU_NOR16 0x80000000
#define PMUXCR_ELBCDIU_DIU 0x40000000
/*
* Board-specific initialization of the DIU. This code should probably be
* executed when the DIU is opened, rather than in arch code, but the DIU
* driver does not have a mechanism for this (yet).
*
* This is especially problematic on the P1022DS because the local bus (eLBC)
* and the DIU video signals share the same pins, which means that enabling the
* DIU will disable access to NOR flash.
*/
/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
#define CLKDVDR_PXCKEN 0x80000000
#define CLKDVDR_PXCKINV 0x10000000
#define CLKDVDR_PXCKDLY 0x06000000
#define CLKDVDR_PXCLK_MASK 0x00FF0000
/* Some ngPIXIS register definitions */
#define PX_CTL 3
#define PX_BRDCFG0 8
#define PX_BRDCFG1 9
#define PX_BRDCFG0_ELBC_SPI_MASK 0xc0
#define PX_BRDCFG0_ELBC_SPI_ELBC 0x00
#define PX_BRDCFG0_ELBC_SPI_NULL 0xc0
#define PX_BRDCFG0_ELBC_DIU 0x02
#define PX_BRDCFG1_DVIEN 0x80
#define PX_BRDCFG1_DFPEN 0x40
#define PX_BRDCFG1_BACKLIGHT 0x20
#define PX_BRDCFG1_DDCEN 0x10
#define PX_CTL_ALTACC 0x80
/*
* DIU Area Descriptor
*
* Note that we need to byte-swap the value before it's written to the AD
* register. So even though the registers don't look like they're in the same
* bit positions as they are on the MPC8610, the same value is written to the
* AD register on the MPC8610 and on the P1022.
*/
#define AD_BYTE_F 0x10000000
#define AD_ALPHA_C_MASK 0x0E000000
#define AD_ALPHA_C_SHIFT 25
#define AD_BLUE_C_MASK 0x01800000
#define AD_BLUE_C_SHIFT 23
#define AD_GREEN_C_MASK 0x00600000
#define AD_GREEN_C_SHIFT 21
#define AD_RED_C_MASK 0x00180000
#define AD_RED_C_SHIFT 19
#define AD_PALETTE 0x00040000
#define AD_PIXEL_S_MASK 0x00030000
#define AD_PIXEL_S_SHIFT 16
#define AD_COMP_3_MASK 0x0000F000
#define AD_COMP_3_SHIFT 12
#define AD_COMP_2_MASK 0x00000F00
#define AD_COMP_2_SHIFT 8
#define AD_COMP_1_MASK 0x000000F0
#define AD_COMP_1_SHIFT 4
#define AD_COMP_0_MASK 0x0000000F
#define AD_COMP_0_SHIFT 0
#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
(blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
(red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
(c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
(c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))
/**
* p1022ds_get_pixel_format: return the Area Descriptor for a given pixel depth
*
* The Area Descriptor is a 32-bit value that determine which bits in each
* pixel are to be used for each color.
*/
static u32 p1022ds_get_pixel_format(enum fsl_diu_monitor_port port,
unsigned int bits_per_pixel)
{
switch (bits_per_pixel) {
case 32:
/* 0x88883316 */
return MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8);
case 24:
/* 0x88082219 */
return MAKE_AD(4, 0, 1, 2, 2, 0, 8, 8, 8);
case 16:
/* 0x65053118 */
return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0);
default:
pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
return 0;
}
}
/**
* p1022ds_set_gamma_table: update the gamma table, if necessary
*
* On some boards, the gamma table for some ports may need to be modified.
* This is not the case on the P1022DS, so we do nothing.
*/
static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
char *gamma_table_base)
{
}
struct fsl_law {
u32 lawbar;
u32 reserved1;
u32 lawar;
u32 reserved[5];
};
#define LAWBAR_MASK 0x00F00000
#define LAWBAR_SHIFT 12
#define LAWAR_EN 0x80000000
#define LAWAR_TGT_MASK 0x01F00000
#define LAW_TRGT_IF_LBC (0x04 << 20)
#define LAWAR_MASK (LAWAR_EN | LAWAR_TGT_MASK)
#define LAWAR_MATCH (LAWAR_EN | LAW_TRGT_IF_LBC)
#define BR_BA 0xFFFF8000
/*
* Map a BRx value to a physical address
*
* The localbus BRx registers only store the lower 32 bits of the address. To
* obtain the upper four bits, we need to scan the LAW table. The entry which
* maps to the localbus will contain the upper four bits.
*/
static phys_addr_t lbc_br_to_phys(const void *ecm, unsigned int count, u32 br)
{
#ifndef CONFIG_PHYS_64BIT
/*
* If we only have 32-bit addressing, then the BRx address *is* the
* physical address.
*/
return br & BR_BA;
#else
const struct fsl_law *law = ecm + 0xc08;
unsigned int i;
for (i = 0; i < count; i++) {
u64 lawbar = in_be32(&law[i].lawbar);
u32 lawar = in_be32(&law[i].lawar);
if ((lawar & LAWAR_MASK) == LAWAR_MATCH)
/* Extract the upper four bits */
return (br & BR_BA) | ((lawbar & LAWBAR_MASK) << 12);
}
return 0;
#endif
}
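/*
 * Worked example (illustrative values, CONFIG_PHYS_64BIT): for a
 * localbus window at physical 0xf_e800_0000, BR0 carries 0xe8000000
 * in its BA field and the matching LAW holds lawbar = 0x00fe8000
 * (the address shifted right by 12). Then
 * (lawbar & LAWBAR_MASK) << 12 = 0xf_0000_0000, which OR-ed with
 * (br & BR_BA) reconstructs 0xf_e800_0000.
 */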
/**
* p1022ds_set_monitor_port: switch the output to a different monitor port
*/
static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
struct device_node *guts_node;
struct device_node *lbc_node = NULL;
struct device_node *law_node = NULL;
struct ccsr_guts __iomem *guts;
struct fsl_lbc_regs *lbc = NULL;
void *ecm = NULL;
u8 __iomem *lbc_lcs0_ba = NULL;
u8 __iomem *lbc_lcs1_ba = NULL;
phys_addr_t cs0_addr, cs1_addr;
u32 br0, or0, br1, or1;
const __be32 *iprop;
unsigned int num_laws;
u8 b;
/* Map the global utilities registers. */
guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_node) {
pr_err("p1022ds: missing global utilities device node\n");
return;
}
guts = of_iomap(guts_node, 0);
if (!guts) {
pr_err("p1022ds: could not map global utilities device\n");
goto exit;
}
lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
if (!lbc_node) {
pr_err("p1022ds: missing localbus node\n");
goto exit;
}
lbc = of_iomap(lbc_node, 0);
if (!lbc) {
pr_err("p1022ds: could not map localbus node\n");
goto exit;
}
law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law");
if (!law_node) {
pr_err("p1022ds: missing local access window node\n");
goto exit;
}
ecm = of_iomap(law_node, 0);
if (!ecm) {
pr_err("p1022ds: could not map local access window node\n");
goto exit;
}
iprop = of_get_property(law_node, "fsl,num-laws", 0);
if (!iprop) {
pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
goto exit;
}
num_laws = be32_to_cpup(iprop);
/*
* Indirect mode requires both BR0 and BR1 to be set to "GPCM",
* otherwise writes to these addresses won't actually appear on the
* local bus, and so the PIXIS won't see them.
*
* In FCM mode, writes go to the NAND controller, which does not pass
* them to the localbus directly. So we force BR0 and BR1 into GPCM
* mode, since we don't care about what's behind the localbus any
* more.
*/
br0 = in_be32(&lbc->bank[0].br);
br1 = in_be32(&lbc->bank[1].br);
or0 = in_be32(&lbc->bank[0].or);
or1 = in_be32(&lbc->bank[1].or);
/* Make sure CS0 and CS1 are programmed */
if (!(br0 & BR_V) || !(br1 & BR_V)) {
pr_err("p1022ds: CS0 and/or CS1 is not programmed\n");
goto exit;
}
/*
* Use the existing BRx/ORx values if it's already GPCM. Otherwise,
* force the values to simple 32KB GPCM windows with the most
* conservative timing.
*/
if ((br0 & BR_MSEL) != BR_MS_GPCM) {
br0 = (br0 & BR_BA) | BR_V;
or0 = 0xFFFF8000 | 0xFF7;
out_be32(&lbc->bank[0].br, br0);
out_be32(&lbc->bank[0].or, or0);
}
if ((br1 & BR_MSEL) != BR_MS_GPCM) {
br1 = (br1 & BR_BA) | BR_V;
or1 = 0xFFFF8000 | 0xFF7;
out_be32(&lbc->bank[1].br, br1);
out_be32(&lbc->bank[1].or, or1);
}
cs0_addr = lbc_br_to_phys(ecm, num_laws, br0);
if (!cs0_addr) {
pr_err("p1022ds: could not determine physical address for CS0"
" (BR0=%08x)\n", br0);
goto exit;
}
cs1_addr = lbc_br_to_phys(ecm, num_laws, br1);
if (!cs1_addr) {
pr_err("p1022ds: could not determine physical address for CS1"
" (BR1=%08x)\n", br1);
goto exit;
}
lbc_lcs0_ba = ioremap(cs0_addr, 1);
if (!lbc_lcs0_ba) {
pr_err("p1022ds: could not ioremap CS0 address %llx\n",
(unsigned long long)cs0_addr);
goto exit;
}
lbc_lcs1_ba = ioremap(cs1_addr, 1);
if (!lbc_lcs1_ba) {
pr_err("p1022ds: could not ioremap CS1 address %llx\n",
(unsigned long long)cs1_addr);
goto exit;
}
/* Make sure we're in indirect mode first. */
if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
PMUXCR_ELBCDIU_DIU) {
struct device_node *pixis_node;
void __iomem *pixis;
pixis_node =
of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
if (!pixis_node) {
pr_err("p1022ds: missing pixis node\n");
goto exit;
}
pixis = of_iomap(pixis_node, 0);
of_node_put(pixis_node);
if (!pixis) {
pr_err("p1022ds: could not map pixis registers\n");
goto exit;
}
/* Enable indirect PIXIS mode. */
setbits8(pixis + PX_CTL, PX_CTL_ALTACC);
iounmap(pixis);
/* Switch the board mux to the DIU */
out_8(lbc_lcs0_ba, PX_BRDCFG0); /* BRDCFG0 */
b = in_8(lbc_lcs1_ba);
b |= PX_BRDCFG0_ELBC_DIU;
out_8(lbc_lcs1_ba, b);
/* Set the chip mux to DIU mode. */
clrsetbits_be32(&guts->pmuxcr, PMUXCR_ELBCDIU_MASK,
PMUXCR_ELBCDIU_DIU);
in_be32(&guts->pmuxcr);
}
switch (port) {
case FSL_DIU_PORT_DVI:
/* Enable the DVI port, disable the DFP and the backlight */
out_8(lbc_lcs0_ba, PX_BRDCFG1);
b = in_8(lbc_lcs1_ba);
b &= ~(PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT);
b |= PX_BRDCFG1_DVIEN;
out_8(lbc_lcs1_ba, b);
break;
case FSL_DIU_PORT_LVDS:
/*
* LVDS also needs backlight enabled, otherwise the display
* will be blank.
*/
/* Enable the DFP port, disable the DVI and the backlight */
out_8(lbc_lcs0_ba, PX_BRDCFG1);
b = in_8(lbc_lcs1_ba);
b &= ~PX_BRDCFG1_DVIEN;
b |= PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT;
out_8(lbc_lcs1_ba, b);
break;
default:
pr_err("p1022ds: unsupported monitor port %i\n", port);
}
exit:
if (lbc_lcs1_ba)
iounmap(lbc_lcs1_ba);
if (lbc_lcs0_ba)
iounmap(lbc_lcs0_ba);
if (lbc)
iounmap(lbc);
if (ecm)
iounmap(ecm);
if (guts)
iounmap(guts);
of_node_put(law_node);
of_node_put(lbc_node);
of_node_put(guts_node);
}
/**
* p1022ds_set_pixel_clock: program the DIU's clock
*
* @pixclock: the wavelength, in picoseconds, of the clock
*/
void p1022ds_set_pixel_clock(unsigned int pixclock)
{
struct device_node *guts_np = NULL;
struct ccsr_guts __iomem *guts;
unsigned long freq;
u64 temp;
u32 pxclk;
/* Map the global utilities registers. */
guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_np) {
pr_err("p1022ds: missing global utilities device node\n");
return;
}
guts = of_iomap(guts_np, 0);
of_node_put(guts_np);
if (!guts) {
pr_err("p1022ds: could not map global utilities device\n");
return;
}
/* Convert pixclock from a wavelength to a frequency */
temp = 1000000000000ULL;
do_div(temp, pixclock);
freq = temp;
/*
* 'pxclk' is the ratio of the platform clock to the pixel clock.
* This number is programmed into the CLKDVDR register, and the valid
* range of values is 2-255.
*/
pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
pxclk = clamp_t(u32, pxclk, 2, 255);
/* Disable the pixel clock, and set it to non-inverted and no delay */
clrbits32(&guts->clkdvdr,
CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
/* Enable the clock and set the pxclk */
setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
iounmap(guts);
}
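/*
 * Worked example (illustrative numbers): pixclock = 25000 ps gives
 * freq = 10^12 / 25000 = 40 MHz. With a 533 MHz platform clock,
 * pxclk = DIV_ROUND_CLOSEST(533000000, 40000000) = 13, which is
 * clamped to the valid 2-255 range and written to CLKDVDR bits
 * 16-23.
 */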
/**
* p1022ds_valid_monitor_port: set the monitor port for sysfs
*/
enum fsl_diu_monitor_port
p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
{
switch (port) {
case FSL_DIU_PORT_DVI:
case FSL_DIU_PORT_LVDS:
return port;
default:
return FSL_DIU_PORT_DVI; /* Dual-link LVDS is not supported */
}
}
#endif
void __init p1022_ds_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
}
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
/*
* Disables a node in the device tree.
*
* This function is called before kmalloc() is available, so the 'new' object
* should be allocated in the global area. The easiest way to do that is
* to allocate one static local variable for each call to this function.
*/
static void __init disable_one_node(struct device_node *np, struct property *new)
{
struct property *old;
old = of_find_property(np, new->name, NULL);
if (old)
prom_update_property(np, new, old);
else
prom_add_property(np, new);
pr_info("p1022ds: disabling %s node\n", np->full_name);
}
/* TRUE if there is a "video=fslfb" command-line parameter. */
static bool fslfb;
/*
* Search for a "video=fslfb" command-line parameter, and set 'fslfb' to
* true if we find it.
*
* We need to use early_param() instead of __setup() because the normal
* __setup() gets called too late. However, early_param() gets called very
* early, before the device tree is unflattened, so all we can do now is set a
* global variable. Later on, p1022_ds_setup_arch() will use that variable
* to determine if we need to update the device tree.
*/
static int __init early_video_setup(char *options)
{
fslfb = (strncmp(options, "fslfb:", 6) == 0);
return 0;
}
early_param("video", early_video_setup);
#endif
/*
* Setup the architecture
*/
static void __init p1022_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
#endif
dma_addr_t max = 0xffffffff;
if (ppc_md.progress)
ppc_md.progress("p1022_ds_setup_arch()", 0);
#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "fsl,p1022-pcie") {
struct resource rsrc;
struct pci_controller *hose;
of_address_to_resource(np, 0, &rsrc);
if ((rsrc.start & 0xfffff) == 0x8000)
fsl_add_bridge(np, 1);
else
fsl_add_bridge(np, 0);
hose = pci_find_hose_for_OF_device(np);
max = min(max, hose->dma_window_base_cur +
hose->dma_window_size);
}
#endif
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diu_ops.get_pixel_format = p1022ds_get_pixel_format;
diu_ops.set_gamma_table = p1022ds_set_gamma_table;
diu_ops.set_monitor_port = p1022ds_set_monitor_port;
diu_ops.set_pixel_clock = p1022ds_set_pixel_clock;
diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
/*
* Disable the NOR and NAND flash nodes if there is video=fslfb...
* command-line parameter. When the DIU is active, the localbus is
* unavailable, so we have to disable these nodes before the MTD
* driver loads.
*/
if (fslfb) {
struct device_node *np =
of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
if (np) {
struct device_node *np2;
of_node_get(np);
np2 = of_find_compatible_node(np, NULL, "cfi-flash");
if (np2) {
static struct property nor_status = {
.name = "status",
.value = "disabled",
.length = sizeof("disabled"),
};
disable_one_node(np2, &nor_status);
of_node_put(np2);
}
of_node_get(np);
np2 = of_find_compatible_node(np, NULL,
"fsl,elbc-fcm-nand");
if (np2) {
static struct property nand_status = {
.name = "status",
.value = "disabled",
.length = sizeof("disabled"),
};
disable_one_node(np2, &nand_status);
of_node_put(np2);
}
of_node_put(np);
}
}
#endif
mpc85xx_smp_init();
#ifdef CONFIG_SWIOTLB
if (memblock_end_of_DRAM() > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
}
#endif
pr_info("Freescale P1022 DS reference board\n");
}
machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
/*
* Called very early, device-tree isn't unflattened
*/
static int __init p1022_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "fsl,p1022ds");
}
define_machine(p1022_ds) {
.name = "P1022 DS",
.probe = p1022_ds_probe,
.setup_arch = p1022_ds_setup_arch,
.init_IRQ = p1022_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
| gpl-2.0 |
LEPT-Development/android_kernel_lge_msm8916 | arch/arm/plat-versatile/platsmp.c | 2026 | 2300 | /*
* linux/arch/arm/plat-versatile/platsmp.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
/*
* Write pen_release in a way that is guaranteed to be visible to all
* observers, irrespective of whether they're taking part in coherency
* or not. This is necessary for the hotplug code to work reliably.
*/
static void __cpuinit write_pen_release(int val)
{
pen_release = val;
smp_wmb();
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
static DEFINE_SPINLOCK(boot_lock);
void __cpuinit versatile_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
write_pen_release(-1);
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
}
int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
/*
* Set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
/*
* This is really belt and braces; we hold unintended secondary
* CPUs in the holding pen until we're ready for them. However,
* since we haven't sent them a soft interrupt, they shouldn't
* be there.
*/
write_pen_release(cpu_logical_map(cpu));
/*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
smp_rmb();
if (pen_release == -1)
break;
udelay(10);
}
/*
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
| gpl-2.0 |
CyanideDevices/android_kernel_samsung_smdk4412 | drivers/block/swim3.c | 2538 | 30387 | /*
* Driver for the SWIM3 (Super Woz Integrated Machine 3)
* floppy controller found on Power Macintoshes.
*
* Copyright (C) 1996 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* TODO:
* handle 2 drives
* handle GCR disks
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
static DEFINE_MUTEX(swim3_mutex);
static struct request_queue *swim3_queue;
static struct gendisk *disks[2];
static struct request *fd_req;
#define MAX_FLOPPIES 2
enum swim_state {
idle,
locating,
seeking,
settling,
do_transfer,
jogging,
available,
revalidating,
ejecting
};
#define REG(x) unsigned char x; char x ## _pad[15];
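/*
 * e.g. REG(data) expands to "unsigned char data; char data_pad[15];",
 * padding each 8-bit register out to the 16-byte stride at which the
 * SWIM3 registers appear in the MMIO block.
 */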
/*
* The names for these registers mostly represent speculation on my part.
* It will be interesting to see how close they are to the names Apple uses.
*/
struct swim3 {
REG(data);
REG(timer); /* counts down at 1MHz */
REG(error);
REG(mode);
REG(select); /* controls CA0, CA1, CA2 and LSTRB signals */
REG(setup);
REG(control); /* writing bits clears them */
REG(status); /* writing bits sets them in control */
REG(intr);
REG(nseek); /* # tracks to seek */
REG(ctrack); /* current track number */
REG(csect); /* current sector number */
REG(gap3); /* size of gap 3 in track format */
REG(sector); /* sector # to read or write */
REG(nsect); /* # sectors to read or write */
REG(intr_enable);
};
#define control_bic control
#define control_bis status
/* Bits in select register */
#define CA_MASK 7
#define LSTRB 8
/* Bits in control register */
#define DO_SEEK 0x80
#define FORMAT 0x40
#define SELECT 0x20
#define WRITE_SECTORS 0x10
#define DO_ACTION 0x08
#define DRIVE2_ENABLE 0x04
#define DRIVE_ENABLE 0x02
#define INTR_ENABLE 0x01
/* Bits in status register */
#define FIFO_1BYTE 0x80
#define FIFO_2BYTE 0x40
#define ERROR 0x20
#define DATA 0x08
#define RDDATA 0x04
#define INTR_PENDING 0x02
#define MARK_BYTE 0x01
/* Bits in intr and intr_enable registers */
#define ERROR_INTR 0x20
#define DATA_CHANGED 0x10
#define TRANSFER_DONE 0x08
#define SEEN_SECTOR 0x04
#define SEEK_DONE 0x02
#define TIMER_DONE 0x01
/* Bits in error register */
#define ERR_DATA_CRC 0x80
#define ERR_ADDR_CRC 0x40
#define ERR_OVERRUN 0x04
#define ERR_UNDERRUN 0x01
/* Bits in setup register */
#define S_SW_RESET 0x80
#define S_GCR_WRITE 0x40
#define S_IBM_DRIVE 0x20
#define S_TEST_MODE 0x10
#define S_FCLK_DIV2 0x08
#define S_GCR 0x04
#define S_COPY_PROT 0x02
#define S_INV_WDATA 0x01
/* Select values for swim3_action */
#define SEEK_POSITIVE 0
#define SEEK_NEGATIVE 4
#define STEP 1
#define MOTOR_ON 2
#define MOTOR_OFF 6
#define INDEX 3
#define EJECT 7
#define SETMFM 9
#define SETGCR 13
/* Select values for swim3_select and swim3_readbit */
#define STEP_DIR 0
#define STEPPING 1
#define MOTOR_ON 2
#define RELAX 3 /* also eject in progress */
#define READ_DATA_0 4
#define TWOMEG_DRIVE 5
#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
#define WRITE_PROT 9
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
#define MFM_MODE 13
#define SEEK_COMPLETE 14
#define ONEMEG_MEDIA 15
/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
#define GCR_SYNC_EXC 0x3f
#define GCR_SYNC_CONV 0x80
#define GCR_FIRST_MARK 0xd5
#define GCR_SECOND_MARK 0xaa
#define GCR_ADDR_MARK "\xd5\xaa\x00"
#define GCR_DATA_MARK "\xd5\xaa\x0b"
#define GCR_SLIP_BYTE "\x27\xaa"
#define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f"
#define DATA_99 "\x99\x99"
#define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
#define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
#define MFM_GAP_LEN 12
struct floppy_state {
enum swim_state state;
spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
int dma_intr; /* interrupt number for DMA channel */
int cur_cyl; /* cylinder head is on, or -1 */
int cur_sector; /* last sector we saw go past */
int req_cyl; /* the cylinder for the current r/w request */
int head; /* head number ditto */
int req_sector; /* sector number ditto */
int scount; /* # sectors we're transferring at present */
int retries;
int settle_time;
int secpercyl; /* disk geometry information */
int secpertrack;
int total_secs;
int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
struct dbdma_cmd *dma_cmd;
int ref_count;
int expect_cyl;
struct timer_list timeout;
int timeout_pending;
int ejected;
wait_queue_head_t wait;
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
};
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
static unsigned short write_preamble[] = {
0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */
0, 0, 0, 0, 0, 0, /* sync field */
0x99a1, 0x99a1, 0x99a1, 0x99fb, /* data address mark */
0x990f /* no escape for 512 bytes */
};
static unsigned short write_postamble[] = {
0x9904, /* insert CRC */
0x4e4e, 0x4e4e,
0x9908, /* stop writing */
0, 0, 0, 0, 0, 0
};
static void swim3_select(struct floppy_state *fs, int sel);
static void swim3_action(struct floppy_state *fs, int action);
static int swim3_readbit(struct floppy_state *fs, int bit);
static void do_fd_request(struct request_queue * q);
static void start_request(struct floppy_state *fs);
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long));
static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
static void settle_timeout(unsigned long data);
static void xfer_timeout(unsigned long data);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param);
static int floppy_open(struct block_device *bdev, fmode_t mode);
static int floppy_release(struct gendisk *disk, fmode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(int err, unsigned int nr_bytes)
{
if (__blk_end_request(fd_req, err, nr_bytes))
return true;
fd_req = NULL;
return false;
}
static bool swim3_end_request_cur(int err)
{
return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
}
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
out_8(&sw->select, RELAX);
if (sel & 8)
out_8(&sw->control_bis, SELECT);
else
out_8(&sw->control_bic, SELECT);
out_8(&sw->select, sel & CA_MASK);
}
static void swim3_action(struct floppy_state *fs, int action)
{
struct swim3 __iomem *sw = fs->swim3;
swim3_select(fs, action);
udelay(1);
out_8(&sw->select, sw->select | LSTRB);
udelay(2);
out_8(&sw->select, sw->select & ~LSTRB);
udelay(1);
}
static int swim3_readbit(struct floppy_state *fs, int bit)
{
struct swim3 __iomem *sw = fs->swim3;
int stat;
swim3_select(fs, bit);
udelay(1);
stat = in_8(&sw->status);
return (stat & DATA) == 0;
}
static void do_fd_request(struct request_queue * q)
{
int i;
for(i=0; i<floppy_count; i++) {
struct floppy_state *fs = &floppy_states[i];
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
continue;
start_request(fs);
}
}
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
if (!fd_req) {
fd_req = blk_fetch_request(swim3_queue);
if (!fd_req)
break;
}
req = fd_req;
#if 0
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
printk(" errors=%d current_nr_sectors=%u\n",
req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
swim3_end_request_cur(-EIO);
continue;
}
if (fs->ejected) {
swim3_end_request_cur(-EIO);
continue;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_end_request_cur(-EIO);
continue;
}
}
/* Do not remove the cast. blk_rq_pos(req) is now a
* sector_t and can be 64 bits, but it will never go
* past 32 bits for this driver anyway, so we can
* safely cast it down and not have to do a 64/32
* division
*/
fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
act(fs);
}
}
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
unsigned long flags;
spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
{
struct swim3 __iomem *sw = fs->swim3;
swim3_select(fs, READ_DATA_0);
in_8(&sw->intr); /* clear SEEN_SECTOR bit */
in_8(&sw->error);
out_8(&sw->intr_enable, SEEN_SECTOR);
out_8(&sw->control_bis, DO_ACTION);
/* enable intr when track found */
set_timeout(fs, HZ, scan_timeout); /* enable timeout */
}
static inline void seek_track(struct floppy_state *fs, int n)
{
struct swim3 __iomem *sw = fs->swim3;
if (n >= 0) {
swim3_action(fs, SEEK_POSITIVE);
sw->nseek = n;
} else {
swim3_action(fs, SEEK_NEGATIVE);
sw->nseek = -n;
}
fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
swim3_select(fs, STEP);
in_8(&sw->error);
/* enable intr when seek finished */
out_8(&sw->intr_enable, SEEK_DONE);
out_8(&sw->control_bis, DO_SEEK);
set_timeout(fs, 3*HZ, seek_timeout); /* enable timeout */
fs->settle_time = 0;
}
static inline void init_dma(struct dbdma_cmd *cp, int cmd,
void *buf, int count)
{
st_le16(&cp->req_count, count);
st_le16(&cp->command, cmd);
st_le32(&cp->phy_addr, virt_to_bus(buf));
cp->xfer_status = 0;
}
static inline void setup_transfer(struct floppy_state *fs)
{
int n;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
if (blk_rq_cur_sectors(fd_req) <= 0) {
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
return;
}
if (rq_data_dir(fd_req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
if (n > blk_rq_cur_sectors(fd_req))
n = blk_rq_cur_sectors(fd_req);
}
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
if (rq_data_dir(fd_req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
if (rq_data_dir(fd_req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
/* enable intr when transfer complete */
out_8(&sw->intr_enable, TRANSFER_DONE);
out_8(&sw->control_bis, DO_ACTION);
set_timeout(fs, 2*HZ, xfer_timeout); /* enable timeout */
}
static void act(struct floppy_state *fs)
{
for (;;) {
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
else
fs->state = seeking;
break;
}
scan_track(fs);
return;
case seeking:
if (fs->cur_cyl < 0) {
fs->expect_cyl = -1;
fs->state = locating;
break;
}
if (fs->req_cyl == fs->cur_cyl) {
printk("whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
seek_track(fs, fs->req_cyl - fs->cur_cyl);
return;
case settling:
/* check for SEEK_COMPLETE after 30ms */
fs->settle_time = (HZ + 32) / 33;
set_timeout(fs, fs->settle_time, settle_timeout);
return;
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
swim3_end_request_cur(-EIO);
fs->state = idle;
return;
}
fs->state = seeking;
break;
}
setup_transfer(fs);
return;
case jogging:
seek_track(fs, -5);
return;
default:
printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
return;
}
}
}
static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
return;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
return;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
static void xfer_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
int n;
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
udelay(1);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
(rq_data_dir(fd_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fd_req));
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
struct floppy_state *fs = (struct floppy_state *) dev_id;
struct swim3 __iomem *sw = fs->swim3;
int intr, err, n;
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
break;
}
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
break;
case seeking:
case jogging:
if (sw->nseek == 0) {
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (fs->state == seeking)
++fs->retries;
fs->state = settling;
act(fs);
}
break;
case settling:
out_8(&sw->intr_enable, 0);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
act(fs);
break;
case do_transfer:
if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
break;
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
del_timer(&fs->timeout);
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
if (rq_data_dir(fd_req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
* On writing, the swim3 sometimes doesn't use
* up all the bytes of the postamble, so we can still
* see DMA active here. That doesn't matter as long
* as all the sector data has been transferred.
*/
if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
/* wait a little while for DMA to complete */
for (n = 0; n < 100; ++n) {
if (cp->xfer_status != 0)
break;
udelay(1);
barrier();
}
}
/* turn off DMA */
out_le32(&dr->control, (RUN | PAUSE) << 16);
stat = ld_le16(&cp->xfer_status);
resid = ld_le16(&cp->res_count);
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
blk_update_request(fd_req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
(long)blk_rq_pos(fd_req), err);
swim3_end_request_cur(-EIO);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* must have been an error */
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
break;
}
if (swim3_end_request(0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
if (++fs->head > 1) {
fs->head = 0;
++fs->req_cyl;
}
}
act(fs);
} else
fs->state = idle;
}
if (fs->state == idle)
start_request(fs);
break;
default:
printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
}
return IRQ_HANDLED;
}
/*
static void fd_dma_interrupt(int irq, void *dev_id)
{
}
*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
spin_lock_irqsave(&fs->lock, flags);
if (fs->state != idle) {
++fs->wanted;
while (fs->state != available) {
if (interruptible && signal_pending(current)) {
--fs->wanted;
spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
}
--fs->wanted;
}
fs->state = state;
spin_unlock_irqrestore(&fs->lock, flags);
return 0;
}
static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
spin_lock_irqsave(&fs->lock, flags);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&fs->lock, flags);
}
static int fd_eject(struct floppy_state *fs)
{
int err, n;
err = grab_drive(fs, ejecting, 1);
if (err)
return err;
swim3_action(fs, EJECT);
for (n = 20; n > 0; --n) {
if (signal_pending(current)) {
err = -EINTR;
break;
}
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
if (swim3_readbit(fs, DISK_IN) == 0)
break;
}
swim3_select(fs, RELAX);
udelay(150);
fs->ejected = 1;
release_drive(fs);
return err;
}
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
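/* Illustrative geometry check: 18 sectors/track * 2 heads * 80 tracks
* = 2880 sectors of 512 bytes = 1474560 bytes, the capacity
* conventionally marketed as 1.44MB.
*/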
static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
int err;
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
switch (cmd) {
case FDEJECT:
if (fs->ref_count != 1)
return -EBUSY;
err = fd_eject(fs);
return err;
case FDGETPRM:
if (copy_to_user((void __user *) param, &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
return 0;
}
return -ENOTTY;
}
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
mutex_lock(&swim3_mutex);
ret = floppy_locked_ioctl(bdev, mode, cmd, param);
mutex_unlock(&swim3_mutex);
return ret;
}
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
int n, err = 0;
if (fs->ref_count == 0) {
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
out_8(&sw->control_bic, 0xff);
out_8(&sw->mode, 0x95);
udelay(10);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
swim3_action(fs, MOTOR_ON);
fs->write_prot = -1;
fs->cur_cyl = -1;
for (n = 0; n < 2 * HZ; ++n) {
if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
break;
if (signal_pending(current)) {
err = -EINTR;
break;
}
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
}
if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
|| swim3_readbit(fs, DISK_IN) == 0))
err = -ENXIO;
swim3_action(fs, SETMFM);
swim3_select(fs, RELAX);
} else if (fs->ref_count == -1 || mode & FMODE_EXCL)
return -EBUSY;
if (err == 0 && (mode & FMODE_NDELAY) == 0
&& (mode & (FMODE_READ|FMODE_WRITE))) {
check_disk_change(bdev);
if (fs->ejected)
err = -ENXIO;
}
if (err == 0 && (mode & FMODE_WRITE)) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot)
err = -EROFS;
}
if (err) {
if (fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
swim3_select(fs, RELAX);
}
return err;
}
if (mode & FMODE_EXCL)
fs->ref_count = -1;
else
++fs->ref_count;
return 0;
}
static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
int ret;
mutex_lock(&swim3_mutex);
ret = floppy_open(bdev, mode);
mutex_unlock(&swim3_mutex);
return ret;
}
static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, 0xff);
swim3_select(fs, RELAX);
}
mutex_unlock(&swim3_mutex);
return 0;
}
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct floppy_state *fs = disk->private_data;
return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int floppy_revalidate(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw;
int ret, n;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
sw = fs->swim3;
grab_drive(fs, revalidating, 0);
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bis, DRIVE_ENABLE);
swim3_action(fs, MOTOR_ON); /* necessary? */
fs->write_prot = -1;
fs->cur_cyl = -1;
mdelay(1);
for (n = HZ; n > 0; --n) {
if (swim3_readbit(fs, SEEK_COMPLETE))
break;
if (signal_pending(current))
break;
swim3_select(fs, RELAX);
schedule_timeout_interruptible(1);
}
ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
|| swim3_readbit(fs, DISK_IN) == 0;
if (ret)
swim3_action(fs, MOTOR_OFF);
else {
fs->ejected = 0;
swim3_action(fs, SETMFM);
}
swim3_select(fs, RELAX);
release_drive(fs);
return ret;
}
static const struct block_device_operations floppy_fops = {
.open = floppy_unlocked_open,
.release = floppy_release,
.ioctl = floppy_ioctl,
.check_events = floppy_check_events,
.revalidate_disk= floppy_revalidate,
};
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
printk(KERN_WARNING "ifd%d: no address for %s\n",
index, swim->full_name);
return -ENXIO;
}
if (macio_irq_count(mdev) < 2) {
printk(KERN_WARNING "fd%d: no intrs for device %s\n",
index, swim->full_name);
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
index, swim->full_name);
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
index, swim->full_name);
macio_release_resource(mdev, 0);
return -EBUSY;
}
dev_set_drvdata(&mdev->ofdev.dev, fs);
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
memset(fs, 0, sizeof(*fs));
spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
printk("fd%d: couldn't map registers for %s\n",
index, swim->full_name);
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
printk("fd%d: couldn't map DMA for %s\n",
index, swim->full_name);
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
}
fs->swim3_intr = macio_irq(mdev, 0);
fs->dma_intr = macio_irq(mdev, 1);
fs->cur_cyl = -1;
fs->cur_sector = -1;
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
index, fs->swim3_intr, swim->full_name);
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
}
/*
if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
fs->dma_intr);
return -EBUSY;
}
*/
init_timer(&fs->timeout);
printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
mdev->media_bay ? "in media bay" : "");
return 0;
out_unmap:
iounmap(fs->dma);
iounmap(fs->swim3);
out_release:
macio_release_resource(mdev, 0);
macio_release_resource(mdev, 1);
return rc;
}
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
int i, rc;
struct gendisk *disk;
/* Add the drive */
rc = swim3_add_device(mdev, floppy_count);
if (rc)
return rc;
/* Now create the queue if not there yet */
if (swim3_queue == NULL) {
/* If this fails, there isn't much we can do as the driver is still
* too dumb to remove the device; just bail out.
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
if (swim3_queue == NULL) {
unregister_blkdev(FLOPPY_MAJOR, "fd");
return 0;
}
}
/* Now register that disk. Same comment about failure handling */
i = floppy_count++;
disk = disks[i] = alloc_disk(1);
if (disk == NULL)
return 0;
disk->major = FLOPPY_MAJOR;
disk->first_minor = i;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[i];
disk->queue = swim3_queue;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", i);
set_capacity(disk, 2880);
add_disk(disk);
return 0;
}
static struct of_device_id swim3_match[] =
{
{
.name = "swim3",
},
{
.compatible = "ohare-swim3"
},
{
.compatible = "swim3"
},
{ /* end of list */ }
};
static struct macio_driver swim3_driver =
{
.driver = {
.name = "swim3",
.of_match_table = swim3_match,
},
.probe = swim3_attach,
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
#endif
};
int swim3_init(void)
{
return macio_register_driver(&swim3_driver);
}
module_init(swim3_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Mackerras");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| gpl-2.0 |
mcmenaminadrian/vmufat | fs/9p/vfs_inode_dotl.c | 4330 | 26224 | /*
* linux/fs/9p/vfs_inode_dotl.c
*
* This file contains vfs inode ops for the 9P2000.L protocol.
*
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"
#include "xattr.h"
#include "acl.h"
static int
v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev);
/**
* v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
* new file system object. This checks the S_ISGID bit to determine the owning
* group of the new file system object.
*/
static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
{
BUG_ON(dir_inode == NULL);
if (dir_inode->i_mode & S_ISGID) {
/* set_gid bit is set.*/
return dir_inode->i_gid;
}
return current_fsgid();
}
/**
* v9fs_dentry_from_dir_inode - helper function to get the dentry from
* dir inode.
*
*/
static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
{
struct dentry *dentry;
spin_lock(&inode->i_lock);
/* Directory should have only one entry. */
BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
spin_unlock(&inode->i_lock);
return dentry;
}
static int v9fs_test_inode_dotl(struct inode *inode, void *data)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_stat_dotl *st = (struct p9_stat_dotl *)data;
/* don't match inode of different type */
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
return 0;
if (inode->i_generation != st->st_gen)
return 0;
/* compare qid details */
if (memcmp(&v9inode->qid.version,
&st->qid.version, sizeof(v9inode->qid.version)))
return 0;
if (v9inode->qid.type != st->qid.type)
return 0;
return 1;
}
/* Always get a new inode */
static int v9fs_test_new_inode_dotl(struct inode *inode, void *data)
{
return 0;
}
static int v9fs_set_inode_dotl(struct inode *inode, void *data)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_stat_dotl *st = (struct p9_stat_dotl *)data;
memcpy(&v9inode->qid, &st->qid, sizeof(st->qid));
inode->i_generation = st->st_gen;
return 0;
}
static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
struct p9_qid *qid,
struct p9_fid *fid,
struct p9_stat_dotl *st,
int new)
{
int retval;
unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
int (*test)(struct inode *, void *);
if (new)
test = v9fs_test_new_inode_dotl;
else
test = v9fs_test_inode_dotl;
i_ino = v9fs_qid2ino(qid);
inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
/*
* initialize the inode with the stat info
* FIXME!! we may need support for stale inodes
* later.
*/
inode->i_ino = i_ino;
retval = v9fs_init_inode(v9ses, inode,
st->st_mode, new_decode_dev(st->st_rdev));
if (retval)
goto error;
v9fs_stat2inode_dotl(st, inode);
#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_get_cookie(inode);
#endif
retval = v9fs_get_acl(inode, fid);
if (retval)
goto error;
unlock_new_inode(inode);
return inode;
error:
unlock_new_inode(inode);
iput(inode);
return ERR_PTR(retval);
}
struct inode *
v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb, int new)
{
struct p9_stat_dotl *st;
struct inode *inode = NULL;
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN);
if (IS_ERR(st))
return ERR_CAST(st);
inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new);
kfree(st);
return inode;
}
struct dotl_openflag_map {
int open_flag;
int dotl_flag;
};
static int v9fs_mapped_dotl_flags(int flags)
{
int i;
int rflags = 0;
struct dotl_openflag_map dotl_oflag_map[] = {
{ O_CREAT, P9_DOTL_CREATE },
{ O_EXCL, P9_DOTL_EXCL },
{ O_NOCTTY, P9_DOTL_NOCTTY },
{ O_TRUNC, P9_DOTL_TRUNC },
{ O_APPEND, P9_DOTL_APPEND },
{ O_NONBLOCK, P9_DOTL_NONBLOCK },
{ O_DSYNC, P9_DOTL_DSYNC },
{ FASYNC, P9_DOTL_FASYNC },
{ O_DIRECT, P9_DOTL_DIRECT },
{ O_LARGEFILE, P9_DOTL_LARGEFILE },
{ O_DIRECTORY, P9_DOTL_DIRECTORY },
{ O_NOFOLLOW, P9_DOTL_NOFOLLOW },
{ O_NOATIME, P9_DOTL_NOATIME },
{ O_CLOEXEC, P9_DOTL_CLOEXEC },
{ O_SYNC, P9_DOTL_SYNC},
};
for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
if (flags & dotl_oflag_map[i].open_flag)
rflags |= dotl_oflag_map[i].dotl_flag;
}
return rflags;
}
/**
* v9fs_open_to_dotl_flags - convert Linux-specific open flags to
* Plan 9 open flags.
* @flags: flags to convert
*/
int v9fs_open_to_dotl_flags(int flags)
{
int rflags = 0;
/*
* We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
* and P9_DOTL_NOACCESS
*/
rflags |= flags & O_ACCMODE;
rflags |= v9fs_mapped_dotl_flags(flags);
return rflags;
}
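/* Illustrative example: an open(2) with O_WRONLY | O_CREAT | O_TRUNC
* maps to (O_WRONLY & O_ACCMODE) | P9_DOTL_CREATE | P9_DOTL_TRUNC,
* i.e. P9_DOTL_WRONLY | P9_DOTL_CREATE | P9_DOTL_TRUNC, relying on
* P9_DOTL_WRONLY sharing the O_ACCMODE bit values as noted above.
*/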
/**
* v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
* @dir: directory inode in which the file is being created
* @dentry: dentry for the file being created
* @omode: create permissions
* @nd: path information
*
*/
static int
v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
struct nameidata *nd)
{
int err = 0;
gid_t gid;
int flags;
umode_t mode;
char *name = NULL;
struct file *filp;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *fid = NULL;
struct v9fs_inode *v9inode;
struct p9_fid *dfid, *ofid, *inode_fid;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
v9ses = v9fs_inode2v9ses(dir);
if (nd)
flags = nd->intent.open.flags;
else {
/*
* A create call without LOOKUP_OPEN comes from mknod of
* regular files, so use the mknod operation.
*/
return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
}
name = (char *) dentry->d_name.name;
p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n",
name, flags, omode);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
return err;
}
/* clone a fid to use for creation */
ofid = p9_client_walk(dfid, 0, NULL, 1);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
return err;
}
gid = v9fs_get_fsgid_for_create(dir);
mode = omode;
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
err);
goto error;
}
err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
mode, gid, &qid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
err);
goto error;
}
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
fid = NULL;
goto error;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
d_instantiate(dentry, inode);
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, &dacl, &pacl);
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (v9ses->cache && !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
mutex_unlock(&v9inode->v_mutex);
goto err_clunk_old_fid;
}
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
goto err_clunk_old_fid;
}
filp->private_data = ofid;
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, filp);
#endif
return 0;
error:
if (fid)
p9_client_clunk(fid);
err_clunk_old_fid:
if (ofid)
p9_client_clunk(ofid);
v9fs_set_create_acl(NULL, &dacl, &pacl);
return err;
}
/**
* v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
* @dir: inode of the parent directory
* @dentry: dentry for the directory being created
* @omode: mode for new directory
*
*/
static int v9fs_vfs_mkdir_dotl(struct inode *dir,
struct dentry *dentry, umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
gid_t gid;
char *name;
umode_t mode;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
if (dir->i_mode & S_ISGID)
omode |= S_ISGID;
dir_dentry = v9fs_dentry_from_dir_inode(dir);
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
dfid = NULL;
goto error;
}
gid = v9fs_get_fsgid_for_create(dir);
mode = omode;
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n",
err);
goto error;
}
name = (char *) dentry->d_name.name;
err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
if (err < 0)
goto error;
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
fid = NULL;
goto error;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
d_instantiate(dentry, inode);
fid = NULL;
} else {
/*
* Not in cached mode. No need to populate
* inode with stat. We need to get an inode
* so that we can set the acl with dentry
*/
inode = v9fs_get_inode(dir->i_sb, mode, 0);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
d_instantiate(dentry, inode);
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, &dacl, &pacl);
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
error:
if (fid)
p9_client_clunk(fid);
v9fs_set_create_acl(NULL, &dacl, &pacl);
return err;
}
static int
v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_stat_dotl *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
/* Ask for all the fields in stat structure. Server will return
* whatever it supports
*/
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, dentry->d_inode);
generic_fillattr(dentry->d_inode, stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
kfree(st);
return 0;
}
/*
* Attribute flags.
*/
#define P9_ATTR_MODE (1 << 0)
#define P9_ATTR_UID (1 << 1)
#define P9_ATTR_GID (1 << 2)
#define P9_ATTR_SIZE (1 << 3)
#define P9_ATTR_ATIME (1 << 4)
#define P9_ATTR_MTIME (1 << 5)
#define P9_ATTR_CTIME (1 << 6)
#define P9_ATTR_ATIME_SET (1 << 7)
#define P9_ATTR_MTIME_SET (1 << 8)
struct dotl_iattr_map {
int iattr_valid;
int p9_iattr_valid;
};
static int v9fs_mapped_iattr_valid(int iattr_valid)
{
int i;
int p9_iattr_valid = 0;
struct dotl_iattr_map dotl_iattr_map[] = {
{ ATTR_MODE, P9_ATTR_MODE },
{ ATTR_UID, P9_ATTR_UID },
{ ATTR_GID, P9_ATTR_GID },
{ ATTR_SIZE, P9_ATTR_SIZE },
{ ATTR_ATIME, P9_ATTR_ATIME },
{ ATTR_MTIME, P9_ATTR_MTIME },
{ ATTR_CTIME, P9_ATTR_CTIME },
{ ATTR_ATIME_SET, P9_ATTR_ATIME_SET },
{ ATTR_MTIME_SET, P9_ATTR_MTIME_SET },
};
for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) {
if (iattr_valid & dotl_iattr_map[i].iattr_valid)
p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid;
}
return p9_iattr_valid;
}
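/* Illustrative example: a chmod-plus-truncate setattr with
* ia_valid == (ATTR_MODE | ATTR_SIZE) maps to
* P9_ATTR_MODE | P9_ATTR_SIZE == (1 << 0) | (1 << 3) == 0x9.
*/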
/**
* v9fs_vfs_setattr_dotl - set file metadata
* @dentry: file whose metadata to set
* @iattr: metadata assignment structure
*
*/
int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
{
int retval;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_iattr_dotl p9attr;
p9_debug(P9_DEBUG_VFS, "\n");
retval = inode_change_ok(dentry->d_inode, iattr);
if (retval)
return retval;
p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
p9attr.mode = iattr->ia_mode;
p9attr.uid = iattr->ia_uid;
p9attr.gid = iattr->ia_gid;
p9attr.size = iattr->ia_size;
p9attr.atime_sec = iattr->ia_atime.tv_sec;
p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
retval = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(dentry->d_inode))
truncate_setsize(dentry->d_inode, iattr->ia_size);
v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
if (iattr->ia_valid & ATTR_MODE) {
/* We also want to update ACL when we update mode bits */
retval = v9fs_acl_chmod(dentry);
if (retval < 0)
return retval;
}
return 0;
}
/**
* v9fs_stat2inode_dotl - populate an inode structure with stat info
* @stat: stat structure
* @inode: inode to populate
*
*/
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
{
umode_t mode;
struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
inode->i_atime.tv_sec = stat->st_atime_sec;
inode->i_atime.tv_nsec = stat->st_atime_nsec;
inode->i_mtime.tv_sec = stat->st_mtime_sec;
inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
inode->i_ctime.tv_sec = stat->st_ctime_sec;
inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
inode->i_uid = stat->st_uid;
inode->i_gid = stat->st_gid;
set_nlink(inode, stat->st_nlink);
mode = stat->st_mode & S_IALLUGO;
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
inode->i_atime.tv_sec = stat->st_atime_sec;
inode->i_atime.tv_nsec = stat->st_atime_nsec;
}
if (stat->st_result_mask & P9_STATS_MTIME) {
inode->i_mtime.tv_sec = stat->st_mtime_sec;
inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
}
if (stat->st_result_mask & P9_STATS_CTIME) {
inode->i_ctime.tv_sec = stat->st_ctime_sec;
inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
}
if (stat->st_result_mask & P9_STATS_UID)
inode->i_uid = stat->st_uid;
if (stat->st_result_mask & P9_STATS_GID)
inode->i_gid = stat->st_gid;
if (stat->st_result_mask & P9_STATS_NLINK)
set_nlink(inode, stat->st_nlink);
if (stat->st_result_mask & P9_STATS_MODE) {
inode->i_mode = stat->st_mode;
if ((S_ISBLK(inode->i_mode)) ||
(S_ISCHR(inode->i_mode)))
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
if (stat->st_result_mask & P9_STATS_SIZE)
i_size_write(inode, stat->st_size);
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
if (stat->st_result_mask & P9_STATS_GEN)
inode->i_generation = stat->st_gen;
/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
* because the inode structure does not have fields for them.
*/
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
static int
v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
const char *symname)
{
int err;
gid_t gid;
char *name;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *dfid;
struct p9_fid *fid = NULL;
struct v9fs_session_info *v9ses;
name = (char *) dentry->d_name.name;
p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname);
v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
return err;
}
gid = v9fs_get_fsgid_for_create(dir);
/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
goto error;
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache) {
/* Now walk from the parent so we can get an unopened fid. */
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
fid = NULL;
goto error;
}
/* instantiate inode and assign the unopened fid to dentry */
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
d_instantiate(dentry, inode);
fid = NULL;
} else {
/* Not in cached mode. No need to populate inode with stat */
inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
d_instantiate(dentry, inode);
}
error:
if (fid)
p9_client_clunk(fid);
return err;
}
/**
* v9fs_vfs_link_dotl - create a hardlink for dotl
* @old_dentry: dentry for file to link to
* @dir: inode destination for new link
* @dentry: dentry for link
*
*/
static int
v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int err;
char *name;
struct dentry *dir_dentry;
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = v9fs_dentry_from_dir_inode(dir);
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid))
return PTR_ERR(dfid);
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid))
return PTR_ERR(oldfid);
name = (char *) dentry->d_name.name;
err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
return err;
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
/* Get the latest stat info from server. */
struct p9_fid *fid;
fid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
v9fs_refresh_inode_dotl(fid, old_dentry->d_inode);
}
ihold(old_dentry->d_inode);
d_instantiate(dentry, old_dentry->d_inode);
return err;
}
/**
* v9fs_vfs_mknod_dotl - create a special file
* @dir: inode destination for new link
* @dentry: dentry for file
* @omode: mode for creation
* @rdev: device associated with special file
*
*/
static int
v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev)
{
int err;
gid_t gid;
char *name;
umode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
dir->i_ino, dentry->d_name.name, omode,
MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = v9fs_dentry_from_dir_inode(dir);
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
dfid = NULL;
goto error;
}
gid = v9fs_get_fsgid_for_create(dir);
mode = omode;
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n",
err);
goto error;
}
name = (char *) dentry->d_name.name;
err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
if (err < 0)
goto error;
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
fid = NULL;
goto error;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
d_instantiate(dentry, inode);
fid = NULL;
} else {
/*
* Not in cached mode. No need to populate the inode with stat.
* The socket syscall returns a fd, so we still need to instantiate
* the dentry.
*/
inode = v9fs_get_inode(dir->i_sb, mode, rdev);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
d_instantiate(dentry, inode);
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, &dacl, &pacl);
error:
if (fid)
p9_client_clunk(fid);
v9fs_set_create_acl(NULL, &dacl, &pacl);
return err;
}
/**
* v9fs_vfs_follow_link_dotl - follow a symlink path
* @dentry: dentry for symlink
* @nd: nameidata
*
*/
static void *
v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
{
int retval;
struct p9_fid *fid;
char *link = __getname();
char *target;
p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
if (!link) {
link = ERR_PTR(-ENOMEM);
goto ndset;
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid)) {
__putname(link);
link = ERR_CAST(fid);
goto ndset;
}
retval = p9_client_readlink(fid, &target);
if (!retval) {
strcpy(link, target);
kfree(target);
goto ndset;
}
__putname(link);
link = ERR_PTR(retval);
ndset:
nd_set_link(nd, link);
return NULL;
}
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
{
loff_t i_size;
struct p9_stat_dotl *st;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
if (IS_ERR(st))
return PTR_ERR(st);
/*
* Don't update inode if the file type is different
*/
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
goto out;
spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
i_size = inode->i_size;
v9fs_stat2inode_dotl(st, inode);
if (v9ses->cache)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
out:
kfree(st);
return 0;
}
const struct inode_operations v9fs_dir_inode_operations_dotl = {
.create = v9fs_vfs_create_dotl,
.lookup = v9fs_vfs_lookup,
.link = v9fs_vfs_link_dotl,
.symlink = v9fs_vfs_symlink_dotl,
.unlink = v9fs_vfs_unlink,
.mkdir = v9fs_vfs_mkdir_dotl,
.rmdir = v9fs_vfs_rmdir,
.mknod = v9fs_vfs_mknod_dotl,
.rename = v9fs_vfs_rename,
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
.get_acl = v9fs_iop_get_acl,
};
const struct inode_operations v9fs_file_inode_operations_dotl = {
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
.get_acl = v9fs_iop_get_acl,
};
const struct inode_operations v9fs_symlink_inode_operations_dotl = {
.readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link_dotl,
.put_link = v9fs_vfs_put_link,
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
};
| gpl-2.0 |
RyanMallon/linux-ep93xx | arch/arm/mm/fsr-2level.c | 4330 | 3931 | static struct fsr_info fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, 0, "vector exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
{ do_bad, SIGBUS, 0, "unknown 4" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "unknown 10" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "unknown 20" },
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, 0, "unknown 22" },
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "unknown 24" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
| gpl-2.0 |
javelinanddart/ElementalX-m9 | drivers/ide/qd65xx.c | 4842 | 11149 | /*
* Copyright (C) 1996-2001 Linus Torvalds & author (see below)
*/
/*
* Version 0.03 Cleaned auto-tune, added probe
* Version 0.04 Added second channel tuning
* Version 0.05 Enhanced tuning ; added qd6500 support
* Version 0.06 Added dos driver's list
* Version 0.07 Second channel bug fix
*
* QDI QD6500/QD6580 EIDE controller fast support
*
* To activate controller support, use "ide0=qd65xx"
*/
/*
* Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
* Samuel Thibault <samuel.thibault@ens-lyon.org>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <asm/io.h>
#define DRV_NAME "qd65xx"
#include "qd65xx.h"
/*
* I/O ports are 0x30-0x31 (and 0x32-0x33 for qd6580)
* or 0xb0-0xb1 (and 0xb2-0xb3 for qd6580)
* -- qd6500 is a single IDE interface
* -- qd6580 is a dual IDE interface
*
* More research on qd6580 being done by willmore@cig.mot.com (David)
* More Information given by Petr Soucek (petr@ryston.cz)
* http://www.ryston.cz/petr/vlb
*/
/*
* base: Timer1
*
*
* base+0x01: Config (R/O)
*
* bit 0: ide baseport: 1 = 0x1f0 ; 0 = 0x170 (only useful for qd6500)
* bit 1: qd65xx baseport: 1 = 0xb0 ; 0 = 0x30
* bit 2: ID3: bus speed: 1 = <=33MHz ; 0 = >33MHz
* bit 3: qd6500: 1 = disabled, 0 = enabled
* qd6580: 1
* upper nibble:
* qd6500: 1100
* qd6580: either 1010 or 0101
*
*
* base+0x02: Timer2 (qd6580 only)
*
*
* base+0x03: Control (qd6580 only)
*
* bits 0-3 must always be set 1
* bit 4 must be set 1, but is set 0 by dos driver while measuring vlb clock
* bit 0 : 1 = Only primary port enabled : channel 0 for hda, channel 1 for hdb
* 0 = Primary and Secondary ports enabled : channel 0 for hda & hdb
* channel 1 for hdc & hdd
* bit 1 : 1 = only disks on primary port
* 0 = disks & ATAPI devices on primary port
* bit 2-4 : always 0
* bit 5 : status, but of what ?
* bit 6 : always set 1 by dos driver
* bit 7 : set 1 for non-ATAPI devices on primary port
* (maybe read-ahead and post-write buffer ?)
*/
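/* Illustrative decode of the Config register described above (values
* assumed for the example): config == 0xca means bit 0 clear -> IDE base
* 0x170, bit 1 set -> qd65xx base 0xb0, bit 2 clear -> bus > 33MHz,
* bit 3 set -> qd6500 disabled, upper nibble 1100 -> a qd6500 chip.
*/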
static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */
/*
* qd65xx_dev_select:
*
* This routine is invoked to prepare for access to a given drive.
*/
static void qd65xx_dev_select(ide_drive_t *drive)
{
u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) |
(QD_TIMREG(drive) & 0x02);
if (timings[index] != QD_TIMING(drive))
outb(timings[index] = QD_TIMING(drive), QD_TIMREG(drive));
outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
}
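/* Illustrative example, assuming QD_TIMREG() yields the timing register
* I/O address per the port layout noted above: for a register at 0xb2,
* index = ((0xb2 & 0x80) >> 7) | (0xb2 & 0x02) = 1 | 2 = 3, selecting
* the last of the four cached timings[] entries.
*/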
/*
* qd6500_compute_timing
*
* computes the timing value where
* lower nibble represents active time, in count of VLB clocks
* upper nibble represents recovery time, in count of VLB clocks
*/
static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
{
int clk = ide_vlb_clk ? ide_vlb_clk : 50;
u8 act_cyc, rec_cyc;
if (clk <= 33) {
act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
} else {
act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
}
return (rec_cyc << 4) | 0x08 | act_cyc;
}
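/* Illustrative example, assuming IDE_IN(v, lo, hi) clamps v to [lo, hi]:
* with clk = 50 (> 33MHz branch), active_time = 110 and
* recovery_time = 290, act_cyc = 8 - IDE_IN(110*50/1000 + 1, 1, 8)
* = 8 - 6 = 2 and rec_cyc = 18 - IDE_IN(290*50/1000 + 1, 3, 18)
* = 18 - 15 = 3, giving (3 << 4) | 0x08 | 2 == 0x3a.
*/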
/*
* qd6580_compute_timing
*
* same computation, for the qd6580
*/
static u8 qd6580_compute_timing (int active_time, int recovery_time)
{
int clk = ide_vlb_clk ? ide_vlb_clk : 50;
u8 act_cyc, rec_cyc;
act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
return (rec_cyc << 4) | act_cyc;
}
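/* Illustrative example, under the same clamping assumption as above:
* with clk = 50, active_time = 110 and recovery_time = 290,
* act_cyc = 17 - IDE_IN(6, 2, 17) = 11 and
* rec_cyc = 15 - IDE_IN(15, 2, 15) = 0, giving (0 << 4) | 11 == 0x0b.
*/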
/*
* qd_find_disk_type
*
* tries to find timing from dos driver's table
*/
static int qd_find_disk_type (ide_drive_t *drive,
int *active_time, int *recovery_time)
{
struct qd65xx_timing_s *p;
char *m = (char *)&drive->id[ATA_ID_PROD];
char model[ATA_ID_PROD_LEN];
if (*m == 0)
return 0;
strncpy(model, m, ATA_ID_PROD_LEN);
ide_fixstring(model, ATA_ID_PROD_LEN, 1); /* byte-swap */
for (p = qd65xx_timing ; p->offset != -1 ; p++) {
if (!strncmp(p->model, model+p->offset, 4)) {
printk(KERN_DEBUG "%s: listed !\n", drive->name);
*active_time = p->active;
*recovery_time = p->recovery;
return 1;
}
}
return 0;
}
/*
* qd_set_timing:
*
* records the timing
*/
static void qd_set_timing (ide_drive_t *drive, u8 timing)
{
unsigned long data = (unsigned long)ide_get_drivedata(drive);
data &= 0xff00;
data |= timing;
ide_set_drivedata(drive, (void *)data);
printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
}
static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
u16 *id = drive->id;
int active_time = 175;
int recovery_time = 415; /* worst case values from the dos driver */
/* FIXME: use drive->pio_mode value */
if (!qd_find_disk_type(drive, &active_time, &recovery_time) &&
(id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) &&
id[ATA_ID_EIDE_PIO] >= 240) {
printk(KERN_INFO "%s: PIO mode%d\n", drive->name,
id[ATA_ID_OLD_PIO_MODES] & 0xff);
active_time = 110;
recovery_time = drive->id[ATA_ID_EIDE_PIO] - 120;
}
qd_set_timing(drive, qd6500_compute_timing(drive->hwif,
active_time, recovery_time));
}
static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
unsigned int cycle_time;
int active_time = 175;
int recovery_time = 415; /* worst case values from the dos driver */
u8 base = (hwif->config_data & 0xff00) >> 8;
if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
cycle_time = ide_pio_cycle_time(drive, pio);
switch (pio) {
case 0: break;
case 3:
if (cycle_time >= 110) {
active_time = 86;
recovery_time = cycle_time - 102;
} else
printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
break;
case 4:
if (cycle_time >= 69) {
active_time = 70;
recovery_time = cycle_time - 61;
} else
printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
break;
default:
if (cycle_time >= 180) {
active_time = 110;
recovery_time = cycle_time - 120;
} else {
active_time = t->active;
recovery_time = cycle_time - active_time;
}
}
printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio);
}
if (!hwif->channel && drive->media != ide_disk) {
outb(0x5f, QD_CONTROL_PORT);
printk(KERN_WARNING "%s: ATAPI: disabled read-ahead FIFO "
"and post-write buffer on %s.\n",
drive->name, hwif->name);
}
qd_set_timing(drive, qd6580_compute_timing(active_time, recovery_time));
}
/*
* qd_testreg
*
* tests if the given port is a register
*/
static int __init qd_testreg(int port)
{
unsigned long flags;
u8 savereg, readreg;
local_irq_save(flags);
savereg = inb_p(port);
outb_p(QD_TESTVAL, port); /* safe value */
readreg = inb_p(port);
outb(savereg, port);
local_irq_restore(flags);
if (savereg == QD_TESTVAL) {
printk(KERN_ERR "Outch ! the probe for qd65xx isn't reliable !\n");
printk(KERN_ERR "Please contact maintainers to tell about your hardware\n");
printk(KERN_ERR "Assuming qd65xx is not present.\n");
return 1;
}
return (readreg != QD_TESTVAL);
}
static void __init qd6500_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
u8 base = (hwif->config_data & 0xff00) >> 8;
u8 config = QD_CONFIG(hwif);
ide_set_drivedata(drive, (void *)QD6500_DEF_DATA);
}
static void __init qd6580_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
u16 t1, t2;
u8 base = (hwif->config_data & 0xff00) >> 8;
u8 config = QD_CONFIG(hwif);
if (hwif->host_flags & IDE_HFLAG_SINGLE) {
t1 = QD6580_DEF_DATA;
t2 = QD6580_DEF_DATA2;
} else
t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA;
ide_set_drivedata(drive, (void *)((drive->dn & 1) ? t2 : t1));
}
static const struct ide_tp_ops qd65xx_tp_ops = {
.exec_command = ide_exec_command,
.read_status = ide_read_status,
.read_altstatus = ide_read_altstatus,
.write_devctl = ide_write_devctl,
.dev_select = qd65xx_dev_select,
.tf_load = ide_tf_load,
.tf_read = ide_tf_read,
.input_data = ide_input_data,
.output_data = ide_output_data,
};
static const struct ide_port_ops qd6500_port_ops = {
.init_dev = qd6500_init_dev,
.set_pio_mode = qd6500_set_pio_mode,
};
static const struct ide_port_ops qd6580_port_ops = {
.init_dev = qd6580_init_dev,
.set_pio_mode = qd6580_set_pio_mode,
};
static const struct ide_port_info qd65xx_port_info __initconst = {
.name = DRV_NAME,
.tp_ops = &qd65xx_tp_ops,
.chipset = ide_qd65xx,
.host_flags = IDE_HFLAG_IO_32BIT |
IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
/*
* qd_probe:
*
* looks at the specified baseport, and if qd found, registers & initialises it
* return 1 if another qd may be probed
*/
static int __init qd_probe(int base)
{
int rc;
u8 config, unit, control;
struct ide_port_info d = qd65xx_port_info;
config = inb(QD_CONFIG_PORT);
if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) )
return -ENODEV;
unit = ! (config & QD_CONFIG_IDE_BASEPORT);
if (unit)
d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
switch (config & 0xf0) {
case QD_CONFIG_QD6500:
if (qd_testreg(base))
return -ENODEV; /* bad register */
if (config & QD_CONFIG_DISABLED) {
printk(KERN_WARNING "qd6500 is disabled !\n");
return -ENODEV;
}
printk(KERN_NOTICE "qd6500 at %#x\n", base);
printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
config, QD_ID3);
d.port_ops = &qd6500_port_ops;
d.host_flags |= IDE_HFLAG_SINGLE;
break;
case QD_CONFIG_QD6580_A:
case QD_CONFIG_QD6580_B:
if (qd_testreg(base) || qd_testreg(base + 0x02))
return -ENODEV; /* bad registers */
control = inb(QD_CONTROL_PORT);
printk(KERN_NOTICE "qd6580 at %#x\n", base);
printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n",
config, control, QD_ID3);
outb(QD_DEF_CONTR, QD_CONTROL_PORT);
d.port_ops = &qd6580_port_ops;
if (control & QD_CONTR_SEC_DISABLED)
d.host_flags |= IDE_HFLAG_SINGLE;
printk(KERN_INFO "qd6580: %s IDE board\n",
(control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
break;
default:
return -ENODEV;
}
rc = ide_legacy_device_add(&d, (base << 8) | config);
if (d.host_flags & IDE_HFLAG_SINGLE)
return (rc == 0) ? 1 : rc;
return rc;
}
static bool probe_qd65xx;
module_param_named(probe, probe_qd65xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
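/* Illustrative usage, assuming a modular build: the driver does not
* probe unless explicitly asked, e.g. "modprobe qd65xx probe=1";
* without the parameter, qd65xx_init() below returns -ENODEV.
*/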
static int __init qd65xx_init(void)
{
int rc1, rc2 = -ENODEV;
if (probe_qd65xx == 0)
return -ENODEV;
rc1 = qd_probe(0x30);
if (rc1)
rc2 = qd_probe(0xb0);
if (rc1 < 0 && rc2 < 0)
return -ENODEV;
return 0;
}
module_init(qd65xx_init);
MODULE_AUTHOR("Samuel Thibault");
MODULE_DESCRIPTION("support of qd65xx vlb ide chipset");
MODULE_LICENSE("GPL");
| gpl-2.0 |
rwaterspf1/android_kernel_lge_hammerhead | drivers/mfd/tc6393xb.c | 7402 | 22229 | /*
* Toshiba TC6393XB SoC support
*
* Copyright(c) 2005-2006 Chris Humbert
* Copyright(c) 2005 Dirk Opfer
* Copyright(c) 2005 Ian Molton <spyro@f2s.com>
* Copyright(c) 2007 Dmitry Baryshkov
*
* Based on code written by Sharp/Lineo for 2.4 kernels
* Based on locomo.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6393xb.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#define SCR_REVID 0x08 /* b Revision ID */
#define SCR_ISR 0x50 /* b Interrupt Status */
#define SCR_IMR 0x52 /* b Interrupt Mask */
#define SCR_IRR 0x54 /* b Interrupt Routing */
#define SCR_GPER 0x60 /* w GP Enable */
#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
#define SCR_CCR 0x98 /* w Clock Control */
#define SCR_PLL2CR 0x9a /* w PLL2 Control */
#define SCR_PLL1CR 0x9c /* l PLL1 Control */
#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
#define SCR_FER 0xe0 /* b Function Enable */
#define SCR_MCR 0xe4 /* w Mode Control */
#define SCR_CONFIG 0xfc /* b Configuration Control */
#define SCR_DEBUG 0xff /* b Debug */
#define SCR_CCR_CK32K BIT(0)
#define SCR_CCR_USBCK BIT(1)
#define SCR_CCR_UNK1 BIT(4)
#define SCR_CCR_MCLK_MASK (7 << 8)
#define SCR_CCR_MCLK_OFF (0 << 8)
#define SCR_CCR_MCLK_12 (1 << 8)
#define SCR_CCR_MCLK_24 (2 << 8)
#define SCR_CCR_MCLK_48 (3 << 8)
#define SCR_CCR_HCLK_MASK (3 << 12)
#define SCR_CCR_HCLK_24 (0 << 12)
#define SCR_CCR_HCLK_48 (1 << 12)
#define SCR_FER_USBEN BIT(0) /* USB host enable */
#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
#define SCR_MCR_RDY_MASK (3 << 0)
#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
#define SCR_MCR_RDY_TRISTATE (1 << 0)
#define SCR_MCR_RDY_PUSHPULL (2 << 0)
#define SCR_MCR_RDY_UNK BIT(2)
#define SCR_MCR_RDY_EN BIT(3)
#define SCR_MCR_INT_MASK (3 << 4)
#define SCR_MCR_INT_OPENDRAIN (0 << 4)
#define SCR_MCR_INT_TRISTATE (1 << 4)
#define SCR_MCR_INT_PUSHPULL (2 << 4)
#define SCR_MCR_INT_UNK BIT(6)
#define SCR_MCR_INT_EN BIT(7)
/* bits 8 - 16 are unknown */
#define TC_GPIO_BIT(i) (1 << (i & 0x7))
/*--------------------------------------------------------------------------*/
struct tc6393xb {
void __iomem *scr;
struct gpio_chip gpio;
struct clk *clk; /* 3.6 MHz */
spinlock_t lock; /* protects RMW cycles */
struct {
u8 fer;
u16 ccr;
u8 gpi_bcr[3];
u8 gpo_dsr[3];
u8 gpo_doecr[3];
} suspend_state;
struct resource rscr;
struct resource *iomem;
int irq;
int irq_base;
};
enum {
TC6393XB_CELL_NAND,
TC6393XB_CELL_MMC,
TC6393XB_CELL_OHCI,
TC6393XB_CELL_FB,
};
/*--------------------------------------------------------------------------*/
static int tc6393xb_nand_enable(struct platform_device *nand)
{
struct platform_device *dev = to_platform_device(nand->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&tc6393xb->lock, flags);
/* SMD buffer on */
dev_dbg(&dev->dev, "SMD buffer on\n");
tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
static struct resource __devinitdata tc6393xb_nand_resources[] = {
{
.start = 0x1000,
.end = 0x1007,
.flags = IORESOURCE_MEM,
},
{
.start = 0x0100,
.end = 0x01ff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TC6393_NAND,
.end = IRQ_TC6393_NAND,
.flags = IORESOURCE_IRQ,
},
};
static struct resource tc6393xb_mmc_resources[] = {
{
.start = 0x800,
.end = 0x9ff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TC6393_MMC,
.end = IRQ_TC6393_MMC,
.flags = IORESOURCE_IRQ,
},
};
static const struct resource tc6393xb_ohci_resources[] = {
{
.start = 0x3000,
.end = 0x31ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0x0300,
.end = 0x03ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0x010000,
.end = 0x017fff,
.flags = IORESOURCE_MEM,
},
{
.start = 0x018000,
.end = 0x01ffff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TC6393_OHCI,
.end = IRQ_TC6393_OHCI,
.flags = IORESOURCE_IRQ,
},
};
static struct resource __devinitdata tc6393xb_fb_resources[] = {
{
.start = 0x5000,
.end = 0x51ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0x0500,
.end = 0x05ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0x100000,
.end = 0x1fffff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TC6393_FB,
.end = IRQ_TC6393_FB,
.flags = IORESOURCE_IRQ,
},
};
static int tc6393xb_ohci_enable(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
unsigned long flags;
u16 ccr;
u8 fer;
spin_lock_irqsave(&tc6393xb->lock, flags);
ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
ccr |= SCR_CCR_USBCK;
tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
fer |= SCR_FER_USBEN;
tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
static int tc6393xb_ohci_disable(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
unsigned long flags;
u16 ccr;
u8 fer;
spin_lock_irqsave(&tc6393xb->lock, flags);
fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
fer &= ~SCR_FER_USBEN;
tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);
ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
ccr &= ~SCR_CCR_USBCK;
tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
static int tc6393xb_fb_enable(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
unsigned long flags;
u16 ccr;
spin_lock_irqsave(&tc6393xb->lock, flags);
ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
ccr &= ~SCR_CCR_MCLK_MASK;
ccr |= SCR_CCR_MCLK_48;
tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
static int tc6393xb_fb_disable(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
unsigned long flags;
u16 ccr;
spin_lock_irqsave(&tc6393xb->lock, flags);
ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
ccr &= ~SCR_CCR_MCLK_MASK;
ccr |= SCR_CCR_MCLK_OFF;
tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
{
struct platform_device *dev = to_platform_device(fb->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
u8 fer;
unsigned long flags;
spin_lock_irqsave(&tc6393xb->lock, flags);
fer = ioread8(tc6393xb->scr + SCR_FER);
if (on)
fer |= SCR_FER_SLCDEN;
else
fer &= ~SCR_FER_SLCDEN;
iowrite8(fer, tc6393xb->scr + SCR_FER);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
EXPORT_SYMBOL(tc6393xb_lcd_set_power);
int tc6393xb_lcd_mode(struct platform_device *fb,
const struct fb_videomode *mode) {
struct platform_device *dev = to_platform_device(fb->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&tc6393xb->lock, flags);
/* PLL1CR is a 32-bit register; write the pixel clock as two 16-bit halves */
iowrite16(mode->pixclock, tc6393xb->scr + SCR_PLL1CR + 0);
iowrite16(mode->pixclock >> 16, tc6393xb->scr + SCR_PLL1CR + 2);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
EXPORT_SYMBOL(tc6393xb_lcd_mode);
static int tc6393xb_mmc_enable(struct platform_device *mmc)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
tc6393xb_mmc_resources[0].start & 0xfffe);
return 0;
}
static int tc6393xb_mmc_resume(struct platform_device *mmc)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
tc6393xb_mmc_resources[0].start & 0xfffe);
return 0;
}
static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
}
static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
}
static struct tmio_mmc_data tc6393xb_mmc_data = {
.hclk = 24000000,
.set_pwr = tc6393xb_mmc_pwr,
.set_clk_div = tc6393xb_mmc_clk_div,
};
static struct mfd_cell __devinitdata tc6393xb_cells[] = {
[TC6393XB_CELL_NAND] = {
.name = "tmio-nand",
.enable = tc6393xb_nand_enable,
.num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
.resources = tc6393xb_nand_resources,
},
[TC6393XB_CELL_MMC] = {
.name = "tmio-mmc",
.enable = tc6393xb_mmc_enable,
.resume = tc6393xb_mmc_resume,
.platform_data = &tc6393xb_mmc_data,
.pdata_size = sizeof(tc6393xb_mmc_data),
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
},
[TC6393XB_CELL_OHCI] = {
.name = "tmio-ohci",
.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
.resources = tc6393xb_ohci_resources,
.enable = tc6393xb_ohci_enable,
.suspend = tc6393xb_ohci_disable,
.resume = tc6393xb_ohci_enable,
.disable = tc6393xb_ohci_disable,
},
[TC6393XB_CELL_FB] = {
.name = "tmio-fb",
.num_resources = ARRAY_SIZE(tc6393xb_fb_resources),
.resources = tc6393xb_fb_resources,
.enable = tc6393xb_fb_enable,
.suspend = tc6393xb_fb_disable,
.resume = tc6393xb_fb_enable,
.disable = tc6393xb_fb_disable,
},
};
/*--------------------------------------------------------------------------*/
static int tc6393xb_gpio_get(struct gpio_chip *chip,
unsigned offset)
{
struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
/* XXX: does dsr also represent inputs? */
return tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
& TC_GPIO_BIT(offset);
}
static void __tc6393xb_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
u8 dsr;
dsr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
if (value)
dsr |= TC_GPIO_BIT(offset);
else
dsr &= ~TC_GPIO_BIT(offset);
tmio_iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
}
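/*
 * Illustrative mapping for the read-modify-write above: GPIO offset 10
 * lands in SCR_GPO_DSR(10 / 8) = SCR_GPO_DSR(1), and TC_GPIO_BIT(10) =
 * 1 << (10 & 0x7) = 0x04, i.e. bit 2 of the second data-set register.
 */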
static void tc6393xb_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
unsigned long flags;
spin_lock_irqsave(&tc6393xb->lock, flags);
__tc6393xb_gpio_set(chip, offset, value);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
unsigned long flags;
u8 doecr;
spin_lock_irqsave(&tc6393xb->lock, flags);
doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
doecr &= ~TC_GPIO_BIT(offset);
tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
unsigned long flags;
u8 doecr;
spin_lock_irqsave(&tc6393xb->lock, flags);
__tc6393xb_gpio_set(chip, offset, value);
doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
doecr |= TC_GPIO_BIT(offset);
tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
spin_unlock_irqrestore(&tc6393xb->lock, flags);
return 0;
}
static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
{
tc6393xb->gpio.label = "tc6393xb";
tc6393xb->gpio.base = gpio_base;
tc6393xb->gpio.ngpio = 16;
tc6393xb->gpio.set = tc6393xb_gpio_set;
tc6393xb->gpio.get = tc6393xb_gpio_get;
tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
return gpiochip_add(&tc6393xb->gpio);
}
/*--------------------------------------------------------------------------*/
static void
tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
{
struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
unsigned int isr;
unsigned int i, irq_base;
irq_base = tc6393xb->irq_base;
while ((isr = tmio_ioread8(tc6393xb->scr + SCR_ISR) &
~tmio_ioread8(tc6393xb->scr + SCR_IMR)))
for (i = 0; i < TC6393XB_NR_IRQS; i++) {
if (isr & (1 << i))
generic_handle_irq(irq_base + i);
}
}
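/*
 * Illustration of the dispatch loop above (made-up values): with
 * SCR_ISR = 0x24 and SCR_IMR = 0x04, the masked status is 0x20, so only
 * generic_handle_irq(irq_base + 5) runs; the outer loop then re-reads
 * ISR until no unmasked bits remain.
 */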
static void tc6393xb_irq_ack(struct irq_data *data)
{
}
static void tc6393xb_irq_mask(struct irq_data *data)
{
struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
unsigned long flags;
u8 imr;
spin_lock_irqsave(&tc6393xb->lock, flags);
imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
imr |= 1 << (data->irq - tc6393xb->irq_base);
tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
static void tc6393xb_irq_unmask(struct irq_data *data)
{
struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
unsigned long flags;
u8 imr;
spin_lock_irqsave(&tc6393xb->lock, flags);
imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
imr &= ~(1 << (data->irq - tc6393xb->irq_base));
tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
static struct irq_chip tc6393xb_chip = {
.name = "tc6393xb",
.irq_ack = tc6393xb_irq_ack,
.irq_mask = tc6393xb_irq_mask,
.irq_unmask = tc6393xb_irq_unmask,
};
static void tc6393xb_attach_irq(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
unsigned int irq, irq_base;
irq_base = tc6393xb->irq_base;
for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
irq_set_chip_data(irq, tc6393xb);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
irq_set_handler_data(tc6393xb->irq, tc6393xb);
irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
}
static void tc6393xb_detach_irq(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
unsigned int irq, irq_base;
irq_set_chained_handler(tc6393xb->irq, NULL);
irq_set_handler_data(tc6393xb->irq, NULL);
irq_base = tc6393xb->irq_base;
for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
set_irq_flags(irq, 0);
irq_set_chip(irq, NULL);
irq_set_chip_data(irq, NULL);
}
}
/*--------------------------------------------------------------------------*/
static int __devinit tc6393xb_probe(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
struct tc6393xb *tc6393xb;
struct resource *iomem, *rscr;
int ret, temp;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iomem)
return -EINVAL;
tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
if (!tc6393xb) {
ret = -ENOMEM;
goto err_kzalloc;
}
spin_lock_init(&tc6393xb->lock);
platform_set_drvdata(dev, tc6393xb);
ret = platform_get_irq(dev, 0);
if (ret >= 0)
tc6393xb->irq = ret;
else
goto err_noirq;
tc6393xb->iomem = iomem;
tc6393xb->irq_base = tcpd->irq_base;
tc6393xb->clk = clk_get(&dev->dev, "CLK_CK3P6MI");
if (IS_ERR(tc6393xb->clk)) {
ret = PTR_ERR(tc6393xb->clk);
goto err_clk_get;
}
rscr = &tc6393xb->rscr;
rscr->name = "tc6393xb-core";
rscr->start = iomem->start;
rscr->end = iomem->start + 0xff;
rscr->flags = IORESOURCE_MEM;
ret = request_resource(iomem, rscr);
if (ret)
goto err_request_scr;
tc6393xb->scr = ioremap(rscr->start, resource_size(rscr));
if (!tc6393xb->scr) {
ret = -ENOMEM;
goto err_ioremap;
}
ret = clk_enable(tc6393xb->clk);
if (ret)
goto err_clk_enable;
ret = tcpd->enable(dev);
if (ret)
goto err_enable;
iowrite8(0, tc6393xb->scr + SCR_FER);
iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
iowrite16(SCR_CCR_UNK1 | SCR_CCR_HCLK_48,
tc6393xb->scr + SCR_CCR);
iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
BIT(15), tc6393xb->scr + SCR_MCR);
iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
iowrite8(0, tc6393xb->scr + SCR_IRR);
iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
tmio_ioread8(tc6393xb->scr + SCR_REVID),
(unsigned long) iomem->start, tc6393xb->irq);
tc6393xb->gpio.base = -1;
if (tcpd->gpio_base >= 0) {
ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
if (ret)
goto err_gpio_add;
}
tc6393xb_attach_irq(dev);
if (tcpd->setup) {
ret = tcpd->setup(dev);
if (ret)
goto err_setup;
}
tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
sizeof(*tcpd->nand_data);
tc6393xb_cells[TC6393XB_CELL_FB].platform_data = tcpd->fb_data;
tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data);
ret = mfd_add_devices(&dev->dev, dev->id,
tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
iomem, tcpd->irq_base);
if (!ret)
return 0;
if (tcpd->teardown)
tcpd->teardown(dev);
err_setup:
tc6393xb_detach_irq(dev);
err_gpio_add:
if (tc6393xb->gpio.base != -1)
temp = gpiochip_remove(&tc6393xb->gpio);
tcpd->disable(dev);
err_enable:
clk_disable(tc6393xb->clk);
err_clk_enable:
iounmap(tc6393xb->scr);
err_ioremap:
release_resource(&tc6393xb->rscr);
err_request_scr:
clk_put(tc6393xb->clk);
err_noirq:
err_clk_get:
kfree(tc6393xb);
err_kzalloc:
return ret;
}
static int __devexit tc6393xb_remove(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
int ret;
mfd_remove_devices(&dev->dev);
if (tcpd->teardown)
tcpd->teardown(dev);
tc6393xb_detach_irq(dev);
if (tc6393xb->gpio.base != -1) {
ret = gpiochip_remove(&tc6393xb->gpio);
if (ret) {
dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret);
return ret;
}
}
ret = tcpd->disable(dev);
clk_disable(tc6393xb->clk);
iounmap(tc6393xb->scr);
release_resource(&tc6393xb->rscr);
platform_set_drvdata(dev, NULL);
clk_put(tc6393xb->clk);
kfree(tc6393xb);
return ret;
}
#ifdef CONFIG_PM
static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
{
struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
int i, ret;
tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
for (i = 0; i < 3; i++) {
tc6393xb->suspend_state.gpo_dsr[i] =
ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
tc6393xb->suspend_state.gpo_doecr[i] =
ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
tc6393xb->suspend_state.gpi_bcr[i] =
ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
}
ret = tcpd->suspend(dev);
clk_disable(tc6393xb->clk);
return ret;
}
static int tc6393xb_resume(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
int ret;
int i;
clk_enable(tc6393xb->clk);
ret = tcpd->resume(dev);
if (ret)
return ret;
if (!tcpd->resume_restore)
return 0;
iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
BIT(15), tc6393xb->scr + SCR_MCR);
iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
iowrite8(0, tc6393xb->scr + SCR_IRR);
iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
for (i = 0; i < 3; i++) {
iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
tc6393xb->scr + SCR_GPO_DSR(i));
iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
tc6393xb->scr + SCR_GPO_DOECR(i));
iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
tc6393xb->scr + SCR_GPI_BCR(i));
}
return 0;
}
#else
#define tc6393xb_suspend NULL
#define tc6393xb_resume NULL
#endif
static struct platform_driver tc6393xb_driver = {
.probe = tc6393xb_probe,
.remove = __devexit_p(tc6393xb_remove),
.suspend = tc6393xb_suspend,
.resume = tc6393xb_resume,
.driver = {
.name = "tc6393xb",
.owner = THIS_MODULE,
},
};
static int __init tc6393xb_init(void)
{
return platform_driver_register(&tc6393xb_driver);
}
static void __exit tc6393xb_exit(void)
{
platform_driver_unregister(&tc6393xb_driver);
}
subsys_initcall(tc6393xb_init);
module_exit(tc6393xb_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
MODULE_ALIAS("platform:tc6393xb");
| gpl-2.0 |
sakindia123/kernel_3.4_samsung_exynos4 | arch/sh/kernel/cpu/sh2a/opcode_helper.c | 9194 | 1548 | /*
* arch/sh/kernel/cpu/sh2a/opcode_helper.c
*
* Helper for the SH-2A 32-bit opcodes.
*
* Copyright (C) 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
* introduces some 32-bit instructions. Since there are no real
* constraints on their use (and they can be mixed and matched), we need
* to check the instruction encoding to work out if it's a true 32-bit
* instruction or not.
*
* Presently, 32-bit opcodes have only slight variations in what the
* actual encoding looks like in the first-half of the instruction, which
* makes it fairly straightforward to differentiate from the 16-bit ones.
*
* First 16-bits of encoding Used by
*
* 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d,
* fmov.s, movu.b, movu.w
*
* 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b,
* bandnot.b, bldnot.b, bor.b, bornot.b,
* bxor.b
*
* 0000nnnniiii0000 movi20
* 0000nnnniiii0001 movi20s
*/
unsigned int instruction_size(unsigned int insn)
{
/* Look for the common cases */
switch ((insn & 0xf00f)) {
case 0x0000: /* movi20 */
case 0x0001: /* movi20s */
case 0x3001: /* 32-bit mov/fmov/movu variants */
return 4;
}
/* And the special cases.. */
switch ((insn & 0xf08f)) {
case 0x3009: /* 32-bit b*.b bit operations */
return 4;
}
return 2;
}
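/*
 * Example (illustrative only, not part of this file): a minimal sketch of
 * how a caller might walk a buffer of SH-2A opcodes using
 * instruction_size() above. The count_insns() helper, its arguments and
 * the assumption of a well-formed buffer are all hypothetical.
 */
#if 0
static unsigned int count_insns(const u16 *code, unsigned int nwords)
{
unsigned int i = 0, count = 0;
while (i < nwords) {
/* instruction_size() returns 2 or 4 bytes; step in 16-bit words */
i += instruction_size(code[i]) / 2;
count++;
}
return count;
}
#endif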
| gpl-2.0 |
schacon/linux | sound/isa/gus/gus_mem.c | 14058 | 9900 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* GUS's memory allocation routines / bottom layer
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>
#ifdef CONFIG_SND_DEBUG
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer);
#endif
void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup)
{
if (!xup) {
mutex_lock(&alloc->memory_mutex);
} else {
mutex_unlock(&alloc->memory_mutex);
}
}
static struct snd_gf1_mem_block *snd_gf1_mem_xalloc(struct snd_gf1_mem * alloc,
struct snd_gf1_mem_block * block)
{
struct snd_gf1_mem_block *pblock, *nblock;
nblock = kmalloc(sizeof(struct snd_gf1_mem_block), GFP_KERNEL);
if (nblock == NULL)
return NULL;
*nblock = *block;
pblock = alloc->first;
while (pblock) {
if (pblock->ptr > nblock->ptr) {
nblock->prev = pblock->prev;
nblock->next = pblock;
pblock->prev = nblock;
if (pblock == alloc->first)
alloc->first = nblock;
else
nblock->prev->next = nblock;
/* inserted in the middle of the list: hand the new block back;
 * locking is managed by the callers via snd_gf1_mem_lock() */
return nblock;
}
pblock = pblock->next;
}
nblock->next = NULL;
if (alloc->last == NULL) {
nblock->prev = NULL;
alloc->first = alloc->last = nblock;
} else {
nblock->prev = alloc->last;
alloc->last->next = nblock;
alloc->last = nblock;
}
return nblock;
}
int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block)
{
if (block->share) { /* shared block: just drop one reference */
block->share--;
return 0; /* locking is managed by the callers via snd_gf1_mem_lock() */
}
if (alloc->first == block) {
alloc->first = block->next;
if (block->next)
block->next->prev = NULL;
} else {
block->prev->next = block->next;
if (block->next)
block->next->prev = block->prev;
}
if (alloc->last == block) {
alloc->last = block->prev;
if (block->prev)
block->prev->next = NULL;
} else {
block->next->prev = block->prev;
if (block->prev)
block->prev->next = block->next;
}
kfree(block->name);
kfree(block);
return 0;
}
static struct snd_gf1_mem_block *snd_gf1_mem_look(struct snd_gf1_mem * alloc,
unsigned int address)
{
struct snd_gf1_mem_block *block;
for (block = alloc->first; block; block = block->next) {
if (block->ptr == address) {
return block;
}
}
return NULL;
}
static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
unsigned int *share_id)
{
struct snd_gf1_mem_block *block;
if (!share_id[0] && !share_id[1] &&
!share_id[2] && !share_id[3])
return NULL;
for (block = alloc->first; block; block = block->next)
if (!memcmp(share_id, block->share_id,
sizeof(block->share_id)))
return block;
return NULL;
}
static int snd_gf1_mem_find(struct snd_gf1_mem * alloc,
struct snd_gf1_mem_block * block,
unsigned int size, int w_16, int align)
{
struct snd_gf1_bank_info *info = w_16 ? alloc->banks_16 : alloc->banks_8;
unsigned int idx, boundary;
int size1;
struct snd_gf1_mem_block *pblock;
unsigned int ptr1, ptr2;
if (w_16 && align < 2)
align = 2;
block->flags = w_16 ? SNDRV_GF1_MEM_BLOCK_16BIT : 0;
block->owner = SNDRV_GF1_MEM_OWNER_DRIVER;
block->share = 0;
block->share_id[0] = block->share_id[1] =
block->share_id[2] = block->share_id[3] = 0;
block->name = NULL;
block->prev = block->next = NULL;
for (pblock = alloc->first, idx = 0; pblock; pblock = pblock->next) {
while (pblock->ptr >= (boundary = info[idx].address + info[idx].size))
idx++;
while (pblock->ptr + pblock->size >= (boundary = info[idx].address + info[idx].size))
idx++;
ptr2 = boundary;
if (pblock->next) {
if (pblock->ptr + pblock->size == pblock->next->ptr)
continue;
if (pblock->next->ptr < boundary)
ptr2 = pblock->next->ptr;
}
ptr1 = ALIGN(pblock->ptr + pblock->size, align);
if (ptr1 >= ptr2)
continue;
size1 = ptr2 - ptr1;
if ((int)size <= size1) {
block->ptr = ptr1;
block->size = size;
return 0;
}
}
while (++idx < 4) {
if (size <= info[idx].size) {
/* I assume that bank address is already aligned.. */
block->ptr = info[idx].address;
block->size = size;
return 0;
}
}
return -ENOMEM;
}
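/*
 * Worked example of the first-fit scan above (made-up values): with an
 * existing block at ptr 0x100 of size 0x40 and align = 32, the candidate
 * start is ALIGN(0x140, 32) = 0x140; the request fits if it ends before
 * the next block or the current bank boundary, otherwise the scan moves on.
 */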
struct snd_gf1_mem_block *snd_gf1_mem_alloc(struct snd_gf1_mem * alloc, int owner,
char *name, int size, int w_16, int align,
unsigned int *share_id)
{
struct snd_gf1_mem_block block, *nblock;
snd_gf1_mem_lock(alloc, 0);
if (share_id != NULL) {
nblock = snd_gf1_mem_share(alloc, share_id);
if (nblock != NULL) {
if (size != (int)nblock->size) {
/* TODO: remove in the future */
snd_printk(KERN_ERR "snd_gf1_mem_alloc - share: sizes differ\n");
goto __std;
}
nblock->share++;
snd_gf1_mem_lock(alloc, 1);
return NULL;
}
}
__std:
if (snd_gf1_mem_find(alloc, &block, size, w_16, align) < 0) {
snd_gf1_mem_lock(alloc, 1);
return NULL;
}
if (share_id != NULL)
memcpy(&block.share_id, share_id, sizeof(block.share_id));
block.owner = owner;
block.name = kstrdup(name, GFP_KERNEL);
nblock = snd_gf1_mem_xalloc(alloc, &block);
snd_gf1_mem_lock(alloc, 1);
return nblock;
}
int snd_gf1_mem_free(struct snd_gf1_mem * alloc, unsigned int address)
{
int result;
struct snd_gf1_mem_block *block;
snd_gf1_mem_lock(alloc, 0);
if ((block = snd_gf1_mem_look(alloc, address)) != NULL) {
result = snd_gf1_mem_xfree(alloc, block);
snd_gf1_mem_lock(alloc, 1);
return result;
}
snd_gf1_mem_lock(alloc, 1);
return -EINVAL;
}
int snd_gf1_mem_init(struct snd_gus_card * gus)
{
struct snd_gf1_mem *alloc;
struct snd_gf1_mem_block block;
#ifdef CONFIG_SND_DEBUG
struct snd_info_entry *entry;
#endif
alloc = &gus->gf1.mem_alloc;
mutex_init(&alloc->memory_mutex);
alloc->first = alloc->last = NULL;
if (!gus->gf1.memory)
return 0;
memset(&block, 0, sizeof(block));
block.owner = SNDRV_GF1_MEM_OWNER_DRIVER;
if (gus->gf1.enh_mode) {
block.ptr = 0;
block.size = 1024;
block.name = kstrdup("InterWave LFOs", GFP_KERNEL);
if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
return -ENOMEM;
}
block.ptr = gus->gf1.default_voice_address;
block.size = 4;
block.name = kstrdup("Voice default (NULL's)", GFP_KERNEL);
if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
return -ENOMEM;
#ifdef CONFIG_SND_DEBUG
if (! snd_card_proc_new(gus->card, "gusmem", &entry))
snd_info_set_text_ops(entry, gus, snd_gf1_mem_info_read);
#endif
return 0;
}
int snd_gf1_mem_done(struct snd_gus_card * gus)
{
struct snd_gf1_mem *alloc;
struct snd_gf1_mem_block *block, *nblock;
alloc = &gus->gf1.mem_alloc;
block = alloc->first;
while (block) {
nblock = block->next;
snd_gf1_mem_xfree(alloc, block);
block = nblock;
}
return 0;
}
#ifdef CONFIG_SND_DEBUG
static void snd_gf1_mem_info_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_gus_card *gus;
struct snd_gf1_mem *alloc;
struct snd_gf1_mem_block *block;
unsigned int total, used;
int i;
gus = entry->private_data;
alloc = &gus->gf1.mem_alloc;
mutex_lock(&alloc->memory_mutex);
snd_iprintf(buffer, "8-bit banks : \n ");
for (i = 0; i < 4; i++)
snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_8[i].address, alloc->banks_8[i].size >> 10, i + 1 < 4 ? "," : "");
snd_iprintf(buffer, "\n"
"16-bit banks : \n ");
for (i = total = 0; i < 4; i++) {
snd_iprintf(buffer, "0x%06x (%04ik)%s", alloc->banks_16[i].address, alloc->banks_16[i].size >> 10, i + 1 < 4 ? "," : "");
total += alloc->banks_16[i].size;
}
snd_iprintf(buffer, "\n");
used = 0;
for (block = alloc->first, i = 0; block; block = block->next, i++) {
used += block->size;
snd_iprintf(buffer, "Block %i at 0x%lx onboard 0x%x size %i (0x%x):\n", i, (long) block, block->ptr, block->size, block->size);
if (block->share ||
block->share_id[0] || block->share_id[1] ||
block->share_id[2] || block->share_id[3])
snd_iprintf(buffer, " Share : %i [id0 0x%x] [id1 0x%x] [id2 0x%x] [id3 0x%x]\n",
block->share,
block->share_id[0], block->share_id[1],
block->share_id[2], block->share_id[3]);
snd_iprintf(buffer, " Flags :%s\n",
block->flags & SNDRV_GF1_MEM_BLOCK_16BIT ? " 16-bit" : "");
snd_iprintf(buffer, " Owner : ");
switch (block->owner) {
case SNDRV_GF1_MEM_OWNER_DRIVER:
snd_iprintf(buffer, "driver - %s\n", block->name);
break;
case SNDRV_GF1_MEM_OWNER_WAVE_SIMPLE:
snd_iprintf(buffer, "SIMPLE wave\n");
break;
case SNDRV_GF1_MEM_OWNER_WAVE_GF1:
snd_iprintf(buffer, "GF1 wave\n");
break;
case SNDRV_GF1_MEM_OWNER_WAVE_IWFFFF:
snd_iprintf(buffer, "IWFFFF wave\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
}
}
snd_iprintf(buffer, " Total: memory = %i, used = %i, free = %i\n",
total, used, total - used);
mutex_unlock(&alloc->memory_mutex);
#if 0
ultra_iprintf(buffer, " Verify: free = %i, max 8-bit block = %i, max 16-bit block = %i\n",
ultra_memory_free_size(card, &card->gf1.mem_alloc),
ultra_memory_free_block(card, &card->gf1.mem_alloc, 0),
ultra_memory_free_block(card, &card->gf1.mem_alloc, 1));
#endif
}
#endif
| gpl-2.0 |
streamagame/streamagame_kernel | drivers/power/ipaq_micro_battery.c | 235 | 7848 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* h3xxx atmel micro companion support, battery subdevice
* based on previous kernel 2.4 version
* Author : Alessandro Gardich <gremlin@gremlin.it>
* Author : Linus Walleij <linus.walleij@linaro.org>
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mfd/ipaq-micro.h>
#include <linux/power_supply.h>
#include <linux/workqueue.h>
#define BATT_PERIOD 100000 /* 100 seconds in milliseconds */
#define MICRO_BATT_CHEM_ALKALINE 0x01
#define MICRO_BATT_CHEM_NICD 0x02
#define MICRO_BATT_CHEM_NIMH 0x03
#define MICRO_BATT_CHEM_LION 0x04
#define MICRO_BATT_CHEM_LIPOLY 0x05
#define MICRO_BATT_CHEM_NOT_INSTALLED 0x06
#define MICRO_BATT_CHEM_UNKNOWN 0xff
#define MICRO_BATT_STATUS_HIGH 0x01
#define MICRO_BATT_STATUS_LOW 0x02
#define MICRO_BATT_STATUS_CRITICAL 0x04
#define MICRO_BATT_STATUS_CHARGING 0x08
#define MICRO_BATT_STATUS_CHARGEMAIN 0x10
#define MICRO_BATT_STATUS_DEAD 0x20 /* Battery will not charge */
#define MICRO_BATT_STATUS_NOTINSTALLED 0x20 /* For expansion pack batteries */
#define MICRO_BATT_STATUS_FULL 0x40 /* Battery fully charged */
#define MICRO_BATT_STATUS_NOBATTERY 0x80
#define MICRO_BATT_STATUS_UNKNOWN 0xff
struct micro_battery {
struct ipaq_micro *micro;
struct workqueue_struct *wq;
struct delayed_work update;
u8 ac;
u8 chemistry;
unsigned int voltage;
u16 temperature;
u8 flag;
};
static void micro_battery_work(struct work_struct *work)
{
struct micro_battery *mb = container_of(work,
struct micro_battery, update.work);
struct ipaq_micro_msg msg_battery = {
.id = MSG_BATTERY,
};
struct ipaq_micro_msg msg_sensor = {
.id = MSG_THERMAL_SENSOR,
};
/* First send battery message */
ipaq_micro_tx_msg_sync(mb->micro, &msg_battery);
if (msg_battery.rx_len < 4)
pr_info("ERROR");
/*
* Returned message format:
* byte 0: 0x00 = Not plugged in
* 0x01 = AC adapter plugged in
* byte 1: chemistry
* byte 2: voltage LSB
* byte 3: voltage MSB
* byte 4: flags
* byte 5-9: same for battery 2
*/
mb->ac = msg_battery.rx_data[0];
mb->chemistry = msg_battery.rx_data[1];
mb->voltage = ((((unsigned short)msg_battery.rx_data[3] << 8) +
msg_battery.rx_data[2]) * 5000L) * 1000 / 1024;
mb->flag = msg_battery.rx_data[4];
if (msg_battery.rx_len == 9)
pr_debug("second battery ignored\n");
/* Then read the sensor */
ipaq_micro_tx_msg_sync(mb->micro, &msg_sensor);
mb->temperature = msg_sensor.rx_data[1] << 8 | msg_sensor.rx_data[0];
queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD));
}
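/*
 * Illustrative sketch (not part of the driver): decoding a battery reply
 * laid out as documented above. The sample[] bytes and the helper are
 * hypothetical; the voltage scaling mirrors the driver's
 * raw * 5000 * 1000 / 1024 conversion to microvolts.
 */
#if 0
static void decode_battery_reply_example(void)
{
/* hypothetical reply: AC on, Li-ion, raw voltage 0x0333, charging */
const u8 sample[5] = { 0x01, 0x04, 0x33, 0x03, 0x08 };
unsigned int raw = (sample[3] << 8) | sample[2]; /* 0x0333 = 819 */
unsigned long long uv = (unsigned long long)raw * 5000 * 1000 / 1024;
/* prints ac=1 chem=4 voltage=3999023 uV flags=0x8 */
pr_debug("ac=%u chem=%u voltage=%llu uV flags=%#x\n",
sample[0], sample[1], uv, sample[4]);
}
#endif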
static int get_capacity(struct power_supply *b)
{
struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
switch (mb->flag & 0x07) {
case MICRO_BATT_STATUS_HIGH:
return 100;
case MICRO_BATT_STATUS_LOW:
return 50;
case MICRO_BATT_STATUS_CRITICAL:
return 5;
default:
break;
}
return 0;
}
static int get_status(struct power_supply *b)
{
struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
if (mb->flag == MICRO_BATT_STATUS_UNKNOWN)
return POWER_SUPPLY_STATUS_UNKNOWN;
if (mb->flag & MICRO_BATT_STATUS_FULL)
return POWER_SUPPLY_STATUS_FULL;
if ((mb->flag & MICRO_BATT_STATUS_CHARGING) ||
(mb->flag & MICRO_BATT_STATUS_CHARGEMAIN))
return POWER_SUPPLY_STATUS_CHARGING;
return POWER_SUPPLY_STATUS_DISCHARGING;
}
static int micro_batt_get_property(struct power_supply *b,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
switch (psp) {
case POWER_SUPPLY_PROP_TECHNOLOGY:
switch (mb->chemistry) {
case MICRO_BATT_CHEM_NICD:
val->intval = POWER_SUPPLY_TECHNOLOGY_NiCd;
break;
case MICRO_BATT_CHEM_NIMH:
val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
break;
case MICRO_BATT_CHEM_LION:
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
case MICRO_BATT_CHEM_LIPOLY:
val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
break;
default:
val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
break;
}
break;
case POWER_SUPPLY_PROP_STATUS:
val->intval = get_status(b);
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
val->intval = 4700000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = get_capacity(b);
break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = mb->temperature;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = mb->voltage;
break;
default:
return -EINVAL;
}
return 0;
}
static int micro_ac_get_property(struct power_supply *b,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = mb->ac;
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property micro_batt_power_props[] = {
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
static struct power_supply micro_batt_power = {
.name = "main-battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = micro_batt_power_props,
.num_properties = ARRAY_SIZE(micro_batt_power_props),
.get_property = micro_batt_get_property,
.use_for_apm = 1,
};
static enum power_supply_property micro_ac_power_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
static struct power_supply micro_ac_power = {
.name = "ac",
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = micro_ac_power_props,
.num_properties = ARRAY_SIZE(micro_ac_power_props),
.get_property = micro_ac_get_property,
};
static int micro_batt_probe(struct platform_device *pdev)
{
struct micro_battery *mb;
int ret;
mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
if (!mb)
return -ENOMEM;
mb->micro = dev_get_drvdata(pdev->dev.parent);
mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
if (!mb->wq)
return -ENOMEM;
INIT_DELAYED_WORK(&mb->update, micro_battery_work);
platform_set_drvdata(pdev, mb);
queue_delayed_work(mb->wq, &mb->update, 1);
ret = power_supply_register(&pdev->dev, &micro_batt_power);
if (ret < 0)
goto batt_err;
ret = power_supply_register(&pdev->dev, &micro_ac_power);
if (ret < 0)
goto ac_err;
dev_info(&pdev->dev, "iPAQ micro battery driver\n");
return 0;
ac_err:
power_supply_unregister(&micro_ac_power);
batt_err:
cancel_delayed_work_sync(&mb->update);
destroy_workqueue(mb->wq);
return ret;
}
static int micro_batt_remove(struct platform_device *pdev)
{
struct micro_battery *mb = platform_get_drvdata(pdev);
power_supply_unregister(&micro_ac_power);
power_supply_unregister(&micro_batt_power);
cancel_delayed_work_sync(&mb->update);
destroy_workqueue(mb->wq);
return 0;
}
static int micro_batt_suspend(struct device *dev)
{
struct micro_battery *mb = dev_get_drvdata(dev);
cancel_delayed_work_sync(&mb->update);
return 0;
}
static int micro_batt_resume(struct device *dev)
{
struct micro_battery *mb = dev_get_drvdata(dev);
queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD));
return 0;
}
static const struct dev_pm_ops micro_batt_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(micro_batt_suspend, micro_batt_resume)
};
static struct platform_driver micro_batt_device_driver = {
.driver = {
.name = "ipaq-micro-battery",
.pm = &micro_batt_dev_pm_ops,
},
.probe = micro_batt_probe,
.remove = micro_batt_remove,
};
module_platform_driver(micro_batt_device_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
MODULE_ALIAS("platform:battery-ipaq-micro");
| gpl-2.0 |
linuxium/rkm-kk | drivers/md/dm.c | 491 | 60653 | /*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
*/
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
/*
* For bio-based dm.
* One of these is allocated per bio.
*/
struct dm_io {
struct mapped_device *md;
int error;
atomic_t io_count;
struct bio *bio;
unsigned long start_time;
spinlock_t endio_lock;
};
/*
* For bio-based dm.
* One of these is allocated per target within a bio. Hopefully
* this will be simplified out one day.
*/
struct dm_target_io {
struct dm_io *io;
struct dm_target *ti;
union map_info info;
};
/*
* For request-based dm.
* One of these is allocated per request.
*/
struct dm_rq_target_io {
struct mapped_device *md;
struct dm_target *ti;
struct request *orig, clone;
int error;
union map_info info;
};
/*
* For request-based dm.
* One of these is allocated per bio.
*/
struct dm_rq_clone_bio_info {
struct bio *orig;
struct dm_rq_target_io *tio;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
if (bio && bio->bi_private)
return &((struct dm_target_io *)bio->bi_private)->info;
return NULL;
}
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
if (rq && rq->end_io_data)
return &((struct dm_rq_target_io *)rq->end_io_data)->info;
return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define MINOR_ALLOCED ((void *)-1)
/*
* Bits for the md->flags field.
*/
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
/*
* Work processed by per-device workqueue.
*/
struct mapped_device {
struct rw_semaphore io_lock;
struct mutex suspend_lock;
rwlock_t map_lock;
atomic_t holders;
atomic_t open_count;
unsigned long flags;
struct request_queue *queue;
unsigned type;
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
struct gendisk *disk;
char name[16];
void *interface_ptr;
/*
* A list of ios that arrived while we were suspended.
*/
atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
spinlock_t deferred_lock;
/*
* Processing queue (flush)
*/
struct workqueue_struct *wq;
/*
* The current mapping.
*/
struct dm_table *map;
/*
* io objects are allocated from here.
*/
mempool_t *io_pool;
mempool_t *tio_pool;
struct bio_set *bs;
/*
* Event handling.
*/
atomic_t event_nr;
wait_queue_head_t eventq;
atomic_t uevent_seq;
struct list_head uevent_list;
spinlock_t uevent_lock; /* Protect access to uevent_list */
/*
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
/* For saving the address of __make_request for request based dm */
make_request_fn *saved_make_request_fn;
/* sysfs handle */
struct kobject kobj;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
};
/*
* For mempools pre-allocation at the table loading time.
*/
struct dm_md_mempools {
mempool_t *io_pool;
mempool_t *tio_pool;
struct bio_set *bs;
};
#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
{
int r = -ENOMEM;
/* allocate a slab for the dm_ios */
_io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
return r;
/* allocate a slab for the target ios */
_tio_cache = KMEM_CACHE(dm_target_io, 0);
if (!_tio_cache)
goto out_free_io_cache;
_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
if (!_rq_tio_cache)
goto out_free_tio_cache;
_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
if (!_rq_bio_info_cache)
goto out_free_rq_tio_cache;
r = dm_uevent_init();
if (r)
goto out_free_rq_bio_info_cache;
_major = major;
r = register_blkdev(_major, _name);
if (r < 0)
goto out_uevent_exit;
if (!_major)
_major = r;
return 0;
out_uevent_exit:
dm_uevent_exit();
out_free_rq_bio_info_cache:
kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
kmem_cache_destroy(_tio_cache);
out_free_io_cache:
kmem_cache_destroy(_io_cache);
return r;
}
static void local_exit(void)
{
kmem_cache_destroy(_rq_bio_info_cache);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
_major = 0;
DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
local_init,
dm_target_init,
dm_linear_init,
dm_stripe_init,
dm_io_init,
dm_kcopyd_init,
dm_interface_init,
};
static void (*_exits[])(void) = {
local_exit,
dm_target_exit,
dm_linear_exit,
dm_stripe_exit,
dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
};
static int __init dm_init(void)
{
const int count = ARRAY_SIZE(_inits);
int r, i;
for (i = 0; i < count; i++) {
r = _inits[i]();
if (r)
goto bad;
}
return 0;
bad:
while (i--)
_exits[i]();
return r;
}
static void __exit dm_exit(void)
{
int i = ARRAY_SIZE(_exits);
while (i--)
_exits[i]();
/*
* Should be empty by this point.
*/
idr_remove_all(&_minor_idr);
idr_destroy(&_minor_idr);
}
/*
* Block device functions
*/
int dm_deleting_md(struct mapped_device *md)
{
return test_bit(DMF_DELETING, &md->flags);
}
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
if (!md)
goto out;
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md)) {
md = NULL;
goto out;
}
dm_get(md);
atomic_inc(&md->open_count);
out:
spin_unlock(&_minor_lock);
return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
spin_lock(&_minor_lock);
atomic_dec(&md->open_count);
dm_put(md);
spin_unlock(&_minor_lock);
return 0;
}
int dm_open_count(struct mapped_device *md)
{
return atomic_read(&md->open_count);
}
/*
* Guarantees nothing is using the device before it's deleted.
*/
int dm_lock_for_deletion(struct mapped_device *md)
{
int r = 0;
spin_lock(&_minor_lock);
if (dm_open_count(md))
r = -EBUSY;
else
set_bit(DMF_DELETING, &md->flags);
spin_unlock(&_minor_lock);
return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
struct dm_table *map = dm_get_live_table(md);
struct dm_target *tgt;
int r = -ENOTTY;
if (!map || !dm_table_get_size(map))
goto out;
/* We only support devices that have a single target */
if (dm_table_get_num_targets(map) != 1)
goto out;
tgt = dm_table_get_target(map, 0);
if (dm_suspended_md(md)) {
r = -EAGAIN;
goto out;
}
if (tgt->type->ioctl)
r = tgt->type->ioctl(tgt, cmd, arg);
out:
dm_table_put(map);
return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
return mempool_alloc(md->io_pool, GFP_NOIO);
}
static void free_io(struct mapped_device *md, struct dm_io *io)
{
mempool_free(io, md->io_pool);
}
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
mempool_free(tio, md->tio_pool);
}
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
gfp_t gfp_mask)
{
return mempool_alloc(md->tio_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
{
mempool_free(tio, tio->md->tio_pool);
}
static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
return mempool_alloc(md->io_pool, GFP_ATOMIC);
}
static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
mempool_free(info, info->tio->md->io_pool);
}
static int md_in_flight(struct mapped_device *md)
{
return atomic_read(&md->pending[READ]) +
atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
int cpu;
int rw = bio_data_dir(io->bio);
io->start_time = jiffies;
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
}
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
int pending, cpu;
int rw = bio_data_dir(bio);
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();
/*
* After this is decremented the bio must not be touched if it is
* a flush.
*/
pending = atomic_dec_return(&md->pending[rw]);
atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
pending += atomic_read(&md->pending[rw^0x1]);
/* nudge anyone waiting on suspend queue */
if (!pending)
wake_up(&md->wait);
}
/*
* Add the bio to the list of deferred io.
*/
static void queue_io(struct mapped_device *md, struct bio *bio)
{
unsigned long flags;
spin_lock_irqsave(&md->deferred_lock, flags);
bio_list_add(&md->deferred, bio);
spin_unlock_irqrestore(&md->deferred_lock, flags);
queue_work(md->wq, &md->work);
}
/*
* Everyone (including functions in this file), should use this
* function to access the md->map field, and make sure they call
* dm_table_put() when finished.
*/
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
struct dm_table *t;
unsigned long flags;
read_lock_irqsave(&md->map_lock, flags);
t = md->map;
if (t)
dm_table_get(t);
read_unlock_irqrestore(&md->map_lock, flags);
return t;
}
/*
* Get the geometry associated with a dm device
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
*geo = md->geometry;
return 0;
}
/*
* Set the geometry of a device.
*/
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
if (geo->start > sz) {
DMWARN("Start sector is beyond the geometry limits.");
return -EINVAL;
}
md->geometry = *geo;
return 0;
}
/*-----------------------------------------------------------------
* CRUD START:
* A more elegant solution is in the works that uses the queue
* merge fn, unfortunately there are a couple of changes to
* the block layer that I want to make for this. So in the
* interests of getting something for people to use I give
* you this clearly demarcated crap.
*---------------------------------------------------------------*/
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necessary.
*/
static void dec_pending(struct dm_io *io, int error)
{
unsigned long flags;
int io_error;
struct bio *bio;
struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
if (!(io->error > 0 && __noflush_suspending(md)))
io->error = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
if (atomic_dec_and_test(&io->io_count)) {
if (io->error == DM_ENDIO_REQUEUE) {
/*
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
bio_list_add_head(&md->deferred, io->bio);
else
/* noflush suspend was interrupted. */
io->error = -EIO;
spin_unlock_irqrestore(&md->deferred_lock, flags);
}
io_error = io->error;
bio = io->bio;
end_io_acct(io);
free_io(md, io);
if (io_error == DM_ENDIO_REQUEUE)
return;
if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_FLUSH.
*/
bio->bi_rw &= ~REQ_FLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
}
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
if (endio) {
r = endio(tio->ti, bio, error, &tio->info);
if (r < 0 || r == DM_ENDIO_REQUEUE)
/*
* error and requeue request are handled
* in dec_pending().
*/
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
}
}
/*
* Store md for cleanup instead of tio which is about to get freed.
*/
bio->bi_private = md->bs;
free_tio(md, tio);
bio_put(bio);
dec_pending(io, error);
}
/*
* Partial completion handling for request-based dm
*/
static void end_clone_bio(struct bio *clone, int error)
{
struct dm_rq_clone_bio_info *info = clone->bi_private;
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
unsigned int nr_bytes = info->orig->bi_size;
bio_put(clone);
if (tio->error)
/*
* An error has already been detected on the request.
* Once error occurred, just let clone->end_io() handle
* the remainder.
*/
return;
else if (error) {
/*
* Don't notice the error to the upper layer yet.
* The error handling decision is made by the target driver,
* when the request is completed.
*/
tio->error = error;
return;
}
/*
* I/O for the bio successfully completed.
* Notice the data completion to the upper layer.
*/
/*
* bios are processed from the head of the list.
* So the completing bio should always be rq->bio.
* If it's not, something wrong is happening.
*/
if (tio->orig->bio != bio)
DMERR("bio completion is going in the middle of the request");
/*
* Update the original request.
* Do not use blk_end_request() here, because it may complete
* the original request before the clone, and break the ordering.
*/
blk_update_request(tio->orig, 0, nr_bytes);
}
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
* Or do dm_get() before calling this function and dm_put() later.
*/
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
if (!md_in_flight(md))
wake_up(&md->wait);
/*
* Run this off this callpath, as drivers could invoke end_io while
* inside their request_fn (and holding the queue lock). Calling
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
if (run_queue)
blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
*/
dm_put(md);
}
static void free_rq_clone(struct request *clone)
{
struct dm_rq_target_io *tio = clone->end_io_data;
blk_rq_unprep_clone(clone);
free_rq_tio(tio);
}
/*
* Complete the clone and the original request.
* Must be called without queue lock.
*/
static void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
rq->errors = clone->errors;
rq->resid_len = clone->resid_len;
if (rq->sense)
/*
* We are using the sense buffer of the original
* request.
* So setting the length of the sense data is enough.
*/
rq->sense_len = clone->sense_len;
}
free_rq_clone(clone);
blk_end_request_all(rq, error);
rq_completed(md, rw, true);
}
static void dm_unprep_request(struct request *rq)
{
struct request *clone = rq->special;
rq->special = NULL;
rq->cmd_flags &= ~REQ_DONTPREP;
free_rq_clone(clone);
}
/*
* Requeue the original request of a clone.
*/
void dm_requeue_unmapped_request(struct request *clone)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request_queue *q = rq->q;
unsigned long flags;
dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)
{
blk_stop_queue(q);
}
static void stop_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
static void __start_queue(struct request_queue *q)
{
if (blk_queue_stopped(q))
blk_start_queue(q);
}
static void start_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
dm_request_endio_fn rq_end_io = NULL;
if (tio->ti) {
rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
if (r <= 0)
/* The target wants to complete the I/O */
dm_end_request(clone, r);
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the I/O */
return;
else if (r == DM_ENDIO_REQUEUE)
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
else {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
}
}
/*
* Request completion handler for request-based dm
*/
static void dm_softirq_done(struct request *rq)
{
bool mapped = true;
struct request *clone = rq->completion_data;
struct dm_rq_target_io *tio = clone->end_io_data;
if (rq->cmd_flags & REQ_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
}
/*
* Complete the clone and the original request with the error status
* through softirq context.
*/
static void dm_complete_request(struct request *clone, int error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
tio->error = error;
rq->completion_data = clone;
blk_complete_request(rq);
}
/*
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
* This may be used when the target's map_rq() function fails.
*/
void dm_kill_unmapped_request(struct request *clone, int error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
* Called with the queue lock held
*/
static void end_clone_request(struct request *clone, int error)
{
/*
 * This only cleans up the queue-side accounting for the clone's
 * dispatch. The clone is *NOT* actually freed here, because it was
 * allocated from dm's own mempool and REQ_ALLOCED is not set in
 * clone->cmd_flags.
 */
__blk_put_request(clone->q, clone);
/*
 * Actual request completion is done in a softirq context that does
 * not hold the queue lock. Completing here instead could deadlock,
 * because:
 * - the upper-level driver of the stack may submit another request
 *   during this completion
 * - a submission that requires the queue lock may be made against
 *   this queue
 */
dm_complete_request(clone, error);
}
/*
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
sector_t target_offset = dm_target_offset(ti, sector);
return ti->len - target_offset;
}
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
sector_t len = max_io_len_target_boundary(sector, ti);
/*
* Does the target need to split even further?
*/
if (ti->split_io) {
sector_t boundary;
sector_t offset = dm_target_offset(ti, sector);
boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
- offset;
if (len > boundary)
len = boundary;
}
return len;
}
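/*
 * Illustrative note (not part of the driver): the boundary arithmetic
 * above assumes ti->split_io is a power of two, so the AND-with-mask
 * rounds (offset + split_io) down to the next chunk boundary.  A
 * standalone userspace sketch of the same computation:
 *
 *	#include <assert.h>
 *	#include <stdio.h>
 *
 *	typedef unsigned long long sector_t;
 *
 *	static sector_t chunk_len(sector_t offset, sector_t split_io)
 *	{
 *		// distance from offset to the next split_io boundary
 *		return ((offset + split_io) & ~(split_io - 1)) - offset;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(chunk_len(0, 8) == 8);	// already aligned
 *		assert(chunk_len(5, 8) == 3);	// 5 rounds up to 8
 *		assert(chunk_len(8, 8) == 8);	// aligned again
 *		printf("boundary math ok\n");
 *		return 0;
 *	}
 */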
static void __map_bio(struct dm_target *ti, struct bio *clone,
struct dm_target_io *tio)
{
int r;
sector_t sector;
struct mapped_device *md;
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
/*
 * Map the clone. If r == 0 we don't need to do anything;
 * the target has assumed ownership of this io.
 */
atomic_inc(&tio->io->io_count);
sector = clone->bi_sector;
r = ti->type->map(ti, clone, &tio->info);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
tio->io->bio->bi_bdev->bd_dev, sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
/* error the io and bail out, or requeue it if needed */
md = tio->io->md;
dec_pending(tio->io, r);
/*
* Store bio_set for cleanup.
*/
clone->bi_private = md->bs;
bio_put(clone);
free_tio(md, tio);
} else if (r) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
}
struct clone_info {
struct mapped_device *md;
struct dm_table *map;
struct bio *bio;
struct dm_io *io;
sector_t sector;
sector_t sector_count;
unsigned short idx;
};
static void dm_bio_destructor(struct bio *bio)
{
struct bio_set *bs = bio->bi_private;
bio_free(bio, bs);
}
/*
* Creates a little bio that just does part of a bvec.
*/
static struct bio *split_bvec(struct bio *bio, sector_t sector,
unsigned short idx, unsigned int offset,
unsigned int len, struct bio_set *bs)
{
struct bio *clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
clone->bi_destructor = dm_bio_destructor;
*clone->bi_io_vec = *bv;
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
bio_integrity_clone(clone, bio, GFP_NOIO, bs);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
return clone;
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static struct bio *clone_bio(struct bio *bio, sector_t sector,
unsigned short idx, unsigned short bv_count,
unsigned int len, struct bio_set *bs)
{
struct bio *clone;
clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
__bio_clone(clone, bio);
clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
clone->bi_idx = idx;
clone->bi_vcnt = idx + bv_count;
clone->bi_size = to_bytes(len);
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
bio_integrity_clone(clone, bio, GFP_NOIO, bs);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, 0), len);
}
return clone;
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
struct dm_target *ti)
{
struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
return tio;
}
static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
unsigned request_nr, sector_t len)
{
struct dm_target_io *tio = alloc_tio(ci, ti);
struct bio *clone;
tio->info.target_request_nr = request_nr;
/*
 * Discard requests require the bio's inline iovecs to be initialized.
 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
 * and discard, so there is no concern about wasted bvec allocations.
 */
clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
__bio_clone(clone, ci->bio);
clone->bi_destructor = dm_bio_destructor;
if (len) {
clone->bi_sector = ci->sector;
clone->bi_size = to_bytes(len);
}
__map_bio(ti, clone, tio);
}
static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
unsigned num_requests, sector_t len)
{
unsigned request_nr;
for (request_nr = 0; request_nr < num_requests; request_nr++)
__issue_target_request(ci, ti, request_nr, len);
}
static int __clone_and_map_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__issue_target_requests(ci, ti, ti->num_flush_requests, 0);
return 0;
}
/*
* Perform all io with a single clone.
*/
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
struct bio *clone, *bio = ci->bio;
struct dm_target_io *tio;
tio = alloc_tio(ci, ti);
clone = clone_bio(bio, ci->sector, ci->idx,
bio->bi_vcnt - ci->idx, ci->sector_count,
ci->md->bs);
__map_bio(ti, clone, tio);
ci->sector_count = 0;
}
static int __clone_and_map_discard(struct clone_info *ci)
{
struct dm_target *ti;
sector_t len;
do {
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
/*
* Even though the device advertised discard support,
* reconfiguration might have changed that since the
* check was performed.
*/
if (!ti->num_discard_requests)
return -EOPNOTSUPP;
len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
__issue_target_requests(ci, ti, ti->num_discard_requests, len);
ci->sector += len;
} while (ci->sector_count -= len);
return 0;
}
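/*
 * Illustrative note (not part of the driver): the loop above consumes
 * sector_count in target-bounded chunks; the "while (count -= len)"
 * idiom terminates exactly when the range is exhausted.  Standalone
 * sketch of the same pattern with a fixed per-step limit:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long count = 10, pos = 0;
 *		do {
 *			unsigned long long len = count < 4 ? count : 4;
 *			printf("chunk at %llu, len %llu\n", pos, len);
 *			pos += len;
 *		} while (count -= len);	// stops when count hits 0
 *		return 0;
 *	}
 */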
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
struct dm_target *ti;
sector_t len = 0, max;
struct dm_target_io *tio;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __clone_and_map_discard(ci);
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
max = max_io_len(ci->sector, ti);
if (ci->sector_count <= max) {
/*
* Optimise for the simple case where we can do all of
* the remaining io with a single clone.
*/
__clone_and_map_simple(ci, ti);
} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
/*
* There are some bvecs that don't span targets.
* Do as many of these as possible.
*/
int i;
sector_t remaining = max;
sector_t bv_len;
for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
bv_len = to_sector(bio->bi_io_vec[i].bv_len);
if (bv_len > remaining)
break;
remaining -= bv_len;
len += bv_len;
}
tio = alloc_tio(ci, ti);
clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
ci->md->bs);
__map_bio(ti, clone, tio);
ci->sector += len;
ci->sector_count -= len;
ci->idx = i;
} else {
/*
* Handle a bvec that must be split between two or more targets.
*/
struct bio_vec *bv = bio->bi_io_vec + ci->idx;
sector_t remaining = to_sector(bv->bv_len);
unsigned int offset = 0;
do {
if (offset) {
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
max = max_io_len(ci->sector, ti);
}
len = min(remaining, max);
tio = alloc_tio(ci, ti);
clone = split_bvec(bio, ci->sector, ci->idx,
bv->bv_offset + offset, len,
ci->md->bs);
__map_bio(ti, clone, tio);
ci->sector += len;
ci->sector_count -= len;
offset += to_bytes(len);
} while (remaining -= len);
ci->idx++;
}
return 0;
}
/*
* Split the bio into several clones and submit it to targets.
*/
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
struct clone_info ci;
int error = 0;
ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
bio_io_error(bio);
return;
}
ci.md = md;
ci.io = alloc_io(md);
ci.io->error = 0;
atomic_set(&ci.io->io_count, 1);
ci.io->bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
ci.sector = bio->bi_sector;
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
if (bio->bi_rw & REQ_FLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __clone_and_map_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error)
error = __clone_and_map(&ci);
}
/* drop the extra reference count */
dec_pending(ci.io, error);
dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
* CRUD END
*---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
sector_t max_sectors;
int max_size = 0;
if (unlikely(!map))
goto out;
ti = dm_table_find_target(map, bvm->bi_sector);
if (!dm_target_is_valid(ti))
goto out_table;
/*
* Find maximum amount of I/O that won't need splitting
*/
max_sectors = min(max_io_len(bvm->bi_sector, ti),
(sector_t) BIO_MAX_SECTORS);
max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
if (max_size < 0)
max_size = 0;
/*
* merge_bvec_fn() returns number of bytes
* it can accept at this offset
* max is precomputed maximal io size
*/
if (max_size && ti->type->merge)
max_size = ti->type->merge(ti, bvm, biovec, max_size);
/*
 * If the target doesn't provide a merge method but some of the
 * underlying devices provided a merge_bvec method (we know this by
 * looking at queue_max_hw_sectors), then we can't allow bios with
 * multiple vector entries. So always set max_size to 0, and the code
 * below allows just one page.
 */
else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
max_size = 0;
out_table:
dm_table_put(map);
out:
/*
* Always allow an entire first page
*/
if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
max_size = biovec->bv_len;
return max_size;
}
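/*
 * Illustrative note (not part of the driver): sectors are converted
 * to bytes with SECTOR_SHIFT (9, i.e. 512-byte sectors) before the
 * bytes already accumulated in the bio are subtracted.  Standalone
 * check of the arithmetic:
 *
 *	#include <assert.h>
 *
 *	#define SECTOR_SHIFT 9
 *
 *	int main(void)
 *	{
 *		int max_sectors = 8, bi_size = 1024;
 *		int max_size = (max_sectors << SECTOR_SHIFT) - bi_size;
 *		assert(max_size == 3072);	// 8 * 512 - 1024
 *		return 0;
 *	}
 */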
/*
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
static int _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
int cpu;
down_read(&md->io_lock);
cpu = part_stat_lock();
part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
part_stat_unlock();
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
up_read(&md->io_lock);
if (bio_rw(bio) != READA)
queue_io(md, bio);
else
bio_io_error(bio);
return 0;
}
__split_and_process_bio(md, bio);
up_read(&md->io_lock);
return 0;
}
static int dm_make_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
return md->saved_make_request_fn(q, bio); /* call __make_request() */
}
static int dm_request_based(struct mapped_device *md)
{
return blk_queue_stackable(md->queue);
}
static int dm_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
if (dm_request_based(md))
return dm_make_request(q, bio);
return _dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
{
int r;
if (blk_queue_io_stat(rq->q))
rq->cmd_flags |= REQ_IO_STAT;
rq->start_time = jiffies;
r = blk_insert_cloned_request(rq->q, rq);
if (r)
dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);
static void dm_rq_bio_destructor(struct bio *bio)
{
struct dm_rq_clone_bio_info *info = bio->bi_private;
struct mapped_device *md = info->tio->md;
free_bio_info(info);
bio_free(bio, md->bs);
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
struct dm_rq_target_io *tio = data;
struct mapped_device *md = tio->md;
struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
if (!info)
return -ENOMEM;
info->orig = bio_orig;
info->tio = tio;
bio->bi_end_io = end_clone_bio;
bio->bi_private = info;
bio->bi_destructor = dm_rq_bio_destructor;
return 0;
}
static int setup_clone(struct request *clone, struct request *rq,
struct dm_rq_target_io *tio)
{
int r;
r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
dm_rq_bio_constructor, tio);
if (r)
return r;
clone->cmd = rq->cmd;
clone->cmd_len = rq->cmd_len;
clone->sense = rq->sense;
clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
return 0;
}
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
gfp_t gfp_mask)
{
struct request *clone;
struct dm_rq_target_io *tio;
tio = alloc_rq_tio(md, gfp_mask);
if (!tio)
return NULL;
tio->md = md;
tio->ti = NULL;
tio->orig = rq;
tio->error = 0;
memset(&tio->info, 0, sizeof(tio->info));
clone = &tio->clone;
if (setup_clone(clone, rq, tio)) {
/* -ENOMEM */
free_rq_tio(tio);
return NULL;
}
return clone;
}
/*
* Called with the queue lock held.
*/
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
struct request *clone;
if (unlikely(rq->special)) {
DMWARN("Already has something in rq->special.");
return BLKPREP_KILL;
}
clone = clone_rq(rq, md, GFP_ATOMIC);
if (!clone)
return BLKPREP_DEFER;
rq->special = clone;
rq->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
}
/*
* Returns:
* 0 : the request has been processed (not requeued)
* !0 : the request has been requeued
*/
static int map_request(struct dm_target *ti, struct request *clone,
struct mapped_device *md)
{
int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
tio->ti = ti;
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
break;
case DM_MAPIO_REMAPPED:
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(tio->orig));
dm_dispatch_request(clone);
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
requeued = 1;
break;
default:
if (r > 0) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
/* The target wants to complete the I/O */
dm_kill_unmapped_request(clone, r);
break;
}
return requeued;
}
static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
{
struct request *clone;
blk_start_request(orig);
clone = orig->special;
atomic_inc(&md->pending[rq_data_dir(clone)]);
/*
 * Hold an md reference here for the in-flight I/O.
 * We can't rely on the reference taken by the device opener,
 * because the device may be closed during request completion
 * once all bios have completed.
 * See the comment in rq_completed() too.
 */
dm_get(md);
return clone;
}
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
*/
static void dm_request_fn(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
struct request *rq, *clone;
sector_t pos;
/*
 * For suspend, check blk_queue_stopped() and increment
 * ->pending while holding the queue_lock once, so that the
 * number of in-flight I/Os is not incremented after the queue
 * has been stopped in dm_suspend().
 */
while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
goto delay_and_out;
/* always use block 0 to find the target for flushes for now */
pos = 0;
if (!(rq->cmd_flags & REQ_FLUSH))
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
if (!dm_target_is_valid(ti)) {
/*
 * Must perform the setup that dm_done() requires
 * before calling dm_kill_unmapped_request().
 */
DMERR_LIMIT("request attempted access beyond the end of device");
clone = dm_start_request(md, rq);
dm_kill_unmapped_request(clone, -EIO);
continue;
}
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
clone = dm_start_request(md, rq);
spin_unlock(q->queue_lock);
if (map_request(ti, clone, md))
goto requeued;
BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
}
goto out;
requeued:
BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
}
int dm_underlying_device_busy(struct request_queue *q)
{
return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
static int dm_lld_busy(struct request_queue *q)
{
int r;
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
r = 1;
else
r = dm_table_any_busy_target(map);
dm_table_put(map);
return r;
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
struct mapped_device *md = congested_data;
struct dm_table *map;
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
map = dm_get_live_table(md);
if (map) {
/*
 * Request-based dm cares only about its own queue when
 * queried for request_queue congestion status.
 */
if (dm_request_based(md))
r = md->queue->backing_dev_info.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
dm_table_put(map);
}
}
return r;
}
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
idr_remove(&_minor_idr, minor);
spin_unlock(&_minor_lock);
}
/*
* See if the device with a specific minor # is free.
*/
static int specific_minor(int minor)
{
int r, m;
if (minor >= (1 << MINORBITS))
return -EINVAL;
r = idr_pre_get(&_minor_idr, GFP_KERNEL);
if (!r)
return -ENOMEM;
spin_lock(&_minor_lock);
if (idr_find(&_minor_idr, minor)) {
r = -EBUSY;
goto out;
}
r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
if (r)
goto out;
if (m != minor) {
idr_remove(&_minor_idr, m);
r = -EBUSY;
goto out;
}
out:
spin_unlock(&_minor_lock);
return r;
}
static int next_free_minor(int *minor)
{
int r, m;
r = idr_pre_get(&_minor_idr, GFP_KERNEL);
if (!r)
return -ENOMEM;
spin_lock(&_minor_lock);
r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
if (r)
goto out;
if (m >= (1 << MINORBITS)) {
idr_remove(&_minor_idr, m);
r = -ENOSPC;
goto out;
}
*minor = m;
out:
spin_unlock(&_minor_lock);
return r;
}
static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
static void dm_init_md_queue(struct mapped_device *md)
{
/*
 * Request-based dm devices cannot be stacked on top of bio-based dm
 * devices. The type of this dm device has not been decided yet;
 * it is decided when the first table is loaded.
 * To prevent problematic device stacking, clear the queue flag
 * for request stacking support until then.
 *
 * This queue is new, so no concurrency on the queue_flags.
 */
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
/*
* Allocate and initialise a blank device with a given minor.
*/
static struct mapped_device *alloc_dev(int minor)
{
int r;
struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
void *old_md;
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
}
if (!try_module_get(THIS_MODULE))
goto bad_module_get;
/* get a minor number for the dev */
if (minor == DM_ANY_MINOR)
r = next_free_minor(&minor);
else
r = specific_minor(minor);
if (r < 0)
goto bad_minor;
md->type = DM_TYPE_NONE;
init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
spin_lock_init(&md->deferred_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
atomic_set(&md->event_nr, 0);
atomic_set(&md->uevent_seq, 0);
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
goto bad_queue;
dm_init_md_queue(md);
md->disk = alloc_disk(1);
if (!md->disk)
goto bad_disk;
atomic_set(&md->pending[0], 0);
atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->fops = &dm_blk_dops;
md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush",
WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
if (!md->wq)
goto bad_thread;
md->bdev = bdget_disk(md->disk, 0);
if (!md->bdev)
goto bad_bdev;
bio_init(&md->flush_bio);
md->flush_bio.bi_bdev = md->bdev;
md->flush_bio.bi_rw = WRITE_FLUSH;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
spin_unlock(&_minor_lock);
BUG_ON(old_md != MINOR_ALLOCED);
return md;
bad_bdev:
destroy_workqueue(md->wq);
bad_thread:
del_gendisk(md->disk);
put_disk(md->disk);
bad_disk:
blk_cleanup_queue(md->queue);
bad_queue:
free_minor(minor);
bad_minor:
module_put(THIS_MODULE);
bad_module_get:
kfree(md);
return NULL;
}
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
if (md->tio_pool)
mempool_destroy(md->tio_pool);
if (md->io_pool)
mempool_destroy(md->io_pool);
if (md->bs)
bioset_free(md->bs);
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
free_minor(minor);
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
module_put(THIS_MODULE);
kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p;
if (md->io_pool && md->tio_pool && md->bs)
/* the md already has necessary mempools */
goto out;
p = dm_table_get_md_mempools(t);
BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
md->tio_pool = p->tio_pool;
p->tio_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
out:
/* mempool bind completed; the table no longer needs its own mempools */
dm_table_free_md_mempools(t);
}
/*
* Bind a table to the device.
*/
static void event_callback(void *context)
{
unsigned long flags;
LIST_HEAD(uevents);
struct mapped_device *md = (struct mapped_device *) context;
spin_lock_irqsave(&md->uevent_lock, flags);
list_splice_init(&md->uevent_list, &uevents);
spin_unlock_irqrestore(&md->uevent_lock, flags);
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
}
/*
* Protected by md->suspend_lock obtained by dm_swap_table().
*/
static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
size = dm_table_get_size(t);
/*
* Wipe any geometry if the size of the table changed.
*/
if (size != get_capacity(md->disk))
memset(&md->geometry, 0, sizeof(md->geometry));
__set_size(md, size);
dm_table_event_callback(t, event_callback, md);
/*
 * If the old table type wasn't request-based, the queue was not
 * stopped during suspension. Stop it now to prevent I/O mapping
 * before resume.
 * This must be done before setting the queue restrictions,
 * because request-based dm may start running just after they are
 * set.
 */
if (dm_table_request_based(t) && !blk_queue_stopped(q))
stop_queue(q);
__bind_mempools(md, t);
write_lock_irqsave(&md->map_lock, flags);
old_map = md->map;
md->map = t;
dm_table_set_restrictions(t, q, limits);
write_unlock_irqrestore(&md->map_lock, flags);
return old_map;
}
/*
* Returns unbound table for the caller to free.
*/
static struct dm_table *__unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
unsigned long flags;
if (!map)
return NULL;
dm_table_event_callback(map, NULL, NULL);
write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
write_unlock_irqrestore(&md->map_lock, flags);
return map;
}
/*
* Constructor for a new device.
*/
int dm_create(int minor, struct mapped_device **result)
{
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
dm_sysfs_init(md);
*result = md;
return 0;
}
/*
* Functions to manage md->type.
* All are required to hold md->type_lock.
*/
void dm_lock_md_type(struct mapped_device *md)
{
mutex_lock(&md->type_lock);
}
void dm_unlock_md_type(struct mapped_device *md)
{
mutex_unlock(&md->type_lock);
}
void dm_set_md_type(struct mapped_device *md, unsigned type)
{
md->type = type;
}
unsigned dm_get_md_type(struct mapped_device *md)
{
return md->type;
}
/*
* Fully initialize a request-based queue (->elevator, ->request_fn, etc).
*/
static int dm_init_request_based_queue(struct mapped_device *md)
{
struct request_queue *q = NULL;
if (md->queue->elevator)
return 1;
/* Fully initialize the queue */
q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
if (!q)
return 0;
md->queue = q;
md->saved_make_request_fn = md->queue->make_request_fn;
dm_init_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
elv_register_queue(md->queue);
return 1;
}
/*
* Setup the DM device's queue based on md's type
*/
int dm_setup_md_queue(struct mapped_device *md)
{
if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
!dm_init_request_based_queue(md)) {
DMWARN("Cannot initialize queue for request-based mapped device");
return -EINVAL;
}
return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
unsigned minor = MINOR(dev);
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
(MINOR(disk_devt(dm_disk(md))) != minor) ||
dm_deleting_md(md) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
}
out:
spin_unlock(&_minor_lock);
return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md = dm_find_md(dev);
if (md)
dm_get(md);
return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
return md->interface_ptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
const char *dm_device_name(struct mapped_device *md)
{
return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
struct dm_table *map;
might_sleep();
spin_lock(&_minor_lock);
map = dm_get_live_table(md);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
/*
 * Rare, but for example I/O requests may still be in flight and
 * about to complete. Wait for all references to disappear.
 * No one may increment the mapped_device's reference count once
 * its state becomes DMF_FREEING.
 */
if (wait)
while (atomic_read(&md->holders))
msleep(1);
else if (atomic_read(&md->holders))
DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
dm_device_name(md), atomic_read(&md->holders));
dm_sysfs_exit(md);
dm_table_put(map);
dm_table_destroy(__unbind(md));
free_dev(md);
}
void dm_destroy(struct mapped_device *md)
{
__dm_destroy(md, true);
}
void dm_destroy_immediate(struct mapped_device *md)
{
__dm_destroy(md, false);
}
void dm_put(struct mapped_device *md)
{
atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&md->wait, &wait);
while (1) {
set_current_state(interruptible);
smp_mb();
if (!md_in_flight(md))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
signal_pending(current)) {
r = -EINTR;
break;
}
io_schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&md->wait, &wait);
return r;
}
/*
* Process the deferred bios
*/
static void dm_wq_work(struct work_struct *work)
{
struct mapped_device *md = container_of(work, struct mapped_device,
work);
struct bio *c;
down_read(&md->io_lock);
while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
spin_lock_irq(&md->deferred_lock);
c = bio_list_pop(&md->deferred);
spin_unlock_irq(&md->deferred_lock);
if (!c)
break;
up_read(&md->io_lock);
if (dm_request_based(md))
generic_make_request(c);
else
__split_and_process_bio(md, c);
down_read(&md->io_lock);
}
up_read(&md->io_lock);
}
static void dm_queue_flush(struct mapped_device *md)
{
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
smp_mb__after_clear_bit();
queue_work(md->wq, &md->work);
}
/*
* Swap in a new table, returning the old one for the caller to destroy.
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
struct dm_table *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
mutex_lock(&md->suspend_lock);
/* device must be suspended */
if (!dm_suspended_md(md))
goto out;
r = dm_calculate_queue_limits(table, &limits);
if (r) {
map = ERR_PTR(r);
goto out;
}
map = __bind(md, table, &limits);
out:
mutex_unlock(&md->suspend_lock);
return map;
}
/*
* Functions to lock and unlock any filesystem running on the
* device.
*/
static int lock_fs(struct mapped_device *md)
{
int r;
WARN_ON(md->frozen_sb);
md->frozen_sb = freeze_bdev(md->bdev);
if (IS_ERR(md->frozen_sb)) {
r = PTR_ERR(md->frozen_sb);
md->frozen_sb = NULL;
return r;
}
set_bit(DMF_FROZEN, &md->flags);
return 0;
}
static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
thaw_bdev(md->bdev, md->frozen_sb);
md->frozen_sb = NULL;
clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further io gets deferred.
 */
/*
* Suspend mechanism in request-based dm.
*
* 1. Flush all I/Os by lock_fs() if needed.
* 2. Stop dispatching any I/O by stopping the request_queue.
* 3. Wait for all in-flight I/Os to be completed or requeued.
*
* To abort suspend, start the request_queue.
*/
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
int r = 0;
int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
mutex_lock(&md->suspend_lock);
if (dm_suspended_md(md)) {
r = -EINVAL;
goto out_unlock;
}
map = dm_get_live_table(md);
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
* This flag is cleared before dm_suspend returns.
*/
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
/* This does not get reverted if there's an error later. */
dm_table_presuspend_targets(map);
/*
* Flush I/O to the device.
* Any I/O submitted after lock_fs() may not be flushed.
* noflush takes precedence over do_lockfs.
* (lock_fs() flushes I/Os and waits for them to complete.)
*/
if (!noflush && do_lockfs) {
r = lock_fs(md);
if (r)
goto out;
}
/*
 * Here we must make sure that no processes are submitting requests
 * to target drivers, i.e. no one may be executing
 * __split_and_process_bio. This is called from dm_request and
 * dm_wq_work.
 *
 * To get all processes out of __split_and_process_bio in dm_request,
 * we take the write lock. To prevent any process from reentering
 * __split_and_process_bio from dm_request, and to quiesce the thread
 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
 * flush_workqueue(md->wq).
 */
down_write(&md->io_lock);
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
up_write(&md->io_lock);
/*
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
if (dm_request_based(md))
stop_queue(md->queue);
flush_workqueue(md->wq);
/*
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
down_write(&md->io_lock);
if (noflush)
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
up_write(&md->io_lock);
/* were we interrupted? */
if (r < 0) {
dm_queue_flush(md);
if (dm_request_based(md))
start_queue(md->queue);
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
}
/*
* If dm_wait_for_completion returned 0, the device is completely
* quiescent now. There is no request-processing activity. All new
* requests are being added to md->deferred list.
*/
set_bit(DMF_SUSPENDED, &md->flags);
dm_table_postsuspend_targets(map);
out:
dm_table_put(map);
out_unlock:
mutex_unlock(&md->suspend_lock);
return r;
}
int dm_resume(struct mapped_device *md)
{
int r = -EINVAL;
struct dm_table *map = NULL;
mutex_lock(&md->suspend_lock);
if (!dm_suspended_md(md))
goto out;
map = dm_get_live_table(md);
if (!map || !dm_table_get_size(map))
goto out;
r = dm_table_resume_targets(map);
if (r)
goto out;
dm_queue_flush(md);
/*
 * Flushing deferred I/Os must be done after targets are resumed
 * so that target mappings work correctly.
 * Request-based dm queues deferred I/Os in its request_queue.
 */
if (dm_request_based(md))
start_queue(md->queue);
unlock_fs(md);
clear_bit(DMF_SUSPENDED, &md->flags);
r = 0;
out:
dm_table_put(map);
mutex_unlock(&md->suspend_lock);
return r;
}
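/*
 * Hedged usage sketch (hypothetical caller, not part of this file):
 * the usual table-replacement sequence built from the primitives
 * above. Error handling is abbreviated; the function name and flow
 * are illustrative assumptions.
 *
 *	static int example_replace_table(struct mapped_device *md,
 *					 struct dm_table *new_table)
 *	{
 *		struct dm_table *old_map;
 *		int r;
 *
 *		r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *		if (r)
 *			return r;
 *
 *		old_map = dm_swap_table(md, new_table);	// device must be suspended
 *		if (IS_ERR(old_map)) {
 *			dm_resume(md);
 *			return PTR_ERR(old_map);
 *		}
 *		if (old_map)
 *			dm_table_destroy(old_map);
 *
 *		return dm_resume(md);
 *	}
 */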
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
if (!cookie)
return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
action, envp);
}
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
return atomic_add_return(1, &md->uevent_seq);
}
uint32_t dm_get_event_nr(struct mapped_device *md)
{
return atomic_read(&md->event_nr);
}
int dm_wait_event(struct mapped_device *md, int event_nr)
{
return wait_event_interruptible(md->eventq,
(event_nr != atomic_read(&md->event_nr)));
}
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
unsigned long flags;
spin_lock_irqsave(&md->uevent_lock, flags);
list_add(elist, &md->uevent_list);
spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
* The gendisk is only valid as long as you have a reference
* count on 'md'.
*/
struct gendisk *dm_disk(struct mapped_device *md)
{
return md->disk;
}
struct kobject *dm_kobject(struct mapped_device *md)
{
return &md->kobj;
}
/*
 * struct mapped_device should not be exported outside of dm.c,
 * so use this check to verify that kobj is part of the md structure.
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;
md = container_of(kobj, struct mapped_device, kobj);
if (&md->kobj != kobj)
return NULL;
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md))
return NULL;
dm_get(md);
return md;
}
int dm_suspended_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_suspended(struct dm_target *ti)
{
return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
{
struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
if (!pools)
return NULL;
pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
mempool_create_slab_pool(MIN_IOS, _io_cache) :
mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
if (!pools->io_pool)
goto free_pools_and_out;
pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
mempool_create_slab_pool(MIN_IOS, _tio_cache) :
mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
if (!pools->tio_pool)
goto free_io_pool_and_out;
pools->bs = bioset_create(pool_size, 0);
if (!pools->bs)
goto free_tio_pool_and_out;
if (integrity && bioset_integrity_create(pools->bs, pool_size))
goto free_bioset_and_out;
return pools;
free_bioset_and_out:
bioset_free(pools->bs);
free_tio_pool_and_out:
mempool_destroy(pools->tio_pool);
free_io_pool_and_out:
mempool_destroy(pools->io_pool);
free_pools_and_out:
kfree(pools);
return NULL;
}
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
if (!pools)
return;
if (pools->io_pool)
mempool_destroy(pools->io_pool);
if (pools->tio_pool)
mempool_destroy(pools->tio_pool);
if (pools->bs)
bioset_free(pools->bs);
kfree(pools);
}
static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.owner = THIS_MODULE
};
EXPORT_SYMBOL(dm_get_mapinfo);
/*
* module hooks
*/
module_init(dm_init);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
petkan/linux | drivers/iio/accel/bma180.c | 491 | 21132 | /*
* bma180.c - IIO driver for Bosch BMA180 triaxial acceleration sensor
*
* Copyright 2013 Oleksandr Kravchenko <x0199363@ti.com>
*
* Support for BMA250 (c) Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
* SPI is not supported by this driver
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#define BMA180_DRV_NAME "bma180"
#define BMA180_IRQ_NAME "bma180_event"
enum {
BMA180,
BMA250,
};
struct bma180_data;
struct bma180_part_info {
const struct iio_chan_spec *channels;
unsigned num_channels;
const int *scale_table;
unsigned num_scales;
const int *bw_table;
unsigned num_bw;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
u8 bw_reg, bw_mask;
u8 scale_reg, scale_mask;
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
u8 softreset_reg;
int (*chip_config)(struct bma180_data *data);
void (*chip_disable)(struct bma180_data *data);
};
/* Register set */
#define BMA180_CHIP_ID 0x00 /* Needed to distinguish BMA180 from others */
#define BMA180_ACC_X_LSB 0x02 /* First of 6 registers of accel data */
#define BMA180_TEMP 0x08
#define BMA180_CTRL_REG0 0x0d
#define BMA180_RESET 0x10
#define BMA180_BW_TCS 0x20
#define BMA180_CTRL_REG3 0x21
#define BMA180_TCO_Z 0x30
#define BMA180_OFFSET_LSB1 0x35
/* BMA180_CTRL_REG0 bits */
#define BMA180_DIS_WAKE_UP BIT(0) /* Disable wake up mode */
#define BMA180_SLEEP BIT(1) /* 1 - chip will sleep */
#define BMA180_EE_W BIT(4) /* Unlock writing to addr from 0x20 */
#define BMA180_RESET_INT BIT(6) /* Reset pending interrupts */
/* BMA180_CTRL_REG3 bits */
#define BMA180_NEW_DATA_INT BIT(1) /* Intr every new accel data is ready */
/* BMA180_OFFSET_LSB1 skipping mode bit */
#define BMA180_SMP_SKIP BIT(0)
/* Bit masks for registers bit fields */
#define BMA180_RANGE 0x0e /* Range of measured accel values */
#define BMA180_BW 0xf0 /* Accel bandwidth */
#define BMA180_MODE_CONFIG 0x03 /* Config operation modes */
/* This value must be written to the reset register to perform a soft reset */
#define BMA180_RESET_VAL 0xb6
#define BMA180_ID_REG_VAL 0x03
/* Chip power modes */
#define BMA180_LOW_POWER 0x03
#define BMA250_RANGE_REG 0x0f
#define BMA250_BW_REG 0x10
#define BMA250_POWER_REG 0x11
#define BMA250_RESET_REG 0x14
#define BMA250_INT_ENABLE_REG 0x17
#define BMA250_INT_MAP_REG 0x1a
#define BMA250_INT_RESET_REG 0x21
#define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
#define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
#define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
#define BMA250_LOWPOWER_MASK BIT(6)
#define BMA250_DATA_INTEN_MASK BIT(4)
#define BMA250_INT1_DATA_MASK BIT(0)
#define BMA250_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
struct bma180_data {
struct i2c_client *client;
struct iio_trigger *trig;
const struct bma180_part_info *part_info;
struct mutex mutex;
bool sleep_state;
int scale;
int bw;
bool pmode;
u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */
};
enum bma180_chan {
AXIS_X,
AXIS_Y,
AXIS_Z,
TEMP
};
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
static int bma250_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
static int bma250_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
0, 0, 306458 };
static int bma180_get_data_reg(struct bma180_data *data, enum bma180_chan chan)
{
int ret;
if (data->sleep_state)
return -EBUSY;
switch (chan) {
case TEMP:
ret = i2c_smbus_read_byte_data(data->client, BMA180_TEMP);
if (ret < 0)
dev_err(&data->client->dev, "failed to read temp register\n");
break;
default:
ret = i2c_smbus_read_word_data(data->client,
BMA180_ACC_X_LSB + chan * 2);
if (ret < 0)
dev_err(&data->client->dev,
"failed to read accel_%c register\n",
'x' + chan);
}
return ret;
}
static int bma180_set_bits(struct bma180_data *data, u8 reg, u8 mask, u8 val)
{
int ret = i2c_smbus_read_byte_data(data->client, reg);
u8 reg_val = (ret & ~mask) | (val << (ffs(mask) - 1));
if (ret < 0)
return ret;
return i2c_smbus_write_byte_data(data->client, reg, reg_val);
}
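/*
 * Illustrative note (not part of the driver): the read-modify-write
 * above shifts 'val' to the field's lowest set bit via ffs().  A
 * standalone sketch of the same field insertion (ffs() is POSIX,
 * from <strings.h>):
 *
 *	#include <assert.h>
 *	#include <strings.h>
 *
 *	static unsigned char put_field(unsigned char reg,
 *				       unsigned char mask,
 *				       unsigned char val)
 *	{
 *		// clear the field, then insert val at the field's LSB
 *		return (reg & ~mask) | (val << (ffs(mask) - 1));
 *	}
 *
 *	int main(void)
 *	{
 *		// BMA180_RANGE is 0x0e: bits 3..1, so ffs() - 1 == 1
 *		assert(put_field(0xf1, 0x0e, 3) == 0xf7);
 *		return 0;
 *	}
 */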
static int bma180_reset_intr(struct bma180_data *data)
{
int ret = bma180_set_bits(data, data->part_info->int_reset_reg,
data->part_info->int_reset_mask, 1);
if (ret)
dev_err(&data->client->dev, "failed to reset interrupt\n");
return ret;
}
static int bma180_set_new_data_intr_state(struct bma180_data *data, bool state)
{
int ret = bma180_set_bits(data, data->part_info->int_enable_reg,
data->part_info->int_enable_mask, state);
if (ret)
goto err;
ret = bma180_reset_intr(data);
if (ret)
goto err;
return 0;
err:
dev_err(&data->client->dev,
"failed to set new data interrupt state %d\n", state);
return ret;
}
static int bma180_set_sleep_state(struct bma180_data *data, bool state)
{
int ret = bma180_set_bits(data, data->part_info->sleep_reg,
data->part_info->sleep_mask, state);
if (ret) {
dev_err(&data->client->dev,
"failed to set sleep state %d\n", state);
return ret;
}
data->sleep_state = state;
return 0;
}
static int bma180_set_ee_writing_state(struct bma180_data *data, bool state)
{
int ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_EE_W, state);
if (ret)
dev_err(&data->client->dev,
"failed to set ee writing state %d\n", state);
return ret;
}
static int bma180_set_bw(struct bma180_data *data, int val)
{
int ret, i;
if (data->sleep_state)
return -EBUSY;
for (i = 0; i < data->part_info->num_bw; ++i) {
if (data->part_info->bw_table[i] == val) {
ret = bma180_set_bits(data, data->part_info->bw_reg,
data->part_info->bw_mask, i);
if (ret) {
dev_err(&data->client->dev,
"failed to set bandwidth\n");
return ret;
}
data->bw = val;
return 0;
}
}
return -EINVAL;
}
static int bma180_set_scale(struct bma180_data *data, int val)
{
int ret, i;
if (data->sleep_state)
return -EBUSY;
for (i = 0; i < data->part_info->num_scales; ++i)
if (data->part_info->scale_table[i] == val) {
ret = bma180_set_bits(data, data->part_info->scale_reg,
data->part_info->scale_mask, i);
if (ret) {
dev_err(&data->client->dev,
"failed to set scale\n");
return ret;
}
data->scale = val;
return 0;
}
return -EINVAL;
}
static int bma180_set_pmode(struct bma180_data *data, bool mode)
{
u8 reg_val = mode ? data->part_info->lowpower_val : 0;
int ret = bma180_set_bits(data, data->part_info->power_reg,
data->part_info->power_mask, reg_val);
if (ret) {
dev_err(&data->client->dev, "failed to set power mode\n");
return ret;
}
data->pmode = mode;
return 0;
}
static int bma180_soft_reset(struct bma180_data *data)
{
int ret = i2c_smbus_write_byte_data(data->client,
data->part_info->softreset_reg, BMA180_RESET_VAL);
if (ret)
dev_err(&data->client->dev, "failed to reset the chip\n");
return ret;
}
static int bma180_chip_init(struct bma180_data *data)
{
/* Try to read chip_id register. It must return 0x03. */
int ret = i2c_smbus_read_byte_data(data->client, BMA180_CHIP_ID);
if (ret < 0)
return ret;
if (ret != BMA180_ID_REG_VAL)
return -ENODEV;
ret = bma180_soft_reset(data);
if (ret)
return ret;
/*
 * No serial transaction may occur for at least 10 us
 * after the soft_reset command
 */
msleep(20);
ret = bma180_set_new_data_intr_state(data, false);
if (ret)
return ret;
return bma180_set_pmode(data, false);
}
static int bma180_chip_config(struct bma180_data *data)
{
int ret = bma180_chip_init(data);
if (ret)
goto err;
ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_DIS_WAKE_UP, 1);
if (ret)
goto err;
ret = bma180_set_ee_writing_state(data, true);
if (ret)
goto err;
ret = bma180_set_bits(data, BMA180_OFFSET_LSB1, BMA180_SMP_SKIP, 1);
if (ret)
goto err;
ret = bma180_set_bw(data, 20); /* 20 Hz */
if (ret)
goto err;
ret = bma180_set_scale(data, 2452); /* 2 G */
if (ret)
goto err;
return 0;
err:
dev_err(&data->client->dev, "failed to config the chip\n");
return ret;
}
static int bma250_chip_config(struct bma180_data *data)
{
int ret = bma180_chip_init(data);
if (ret)
goto err;
ret = bma180_set_bw(data, 16); /* 16 Hz */
if (ret)
goto err;
ret = bma180_set_scale(data, 38344); /* 2 G */
if (ret)
goto err;
ret = bma180_set_bits(data, BMA250_INT_MAP_REG,
BMA250_INT1_DATA_MASK, 1);
if (ret)
goto err;
return 0;
err:
dev_err(&data->client->dev, "failed to config the chip\n");
return ret;
}
static void bma180_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
goto err;
if (bma180_set_ee_writing_state(data, false))
goto err;
if (bma180_set_sleep_state(data, true))
goto err;
return;
err:
dev_err(&data->client->dev, "failed to disable the chip\n");
}
static void bma250_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
goto err;
if (bma180_set_sleep_state(data, true))
goto err;
return;
err:
dev_err(&data->client->dev, "failed to disable the chip\n");
}
static ssize_t bma180_show_avail(char *buf, const int *vals, unsigned n,
bool micros)
{
size_t len = 0;
int i;
for (i = 0; i < n; i++) {
if (!vals[i])
continue;
len += scnprintf(buf + len, PAGE_SIZE - len,
micros ? "0.%06d " : "%d ", vals[i]);
}
buf[len - 1] = '\n';
return len;
}
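/*
 * Illustrative note (not part of the driver): with 'micros' set, each
 * table entry is printed as a fixed-point fraction, e.g. the BMA250
 * 2g scale value 38344 is shown as "0.038344".  Standalone sketch:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[16];
 *
 *		// zero-padded to six digits, i.e. micro-units
 *		snprintf(buf, sizeof(buf), "0.%06d", 38344);
 *		printf("%s\n", buf);	// prints 0.038344
 *		return 0;
 *	}
 */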
static ssize_t bma180_show_filter_freq_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bma180_data *data = iio_priv(dev_to_iio_dev(dev));
return bma180_show_avail(buf, data->part_info->bw_table,
data->part_info->num_bw, false);
}
static ssize_t bma180_show_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bma180_data *data = iio_priv(dev_to_iio_dev(dev));
return bma180_show_avail(buf, data->part_info->scale_table,
data->part_info->num_scales, true);
}
static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
S_IRUGO, bma180_show_filter_freq_avail, NULL, 0);
static IIO_DEVICE_ATTR(in_accel_scale_available,
S_IRUGO, bma180_show_scale_avail, NULL, 0);
static struct attribute *bma180_attributes[] = {
&iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.
dev_attr.attr,
&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
NULL,
};
static const struct attribute_group bma180_attrs_group = {
.attrs = bma180_attributes,
};
static int bma180_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2,
long mask)
{
struct bma180_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&data->mutex);
if (iio_buffer_enabled(indio_dev)) {
mutex_unlock(&data->mutex);
return -EBUSY;
}
ret = bma180_get_data_reg(data, chan->scan_index);
mutex_unlock(&data->mutex);
if (ret < 0)
return ret;
*val = sign_extend32(ret >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
*val = data->bw;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ACCEL:
*val = 0;
*val2 = data->scale;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
*val = 500;
return IIO_VAL_INT;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
*val = 48; /* 0 LSB @ 24 degree C */
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
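/*
 * Illustrative note (not part of the driver): raw samples are left
 * justified in the 16-bit register, so the shift drops the unused low
 * bits and sign_extend32() replicates the sign bit of the
 * 'realbits'-wide field.  Standalone sketch for a 14-bit sample
 * (shift == 2, sign bit at index 13):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static int32_t sign_extend(uint32_t value, int index)
 *	{
 *		int shift = 31 - index;
 *		// push the sign bit to bit 31, then arithmetic-shift back
 *		return (int32_t)(value << shift) >> shift;
 *	}
 *
 *	int main(void)
 *	{
 *		uint16_t reg = 0x8004;	// 14-bit value 0x2001, shifted left by 2
 *		int32_t v = sign_extend(reg >> 2, 13);
 *		assert(v == -8191);	// 0x2001 as signed 14-bit
 *		return 0;
 *	}
 */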
static int bma180_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct bma180_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
if (val)
return -EINVAL;
mutex_lock(&data->mutex);
ret = bma180_set_scale(data, val2);
mutex_unlock(&data->mutex);
return ret;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
if (val2)
return -EINVAL;
mutex_lock(&data->mutex);
ret = bma180_set_bw(data, val);
mutex_unlock(&data->mutex);
return ret;
default:
return -EINVAL;
}
}
static const struct iio_info bma180_info = {
.attrs = &bma180_attrs_group,
.read_raw = bma180_read_raw,
.write_raw = bma180_write_raw,
.driver_module = THIS_MODULE,
};
static const char * const bma180_power_modes[] = { "low_noise", "low_power" };
static int bma180_get_power_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct bma180_data *data = iio_priv(indio_dev);
return data->pmode;
}
static int bma180_set_power_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int mode)
{
struct bma180_data *data = iio_priv(indio_dev);
int ret;
mutex_lock(&data->mutex);
ret = bma180_set_pmode(data, mode);
mutex_unlock(&data->mutex);
return ret;
}
static const struct iio_enum bma180_power_mode_enum = {
.items = bma180_power_modes,
.num_items = ARRAY_SIZE(bma180_power_modes),
.get = bma180_get_power_mode,
.set = bma180_set_power_mode,
};
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
{ },
};
#define BMA180_ACC_CHANNEL(_axis, _bits) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
.realbits = _bits, \
.storagebits = 16, \
.shift = 16 - _bits, \
}, \
.ext_info = bma180_ext_info, \
}
#define BMA180_TEMP_CHANNEL { \
.type = IIO_TEMP, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), \
.scan_index = TEMP, \
.scan_type = { \
.sign = 's', \
.realbits = 8, \
.storagebits = 16, \
}, \
}
static const struct iio_chan_spec bma180_channels[] = {
BMA180_ACC_CHANNEL(X, 14),
BMA180_ACC_CHANNEL(Y, 14),
BMA180_ACC_CHANNEL(Z, 14),
BMA180_TEMP_CHANNEL,
IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct iio_chan_spec bma250_channels[] = {
BMA180_ACC_CHANNEL(X, 10),
BMA180_ACC_CHANNEL(Y, 10),
BMA180_ACC_CHANNEL(Z, 10),
BMA180_TEMP_CHANNEL,
IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct bma180_part_info bma180_part_info[] = {
[BMA180] = {
bma180_channels, ARRAY_SIZE(bma180_channels),
bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
BMA180_CTRL_REG0, BMA180_RESET_INT,
BMA180_CTRL_REG0, BMA180_SLEEP,
BMA180_BW_TCS, BMA180_BW,
BMA180_OFFSET_LSB1, BMA180_RANGE,
BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
BMA180_RESET,
bma180_chip_config,
bma180_chip_disable,
},
[BMA250] = {
bma250_channels, ARRAY_SIZE(bma250_channels),
bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
BMA250_POWER_REG, BMA250_SUSPEND_MASK,
BMA250_BW_REG, BMA250_BW_MASK,
BMA250_RANGE_REG, BMA250_RANGE_MASK,
BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
BMA250_RESET_REG,
bma250_chip_config,
bma250_chip_disable,
},
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bma180_data *data = iio_priv(indio_dev);
int64_t time_ns = iio_get_time_ns();
int bit, ret, i = 0;
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = bma180_get_data_reg(data, bit);
if (ret < 0) {
mutex_unlock(&data->mutex);
goto err;
}
((s16 *)data->buff)[i++] = ret;
}
mutex_unlock(&data->mutex);
iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
err:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
static int bma180_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct bma180_data *data = iio_priv(indio_dev);
return bma180_set_new_data_intr_state(data, state);
}
static int bma180_trig_try_reen(struct iio_trigger *trig)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct bma180_data *data = iio_priv(indio_dev);
return bma180_reset_intr(data);
}
static const struct iio_trigger_ops bma180_trigger_ops = {
.set_trigger_state = bma180_data_rdy_trigger_set_state,
.try_reenable = bma180_trig_try_reen,
.owner = THIS_MODULE,
};
static int bma180_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bma180_data *data;
struct iio_dev *indio_dev;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
data->part_info = &bma180_part_info[id->driver_data];
ret = data->part_info->chip_config(data);
if (ret < 0)
goto err_chip_disable;
mutex_init(&data->mutex);
indio_dev->dev.parent = &client->dev;
indio_dev->channels = data->part_info->channels;
indio_dev->num_channels = data->part_info->num_channels;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bma180_info;
if (client->irq > 0) {
data->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
indio_dev->id);
if (!data->trig) {
ret = -ENOMEM;
goto err_chip_disable;
}
ret = devm_request_irq(&client->dev, client->irq,
iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING,
"bma180_event", data->trig);
if (ret) {
dev_err(&client->dev, "unable to request IRQ\n");
goto err_trigger_free;
}
data->trig->dev.parent = &client->dev;
data->trig->ops = &bma180_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
indio_dev->trig = iio_trigger_get(data->trig);
ret = iio_trigger_register(data->trig);
if (ret)
goto err_trigger_free;
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
bma180_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "unable to setup iio triggered buffer\n");
goto err_trigger_unregister;
}
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
goto err_buffer_cleanup;
}
return 0;
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
if (data->trig)
iio_trigger_unregister(data->trig);
err_trigger_free:
iio_trigger_free(data->trig);
err_chip_disable:
data->part_info->chip_disable(data);
return ret;
}
static int bma180_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bma180_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
if (data->trig) {
iio_trigger_unregister(data->trig);
iio_trigger_free(data->trig);
}
mutex_lock(&data->mutex);
data->part_info->chip_disable(data);
mutex_unlock(&data->mutex);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int bma180_suspend(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct bma180_data *data = iio_priv(indio_dev);
int ret;
mutex_lock(&data->mutex);
ret = bma180_set_sleep_state(data, true);
mutex_unlock(&data->mutex);
return ret;
}
static int bma180_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct bma180_data *data = iio_priv(indio_dev);
int ret;
mutex_lock(&data->mutex);
ret = bma180_set_sleep_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
#define BMA180_PM_OPS (&bma180_pm_ops)
#else
#define BMA180_PM_OPS NULL
#endif
static struct i2c_device_id bma180_ids[] = {
{ "bma180", BMA180 },
{ "bma250", BMA250 },
{ }
};
MODULE_DEVICE_TABLE(i2c, bma180_ids);
static struct i2c_driver bma180_driver = {
.driver = {
.name = "bma180",
.pm = BMA180_PM_OPS,
},
.probe = bma180_probe,
.remove = bma180_remove,
.id_table = bma180_ids,
};
module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_DESCRIPTION("Bosch BMA180/BMA250 triaxial acceleration sensor");
MODULE_LICENSE("GPL");
| gpl-2.0 |
seem-sky/linux | net/dccp/minisocks.c | 491 | 7264 | /*
* net/dccp/minisocks.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"
struct inet_timewait_death_row dccp_death_row = {
.sysctl_max_tw_buckets = NR_FILE * 2,
.hashinfo = &dccp_hashinfo,
};
EXPORT_SYMBOL_GPL(dccp_death_row);
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
struct inet_timewait_sock *tw;
tw = inet_twsk_alloc(sk, &dccp_death_row, state);
if (tw != NULL) {
const struct inet_connection_sock *icsk = inet_csk(sk);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
tw->tw_v6_daddr = sk->sk_v6_daddr;
tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
tw->tw_ipv6only = sk->sk_ipv6only;
}
#endif
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
/* Get the TIME_WAIT timeout firing. */
if (timeo < rto)
timeo = rto;
tw->tw_timeout = DCCP_TIMEWAIT_LEN;
if (state == DCCP_TIME_WAIT)
timeo = DCCP_TIMEWAIT_LEN;
inet_twsk_schedule(tw, timeo);
inet_twsk_put(tw);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
DCCP_WARN("time wait bucket table overflow\n");
}
dccp_done(sk);
}
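/*
 * Illustrative sketch (not part of the original source): the rto used
 * in dccp_time_wait() above is 3.5 * icsk_rto, since
 * (x << 2) - (x >> 1) == 4x - x/2. A minimal helper restating the
 * arithmetic; the name is hypothetical:
 */
static inline int dccp_tw_rto_sketch(int icsk_rto)
{
/* e.g. icsk_rto = 200 jiffies -> 800 - 100 = 700 = 3.5 * 200 */
return (icsk_rto << 2) - (icsk_rto >> 1);
}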
struct sock *dccp_create_openreq_child(struct sock *sk,
const struct request_sock *req,
const struct sk_buff *skb)
{
/*
* Step 3: Process LISTEN state
*
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
*/
struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk != NULL) {
struct dccp_request_sock *dreq = dccp_rsk(req);
struct inet_connection_sock *newicsk = inet_csk(newsk);
struct dccp_sock *newdp = dccp_sk(newsk);
newdp->dccps_role = DCCP_ROLE_SERVER;
newdp->dccps_hc_rx_ackvec = NULL;
newdp->dccps_service_list = NULL;
newdp->dccps_service = dreq->dreq_service;
newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
newicsk->icsk_rto = DCCP_TIMEOUT_INIT;
INIT_LIST_HEAD(&newdp->dccps_featneg);
/*
* Step 3: Process LISTEN state
*
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR from packet (or Init Cookies)
*
* Setting AWL/AWH and SWL/SWH happens as part of the feature
* activation below, as these windows all depend on the local
* and remote Sequence Window feature values (7.5.2).
*/
newdp->dccps_iss = dreq->dreq_iss;
newdp->dccps_gss = dreq->dreq_gss;
newdp->dccps_gar = newdp->dccps_iss;
newdp->dccps_isr = dreq->dreq_isr;
newdp->dccps_gsr = dreq->dreq_gsr;
/*
* Activate features: initialise CCIDs, sequence windows etc.
*/
if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
/* It is still a raw copy of the parent, so invalidate
* the destructor and do a plain sk_free() */
newsk->sk_destruct = NULL;
sk_free(newsk);
return NULL;
}
dccp_init_xmit_timers(newsk);
DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
}
return newsk;
}
EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
/*
* Process an incoming packet for RESPOND sockets represented
* as a request_sock.
*/
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
struct sock *child = NULL;
struct dccp_request_sock *dreq = dccp_rsk(req);
/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
dccp_pr_debug("Retransmitted REQUEST\n");
dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
/*
* Send another RESPONSE packet
* To protect against Request floods, increment retrans
* counter (backoff, monitored by dccp_response_timer).
*/
inet_rtx_syn_ack(sk, req);
}
/* Network Duplicate, discard packet */
return NULL;
}
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
goto drop;
/* Invalid ACK */
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dreq->dreq_iss, dreq->dreq_gss)) {
dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
"dreq_iss=%llu, dreq_gss=%llu\n",
(unsigned long long)
DCCP_SKB_CB(skb)->dccpd_ack_seq,
(unsigned long long) dreq->dreq_iss,
(unsigned long long) dreq->dreq_gss);
goto drop;
}
if (dccp_parse_options(sk, dreq, skb))
goto drop;
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
if (child == NULL)
goto listen_overflow;
inet_csk_reqsk_queue_drop(sk, req);
inet_csk_reqsk_queue_add(sk, req, child);
out:
return child;
listen_overflow:
dccp_pr_debug("listen_overflow!\n");
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req);
goto out;
}
EXPORT_SYMBOL_GPL(dccp_check_req);
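/*
 * Illustrative note (restating the checks above, not new protocol
 * logic): the between48() test accepts an ACK only if it falls inside
 * the 48-bit window of sequence numbers this endpoint actually sent,
 *
 * dreq_iss <= dccpd_ack_seq <= dreq_gss (modulo-2^48 comparison)
 *
 * anything outside that window takes the drop path, which answers
 * with a Reset unless the offending packet was itself a Reset.
 */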
/*
* Queue segment on the new socket if the new socket is active,
* otherwise we just shortcircuit this and continue with
* the new socket.
*/
int dccp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
{
int ret = 0;
const int state = child->sk_state;
if (!sock_owned_by_user(child)) {
ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
skb->len);
/* Wakeup parent, send SIGIO */
if (state == DCCP_RESPOND && child->sk_state != state)
parent->sk_data_ready(parent);
} else {
/* Alas, it is possible again, because we do a lookup
* in the main socket hash table and the lock on the
* listening socket does not protect us any more.
*/
__sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
sock_put(child);
return ret;
}
EXPORT_SYMBOL_GPL(dccp_child_process);
void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *rsk)
{
DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}
EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);
int dccp_reqsk_init(struct request_sock *req,
struct dccp_sock const *dp, struct sk_buff const *skb)
{
struct dccp_request_sock *dreq = dccp_rsk(req);
inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
inet_rsk(req)->acked = 0;
dreq->dreq_timestamp_echo = 0;
/* inherit feature negotiation options from listening socket */
return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
}
EXPORT_SYMBOL_GPL(dccp_reqsk_init);
| gpl-2.0 |
allanm84/linux-fslc | drivers/soc/ti/knav_qmss_acc.c | 747 | 16084 | /*
* Keystone accumulator queue manager
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
* Author: Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/firmware.h>
#include "knav_qmss.h"
#define knav_range_offset_to_inst(kdev, range, q) \
(range->queue_base_inst + (q << kdev->inst_shift))
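/*
 * Illustrative note (an assumption about the macro above, not from the
 * original source): inst_shift is treated as log2 of the per-queue
 * instance stride, so with a hypothetical inst_shift of 6 (64-byte
 * instances) queue 3 resolves to queue_base_inst + (3 << 6), i.e.
 * 192 bytes past the range's first instance.
 */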
static void __knav_acc_notify(struct knav_range_info *range,
struct knav_acc_channel *acc)
{
struct knav_device *kdev = range->kdev;
struct knav_queue_inst *inst;
int range_base, queue;
range_base = kdev->base_id + range->queue_base;
if (range->flags & RANGE_MULTI_QUEUE) {
for (queue = 0; queue < range->num_queues; queue++) {
inst = knav_range_offset_to_inst(kdev, range,
queue);
if (inst->notify_needed) {
inst->notify_needed = 0;
dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
range_base + queue);
knav_queue_notify(inst);
}
}
} else {
queue = acc->channel - range->acc_info.start_channel;
inst = knav_range_offset_to_inst(kdev, range, queue);
dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
range_base + queue);
knav_queue_notify(inst);
}
}
static int knav_acc_set_notify(struct knav_range_info *range,
struct knav_queue_inst *kq,
bool enabled)
{
struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
struct knav_device *kdev = range->kdev;
u32 mask, offset;
/*
* when enabling, we need to re-trigger an interrupt if we
* have descriptors pending
*/
if (!enabled || atomic_read(&kq->desc_count) <= 0)
return 0;
kq->notify_needed = 1;
atomic_inc(&kq->acc->retrigger_count);
mask = BIT(kq->acc->channel % 32);
offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
kq->acc->name);
writel_relaxed(mask, pdsp->intd + offset);
return 0;
}
static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
{
struct knav_acc_channel *acc;
struct knav_queue_inst *kq = NULL;
struct knav_range_info *range;
struct knav_pdsp_info *pdsp;
struct knav_acc_info *info;
struct knav_device *kdev;
u32 *list, *list_cpu, val, idx, notifies;
int range_base, channel, queue = 0;
dma_addr_t list_dma;
range = _instdata;
info = &range->acc_info;
kdev = range->kdev;
pdsp = range->acc_info.pdsp;
acc = range->acc;
range_base = kdev->base_id + range->queue_base;
if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
for (queue = 0; queue < range->num_irqs; queue++)
if (range->irqs[queue].irq == irq)
break;
kq = knav_range_offset_to_inst(kdev, range, queue);
acc += queue;
}
channel = acc->channel;
list_dma = acc->list_dma[acc->list_index];
list_cpu = acc->list_cpu[acc->list_index];
dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n",
channel, acc->list_index, list_cpu, list_dma);
if (atomic_read(&acc->retrigger_count)) {
atomic_dec(&acc->retrigger_count);
__knav_acc_notify(range, acc);
writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
/* ack the interrupt */
writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
pdsp->intd + ACC_INTD_OFFSET_EOI);
return IRQ_HANDLED;
}
notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
WARN_ON(!notifies);
dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
DMA_FROM_DEVICE);
for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
list += ACC_LIST_ENTRY_WORDS) {
if (ACC_LIST_ENTRY_WORDS == 1) {
dev_dbg(kdev->dev,
"acc-irq: list %d, entry @%p, %08x\n",
acc->list_index, list, list[0]);
} else if (ACC_LIST_ENTRY_WORDS == 2) {
dev_dbg(kdev->dev,
"acc-irq: list %d, entry @%p, %08x %08x\n",
acc->list_index, list, list[0], list[1]);
} else if (ACC_LIST_ENTRY_WORDS == 4) {
dev_dbg(kdev->dev,
"acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
acc->list_index, list, list[0], list[1],
list[2], list[3]);
}
val = list[ACC_LIST_ENTRY_DESC_IDX];
if (!val)
break;
if (range->flags & RANGE_MULTI_QUEUE) {
queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
if (queue < range_base ||
queue >= range_base + range->num_queues) {
dev_err(kdev->dev,
"bad queue %d, expecting %d-%d\n",
queue, range_base,
range_base + range->num_queues);
break;
}
queue -= range_base;
kq = knav_range_offset_to_inst(kdev, range,
queue);
}
if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
atomic_dec(&kq->desc_count);
dev_err(kdev->dev,
"acc-irq: queue %d full, entry dropped\n",
queue + range_base);
continue;
}
idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
kq->descs[idx] = val;
kq->notify_needed = 1;
dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
val, idx, queue + range_base);
}
__knav_acc_notify(range, acc);
memset(list_cpu, 0, info->list_size);
dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
DMA_TO_DEVICE);
/* flip to the other list */
acc->list_index ^= 1;
/* reset the interrupt counter */
writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
/* ack the interrupt */
writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
pdsp->intd + ACC_INTD_OFFSET_EOI);
return IRQ_HANDLED;
}
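/*
 * Illustrative note (a sketch of the buffering scheme implied above,
 * not original documentation): the handler drains one half of a
 * ping-pong list pair while the accumulator firmware fills the other.
 * knav_init_acc_range() below carves both halves out of one block:
 *
 * half 0: list_cpu[0] = list_mem, list_dma[0] = list_dma
 * half 1: list_cpu[1] = list_mem + list_size, list_dma[1] = list_dma + list_size
 *
 * so acc->list_index ^= 1 alternates 0 -> 1 -> 0 on successive IRQs.
 */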
static int knav_range_setup_acc_irq(struct knav_range_info *range,
int queue, bool enabled)
{
struct knav_device *kdev = range->kdev;
struct knav_acc_channel *acc;
unsigned long cpu_map;
int ret = 0, irq;
u32 old, new;
if (range->flags & RANGE_MULTI_QUEUE) {
acc = range->acc;
irq = range->irqs[0].irq;
cpu_map = range->irqs[0].cpu_map;
} else {
acc = range->acc + queue;
irq = range->irqs[queue].irq;
cpu_map = range->irqs[queue].cpu_map;
}
old = acc->open_mask;
if (enabled)
new = old | BIT(queue);
else
new = old & ~BIT(queue);
acc->open_mask = new;
dev_dbg(kdev->dev,
"setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
old, new, acc->name);
if (likely(new == old))
return 0;
if (new && !old) {
dev_dbg(kdev->dev,
"setup-acc-irq: requesting %s for channel %s\n",
acc->name, acc->name);
ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
range);
if (!ret && cpu_map) {
ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
if (ret) {
dev_warn(range->kdev->dev,
"Failed to set IRQ affinity\n");
return ret;
}
}
}
if (old && !new) {
dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
acc->name, acc->name);
free_irq(irq, range);
}
return ret;
}
static const char *knav_acc_result_str(enum knav_acc_result result)
{
static const char * const result_str[] = {
[ACC_RET_IDLE] = "idle",
[ACC_RET_SUCCESS] = "success",
[ACC_RET_INVALID_COMMAND] = "invalid command",
[ACC_RET_INVALID_CHANNEL] = "invalid channel",
[ACC_RET_INACTIVE_CHANNEL] = "inactive channel",
[ACC_RET_ACTIVE_CHANNEL] = "active channel",
[ACC_RET_INVALID_QUEUE] = "invalid queue",
[ACC_RET_INVALID_RET] = "invalid return code",
};
if (result >= ARRAY_SIZE(result_str))
return result_str[ACC_RET_INVALID_RET];
else
return result_str[result];
}
static enum knav_acc_result
knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
struct knav_reg_acc_command *cmd)
{
u32 result;
dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
cmd->command, cmd->queue_mask, cmd->list_phys,
cmd->queue_num, cmd->timer_config);
writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys);
writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
writel_relaxed(cmd->command, &pdsp->acc_command->command);
/* wait for the command to clear */
do {
result = readl_relaxed(&pdsp->acc_command->command);
} while ((result >> 8) & 0xff);
return (result >> 24) & 0xff;
}
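/*
 * Illustrative note (derived from the accesses above, not a verified
 * register spec): the command word read back is treated as
 *
 * bits 8..15: command-busy field, polled until it clears
 * bits 24..31: result code, decoded via knav_acc_result_str()
 *
 * e.g. if ACC_RET_SUCCESS is 1, as the enum ordering in
 * knav_acc_result_str() suggests, a readback of 0x01000000 decodes to
 * (0x01000000 >> 24) & 0xff == 1, i.e. "success".
 */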
static void knav_acc_setup_cmd(struct knav_device *kdev,
struct knav_range_info *range,
struct knav_reg_acc_command *cmd,
int queue)
{
struct knav_acc_info *info = &range->acc_info;
struct knav_acc_channel *acc;
int queue_base;
u32 queue_mask;
if (range->flags & RANGE_MULTI_QUEUE) {
acc = range->acc;
queue_base = range->queue_base;
queue_mask = BIT(range->num_queues) - 1;
} else {
acc = range->acc + queue;
queue_base = range->queue_base + queue;
queue_mask = 0;
}
memset(cmd, 0, sizeof(*cmd));
cmd->command = acc->channel;
cmd->queue_mask = queue_mask;
cmd->list_phys = acc->list_dma[0];
cmd->queue_num = info->list_entries << 16;
cmd->queue_num |= queue_base;
cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
if (range->flags & RANGE_MULTI_QUEUE)
cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
cmd->timer_config |= info->pacing_mode << 16;
cmd->timer_config |= info->timer_count;
}
static void knav_acc_stop(struct knav_device *kdev,
struct knav_range_info *range,
int queue)
{
struct knav_reg_acc_command cmd;
struct knav_acc_channel *acc;
enum knav_acc_result result;
acc = range->acc + queue;
knav_acc_setup_cmd(kdev, range, &cmd, queue);
cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
acc->name, knav_acc_result_str(result));
}
static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
struct knav_range_info *range,
int queue)
{
struct knav_reg_acc_command cmd;
struct knav_acc_channel *acc;
enum knav_acc_result result;
acc = range->acc + queue;
knav_acc_setup_cmd(kdev, range, &cmd, queue);
cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
acc->name, knav_acc_result_str(result));
return result;
}
static int knav_acc_init_range(struct knav_range_info *range)
{
struct knav_device *kdev = range->kdev;
struct knav_acc_channel *acc;
enum knav_acc_result result;
int queue;
for (queue = 0; queue < range->num_queues; queue++) {
acc = range->acc + queue;
knav_acc_stop(kdev, range, queue);
acc->list_index = 0;
result = knav_acc_start(kdev, range, queue);
if (result != ACC_RET_SUCCESS)
return -EIO;
if (range->flags & RANGE_MULTI_QUEUE)
return 0;
}
return 0;
}
static int knav_acc_init_queue(struct knav_range_info *range,
struct knav_queue_inst *kq)
{
unsigned id = kq->id - range->queue_base;
kq->descs = devm_kzalloc(range->kdev->dev,
ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL);
if (!kq->descs)
return -ENOMEM;
kq->acc = range->acc;
if ((range->flags & RANGE_MULTI_QUEUE) == 0)
kq->acc += id;
return 0;
}
static int knav_acc_open_queue(struct knav_range_info *range,
struct knav_queue_inst *inst, unsigned flags)
{
unsigned id = inst->id - range->queue_base;
return knav_range_setup_acc_irq(range, id, true);
}
static int knav_acc_close_queue(struct knav_range_info *range,
struct knav_queue_inst *inst)
{
unsigned id = inst->id - range->queue_base;
return knav_range_setup_acc_irq(range, id, false);
}
static int knav_acc_free_range(struct knav_range_info *range)
{
struct knav_device *kdev = range->kdev;
struct knav_acc_channel *acc;
struct knav_acc_info *info;
int channel, channels;
info = &range->acc_info;
if (range->flags & RANGE_MULTI_QUEUE)
channels = 1;
else
channels = range->num_queues;
for (channel = 0; channel < channels; channel++) {
acc = range->acc + channel;
if (!acc->list_cpu[0])
continue;
dma_unmap_single(kdev->dev, acc->list_dma[0],
info->mem_size, DMA_BIDIRECTIONAL);
free_pages_exact(acc->list_cpu[0], info->mem_size);
}
devm_kfree(range->kdev->dev, range->acc);
return 0;
}
struct knav_range_ops knav_acc_range_ops = {
.set_notify = knav_acc_set_notify,
.init_queue = knav_acc_init_queue,
.open_queue = knav_acc_open_queue,
.close_queue = knav_acc_close_queue,
.init_range = knav_acc_init_range,
.free_range = knav_acc_free_range,
};
/**
* knav_init_acc_range: Initialise accumulator ranges
*
* @kdev: qmss device
* @node: device node
* @range: qmss range information
*
* Return 0 on success or error
*/
int knav_init_acc_range(struct knav_device *kdev,
struct device_node *node,
struct knav_range_info *range)
{
struct knav_acc_channel *acc;
struct knav_pdsp_info *pdsp;
struct knav_acc_info *info;
int ret, channel, channels;
int list_size, mem_size;
dma_addr_t list_dma;
void *list_mem;
u32 config[5];
range->flags |= RANGE_HAS_ACCUMULATOR;
info = &range->acc_info;
ret = of_property_read_u32_array(node, "accumulator", config, 5);
if (ret)
return ret;
info->pdsp_id = config[0];
info->start_channel = config[1];
info->list_entries = config[2];
info->pacing_mode = config[3];
info->timer_count = config[4] / ACC_DEFAULT_PERIOD;
if (info->start_channel > ACC_MAX_CHANNEL) {
dev_err(kdev->dev, "channel %d invalid for range %s\n",
info->start_channel, range->name);
return -EINVAL;
}
if (info->pacing_mode > 3) {
dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
info->pacing_mode, range->name);
return -EINVAL;
}
pdsp = knav_find_pdsp(kdev, info->pdsp_id);
if (!pdsp) {
dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
info->pdsp_id, range->name);
return -EINVAL;
}
info->pdsp = pdsp;
channels = range->num_queues;
if (of_get_property(node, "multi-queue", NULL)) {
range->flags |= RANGE_MULTI_QUEUE;
channels = 1;
if (range->queue_base & (32 - 1)) {
dev_err(kdev->dev,
"misaligned multi-queue accumulator range %s\n",
range->name);
return -EINVAL;
}
if (range->num_queues > 32) {
dev_err(kdev->dev,
"too many queues in accumulator range %s\n",
range->name);
return -EINVAL;
}
}
/* figure out list size */
list_size = info->list_entries;
list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
info->list_size = list_size;
mem_size = PAGE_ALIGN(list_size * 2);
info->mem_size = mem_size;
range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc),
GFP_KERNEL);
if (!range->acc)
return -ENOMEM;
for (channel = 0; channel < channels; channel++) {
acc = range->acc + channel;
acc->channel = info->start_channel + channel;
/* allocate memory for the two lists */
list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
if (!list_mem)
return -ENOMEM;
list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev->dev, list_dma)) {
free_pages_exact(list_mem, mem_size);
return -ENOMEM;
}
memset(list_mem, 0, mem_size);
dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
DMA_TO_DEVICE);
scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
acc->channel);
acc->list_cpu[0] = list_mem;
acc->list_cpu[1] = list_mem + list_size;
acc->list_dma[0] = list_dma;
acc->list_dma[1] = list_dma + list_size;
dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n",
acc->name, acc->channel, list_dma, list_mem);
}
range->ops = &knav_acc_range_ops;
return 0;
}
EXPORT_SYMBOL_GPL(knav_init_acc_range);
| gpl-2.0 |
schlund/2.6.35-photonic-kernel | arch/powerpc/kernel/ptrace32.c | 1515 | 9160 | /*
* ptrace for 32-bit processes running on a 64-bit kernel.
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
/*
* Does not yet catch signals sent when the child dies,
* in exit.c or in signal.c.
*/
/*
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
static long compat_ptrace_old(struct task_struct *child, long request,
long addr, long data)
{
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child,
task_user_regset_view(current), 0,
0, 32 * sizeof(compat_long_t),
compat_ptr(data));
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child,
task_user_regset_view(current), 0,
0, 32 * sizeof(compat_long_t),
compat_ptr(data));
}
return -EPERM;
}
/* Macros to workout the correct index for the FPR in the thread struct */
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
#define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
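/*
 * Illustrative worked example (not in the original source), assuming
 * TS_FPRWIDTH == 1: for index i = PT_FPR0 + 5 the macros above give
 *
 * FPRNUMBER(i) = 5 >> 1 = 2 (third 64-bit FPR)
 * FPRHALF(i) = 5 & 1 = 1 (its second 32-bit half)
 * FPRINDEX(i) = 1 * 2 * 2 + 1 = 5 (u32 index into thread.fpr)
 * FPRINDEX_3264(i) = 1 * 5 = 5 (u64-based index for the 3264 calls)
 */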
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
int ret;
switch (request) {
/*
* Read 4 bytes of the other process' storage
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is a pointer in the user's storage that contains an 8 byte
* address in the other process of the 4 bytes that are to be read
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PPC_PTRACE_PEEKTEXT_3264:
case PPC_PTRACE_PEEKDATA_3264: {
u32 tmp;
int copied;
u32 __user * addrOthers;
ret = -EIO;
/* Get the addr in the other process that we want to read */
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), 0);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *)data);
break;
}
/* Read a register (specified by ADDR) out of the "user area" */
case PTRACE_PEEKUSR: {
int index;
unsigned long tmp;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || (index > PT_FPSCR32))
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
tmp = ptrace_get_reg(child, index);
} else {
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
tmp = ((unsigned int *)child->thread.fpr)
[FPRINDEX(index)];
}
ret = put_user((unsigned int)tmp, (u32 __user *)data);
break;
}
/*
* Read 4 bytes out of the other process' pt_regs area
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is the offset into the other process' pt_regs structure
* that is to be read
* (this is run in a 32-bit process looking at a 64-bit process)
*/
case PPC_PTRACE_PEEKUSR_3264: {
u32 index;
u32 reg32bits;
u64 tmp;
u32 numReg;
u32 part;
ret = -EIO;
/* Determine which register the user wants */
index = (u64)addr >> 2;
numReg = index / 2;
/* Determine which part of the register the user wants */
if (index % 2)
part = 1; /* want the 2nd half of the register (right-most). */
else
part = 0; /* want the 1st half of the register (left-most). */
/* Validate the input - check to see if address is on the wrong boundary
* or beyond the end of the user area
*/
if ((addr & 3) || numReg > PT_FPSCR)
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
tmp = ((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)];
} else { /* register within PT_REGS struct */
tmp = ptrace_get_reg(child, numReg);
}
reg32bits = ((u32*)&tmp)[part];
ret = put_user(reg32bits, (u32 __user *)data);
break;
}
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
* addr is a pointer in the user's storage that contains an
* 8 byte address in the other process where the 4 bytes
* are to be written
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PPC_PTRACE_POKETEXT_3264:
case PPC_PTRACE_POKEDATA_3264: {
u32 tmp = data;
u32 __user * addrOthers;
/* Get the addr in the other process that we want to write into */
ret = -EIO;
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), 1) == sizeof(tmp))
break;
ret = -EIO;
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || (index > PT_FPSCR32))
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
((unsigned int *)child->thread.fpr)
[FPRINDEX(index)] = data;
ret = 0;
}
break;
}
/*
* Write 4 bytes into the other process' pt_regs area
* data is the 4 bytes that the user wants written
* addr is the offset into the other process' pt_regs structure
* that is to be written into
* (this is run in a 32-bit process looking at a 64-bit process)
*/
case PPC_PTRACE_POKEUSR_3264: {
u32 index;
u32 numReg;
ret = -EIO;
/* Determine which register the user wants */
index = (u64)addr >> 2;
numReg = index / 2;
/*
* Validate the input - check to see if address is on the
* wrong boundary or beyond the end of the user area
*/
if ((addr & 3) || (numReg > PT_FPSCR))
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
unsigned long freg = ptrace_get_reg(child, numReg);
if (index % 2)
freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
else
freg = (freg & 0xfffffffful) | (data << 32);
ret = ptrace_put_reg(child, numReg, freg);
} else {
u64 *tmp;
flush_fp_to_thread(child);
/* get 64 bit FPR ... */
tmp = &(((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)]);
/* ... write the 32 bit part we want */
((u32 *)tmp)[index % 2] = data;
ret = 0;
}
break;
}
case PTRACE_GET_DEBUGREG: {
ret = -EINVAL;
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
break;
ret = put_user(child->thread.dabr, (u32 __user *)data);
break;
}
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(
child, task_user_regset_view(current), 0,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(
child, task_user_regset_view(current), 0,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
case PTRACE_GETVRREGS:
case PTRACE_SETVRREGS:
case PTRACE_GETVSRREGS:
case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
case PPC_PTRACE_GETFPREGS:
case PPC_PTRACE_SETFPREGS:
case PTRACE_KILL:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
case PTRACE_SET_DEBUGREG:
case PTRACE_SYSCALL:
case PTRACE_CONT:
ret = arch_ptrace(child, request, addr, data);
break;
/* Old reverse args ptrace calls */
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
ret = compat_ptrace_old(child, request, addr, data);
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
return ret;
}
| gpl-2.0 |
goodwinos/linux-2.6 | drivers/iio/adc/ad7291.c | 1515 | 13913 | /*
* AD7291 8-Channel, I2C, 12-Bit SAR ADC with Temperature Sensor
*
* Copyright 2010-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/platform_data/ad7291.h>
/*
* Simplified handling
*
* If no events are enabled - single polled channel read.
* If events are enabled, direct reads are disabled unless the
* channel is in the read mask.
*
* The noise-delayed bit as per datasheet suggestion is always enabled.
*/
/*
* AD7291 register definitions
*/
#define AD7291_COMMAND 0x00
#define AD7291_VOLTAGE 0x01
#define AD7291_T_SENSE 0x02
#define AD7291_T_AVERAGE 0x03
#define AD7291_DATA_HIGH(x) ((x) * 3 + 0x4)
#define AD7291_DATA_LOW(x) ((x) * 3 + 0x5)
#define AD7291_HYST(x) ((x) * 3 + 0x6)
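/*
 * Illustrative note (follows directly from the macros above): each
 * channel owns a limit-register triplet at a stride of 3, e.g.
 *
 * channel 0: DATA_HIGH = 0x04, DATA_LOW = 0x05, HYST = 0x06
 * channel 1: DATA_HIGH = 0x07, DATA_LOW = 0x08, HYST = 0x09
 *
 * and the temperature limits use offset AD7291_VOLTAGE_OFFSET (8),
 * i.e. 0x1c/0x1d/0x1e, just below the alert-status registers.
 */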
#define AD7291_VOLTAGE_ALERT_STATUS 0x1F
#define AD7291_T_ALERT_STATUS 0x20
#define AD7291_BITS 12
#define AD7291_VOLTAGE_LIMIT_COUNT 8
/*
* AD7291 command
*/
#define AD7291_AUTOCYCLE BIT(0)
#define AD7291_RESET BIT(1)
#define AD7291_ALERT_CLEAR BIT(2)
#define AD7291_ALERT_POLARITY BIT(3)
#define AD7291_EXT_REF BIT(4)
#define AD7291_NOISE_DELAY BIT(5)
#define AD7291_T_SENSE_MASK BIT(7)
#define AD7291_VOLTAGE_MASK GENMASK(15, 8)
#define AD7291_VOLTAGE_OFFSET 8
/*
* AD7291 value masks
*/
#define AD7291_VALUE_MASK GENMASK(11, 0)
/*
* AD7291 alert register bits
*/
#define AD7291_T_LOW BIT(0)
#define AD7291_T_HIGH BIT(1)
#define AD7291_T_AVG_LOW BIT(2)
#define AD7291_T_AVG_HIGH BIT(3)
#define AD7291_V_LOW(x) BIT((x) * 2)
#define AD7291_V_HIGH(x) BIT((x) * 2 + 1)
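/*
 * Illustrative note (follows from the two macros above): each voltage
 * channel owns one bit pair in the voltage alert-status word, e.g.
 * channel 3 reports an under-range event on bit 6 (AD7291_V_LOW(3))
 * and an over-range event on bit 7 (AD7291_V_HIGH(3)).
 */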
struct ad7291_chip_info {
struct i2c_client *client;
struct regulator *reg;
u16 command;
u16 c_mask; /* Active voltage channels for events */
struct mutex state_lock;
};
static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
{
struct i2c_client *client = chip->client;
int ret = 0;
ret = i2c_smbus_read_word_swapped(client, reg);
if (ret < 0) {
dev_err(&client->dev, "I2C read error\n");
return ret;
}
*data = ret;
return 0;
}
static int ad7291_i2c_write(struct ad7291_chip_info *chip, u8 reg, u16 data)
{
return i2c_smbus_write_word_swapped(chip->client, reg, data);
}
static irqreturn_t ad7291_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct ad7291_chip_info *chip = iio_priv(private);
u16 t_status, v_status;
u16 command;
int i;
s64 timestamp = iio_get_time_ns();
if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
return IRQ_HANDLED;
if (ad7291_i2c_read(chip, AD7291_VOLTAGE_ALERT_STATUS, &v_status))
return IRQ_HANDLED;
if (!(t_status || v_status))
return IRQ_HANDLED;
command = chip->command | AD7291_ALERT_CLEAR;
ad7291_i2c_write(chip, AD7291_COMMAND, command);
command = chip->command & ~AD7291_ALERT_CLEAR;
ad7291_i2c_write(chip, AD7291_COMMAND, command);
/* For now treat t_sense and t_sense_average the same */
if ((t_status & AD7291_T_LOW) || (t_status & AD7291_T_AVG_LOW))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if ((t_status & AD7291_T_HIGH) || (t_status & AD7291_T_AVG_HIGH))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
if (v_status & AD7291_V_LOW(i))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
i,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
timestamp);
if (v_status & AD7291_V_HIGH(i))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
i,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
timestamp);
}
return IRQ_HANDLED;
}
static unsigned int ad7291_threshold_reg(const struct iio_chan_spec *chan,
enum iio_event_direction dir,
enum iio_event_info info)
{
unsigned int offset;
switch (chan->type) {
case IIO_VOLTAGE:
offset = chan->channel;
break;
case IIO_TEMP:
offset = AD7291_VOLTAGE_OFFSET;
break;
default:
return 0;
}
switch (info) {
case IIO_EV_INFO_VALUE:
if (dir == IIO_EV_DIR_FALLING)
return AD7291_DATA_HIGH(offset);
else
return AD7291_DATA_LOW(offset);
case IIO_EV_INFO_HYSTERESIS:
return AD7291_HYST(offset);
default:
break;
}
return 0;
}
static int ad7291_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int *val, int *val2)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
int ret;
u16 uval;
ret = ad7291_i2c_read(chip, ad7291_threshold_reg(chan, dir, info),
&uval);
if (ret < 0)
return ret;
if (info == IIO_EV_INFO_HYSTERESIS || chan->type == IIO_VOLTAGE)
*val = uval & AD7291_VALUE_MASK;
else
*val = sign_extend32(uval, 11);
return IIO_VAL_INT;
}
static int ad7291_write_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int val, int val2)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
if (info == IIO_EV_INFO_HYSTERESIS || chan->type == IIO_VOLTAGE) {
if (val > AD7291_VALUE_MASK || val < 0)
return -EINVAL;
} else {
if (val > 2047 || val < -2048)
return -EINVAL;
}
return ad7291_i2c_write(chip, ad7291_threshold_reg(chan, dir, info),
val);
}
static int ad7291_read_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
/*
* To be enabled the channel must simply be on. If any are enabled
* we are in continuous sampling mode
*/
switch (chan->type) {
case IIO_VOLTAGE:
return !!(chip->c_mask & BIT(15 - chan->channel));
case IIO_TEMP:
/* always on */
return 1;
default:
return -EINVAL;
}
}
static int ad7291_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
int state)
{
int ret = 0;
struct ad7291_chip_info *chip = iio_priv(indio_dev);
unsigned int mask;
u16 regval;
mutex_lock(&chip->state_lock);
regval = chip->command;
/*
* To be enabled the channel must simply be on. If any are enabled
* use continuous sampling mode.
* Possible to disable temp as well but that makes single read tricky.
*/
mask = BIT(15 - chan->channel);
switch (chan->type) {
case IIO_VOLTAGE:
if ((!state) && (chip->c_mask & mask))
chip->c_mask &= ~mask;
else if (state && (!(chip->c_mask & mask)))
chip->c_mask |= mask;
else
break;
regval &= ~AD7291_AUTOCYCLE;
regval |= chip->c_mask;
if (chip->c_mask) /* Enable autocycle? */
regval |= AD7291_AUTOCYCLE;
ret = ad7291_i2c_write(chip, AD7291_COMMAND, regval);
if (ret < 0)
goto error_ret;
chip->command = regval;
break;
default:
ret = -EINVAL;
}
error_ret:
mutex_unlock(&chip->state_lock);
return ret;
}
static int ad7291_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long mask)
{
int ret;
struct ad7291_chip_info *chip = iio_priv(indio_dev);
u16 regval;
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_VOLTAGE:
mutex_lock(&chip->state_lock);
/* Direct reads are not possible while in autocycle mode */
if (chip->command & AD7291_AUTOCYCLE) {
mutex_unlock(&chip->state_lock);
return -EBUSY;
}
/* Enable this channel alone */
regval = chip->command & (~AD7291_VOLTAGE_MASK);
regval |= BIT(15 - chan->channel);
ret = ad7291_i2c_write(chip, AD7291_COMMAND, regval);
if (ret < 0) {
mutex_unlock(&chip->state_lock);
return ret;
}
/* Read voltage */
ret = i2c_smbus_read_word_swapped(chip->client,
AD7291_VOLTAGE);
if (ret < 0) {
mutex_unlock(&chip->state_lock);
return ret;
}
*val = ret & AD7291_VALUE_MASK;
mutex_unlock(&chip->state_lock);
return IIO_VAL_INT;
case IIO_TEMP:
/* Assumes tsense bit of command register always set */
ret = i2c_smbus_read_word_swapped(chip->client,
AD7291_T_SENSE);
if (ret < 0)
return ret;
*val = sign_extend32(ret, 11);
return IIO_VAL_INT;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_swapped(chip->client,
AD7291_T_AVERAGE);
if (ret < 0)
return ret;
*val = sign_extend32(ret, 11);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
if (chip->reg) {
int vref;
vref = regulator_get_voltage(chip->reg);
if (vref < 0)
return vref;
*val = vref / 1000;
} else {
*val = 2500;
}
*val2 = AD7291_BITS;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
/*
* One LSB of the ADC corresponds to 0.25 deg C.
* The temperature reading is in 12-bit twos
* complement format
*/
*val = 250;
return IIO_VAL_INT;
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
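/*
 * Illustrative worked example (not in the original source): the
 * temperature scale of 250 returned above encodes 0.25 deg C per LSB
 * in milli-degrees, so userspace computes
 *
 * temp_mC = raw * scale
 *
 * e.g. a sign-extended raw reading of 100 -> 100 * 250 = 25000
 * milli-deg C, i.e. 25 deg C.
 */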
static const struct iio_event_spec ad7291_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_EITHER,
.mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
},
};
#define AD7291_VOLTAGE_CHAN(_chan) \
{ \
.type = IIO_VOLTAGE, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.indexed = 1, \
.channel = _chan, \
.event_spec = ad7291_events, \
.num_event_specs = ARRAY_SIZE(ad7291_events), \
}
static const struct iio_chan_spec ad7291_channels[] = {
AD7291_VOLTAGE_CHAN(0),
AD7291_VOLTAGE_CHAN(1),
AD7291_VOLTAGE_CHAN(2),
AD7291_VOLTAGE_CHAN(3),
AD7291_VOLTAGE_CHAN(4),
AD7291_VOLTAGE_CHAN(5),
AD7291_VOLTAGE_CHAN(6),
AD7291_VOLTAGE_CHAN(7),
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_AVERAGE_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.indexed = 1,
.channel = 0,
.event_spec = ad7291_events,
.num_event_specs = ARRAY_SIZE(ad7291_events),
}
};
static const struct iio_info ad7291_info = {
.read_raw = &ad7291_read_raw,
.read_event_config = &ad7291_read_event_config,
.write_event_config = &ad7291_write_event_config,
.read_event_value = &ad7291_read_event_value,
.write_event_value = &ad7291_write_event_value,
.driver_module = THIS_MODULE,
};
static int ad7291_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad7291_platform_data *pdata = client->dev.platform_data;
struct ad7291_chip_info *chip;
struct iio_dev *indio_dev;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
chip = iio_priv(indio_dev);
if (pdata && pdata->use_external_ref) {
chip->reg = devm_regulator_get(&client->dev, "vref");
if (IS_ERR(chip->reg))
return PTR_ERR(chip->reg);
ret = regulator_enable(chip->reg);
if (ret)
return ret;
}
mutex_init(&chip->state_lock);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
chip->client = client;
chip->command = AD7291_NOISE_DELAY |
AD7291_T_SENSE_MASK | /* Tsense always enabled */
AD7291_ALERT_POLARITY; /* set irq polarity low level */
if (pdata && pdata->use_external_ref)
chip->command |= AD7291_EXT_REF;
indio_dev->name = id->name;
indio_dev->channels = ad7291_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7291_channels);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &ad7291_info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = ad7291_i2c_write(chip, AD7291_COMMAND, AD7291_RESET);
if (ret) {
ret = -EIO;
goto error_disable_reg;
}
ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
if (ret) {
ret = -EIO;
goto error_disable_reg;
}
if (client->irq > 0) {
ret = request_threaded_irq(client->irq,
NULL,
&ad7291_event_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
id->name,
indio_dev);
if (ret)
goto error_disable_reg;
}
ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_irq;
return 0;
error_unreg_irq:
if (client->irq)
free_irq(client->irq, indio_dev);
error_disable_reg:
if (chip->reg)
regulator_disable(chip->reg);
return ret;
}
static int ad7291_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ad7291_chip_info *chip = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
if (client->irq)
free_irq(client->irq, indio_dev);
if (chip->reg)
regulator_disable(chip->reg);
return 0;
}
static const struct i2c_device_id ad7291_id[] = {
{ "ad7291", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7291_id);
static struct i2c_driver ad7291_driver = {
.driver = {
.name = KBUILD_MODNAME,
},
.probe = ad7291_probe,
.remove = ad7291_remove,
.id_table = ad7291_id,
};
module_i2c_driver(ad7291_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7291 ADC driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
chuncky/nuc970kernel | linux-3.10.x/drivers/tty/serial/omap-serial.c | 2027 | 43653 | /*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
*
* Copyright (C) 2010 Texas Instruments.
*
* Authors:
* Govindraj R <govindraj.raja@ti.com>
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Note: This driver is kept separate from the 8250 driver because we
* cannot overload the 8250 driver with omap platform-specific
* configuration; a separate driver makes it easier to implement
* features like DMA, hardware flow control and software flow control
* as required for the omap platform.
*/
#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/serial-omap.h>
#define OMAP_MAX_HSUART_PORTS 6
#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
#define OMAP_UART_REV_42 0x0402
#define OMAP_UART_REV_46 0x0406
#define OMAP_UART_REV_52 0x0502
#define OMAP_UART_REV_63 0x0603
#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
#define DEFAULT_CLK_SPEED 48000000 /* 48 MHz */
/* SCR register bitmasks */
#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6)
#define OMAP_UART_SCR_TX_EMPTY (1 << 3)
/* FCR register bitmasks */
#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK (0x3 << 6)
#define OMAP_UART_FCR_TX_FIFO_TRIG_MASK (0x3 << 4)
/* MVR register bitmasks */
#define OMAP_UART_MVR_SCHEME_SHIFT 30
#define OMAP_UART_LEGACY_MVR_MAJ_MASK 0xf0
#define OMAP_UART_LEGACY_MVR_MAJ_SHIFT 4
#define OMAP_UART_LEGACY_MVR_MIN_MASK 0x0f
#define OMAP_UART_MVR_MAJ_MASK 0x700
#define OMAP_UART_MVR_MAJ_SHIFT 8
#define OMAP_UART_MVR_MIN_MASK 0x3f
#define OMAP_UART_DMA_CH_FREE -1
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
#define OMAP_MODE13X_SPEED 230400
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
#define OMAP_UART_WER_MOD_WKUP 0X7F
/* Enable XON/XOFF flow control on output */
#define OMAP_UART_SW_TX 0x08
/* Enable XON/XOFF flow control on input */
#define OMAP_UART_SW_RX 0x02
#define OMAP_UART_SW_CLR 0xF0
#define OMAP_UART_TCR_TRIG 0x0F
struct uart_omap_dma {
u8 uart_dma_tx;
u8 uart_dma_rx;
int rx_dma_channel;
int tx_dma_channel;
dma_addr_t rx_buf_dma_phys;
dma_addr_t tx_buf_dma_phys;
unsigned int uart_base;
/*
* Buffer for rx DMA. It is not required for tx because the buffer
* comes from port structure.
*/
unsigned char *rx_buf;
unsigned int prev_rx_dma_pos;
int tx_buf_size;
int tx_dma_used;
int rx_dma_used;
spinlock_t tx_lock;
spinlock_t rx_lock;
/* timer to poll activity on rx dma */
struct timer_list rx_timer;
unsigned int rx_buf_size;
unsigned int rx_poll_rate;
unsigned int rx_timeout;
};
struct uart_omap_port {
struct uart_port port;
struct uart_omap_dma uart_dma;
struct device *dev;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
unsigned char fcr;
unsigned char efr;
unsigned char dll;
unsigned char dlh;
unsigned char mdr1;
unsigned char scr;
int use_dma;
/*
* Some bits in registers are cleared on a read, so they must
* be saved whenever the register is read but the bits will not
* be immediately processed.
*/
unsigned int lsr_break_flag;
unsigned char msr_saved_flags;
char name[20];
unsigned long port_activity;
int context_loss_cnt;
u32 errata;
u8 wakeups_enabled;
int DTR_gpio;
int DTR_inverted;
int DTR_active;
struct pm_qos_request pm_qos_request;
u32 latency;
u32 calc_latency;
struct work_struct qos_work;
struct pinctrl *pins;
};
#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
static struct workqueue_struct *serial_omap_uart_wq;
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
offset <<= up->port.regshift;
return readw(up->port.membase + offset);
}
static inline void serial_out(struct uart_omap_port *up, int offset, int value)
{
offset <<= up->port.regshift;
writew(value, up->port.membase + offset);
}
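/*
 * Illustrative note (standard 8250-style register addressing, stated
 * as an assumption rather than taken from this file): port.regshift
 * scales register numbers to the bus stride, so with regshift == 2
 * register UART_IER (1) is accessed at membase + (1 << 2) = membase + 4.
 */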
static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
{
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
}
static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
{
struct omap_uart_port_info *pdata = up->dev->platform_data;
if (!pdata || !pdata->get_context_loss_count)
return 0;
return pdata->get_context_loss_count(up->dev);
}
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = up->dev->platform_data;
if (!pdata || !pdata->enable_wakeup)
return;
pdata->enable_wakeup(up->dev, enable);
}
/*
* serial_omap_baud_is_mode16 - check if baud rate is MODE16X
* @port: uart port info
* @baud: baudrate for which mode needs to be determined
*
* Returns true if the baud rate should use MODE16X and false for MODE13X.
* The OMAP TRM table "UART Mode Baud Rates, Divisor Values, and Error
* Rates" does not determine modes for all common baud rates. E.g. for
* a 1000000 baud rate the mode must be 16x, but according to that
* table it would be chosen as 13x.
*/
static bool
serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
{
unsigned int n13 = port->uartclk / (13 * baud);
unsigned int n16 = port->uartclk / (16 * baud);
int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
if (baudAbsDiff13 < 0)
baudAbsDiff13 = -baudAbsDiff13;
if (baudAbsDiff16 < 0)
baudAbsDiff16 = -baudAbsDiff16;
return (baudAbsDiff13 > baudAbsDiff16);
}
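/*
 * Illustrative worked example (values assumed; this is the 1000000
 * baud case the comment above calls out, with uartclk = 48000000):
 *
 * n13 = 48000000 / (13 * 1000000) = 3, 48000000 / (13 * 3) = 1230769
 * n16 = 48000000 / (16 * 1000000) = 3, 48000000 / (16 * 3) = 1000000
 *
 * |1000000 - 1230769| = 230769 > |1000000 - 1000000| = 0, so the
 * function returns true and MODE16X is selected, unlike the TRM table.
 */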
/*
* serial_omap_get_divisor - calculate divisor value
* @port: uart port info
* @baud: baudrate for which divisor needs to be calculated.
*/
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int divisor;
if (!serial_omap_baud_is_mode16(port, baud))
divisor = 13;
else
divisor = 16;
return port->uartclk/(baud * divisor);
}
static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
serial_omap_stop_tx(&up->port);
return;
}
count = up->port.fifosize / 4;
do {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
spin_unlock(&up->port.lock);
uart_write_wakeup(&up->port);
spin_lock(&up->port.lock);
}
if (uart_circ_empty(xmit))
serial_omap_stop_tx(&up->port);
}
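/*
 * Illustrative note (an assumption about intent, not documented in the
 * original source): the count of fifosize / 4 above caps each TX
 * interrupt at a quarter of the FIFO, e.g. a 64-byte FIFO drains at
 * most 16 characters per UART_IIR_THRI event before the circular
 * buffer state is re-checked.
 */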
static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
{
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_omap_start_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
serial_omap_enable_ier_thri(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_throttle(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_unthrottle(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static unsigned int check_modem_status(struct uart_omap_port *up)
{
unsigned int status;
status = serial_in(up, UART_MSR);
status |= up->msr_saved_flags;
up->msr_saved_flags = 0;
if ((status & UART_MSR_ANY_DELTA) == 0)
return status;
if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
up->port.state != NULL) {
if (status & UART_MSR_TERI)
up->port.icount.rng++;
if (status & UART_MSR_DDSR)
up->port.icount.dsr++;
if (status & UART_MSR_DDCD)
uart_handle_dcd_change
(&up->port, status & UART_MSR_DCD);
if (status & UART_MSR_DCTS)
uart_handle_cts_change
(&up->port, status & UART_MSR_CTS);
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
return status;
}
static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
{
unsigned int flag;
unsigned char ch = 0;
if (likely(lsr & UART_LSR_DR))
ch = serial_in(up, UART_RX);
up->port.icount.rx++;
flag = TTY_NORMAL;
if (lsr & UART_LSR_BI) {
flag = TTY_BREAK;
lsr &= ~(UART_LSR_FE | UART_LSR_PE);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
return;
}
if (lsr & UART_LSR_PE) {
flag = TTY_PARITY;
up->port.icount.parity++;
}
if (lsr & UART_LSR_FE) {
flag = TTY_FRAME;
up->port.icount.frame++;
}
if (lsr & UART_LSR_OE)
up->port.icount.overrun++;
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
lsr |= up->lsr_break_flag;
}
#endif
uart_insert_char(&up->port, lsr, UART_LSR_OE, 0, flag);
}
static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
{
unsigned char ch = 0;
unsigned int flag;
if (!(lsr & UART_LSR_DR))
return;
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
if (uart_handle_sysrq_char(&up->port, ch))
return;
uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
}
/**
* serial_omap_irq() - This handles the interrupt from one port
* @irq: uart port irq number
* @dev_id: uart port info
*/
static irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
struct uart_omap_port *up = dev_id;
unsigned int iir, lsr;
unsigned int type;
irqreturn_t ret = IRQ_NONE;
int max_count = 256;
spin_lock(&up->port.lock);
pm_runtime_get_sync(up->dev);
do {
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
break;
ret = IRQ_HANDLED;
lsr = serial_in(up, UART_LSR);
/* extract IRQ type from IIR register */
type = iir & 0x3e;
switch (type) {
case UART_IIR_MSI:
check_modem_status(up);
break;
case UART_IIR_THRI:
transmit_chars(up, lsr);
break;
case UART_IIR_RX_TIMEOUT:
/* FALLTHROUGH */
case UART_IIR_RDI:
serial_omap_rdi(up, lsr);
break;
case UART_IIR_RLSI:
serial_omap_rlsi(up, lsr);
break;
case UART_IIR_CTS_RTS_DSR:
/* simply try again */
break;
case UART_IIR_XOFF:
/* FALLTHROUGH */
default:
break;
}
} while (!(iir & UART_IIR_NO_INT) && max_count--);
spin_unlock(&up->port.lock);
tty_flip_buffer_push(&up->port.state->port);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
up->port_activity = jiffies;
return ret;
}
static unsigned int serial_omap_tx_empty(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
unsigned int ret = 0;
pm_runtime_get_sync(up->dev);
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int status;
unsigned int ret = 0;
pm_runtime_get_sync(up->dev);
status = check_modem_status(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char mcr = 0, old_mcr;
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
pm_runtime_get_sync(up->dev);
old_mcr = serial_in(up, UART_MCR);
old_mcr &= ~(UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_OUT1 |
UART_MCR_DTR | UART_MCR_RTS);
up->mcr = old_mcr | mcr;
serial_out(up, UART_MCR, up->mcr);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (gpio_is_valid(up->DTR_gpio) &&
!!(mctrl & TIOCM_DTR) != up->DTR_active) {
up->DTR_active = !up->DTR_active;
if (gpio_cansleep(up->DTR_gpio))
schedule_work(&up->qos_work);
else
gpio_set_value(up->DTR_gpio,
up->DTR_active != up->DTR_inverted);
}
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static int serial_omap_startup(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
int retval;
/*
* Allocate the IRQ
*/
retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
up->name, up);
if (retval)
return retval;
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_omap_clear_fifos(up);
/* For Hardware flow control */
serial_out(up, UART_MCR, UART_MCR_RTS);
/*
* Clear the interrupt registers.
*/
(void) serial_in(up, UART_LSR);
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/*
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
/*
* Most PC uarts need OUT2 raised to enable interrupts.
*/
up->port.mctrl |= TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
up->msr_saved_flags = 0;
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
/* Enable module level wake up */
serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
up->port_activity = jiffies;
return 0;
}
static void serial_omap_shutdown(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
/*
* Disable interrupts from this port
*/
up->ier = 0;
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition and FIFOs
*/
serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
serial_omap_clear_fifos(up);
/*
* Read data port to reset things, and then free the irq
*/
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
free_irq(up->port.irq, up);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
{
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
qos_work);
pm_qos_update_request(&up->pm_qos_request, up->latency);
if (gpio_is_valid(up->DTR_gpio))
gpio_set_value_cansleep(up->DTR_gpio,
up->DTR_active != up->DTR_inverted);
}
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char cval = 0;
unsigned long flags = 0;
unsigned int baud, quot;
switch (termios->c_cflag & CSIZE) {
case CS5:
cval = UART_LCR_WLEN5;
break;
case CS6:
cval = UART_LCR_WLEN6;
break;
case CS7:
cval = UART_LCR_WLEN7;
break;
default:
case CS8:
cval = UART_LCR_WLEN8;
break;
}
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
if (termios->c_cflag & CMSPAR)
cval |= UART_LCR_SPAR;
/*
* Ask the core to calculate the divisor for us.
*/
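/*
 * The highest supported rates use the 13x divisor mode (see the
 * MDR1 setup below), hence the uartclk/13 ceiling.
 */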
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
/* calculate wakeup latency constraint */
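/* roughly the time to fill the FIFO at this baud rate, assuming 8 bits/char */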
up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8);
up->latency = up->calc_latency;
schedule_work(&up->qos_work);
up->dll = quot & 0xff;
up->dlh = quot >> 8;
up->mdr1 = UART_OMAP_MDR1_DISABLE;
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
pm_runtime_get_sync(up->dev);
spin_lock_irqsave(&up->port.lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* Modem status interrupts
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval;
up->scr = 0;
/* FIFOs and DMA Settings */
/*
 * FCR can be changed only when the baud clock is not running,
 * i.e. when DLL_REG and DLH_REG are set to 0.
 */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_DLL, 0);
serial_out(up, UART_DLM, 0);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR) & ~UART_EFR_ECB;
up->efr &= ~UART_EFR_SCD;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr = serial_in(up, UART_MCR) & ~UART_MCR_TCRTLR;
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
/*
 * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK enables a
 * granularity of 1 for the RX trigger level. Together with the
 * RX FIFO trigger level field set to 1 (described as 16
 * characters below) and TLR[3:0] set to zero, this results in
 * an RX FIFO threshold of 1 character rather than 16.
 */
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 16 spaces
*/
up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK;
up->fcr |= UART_FCR6_R_TRIGGER_16 | UART_FCR6_T_TRIGGER_24 |
UART_FCR_ENABLE_FIFO;
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_OMAP_SCR, up->scr);
/* Reset UART_MCR_TCRTLR: this must be done with the EFR_ECB bit set */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
/* Protocol, Baud Rate, and Interrupt Settings */
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_DLL, up->dll); /* LS of divisor */
serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, cval);
if (!serial_omap_baud_is_mode16(port, baud))
up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
up->mdr1 = UART_OMAP_MDR1_16X_MODE;
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Configure flow control */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/* XON1/XOFF1 accessible mode B, TCRTLR=0, ECB=0 */
serial_out(up, UART_XON1, termios->c_cc[VSTART]);
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* Enable access to TCR/TLR */
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
/* Enable AUTORTS and AUTOCTS */
up->efr |= UART_EFR_CTS | UART_EFR_RTS;
/* Ensure MCR RTS is asserted */
up->mcr |= UART_MCR_RTS;
} else {
/* Disable AUTORTS and AUTOCTS */
up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);
}
if (up->port.flags & UPF_SOFT_FLOW) {
/* clear SW control mode bits */
up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
* Enable XON/XOFF flow control on input.
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXON)
up->efr |= OMAP_UART_SW_RX;
/*
* IXOFF Flag:
* Enable XON/XOFF flow control on output.
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXOFF)
up->efr |= OMAP_UART_SW_TX;
/*
* IXANY Flag:
* Enable any character to restart output.
* Operation resumes after receiving any
* character after recognition of the XOFF character
*/
if (termios->c_iflag & IXANY)
up->mcr |= UART_MCR_XONANY;
else
up->mcr &= ~UART_MCR_XONANY;
}
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
static int serial_omap_set_wake(struct uart_port *port, unsigned int state)
{
struct uart_omap_port *up = to_uart_omap_port(port);
serial_omap_enable_wakeup(up, state);
return 0;
}
static void
serial_omap_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char efr;
dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
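/* the sleep-mode bit in IER is writable only while EFR[4] (ECB) is set */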
serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
if (!device_may_wakeup(up->dev)) {
if (!state)
pm_runtime_forbid(up->dev);
else
pm_runtime_allow(up->dev);
}
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static void serial_omap_release_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_release_port+\n");
}
static int serial_omap_request_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_request_port+\n");
return 0;
}
static void serial_omap_config_port(struct uart_port *port, int flags)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
up->port.line);
up->port.type = PORT_OMAP;
up->port.flags |= UPF_SOFT_FLOW | UPF_HARD_FLOW;
}
static int
serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
dev_dbg(port->dev, "serial_omap_verify_port+\n");
return -EINVAL;
}
static const char *
serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
static inline void wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
if (msr & UART_MSR_CTS)
break;
udelay(1);
}
}
}
#ifdef CONFIG_CONSOLE_POLL
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int status;
pm_runtime_get_sync(up->dev);
status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR)) {
status = NO_POLL_CHAR;
goto out;
}
status = serial_in(up, UART_RX);
out:
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return status;
}
#endif /* CONFIG_CONSOLE_POLL */
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
static struct uart_omap_port *serial_omap_console_ports[OMAP_MAX_HSUART_PORTS];
static struct uart_driver serial_omap_reg;
static void serial_omap_console_putchar(struct uart_port *port, int ch)
{
struct uart_omap_port *up = to_uart_omap_port(port);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
static void
serial_omap_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_omap_port *up = serial_omap_console_ports[co->index];
unsigned long flags;
unsigned int ier;
int locked = 1;
pm_runtime_get_sync(up->dev);
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&up->port.lock);
else
spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
uart_console_write(&up->port, s, count, serial_omap_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
/*
* The receive handling will happen properly because the
* receive ready bit will still be set; it is not cleared
* on read. Modem status changes, however, will not be
* re-signalled, so we must check them ourselves if any
* flags were saved while running with interrupts off.
*/
if (up->msr_saved_flags)
check_modem_status(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
}
static int __init
serial_omap_console_setup(struct console *co, char *options)
{
struct uart_omap_port *up;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (serial_omap_console_ports[co->index] == NULL)
return -ENODEV;
up = serial_omap_console_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_omap_console = {
.name = OMAP_SERIAL_NAME,
.write = serial_omap_console_write,
.device = uart_console_device,
.setup = serial_omap_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_omap_reg,
};
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
serial_omap_console_ports[up->port.line] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
#else
#define OMAP_CONSOLE NULL
static inline void serial_omap_add_console_port(struct uart_omap_port *up)
{}
#endif
static struct uart_ops serial_omap_pops = {
.tx_empty = serial_omap_tx_empty,
.set_mctrl = serial_omap_set_mctrl,
.get_mctrl = serial_omap_get_mctrl,
.stop_tx = serial_omap_stop_tx,
.start_tx = serial_omap_start_tx,
.throttle = serial_omap_throttle,
.unthrottle = serial_omap_unthrottle,
.stop_rx = serial_omap_stop_rx,
.enable_ms = serial_omap_enable_ms,
.break_ctl = serial_omap_break_ctl,
.startup = serial_omap_startup,
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
.set_wake = serial_omap_set_wake,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
.config_port = serial_omap_config_port,
.verify_port = serial_omap_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = serial_omap_poll_put_char,
.poll_get_char = serial_omap_poll_get_char,
#endif
};
static struct uart_driver serial_omap_reg = {
.owner = THIS_MODULE,
.driver_name = "OMAP-SERIAL",
.dev_name = OMAP_SERIAL_NAME,
.nr = OMAP_MAX_HSUART_PORTS,
.cons = OMAP_CONSOLE,
};
#ifdef CONFIG_PM_SLEEP
static int serial_omap_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
uart_suspend_port(&serial_omap_reg, &up->port);
flush_work(&up->qos_work);
return 0;
}
static int serial_omap_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
#endif
static void omap_serial_fill_features_erratas(struct uart_omap_port *up)
{
u32 mvr, scheme;
u16 revision, major, minor;
mvr = serial_in(up, UART_OMAP_MVER);
/* Check revision register scheme */
scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
switch (scheme) {
case 0: /* Legacy Scheme: OMAP2/3 */
/* MINOR_REV[0:4], MAJOR_REV[4:7] */
major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >>
OMAP_UART_LEGACY_MVR_MAJ_SHIFT;
minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK);
break;
case 1:
/* New Scheme: OMAP4+ */
/* MINOR_REV[0:5], MAJOR_REV[8:10] */
major = (mvr & OMAP_UART_MVR_MAJ_MASK) >>
OMAP_UART_MVR_MAJ_SHIFT;
minor = (mvr & OMAP_UART_MVR_MIN_MASK);
break;
default:
dev_warn(up->dev,
"Unknown %s revision, defaulting to highest\n",
up->name);
/* highest possible revision */
major = 0xff;
minor = 0xff;
}
/* normalize revision for the driver */
revision = UART_BUILD_REVISION(major, minor);
switch (revision) {
case OMAP_UART_REV_46:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
break;
case OMAP_UART_REV_52:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
break;
case OMAP_UART_REV_63:
up->errata |= UART_ERRATA_i202_MDR1_ACCESS;
break;
default:
break;
}
}
static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
{
struct omap_uart_port_info *omap_up_info;
omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
if (!omap_up_info)
return NULL; /* out of memory */
of_property_read_u32(dev->of_node, "clock-frequency",
&omap_up_info->uartclk);
return omap_up_info;
}
static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
struct resource *mem, *irq;
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
int ret;
if (pdev->dev.of_node)
omap_up_info = of_get_uart_port_info(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
return -ENODEV;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "no irq resource?\n");
return -ENODEV;
}
if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
pdev->dev.driver->name)) {
dev_err(&pdev->dev, "memory region already claimed\n");
return -EBUSY;
}
if (gpio_is_valid(omap_up_info->DTR_gpio) &&
omap_up_info->DTR_present) {
ret = gpio_request(omap_up_info->DTR_gpio, "omap-serial");
if (ret < 0)
return ret;
ret = gpio_direction_output(omap_up_info->DTR_gpio,
omap_up_info->DTR_inverted);
if (ret < 0)
return ret;
}
up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
if (!up)
return -ENOMEM;
if (gpio_is_valid(omap_up_info->DTR_gpio) &&
omap_up_info->DTR_present) {
up->DTR_gpio = omap_up_info->DTR_gpio;
up->DTR_inverted = omap_up_info->DTR_inverted;
} else
up->DTR_gpio = -EINVAL;
up->DTR_active = 0;
up->dev = &pdev->dev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
up->port.irq = irq->start;
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
if (pdev->dev.of_node)
up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
else
up->port.line = pdev->id;
if (up->port.line < 0) {
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
up->port.line);
ret = -ENODEV;
goto err_port_line;
}
up->pins = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(up->pins)) {
dev_warn(&pdev->dev, "did not get pins for uart%i error: %li\n",
up->port.line, PTR_ERR(up->pins));
up->pins = NULL;
}
sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
up->port.membase = devm_ioremap(&pdev->dev, mem->start,
resource_size(mem));
if (!up->port.membase) {
dev_err(&pdev->dev, "can't ioremap UART\n");
ret = -ENOMEM;
goto err_ioremap;
}
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev, "No clock speed specified: using default:"
"%d\n", DEFAULT_CLK_SPEED);
}
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
pm_qos_add_request(&up->pm_qos_request,
PM_QOS_CPU_DMA_LATENCY, up->latency);
serial_omap_uart_wq = create_singlethread_workqueue(up->name);
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
platform_set_drvdata(pdev, up);
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev,
omap_up_info->autosuspend_timeout);
pm_runtime_irq_safe(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(up);
ui[up->port.line] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto err_add_port;
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
return 0;
err_add_port:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
err_ioremap:
err_port_line:
dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
pdev->id, __func__, ret);
return ret;
}
static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
pm_qos_remove_request(&up->pm_qos_request);
return 0;
}
/*
 * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
 * Accessing a UART register right after an MDR1 access can
 * cause the UART to corrupt data.
 *
 * The required delay is
 * 5 L4 clock cycles + 5 UART functional clock cycles (~0.2us @ 48MHz);
 * we allow ten times that.
 */
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
{
u8 timeout = 255;
serial_out(up, UART_OMAP_MDR1, mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
/*
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
* TX_FIFO_E bit is 1.
*/
while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
(UART_LSR_THRE | UART_LSR_DR))) {
timeout--;
if (!timeout) {
/* Should *never* happen. We warn and carry on. */
dev_crit(up->dev, "Errata i202: timed out %x\n",
serial_in(up, UART_LSR));
break;
}
udelay(1);
}
}
#ifdef CONFIG_PM_RUNTIME
static void serial_omap_restore_context(struct uart_omap_port *up)
{
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
else
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
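/* set the enhanced control bit (ECB) to unlock the enhanced
 * feature bits in IER/FCR/MCR before reprogramming */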
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, 0x0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_DLL, up->dll);
serial_out(up, UART_DLM, up->dlh);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_OMAP_SCR, up->scr);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
}
static int serial_omap_runtime_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
struct omap_uart_port_info *pdata = dev->platform_data;
if (!up)
return -EINVAL;
if (!pdata)
return 0;
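/*
 * Snapshot the context-loss count so runtime resume can tell
 * whether the power domain lost context and needs a restore.
 */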
up->context_loss_cnt = serial_omap_get_context_loss_count(up);
if (device_may_wakeup(dev)) {
if (!up->wakeups_enabled) {
serial_omap_enable_wakeup(up, true);
up->wakeups_enabled = true;
}
} else {
if (up->wakeups_enabled) {
serial_omap_enable_wakeup(up, false);
up->wakeups_enabled = false;
}
}
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
schedule_work(&up->qos_work);
return 0;
}
static int serial_omap_runtime_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
int loss_cnt = serial_omap_get_context_loss_count(up);
if (loss_cnt < 0) {
dev_err(dev, "serial_omap_get_context_loss_count failed : %d\n",
loss_cnt);
serial_omap_restore_context(up);
} else if (up->context_loss_cnt != loss_cnt) {
serial_omap_restore_context(up);
}
up->latency = up->calc_latency;
schedule_work(&up->qos_work);
return 0;
}
#endif
static const struct dev_pm_ops serial_omap_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
serial_omap_runtime_resume, NULL)
};
#if defined(CONFIG_OF)
static const struct of_device_id omap_serial_of_match[] = {
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
{ .compatible = "ti,omap4-uart" },
{},
};
MODULE_DEVICE_TABLE(of, omap_serial_of_match);
#endif
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &serial_omap_dev_pm_ops,
.of_match_table = of_match_ptr(omap_serial_of_match),
},
};
static int __init serial_omap_init(void)
{
int ret;
ret = uart_register_driver(&serial_omap_reg);
if (ret != 0)
return ret;
ret = platform_driver_register(&serial_omap_driver);
if (ret != 0)
uart_unregister_driver(&serial_omap_reg);
return ret;
}
static void __exit serial_omap_exit(void)
{
platform_driver_unregister(&serial_omap_driver);
uart_unregister_driver(&serial_omap_reg);
}
module_init(serial_omap_init);
module_exit(serial_omap_exit);
MODULE_DESCRIPTION("OMAP High Speed UART driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
| gpl-2.0 |
CyanogenMod/lge-kernel-gproj | drivers/scsi/nsp32.c | 3819 | 91227 | /*
* NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver
* Copyright (C) 2001, 2002, 2003
* YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
* GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* Revision History:
* 1.0: Initial Release.
* 1.1: Add /proc SDTR status.
* Remove obsolete error handler nsp32_reset.
* Some clean up.
* 1.2: PowerPC (big endian) support.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include "nsp32.h"
/***********************************************************************
* Module parameters
*/
static int trans_mode = 0; /* default: BIOS */
module_param (trans_mode, int, 0);
MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M)");
#define ASYNC_MODE 1
#define ULTRA20M_MODE 2
static bool auto_param = 0; /* default: ON */
module_param (auto_param, bool, 0);
MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)");
static bool disc_priv = 1; /* default: OFF */
module_param (disc_priv, bool, 0);
MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))");
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>");
MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module");
MODULE_LICENSE("GPL");
static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
static struct pci_device_id nsp32_pci_table[] __devinitdata = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_IODATA,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_KME,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_KME,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_WORKBIT,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_WORKBIT_STANDARD,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_WORKBIT,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_LOGITEC,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_LOGITEC,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_MELCO,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_MELCO,
},
{0,0,},
};
MODULE_DEVICE_TABLE(pci, nsp32_pci_table);
static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */
/*
* Period/AckWidth speed conversion table
*
* Note: This period/ackwidth speed table must be in descending order.
*/
static nsp32_sync_table nsp32_sync_table_40M[] = {
/* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */
{0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */
{0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */
{0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */
{0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */
{0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */
{0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */
{0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */
{0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */
{0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */
};
static nsp32_sync_table nsp32_sync_table_20M[] = {
{0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */
{0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */
{0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */
{0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */
{0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */
{0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */
{0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */
{0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */
{0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */
};
static nsp32_sync_table nsp32_sync_table_pci[] = {
{0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */
{0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */
{0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */
{0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */
{0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */
{0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */
{0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */
{0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */
{0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */
};
/*
* function declaration
*/
/* module entry point */
static int __devinit nsp32_probe (struct pci_dev *, const struct pci_device_id *);
static void __devexit nsp32_remove(struct pci_dev *);
static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
static int nsp32_detect (struct pci_dev *pdev);
static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static const char *nsp32_info (struct Scsi_Host *);
static int nsp32_release (struct Scsi_Host *);
/* SCSI error handler */
static int nsp32_eh_abort (struct scsi_cmnd *);
static int nsp32_eh_bus_reset (struct scsi_cmnd *);
static int nsp32_eh_host_reset(struct scsi_cmnd *);
/* generate SCSI message */
static void nsp32_build_identify(struct scsi_cmnd *);
static void nsp32_build_nop (struct scsi_cmnd *);
static void nsp32_build_reject (struct scsi_cmnd *);
static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
/* SCSI message handler */
static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
static void nsp32_msgout_occur (struct scsi_cmnd *);
static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
static int nsp32_setup_sg_table (struct scsi_cmnd *);
static int nsp32_selection_autopara(struct scsi_cmnd *);
static int nsp32_selection_autoscsi(struct scsi_cmnd *);
static void nsp32_scsi_done (struct scsi_cmnd *);
static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
/* SCSI SDTR */
static void nsp32_analyze_sdtr (struct scsi_cmnd *);
static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
/* SCSI bus status handler */
static void nsp32_wait_req (nsp32_hw_data *, int);
static void nsp32_wait_sack (nsp32_hw_data *, int);
static void nsp32_sack_assert (nsp32_hw_data *);
static void nsp32_sack_negate (nsp32_hw_data *);
static void nsp32_do_bus_reset(nsp32_hw_data *);
/* hardware interrupt handler */
static irqreturn_t do_nsp32_isr(int, void *);
/* initialize hardware */
static int nsp32hw_init(nsp32_hw_data *);
/* EEPROM handler */
static int nsp32_getprom_param (nsp32_hw_data *);
static int nsp32_getprom_at24 (nsp32_hw_data *);
static int nsp32_getprom_c16 (nsp32_hw_data *);
static void nsp32_prom_start (nsp32_hw_data *);
static void nsp32_prom_stop (nsp32_hw_data *);
static int nsp32_prom_read (nsp32_hw_data *, int);
static int nsp32_prom_read_bit (nsp32_hw_data *);
static void nsp32_prom_write_bit(nsp32_hw_data *, int);
static void nsp32_prom_set (nsp32_hw_data *, int, int);
static int nsp32_prom_get (nsp32_hw_data *, int);
/* debug/warning/info message */
static void nsp32_message (const char *, int, char *, char *, ...);
#ifdef NSP32_DEBUG
static void nsp32_dmessage(const char *, int, int, char *, ...);
#endif
/*
* max_sectors is currently limited to 128.
*/
static struct scsi_host_template nsp32_template = {
.proc_name = "nsp32",
.name = "Workbit NinjaSCSI-32Bi/UDE",
.proc_info = nsp32_proc_info,
.info = nsp32_info,
.queuecommand = nsp32_queuecommand,
.can_queue = 1,
.sg_tablesize = NSP32_SG_SIZE,
.max_sectors = 128,
.cmd_per_lun = 1,
.this_id = NSP32_HOST_SCSIID,
.use_clustering = DISABLE_CLUSTERING,
.eh_abort_handler = nsp32_eh_abort,
.eh_bus_reset_handler = nsp32_eh_bus_reset,
.eh_host_reset_handler = nsp32_eh_host_reset,
/* .highmem_io = 1, */
};
#include "nsp32_io.h"
/***********************************************************************
* debug, error print
*/
#ifndef NSP32_DEBUG
# define NSP32_DEBUG_MASK 0x000000
# define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args)
# define nsp32_dbg(mask, args...) /* */
#else
# define NSP32_DEBUG_MASK 0xffffff
# define nsp32_msg(type, args...) \
nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
#define NSP32_DEBUG_REGISTER BIT(1)
#define NSP32_DEBUG_AUTOSCSI BIT(2)
#define NSP32_DEBUG_INTR BIT(3)
#define NSP32_DEBUG_SGLIST BIT(4)
#define NSP32_DEBUG_BUSFREE BIT(5)
#define NSP32_DEBUG_CDB_CONTENTS BIT(6)
#define NSP32_DEBUG_RESELECTION BIT(7)
#define NSP32_DEBUG_MSGINOCCUR BIT(8)
#define NSP32_DEBUG_EEPROM BIT(9)
#define NSP32_DEBUG_MSGOUTOCCUR BIT(10)
#define NSP32_DEBUG_BUSRESET BIT(11)
#define NSP32_DEBUG_RESTART BIT(12)
#define NSP32_DEBUG_SYNC BIT(13)
#define NSP32_DEBUG_WAIT BIT(14)
#define NSP32_DEBUG_TARGETFLAG BIT(15)
#define NSP32_DEBUG_PROC BIT(16)
#define NSP32_DEBUG_INIT BIT(17)
#define NSP32_SPECIAL_PRINT_REGISTER BIT(20)
#define NSP32_DEBUG_BUF_LEN 100
static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
{
va_list args;
char buf[NSP32_DEBUG_BUF_LEN];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
#ifndef NSP32_DEBUG
printk("%snsp32: %s\n", type, buf);
#else
printk("%snsp32: %s (%d): %s\n", type, func, line, buf);
#endif
}
#ifdef NSP32_DEBUG
static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
{
va_list args;
char buf[NSP32_DEBUG_BUF_LEN];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (mask & NSP32_DEBUG_MASK) {
printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf);
}
}
#endif
#ifdef NSP32_DEBUG
# include "nsp32_debug.c"
#else
# define show_command(arg) /* */
# define show_busphase(arg) /* */
# define show_autophase(arg) /* */
#endif
/*
* IDENTIFY Message
*/
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
int pos = data->msgout_len;
int mode = FALSE;
/* XXX: Auto DiscPriv detection is still a work in progress... */
if (disc_priv == 0) {
/* mode = TRUE; */
}
data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++;
data->msgout_len = pos;
}
/*
* SDTR Message Routine
*/
static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
unsigned char period,
unsigned char offset)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
int pos = data->msgout_len;
data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR; pos++;
data->msgoutbuf[pos] = period; pos++;
data->msgoutbuf[pos] = offset; pos++;
data->msgout_len = pos;
}
/*
* No Operation Message
*/
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
int pos = data->msgout_len;
if (pos != 0) {
nsp32_msg(KERN_WARNING,
"Some messages are already contained!");
return;
}
data->msgoutbuf[pos] = NOP; pos++;
data->msgout_len = pos;
}
/*
* Reject Message
*/
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
int pos = data->msgout_len;
data->msgoutbuf[pos] = MESSAGE_REJECT; pos++;
data->msgout_len = pos;
}
/*
* timer
*/
#if 0
static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time)
{
unsigned int base = SCpnt->host->io_port;
nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time);
if (time & (~TIMER_CNT_MASK)) {
nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow");
}
nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK);
}
#endif
/*
* set SCSI command and other parameter to asic, and start selection phase
*/
static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
unsigned int host_id = SCpnt->device->host->this_id;
unsigned char target = scmd_id(SCpnt);
nsp32_autoparam *param = data->autoparam;
unsigned char phase;
int i, ret;
unsigned int msgout;
u16_le s;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
/*
* check bus free
*/
phase = nsp32_read1(base, SCSI_BUS_MONITOR);
if (phase != BUSMON_BUS_FREE) {
nsp32_msg(KERN_WARNING, "bus busy");
show_busphase(phase & BUSMON_PHASE_MASK);
SCpnt->result = DID_BUS_BUSY << 16;
return FALSE;
}
/*
* message out
*
* Note: If msgout_len is 1 - 3, fill scsi_msgout;
* more than 3 messages requires another routine.
*/
if (data->msgout_len == 0) {
nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
SCpnt->result = DID_ERROR << 16;
return FALSE;
} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
msgout = 0;
for (i = 0; i < data->msgout_len; i++) {
/*
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
* MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
}
msgout |= MV_VALID; /* MV valid */
msgout |= (unsigned int)data->msgout_len; /* len */
} else {
/* data->msgout_len > 3 */
msgout = 0;
}
// nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
* setup asic parameter
*/
memset(param, 0, sizeof(nsp32_autoparam));
/* cdb */
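/* the auto-parameter area holds one CDB byte per 32-bit slot, hence the stride of 4 */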
for (i = 0; i < SCpnt->cmd_len; i++) {
param->cdb[4 * i] = SCpnt->cmnd[i];
}
/* outgoing messages */
param->msgout = cpu_to_le32(msgout);
/* syncreg, ackwidth, target id, SREQ sampling rate */
param->syncreg = data->cur_target->syncreg;
param->ackwidth = data->cur_target->ackwidth;
param->target_id = BIT(host_id) | BIT(target);
param->sample_reg = data->cur_target->sample_reg;
// nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg);
/* command control */
param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
AUTOSCSI_START |
AUTO_MSGIN_00_OR_04 |
AUTO_MSGIN_02 |
AUTO_ATN );
/* transfer control */
s = 0;
switch (data->trans_method) {
case NSP32_TRANSFER_BUSMASTER:
s |= BM_START;
break;
case NSP32_TRANSFER_MMIO:
s |= CB_MMIO_MODE;
break;
case NSP32_TRANSFER_PIO:
s |= CB_IO_MODE;
break;
default:
nsp32_msg(KERN_ERR, "unknown trans_method");
break;
}
/*
* With BLIND_MODE OR-ed in, FIFO interrupts are reduced at the
* cost of PCI bus waits. For bus master transfer it is left out.
*/
s |= (TRANSFER_GO | ALL_COUNTER_CLR);
param->transfer_control = cpu_to_le16(s);
/* sg table addr */
param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr);
/*
* transfer parameter to ASIC
*/
nsp32_write4(base, SGT_ADR, data->auto_paddr);
nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
AUTO_PARAMETER );
/*
* Check arbitration
*/
ret = nsp32_arbitration(SCpnt, base);
return ret;
}
/*
* Selection with AUTO SCSI (without AUTO PARAMETER)
*/
static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
unsigned int host_id = SCpnt->device->host->this_id;
unsigned char target = scmd_id(SCpnt);
unsigned char phase;
int status;
unsigned short command = 0;
unsigned int msgout = 0;
unsigned short execph;
int i;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
/*
* IRQ disable
*/
nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
/*
* check bus line
*/
phase = nsp32_read1(base, SCSI_BUS_MONITOR);
if ((phase & BUSMON_BSY) || (phase & BUSMON_SEL)) {
nsp32_msg(KERN_WARNING, "bus busy");
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
goto out;
}
/*
* clear execph
*/
execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);
/*
* clear FIFO counter to set CDBs
*/
nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER);
/*
* set CDB0 - CDB15
*/
for (i = 0; i < SCpnt->cmd_len; i++) {
nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
}
nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);
/*
* set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
*/
nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));
/*
* set SCSI MSGOUT REG
*
* Note: If msgout_len is 1 - 3, fill scsi_msgout;
* more than 3 messages requires another routine.
*/
if (data->msgout_len == 0) {
nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
SCpnt->result = DID_ERROR << 16;
status = 1;
goto out;
} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
msgout = 0;
for (i = 0; i < data->msgout_len; i++) {
/*
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
* MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
}
msgout |= MV_VALID; /* MV valid */
msgout |= (unsigned int)data->msgout_len; /* len */
nsp32_write4(base, SCSI_MSG_OUT, msgout);
} else {
/* data->msgout_len > 3 */
nsp32_write4(base, SCSI_MSG_OUT, 0);
}
/*
* set selection timeout(= 250ms)
*/
nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
* set SREQ hazard killer sampling rate
*
* TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
* check other internal clocks!
*/
nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);
/*
* clear Arbit
*/
nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);
/*
* set SYNCREG
* Don't set BM_START_ADR before setting this register.
*/
nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);
/*
* set ACKWIDTH
*/
nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);
nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
"syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
data->msgout_len, msgout);
/*
* set SGT ADDR (physical address)
*/
nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);
/*
* set TRANSFER CONTROL REG
*/
command = 0;
command |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
if (scsi_bufflen(SCpnt) > 0) {
command |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
command |= CB_MMIO_MODE;
} else if (data->trans_method & NSP32_TRANSFER_PIO) {
command |= CB_IO_MODE;
}
nsp32_write2(base, TRANSFER_CONTROL, command);
/*
* start AUTO SCSI, kick off arbitration
*/
command = (CLEAR_CDB_FIFO_POINTER |
AUTOSCSI_START |
AUTO_MSGIN_00_OR_04 |
AUTO_MSGIN_02 |
AUTO_ATN );
nsp32_write2(base, COMMAND_CONTROL, command);
/*
* Check arbitration
*/
status = nsp32_arbitration(SCpnt, base);
out:
/*
* IRQ enable
*/
nsp32_write2(base, IRQ_CONTROL, 0);
return status;
}
/*
* Arbitration Status Check
*
* Note: The arbitration status is polled until ARBIT_GO clears.
* Using udelay(1) consumes CPU and system time, but the SCSI
* specification defines a minimum arbitration delay of 2.4us,
* so udelay works as a coarse-grained wait timer.
*/
static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
{
unsigned char arbit;
int status = TRUE;
int time = 0;
do {
arbit = nsp32_read1(base, ARBIT_STATUS);
time++;
} while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
(time <= ARBIT_TIMEOUT_TIME));
nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
"arbit: 0x%x, delay time: %d", arbit, time);
if (arbit & ARBIT_WIN) {
/* Arbitration succeeded */
SCpnt->result = DID_OK << 16;
nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
} else if (arbit & ARBIT_FAIL) {
/* Arbitration failed */
SCpnt->result = DID_BUS_BUSY << 16;
status = FALSE;
} else {
/*
* unknown error or ARBIT_GO timeout,
* something lock up! guess no connection.
*/
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
SCpnt->result = DID_NO_CONNECT << 16;
status = FALSE;
}
/*
* clear Arbit
*/
nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);
return status;
}
/*
* reselection
*
* Note: This reselection routine is called from msgin_occur;
* the reselection target id & lun must already be set.
* SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
*/
static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int host_id = SCpnt->device->host->this_id;
unsigned int base = SCpnt->device->host->io_port;
unsigned char tmpid, newid;
nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");
/*
* calculate reselected SCSI ID
*/
tmpid = nsp32_read1(base, RESELECT_ID);
tmpid &= (~BIT(host_id));
newid = 0;
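/* the lowest remaining set bit is the reselecting target's ID */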
while (tmpid) {
if (tmpid & 1) {
break;
}
tmpid >>= 1;
newid++;
}
/*
 * If the reselected ID:LUN does not exist, or no nexus is
 * currently established for it, an unexpected reselection
 * has occurred. Send a reject message.
 */
if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
nsp32_msg(KERN_WARNING, "unknown id/lun");
return FALSE;
} else if(data->lunt[newid][newlun].SCpnt == NULL) {
nsp32_msg(KERN_WARNING, "no SCSI command is processing");
return FALSE;
}
data->cur_id = newid;
data->cur_lun = newlun;
data->cur_target = &(data->target[newid]);
data->cur_lunt = &(data->lunt[newid][newlun]);
/* reset SACK/SavedACK counter (or ALL clear?) */
nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
return TRUE;
}
/*
* nsp32_setup_sg_table - build scatter gather list for transfer data
* with bus master.
*
* Note: The NinjaSCSI-32Bi/UDE bus master cannot transfer more than 64KB at a time.
*/
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
struct scatterlist *sg;
nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
int num, i;
u32_le l;
if (sgt == NULL) {
nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
return FALSE;
}
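/* scsi_dma_map() returns 0 for no data, a negative value on
 * mapping failure, or the number of mapped segments */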
num = scsi_dma_map(SCpnt);
if (!num)
return TRUE;
else if (num < 0)
return FALSE;
else {
scsi_for_each_sg(SCpnt, sg, num, i) {
/*
* Build nsp32_sglist, substitute sg dma addresses.
*/
sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
sgt[i].len = cpu_to_le32(sg_dma_len(sg));
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
"can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
return FALSE;
}
nsp32_dbg(NSP32_DEBUG_SGLIST,
"num 0x%x : addr 0x%lx len 0x%lx",
i,
le32_to_cpu(sgt[i].addr),
le32_to_cpu(sgt[i].len ));
}
/* set end mark */
l = le32_to_cpu(sgt[num-1].len);
sgt[num-1].len = cpu_to_le32(l | SGTEND);
}
return TRUE;
}
static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target;
nsp32_lunt *cur_lunt;
int ret;
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
data->CurrentSC = NULL;
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
return 0;
}
/* check target ID is not same as this initiator ID */
if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???");
SCpnt->result = DID_BAD_TARGET << 16;
done(SCpnt);
return 0;
}
/* check target LUN is allowable value */
if (SCpnt->device->lun >= MAX_LUN) {
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun");
SCpnt->result = DID_BAD_TARGET << 16;
done(SCpnt);
return 0;
}
show_command(SCpnt);
SCpnt->scsi_done = done;
data->CurrentSC = SCpnt;
SCpnt->SCp.Status = CHECK_CONDITION;
SCpnt->SCp.Message = 0;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
SCpnt->SCp.buffer = NULL;
SCpnt->SCp.buffers_residual = 0;
/* initialize data */
data->msgout_len = 0;
data->msgin_len = 0;
cur_lunt = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]);
cur_lunt->SCpnt = SCpnt;
cur_lunt->save_datp = 0;
cur_lunt->msgin03 = FALSE;
data->cur_lunt = cur_lunt;
data->cur_id = SCpnt->device->id;
data->cur_lun = SCpnt->device->lun;
ret = nsp32_setup_sg_table(SCpnt);
if (ret == FALSE) {
nsp32_msg(KERN_ERR, "SGT fail");
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return 0;
}
/* Build IDENTIFY */
nsp32_build_identify(SCpnt);
/*
* If target is the first time to transfer after the reset
* (target don't have SDTR_DONE and SDTR_INITIATOR), sync
* message SDTR is needed to do synchronous transfer.
*/
target = &data->target[scmd_id(SCpnt)];
data->cur_target = target;
if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) {
unsigned char period, offset;
if (trans_mode != ASYNC_MODE) {
nsp32_set_max_sync(data, target, &period, &offset);
nsp32_build_sdtr(SCpnt, period, offset);
target->sync_flag |= SDTR_INITIATOR;
} else {
nsp32_set_async(data, target);
target->sync_flag |= SDTR_DONE;
}
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"SDTR: entry: %d start_period: 0x%x offset: 0x%x\n",
target->limit_entry, period, offset);
} else if (target->sync_flag & SDTR_INITIATOR) {
/*
* We were negotiating SDTR with the target, initiated from
* our side, but there was no chance to clear this flag.
* Fall back to async since negotiation never completed.
*/
nsp32_set_async(data, target);
target->sync_flag &= ~SDTR_INITIATOR;
target->sync_flag |= SDTR_DONE;
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"SDTR_INITIATOR: fall back to async");
} else if (target->sync_flag & SDTR_TARGET) {
/*
 * We were negotiating SDTR with the target (sent from the
 * target), but never got the chance to clear this flag.
 * Fall back to async since negotiation did not complete properly.
 */
nsp32_set_async(data, target);
target->sync_flag &= ~SDTR_TARGET;
target->sync_flag |= SDTR_DONE;
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"Unknown SDTR from target is reached, fall back to async.");
}
nsp32_dbg(NSP32_DEBUG_TARGETFLAG,
"target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x",
SCpnt->device->id, target->sync_flag, target->syncreg,
target->ackwidth);
/* Selection */
if (auto_param == 0) {
ret = nsp32_selection_autopara(SCpnt);
} else {
ret = nsp32_selection_autoscsi(SCpnt);
}
if (ret != TRUE) {
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail");
nsp32_scsi_done(SCpnt);
}
return 0;
}
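/*
 * DEF_SCSI_QCMD() generates nsp32_queuecommand() as a wrapper that
 * takes the host lock and then calls nsp32_queuecommand_lck() above.
 */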
static DEF_SCSI_QCMD(nsp32_queuecommand)
/* initialize asic */
static int nsp32hw_init(nsp32_hw_data *data)
{
unsigned int base = data->BaseAddress;
unsigned short irq_stat;
unsigned long lc_reg;
unsigned char power;
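/*
 * Presumably the upper byte of CFG_LATE_CACHE holds the PCI latency
 * timer: if it reads zero, the code below sets it to 0x20 so bus
 * master bursts are not cut short (this reading of the register
 * layout is an assumption based on the code, not the datasheet).
 */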
lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE);
if ((lc_reg & 0xff00) == 0) {
lc_reg |= (0x20 << 8);
nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
}
nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
nsp32_write2(base, TRANSFER_CONTROL, 0);
nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
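/* Drain any interrupt status bits left pending from before init. */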
do {
irq_stat = nsp32_read2(base, IRQ_STATUS);
nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat);
} while (irq_stat & IRQSTATUS_ANY_IRQ);
/*
 * Set FIFO_FULL_SHLD and FIFO_EMPTY_SHLD. The parameters below
 * are the ones designated by the specification.
 */
if ((data->trans_method & NSP32_TRANSFER_PIO) ||
(data->trans_method & NSP32_TRANSFER_MMIO)) {
nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x40);
nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40);
} else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x10);
nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60);
} else {
nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode");
}
nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x",
nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT),
nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));
nsp32_index_write1(base, CLOCK_DIV, data->clock);
nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disabled */
/*
 * initialize MISC_WRRD register
 *
 * Note: the designated parameters are set as follows:
 * MISC_SCSI_DIRECTION_DETECTOR_SELECT: must be set.
 * MISC_MASTER_TERMINATION_SELECT: must be set.
 * MISC_BMREQ_NEGATE_TIMING_SEL: should be set.
 * MISC_AUTOSEL_TIMING_SEL: should be set.
 * MISC_BMSTOP_CHANGE2_NONDATA_PHASE: should be set.
 * MISC_DELAYED_BMSTART: selected for safety.
 *
 * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then we have
 * to write 0 to TRANSFERCONTROL_BM_START and set an appropriate
 * value before restarting a bus master transfer.
 */
nsp32_index_write2(base, MISC_WR,
(SCSI_DIRECTION_DETECTOR_SELECT |
DELAYED_BMSTART |
MASTER_TERMINATION_SELECT |
BMREQ_NEGATE_TIMING_SEL |
AUTOSEL_TIMING_SEL |
BMSTOP_CHANGE2_NONDATA_PHASE));
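/*
 * Termination power: after clearing TERM_PWR_CONTROL, if the SENSE
 * bit reads back low (no external termination power detected),
 * supply it from the board via BPWR.
 */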
nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
power = nsp32_index_read1(base, TERM_PWR_CONTROL);
if (!(power & SENSE)) {
nsp32_msg(KERN_INFO, "term power on");
nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR);
}
nsp32_write2(base, TIMER_SET, TIMER_STOP);
nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */
nsp32_write1(base, SYNC_REG, 0);
nsp32_write1(base, ACK_WIDTH, 0);
nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
* enable to select designated IRQ (except for
* IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
*/
nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ |
IRQSELECT_SCSIRESET_IRQ |
IRQSELECT_FIFO_SHLD_IRQ |
IRQSELECT_RESELECT_IRQ |
IRQSELECT_PHASE_CHANGE_IRQ |
IRQSELECT_AUTO_SCSI_SEQ_IRQ |
// IRQSELECT_BMCNTERR_IRQ |
IRQSELECT_TARGET_ABORT_IRQ |
IRQSELECT_MASTER_ABORT_IRQ );
nsp32_write2(base, IRQ_CONTROL, 0);
/* PCI LED off */
nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF);
nsp32_index_write1(base, EXT_PORT, LED_OFF);
return TRUE;
}
/* interrupt routine */
static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
{
nsp32_hw_data *data = dev_id;
unsigned int base = data->BaseAddress;
struct scsi_cmnd *SCpnt = data->CurrentSC;
unsigned short auto_stat, irq_stat, trans_stat;
unsigned char busmon, busphase;
unsigned long flags;
int ret;
int handled = 0;
struct Scsi_Host *host = data->Host;
spin_lock_irqsave(host->host_lock, flags);
/*
* IRQ check, then enable IRQ mask
*/
irq_stat = nsp32_read2(base, IRQ_STATUS);
nsp32_dbg(NSP32_DEBUG_INTR,
"enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
/* does this interrupt come from the Ninja ASIC? */
if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
goto out2;
}
handled = 1;
nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
busmon = nsp32_read1(base, SCSI_BUS_MONITOR);
busphase = busmon & BUSMON_PHASE_MASK;
trans_stat = nsp32_read2(base, TRANSFER_STATUS);
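/*
 * All-ones reads from both status registers indicate that the
 * (CardBus) card has been pulled; clean up and bail out.
 */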
if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) {
nsp32_msg(KERN_INFO, "card disconnect");
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_INFO, "clean up current SCSI command");
SCpnt->result = DID_BAD_TARGET << 16;
nsp32_scsi_done(SCpnt);
}
goto out;
}
/* Timer IRQ */
if (irq_stat & IRQSTATUS_TIMER_IRQ) {
nsp32_dbg(NSP32_DEBUG_INTR, "timer stop");
nsp32_write2(base, TIMER_SET, TIMER_STOP);
goto out;
}
/* SCSI reset */
if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) {
nsp32_msg(KERN_INFO, "detected someone do bus reset");
nsp32_do_bus_reset(data);
if (SCpnt != NULL) {
SCpnt->result = DID_RESET << 16;
nsp32_scsi_done(SCpnt);
}
goto out;
}
if (SCpnt == NULL) {
nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
goto out;
}
/*
 * AutoSCSI Interrupt.
 * Note: This interrupt occurs when AutoSCSI has finished. Check
 * SCSI_EXECUTE_PHASE and take the appropriate action. Each phase
 * is recorded while the AutoSCSI sequencer runs.
 */
if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) {
/* getting SCSI executed phase */
auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE);
nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
/* Selection Timeout, go busfree phase. */
if (auto_stat & SELECTION_TIMEOUT) {
nsp32_dbg(NSP32_DEBUG_INTR,
"selection timeout occurred");
SCpnt->result = DID_TIME_OUT << 16;
nsp32_scsi_done(SCpnt);
goto out;
}
if (auto_stat & MSGOUT_PHASE) {
/*
 * The MsgOut phase was processed.
 * If MSG_IN_OCCUER is not set, the MsgOut phase has completed
 * and msgout_len must be reset. Otherwise there is nothing to
 * do here; when MSG_OUT_OCCUER occurs, that condition is
 * handled in its own branch below.
 */
if (!(auto_stat & MSG_IN_OCCUER) &&
(data->msgout_len <= 3)) {
/*
* !MSG_IN_OCCUER && msgout_len <=3
* ---> AutoSCSI with MSGOUTreg is processed.
*/
data->msgout_len = 0;
}
nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
}
if ((auto_stat & DATA_IN_PHASE) &&
(scsi_get_resid(SCpnt) > 0) &&
((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
printk( "auto+fifo\n");
//nsp32_pio_read(SCpnt);
}
if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) {
/* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */
nsp32_dbg(NSP32_DEBUG_INTR,
"Data in/out phase processed");
/* read BMCNT, SGT pointer addr */
nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
nsp32_read4(base, BM_CNT));
nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
nsp32_read4(base, SGT_ADR));
nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
nsp32_read4(base, SACK_CNT));
nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
nsp32_read4(base, SAVED_SACK_CNT));
scsi_set_resid(SCpnt, 0); /* all data transferred! */
}
/*
* MsgIn Occur
*/
if (auto_stat & MSG_IN_OCCUER) {
nsp32_msgin_occur(SCpnt, irq_stat, auto_stat);
}
/*
* MsgOut Occur
*/
if (auto_stat & MSG_OUT_OCCUER) {
nsp32_msgout_occur(SCpnt);
}
/*
* Bus Free Occur
*/
if (auto_stat & BUS_FREE_OCCUER) {
ret = nsp32_busfree_occur(SCpnt, auto_stat);
if (ret == TRUE) {
goto out;
}
}
if (auto_stat & STATUS_PHASE) {
/*
 * Read the CSB and store it in SCpnt->result to save the
 * status byte from the status phase.
 * The scsi error handler checks host_byte (DID_*: set by the
 * low level driver to indicate status), then checks
 * status_byte (the SCSI status byte).
 */
SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
}
if (auto_stat & ILLEGAL_PHASE) {
/* An illegal phase was detected. SACK has not been returned yet. */
nsp32_msg(KERN_WARNING,
"AUTO SCSI ILLEGAL PHASE OCCUR!!!!");
/* TODO: currently we don't have any action... bus reset? */
/*
* To send back SACK, assert, wait, and negate.
*/
nsp32_sack_assert(data);
nsp32_wait_req(data, NEGATE);
nsp32_sack_negate(data);
}
if (auto_stat & COMMAND_PHASE) {
/* nothing to do */
nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed");
}
if (auto_stat & AUTOSCSI_BUSY) {
/* AutoSCSI is running */
}
show_autophase(auto_stat);
}
/* FIFO_SHLD_IRQ */
if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) {
nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ");
switch(busphase) {
case BUSPHASE_DATA_OUT:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write");
//nsp32_pio_write(SCpnt);
break;
case BUSPHASE_DATA_IN:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read");
//nsp32_pio_read(SCpnt);
break;
case BUSPHASE_STATUS:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
break;
default:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
show_busphase(busphase);
break;
}
goto out;
}
/* Phase Change IRQ */
if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) {
nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ");
switch(busphase) {
case BUSPHASE_MESSAGE_IN:
nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in");
nsp32_msgin_occur(SCpnt, irq_stat, 0);
break;
default:
nsp32_msg(KERN_WARNING, "phase chg/other phase?");
nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n",
irq_stat, trans_stat);
show_busphase(busphase);
break;
}
goto out;
}
/* PCI_IRQ */
if (irq_stat & IRQSTATUS_PCI_IRQ) {
nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred");
/* Do nothing */
}
/* BMCNTERR_IRQ */
if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) {
nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
/*
* TODO: To be implemented improving bus master
* transfer reliability when BMCNTERR is occurred in
* AutoSCSI phase described in specification.
*/
}
#if 0
nsp32_dbg(NSP32_DEBUG_INTR,
"irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
show_busphase(busphase);
#endif
out:
/* disable IRQ mask */
nsp32_write2(base, IRQ_CONTROL, 0);
out2:
spin_unlock_irqrestore(host->host_lock, flags);
nsp32_dbg(NSP32_DEBUG_INTR, "exit");
return IRQ_RETVAL(handled);
}
#undef SPRINTF
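/*
 * SPRINTF appends formatted text to the /proc output buffer,
 * bounded by the caller-supplied length.
 */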
#define SPRINTF(args...) \
do { \
if(length > (pos - buffer)) { \
pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \
nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
} \
} while(0)
static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
off_t offset, int length, int inout)
{
char *pos = buffer;
int thislength;
unsigned long flags;
nsp32_hw_data *data;
int hostno;
unsigned int base;
unsigned char mode_reg;
int id, speed;
long model;
/* Write is not supported, just return. */
if (inout == TRUE) {
return -EINVAL;
}
hostno = host->host_no;
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
SPRINTF("NinjaSCSI-32 status\n\n");
SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
SPRINTF("SCSI host No.: %d\n", hostno);
SPRINTF("IRQ: %d\n", host->irq);
SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
SPRINTF("sg_tablesize: %d\n", host->sg_tablesize);
SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
mode_reg = nsp32_index_read1(base, CHIP_MODE);
model = data->pci_devid->driver_data;
#ifdef CONFIG_PM
SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
#endif
SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
spin_lock_irqsave(&(data->Lock), flags);
SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC);
spin_unlock_irqrestore(&(data->Lock), flags);
SPRINTF("SDTR status\n");
for (id = 0; id < ARRAY_SIZE(data->target); id++) {
SPRINTF("id %d: ", id);
if (id == host->this_id) {
SPRINTF("----- NinjaSCSI-32 host adapter\n");
continue;
}
if (data->target[id].sync_flag == SDTR_DONE) {
if (data->target[id].period == 0 &&
data->target[id].offset == ASYNC_OFFSET ) {
SPRINTF("async");
} else {
SPRINTF(" sync");
}
} else {
SPRINTF(" none");
}
if (data->target[id].period != 0) {
speed = 1000000 / (data->target[id].period * 4);
SPRINTF(" transfer %d.%dMB/s, offset %d",
speed / 1000,
speed % 1000,
data->target[id].offset
);
}
SPRINTF("\n");
}
thislength = pos - (buffer + offset);
if(thislength < 0) {
*start = NULL;
return 0;
}
thislength = min(thislength, length);
*start = buffer + offset;
return thislength;
}
#undef SPRINTF
/*
* Reset parameters and call scsi_done for data->cur_lunt.
* Be careful setting SCpnt->result = DID_* before calling this function.
*/
static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
scsi_dma_unmap(SCpnt);
/*
* clear TRANSFERCONTROL_BM_START
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
nsp32_write4(base, BM_CNT, 0);
/*
* call scsi_done
*/
(*SCpnt->scsi_done)(SCpnt);
/*
* reset parameters
*/
data->cur_lunt->SCpnt = NULL;
data->cur_lunt = NULL;
data->cur_target = NULL;
data->CurrentSC = NULL;
}
/*
* Bus Free Occur
*
 * The current phase is BUSFREE. AutoSCSI automatically executes the
 * BUSFREE phase, replying with ACK, when one of these conditions is met:
 * MsgIn 00: Command Complete.
 * MsgIn 02: Save Data Pointer.
 * MsgIn 04: Disconnect.
 * In any other case, an unexpected BUSFREE has been detected.
*/
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
show_autophase(execph);
nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, TRANSFER_CONTROL, 0);
/*
 * MsgIn 02: Save Data Pointer
 *
 * VALID:
 * Save Data Pointer was received. Adjust the pointer.
 *
 * NOT VALID:
 * SCSI-3 says that if Save Data Pointer is not received,
 * processing restarts and no SCSI data pointer may be adjusted
 * in the next data phase.
 */
if (execph & MSGIN_02_VALID) {
nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid");
/*
* Check sack_cnt/saved_sack_cnt, then adjust sg table if
* needed.
*/
if (!(execph & MSGIN_00_VALID) &&
((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
unsigned int sacklen, s_sacklen;
/*
* Read SACK count and SAVEDSACK count, then compare.
*/
sacklen = nsp32_read4(base, SACK_CNT );
s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
/*
 * If SAVEDSACKCNT == 0, the Save Data Pointer arrived
 * without any data having been transferred.
 */
if (s_sacklen > 0) {
/*
 * Compare sack and savedsack to decide the
 * AutoMsgIn03 condition.
 *
 * If they are equal, set msgin03 = TRUE so that
 * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at
 * reselection. If they differ, set msgin03 = FALSE
 * so that COMMANDCONTROL_AUTO_MSGIN_03 is disabled
 * at reselection.
 */
if (sacklen != s_sacklen) {
data->cur_lunt->msgin03 = FALSE;
} else {
data->cur_lunt->msgin03 = TRUE;
}
nsp32_adjust_busfree(SCpnt, s_sacklen);
}
}
/* This value has not been replaced with a valid value yet... */
//data->cur_lunt->save_datp = data->cur_datp;
} else {
/*
* no processing.
*/
}
if (execph & MSGIN_03_VALID) {
/* MsgIn03 was processed by AutoSCSI; nothing more to do. */
}
/*
* target SDTR check
*/
if (data->cur_target->sync_flag & SDTR_INITIATOR) {
/*
 * SDTR negotiation initiated by us has not finished
 * yet. Fall back to ASYNC mode.
 */
nsp32_set_async(data, data->cur_target);
data->cur_target->sync_flag &= ~SDTR_INITIATOR;
data->cur_target->sync_flag |= SDTR_DONE;
} else if (data->cur_target->sync_flag & SDTR_TARGET) {
/*
 * SDTR negotiation initiated by the target was still
 * in progress.
 */
if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
/*
 * A valid message was received, so the
 * negotiation succeeded.
 */
} else {
/*
 * Conversely, an unexpected bus free occurred,
 * so the negotiation failed. Fall back to
 * ASYNC mode.
 */
nsp32_set_async(data, data->cur_target);
}
data->cur_target->sync_flag &= ~SDTR_TARGET;
data->cur_target->sync_flag |= SDTR_DONE;
}
/*
 * The SCSI standard guarantees that the initiator switches to
 * the Bus Free phase after receiving message 00 (Command
 * Complete) or 04 (Disconnect). That is why the processing
 * here is valid.
 */
if (execph & MSGIN_00_VALID) {
/* MsgIn 00: Command Complete */
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
SCpnt->SCp.Message = 0;
nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
SCpnt->SCp.Status, scsi_get_resid(SCpnt));
SCpnt->result = (DID_OK << 16) |
(SCpnt->SCp.Message << 8) |
(SCpnt->SCp.Status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
SCpnt->SCp.Message = 4;
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
} else {
/* Unexpected bus free */
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
/* DID_ERROR? */
//SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
}
return FALSE;
}
/*
* nsp32_adjust_busfree - adjusting SG table
*
 * Note: This driver adjusts the SG table using the SCSI ACK
 * counter instead of the BMCNT counter!
*/
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
int old_entry = data->cur_entry;
int new_entry;
int sg_num = data->cur_lunt->sg_num;
nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
unsigned int restlen, sentlen;
u32_le len, addr;
nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
/* adjust saved SACK count with 4 byte start address boundary */
s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
/*
 * Calculate new_entry from the sack count and each sgt[].len,
 * i.e. work out how many bytes were intended to be sent.
 */
sentlen = 0;
for (new_entry = old_entry; new_entry < sg_num; new_entry++) {
sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND);
if (sentlen > s_sacklen) {
break;
}
}
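/*
 * Worked example: with 0x1000-byte entries and s_sacklen = 0x1800,
 * the loop above stops at the second entry with sentlen = 0x2000;
 * restlen below becomes 0x800 and that entry's addr/len are advanced
 * to cover only the unsent tail.
 */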
/* all sgt is processed */
if (new_entry == sg_num) {
goto last;
}
if (sentlen == s_sacklen) {
/* XXX: confirm whether this is OK. It should be, because we
are at the head element of the sg here, so restlen is
calculated correctly below. */
}
/* calculate the rest length for transferring */
restlen = sentlen - s_sacklen;
/* update adjusting current SG table entry */
len = le32_to_cpu(sgt[new_entry].len);
addr = le32_to_cpu(sgt[new_entry].addr);
addr += (len - restlen);
sgt[new_entry].addr = cpu_to_le32(addr);
sgt[new_entry].len = cpu_to_le32(restlen);
/* set cur_entry with new_entry */
data->cur_entry = new_entry;
return;
last:
if (scsi_get_resid(SCpnt) < sentlen) {
nsp32_msg(KERN_ERR, "resid underflow");
}
scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
/* update hostdata and lun */
return;
}
/*
 * Called when a MsgOut phase occurs.
 * The NinjaSCSI-32Bi/UDE automatically processes up to 3 messages
 * in the message out phase. If there are more than 3 messages, the
 * HBA raises an interrupt and we have to process them by hand.
 */
static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
//unsigned short command;
long new_sgtp;
int i;
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
"enter: msgout_len: 0x%x", data->msgout_len);
/*
 * If a MsgOut phase occurs without any message pending,
 * send No Operation (SCSI-2).
 */
if (data->msgout_len == 0) {
nsp32_build_nop(SCpnt);
}
/*
 * Set SGTP ADDR to the current entry before restarting AUTOSCSI,
 * because SGTP has been incremented to the next entry.
 * The specification says very little about this...
 */
new_sgtp = data->cur_lunt->sglun_paddr +
(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
/*
* send messages
*/
for (i = 0; i < data->msgout_len; i++) {
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
"%d : 0x%x", i, data->msgoutbuf[i]);
/*
* Check REQ is asserted.
*/
nsp32_wait_req(data, ASSERT);
if (i == (data->msgout_len - 1)) {
/*
 * If this is the last message, set up the AutoSCSI restart
 * before sending back the ack. The AutoSCSI restart
 * automatically negates the ATN signal.
 */
//command = (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
//nsp32_restart_autoscsi(SCpnt, command);
nsp32_write2(base, COMMAND_CONTROL,
(CLEAR_CDB_FIFO_POINTER |
AUTO_COMMAND_PHASE |
AUTOSCSI_RESTART |
AUTO_MSGIN_00_OR_04 |
AUTO_MSGIN_02 ));
}
/*
 * Write the data with SACK, then wait until SACK is
 * automatically negated.
 */
nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]);
nsp32_wait_sack(data, NEGATE);
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
nsp32_read1(base, SCSI_BUS_MONITOR));
}
data->msgout_len = 0;
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit");
}
/*
* Restart AutoSCSI
*
 * Note: Restarting AutoSCSI requires setting:
* SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL
*/
static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = data->BaseAddress;
unsigned short transfer = 0;
nsp32_dbg(NSP32_DEBUG_RESTART, "enter");
if (data->cur_target == NULL || data->cur_lunt == NULL) {
nsp32_msg(KERN_ERR, "Target or Lun is invalid");
}
/*
* set SYNC_REG
* Don't set BM_START_ADR before setting this register.
*/
nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);
/*
* set ACKWIDTH
*/
nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);
/*
* set SREQ hazard killer sampling rate
*/
nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);
/*
* set SGT ADDR (physical address)
*/
nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);
/*
* set TRANSFER CONTROL REG
*/
transfer = 0;
transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
if (scsi_bufflen(SCpnt) > 0) {
transfer |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
transfer |= CB_MMIO_MODE;
} else if (data->trans_method & NSP32_TRANSFER_PIO) {
transfer |= CB_IO_MODE;
}
nsp32_write2(base, TRANSFER_CONTROL, transfer);
/*
* restart AutoSCSI
*
 * TODO: is COMMANDCONTROL_AUTO_COMMAND_PHASE needed?
*/
command |= (CLEAR_CDB_FIFO_POINTER |
AUTO_COMMAND_PHASE |
AUTOSCSI_RESTART );
nsp32_write2(base, COMMAND_CONTROL, command);
nsp32_dbg(NSP32_DEBUG_RESTART, "exit");
}
/*
 * Handle a Message-In that AutoSCSI cannot process automatically.
 */
static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
unsigned long irq_status,
unsigned short execph)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
unsigned char msg;
unsigned char msgtype;
unsigned char newlun;
unsigned short command = 0;
int msgclear = TRUE;
long new_sgtp;
int ret;
/*
 * Read the first message.
 * Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the
 * Message-In procedure has to be processed before sending back
 * the SCSI ACK.
 */
msg = nsp32_read1(base, SCSI_DATA_IN);
data->msginbuf[(unsigned char)data->msgin_len] = msg;
msgtype = data->msginbuf[0];
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR,
"enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x",
data->msgin_len, msg, msgtype);
/*
 * TODO: should we check that the bus phase is Message-In?
 */
/*
* assert SCSI ACK
*/
nsp32_sack_assert(data);
/*
* processing IDENTIFY
*/
if (msgtype & 0x80) {
if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) {
/* Invalid (non reselect) phase */
goto reject;
}
newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */
ret = nsp32_reselection(SCpnt, newlun);
if (ret == TRUE) {
goto restart;
} else {
goto reject;
}
}
/*
* processing messages except for IDENTIFY
*
* TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO.
*/
switch (msgtype) {
/*
* 1-byte message
*/
case COMMAND_COMPLETE:
case DISCONNECT:
/*
 * These messages should not occur here.
 * They should be processed by the AutoSCSI sequencer.
 */
nsp32_msg(KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: 0x%x", msg);
break;
case RESTORE_POINTERS:
/*
* AutoMsgIn03 is disabled, and HBA gets this message.
*/
if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) {
unsigned int s_sacklen;
s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) {
nsp32_adjust_busfree(SCpnt, s_sacklen);
} else {
/* No need to rewrite SGT */
}
}
data->cur_lunt->msgin03 = FALSE;
/* Update with the new value */
/* reset SACK/SavedACK counter (or ALL clear?) */
nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
/*
* set new sg pointer
*/
new_sgtp = data->cur_lunt->sglun_paddr +
(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
nsp32_write4(base, SGT_ADR, new_sgtp);
break;
case SAVE_POINTERS:
/*
 * This message should not occur here.
 * It should be processed by the AutoSCSI sequencer.
 */
nsp32_msg (KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
break;
case MESSAGE_REJECT:
/* If the previous message out was an SDTR and the target replies
with MESSAGE_REJECT, the SDTR negotiation has failed */
if (data->cur_target->sync_flag &
(SDTR_INITIATOR | SDTR_TARGET)) {
/*
 * The current target was negotiating SDTR, but it
 * failed. Fall back to async transfer mode and set
 * SDTR_DONE.
 */
nsp32_set_async(data, data->cur_target);
data->cur_target->sync_flag &= ~SDTR_INITIATOR;
data->cur_target->sync_flag |= SDTR_DONE;
}
break;
case LINKED_CMD_COMPLETE:
case LINKED_FLG_CMD_COMPLETE:
/* queue tag is not supported currently */
nsp32_msg (KERN_WARNING,
"unsupported message: 0x%x", msgtype);
break;
case INITIATE_RECOVERY:
/* starting ECA (Extended Contingent Allegiance) state. */
/* This message is obsolete in SPI-2 and later. */
goto reject;
/*
* 2-byte message
*/
case SIMPLE_QUEUE_TAG:
case 0x23:
/*
* 0x23: Ignore_Wide_Residue is not declared in scsi.h.
* No support is needed.
*/
if (data->msgin_len >= 1) {
goto reject;
}
/* we are at byte 1 of a 2-byte message */
msgclear = FALSE;
break;
/*
* extended message
*/
case EXTENDED_MESSAGE:
if (data->msgin_len < 1) {
/*
 * The current position has not reached byte 2 yet
 * (byte 2 holds the extended message length).
 */
msgclear = FALSE;
break;
}
if ((data->msginbuf[1] + 1) > data->msgin_len) {
/*
 * The complete extended message is msginbuf[1] + 2 bytes
 * (msgin_len counts from 0, hence buf[1] + 1 here).
 * If the message is not yet complete, continue
 * receiving it.
 */
msgclear = FALSE;
break;
}
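/*
 * Example: an SDTR extended message arrives as 01 03 01 <period>
 * <offset>, so msginbuf[1] is 3 and the message is complete once
 * msgin_len reaches 4 (bytes 0..4 received).
 */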
/*
 * Reaching here means the regular length of the given
 * extended message type has been received.
 */
switch (data->msginbuf[2]) {
case EXTENDED_MODIFY_DATA_POINTER:
/* TODO */
goto reject; /* not implemented yet */
break;
case EXTENDED_SDTR:
/*
* Exchange this message between initiator and target.
*/
if (data->msgin_len != EXTENDED_SDTR_LEN + 1) {
/*
 * An inappropriately sized SDTR message was received.
 */
goto reject;
break;
}
nsp32_analyze_sdtr(SCpnt);
break;
case EXTENDED_EXTENDED_IDENTIFY:
/* SCSI-I only, not supported. */
goto reject; /* not implemented yet */
break;
case EXTENDED_WDTR:
goto reject; /* not implemented yet */
break;
default:
goto reject;
}
break;
default:
goto reject;
}
restart:
if (msgclear == TRUE) {
data->msgin_len = 0;
/*
 * If AutoSCSI is restarted while there are still messages to
 * send out (msgout_len > 0), set AutoATN and set SCSIMSGOUT to 0
 * (MV_VALID = 0). When COMMAND_CONTROL is written with the
 * AutoSCSI restart, MsgOutOccur should then happen (though it is
 * unclear whether that situation can really arise...).
 */
if (data->msgout_len > 0) {
nsp32_write4(base, SCSI_MSG_OUT, 0);
command |= AUTO_ATN;
}
/*
* restart AutoSCSI
* If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed.
*/
command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
/*
 * If msgin03 is currently TRUE, turn the AUTO_MSGIN_03 flag on.
 */
if (data->cur_lunt->msgin03 == TRUE) {
command |= AUTO_MSGIN_03;
}
data->cur_lunt->msgin03 = FALSE;
} else {
data->msgin_len++;
}
/*
* restart AutoSCSI
*/
nsp32_restart_autoscsi(SCpnt, command);
/*
* wait SCSI REQ negate for REQ-ACK handshake
*/
nsp32_wait_req(data, NEGATE);
/*
* negate SCSI ACK
*/
nsp32_sack_negate(data);
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
return;
reject:
nsp32_msg(KERN_WARNING,
"invalid or unsupported MessageIn, rejected. "
"current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
msg, data->msgin_len, msgtype);
nsp32_build_reject(SCpnt);
data->msgin_len = 0;
goto restart;
}
/*
 * Analyze an SDTR message received from the target.
 */
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target = data->cur_target;
nsp32_sync_table *synct;
unsigned char get_period = data->msginbuf[3];
unsigned char get_offset = data->msginbuf[4];
int entry;
int syncnum;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
synct = data->synct;
syncnum = data->syncnum;
/*
 * If this initiator sent the SDTR message and the target responds
 * with SDTR, set the initiator's SYNCREG and ACKWIDTH from the SDTR
 * parameters; if the parameters are not appropriate, send back a
 * reject message. If the initiator did not send SDTR but the target
 * does, calculate appropriate parameters and send SDTR back.
 */
if (target->sync_flag & SDTR_INITIATOR) {
/*
 * The initiator sent SDTR and the target has responded
 * with its negotiated SDTR.
 */
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");
target->sync_flag &= ~SDTR_INITIATOR;
target->sync_flag |= SDTR_DONE;
/*
* offset:
*/
if (get_offset > SYNC_OFFSET) {
/*
 * Negotiation failed: the target sent back an
 * unexpected offset value.
 */
goto reject;
}
if (get_offset == ASYNC_OFFSET) {
/*
 * Negotiation succeeded: the target wants to fall
 * back to asynchronous transfer mode.
 */
goto async;
}
/*
* period:
* Check whether sync period is too short. If too short,
* fall back to async mode. If it's ok, then investigate
* the received sync period. If sync period is acceptable
* between sync table start_period and end_period, then
* set this I_T nexus as sent offset and period.
* If it's not acceptable, send back reject and fall back
* to async mode.
*/
if (get_period < data->synct[0].period_num) {
/*
 * Negotiation failed: the target sent back an
 * unexpected period value.
 */
goto reject;
}
entry = nsp32_search_period_entry(data, target, get_period);
if (entry < 0) {
/*
 * The target wants a period longer than the
 * NinjaSCSI-32Bi/UDE can accept.
 */
goto reject;
}
/*
* Set new sync table and offset in this I_T nexus.
*/
nsp32_set_sync_entry(data, target, entry, get_offset);
} else {
/* The target sent SDTR to the initiator. */
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");
target->sync_flag |= SDTR_INITIATOR;
/* offset: */
if (get_offset > SYNC_OFFSET) {
/* send back as SYNC_OFFSET */
get_offset = SYNC_OFFSET;
}
/* period: */
if (get_period < data->synct[0].period_num) {
get_period = data->synct[0].period_num;
}
entry = nsp32_search_period_entry(data, target, get_period);
if (get_offset == ASYNC_OFFSET || entry < 0) {
nsp32_set_async(data, target);
nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET);
} else {
nsp32_set_sync_entry(data, target, entry, get_offset);
nsp32_build_sdtr(SCpnt, get_period, get_offset);
}
}
target->period = get_period;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
return;
reject:
/*
 * The current message is unacceptable; send a reject message
 * back to the target.
 */
nsp32_build_reject(SCpnt);
async:
nsp32_set_async(data, target); /* set as ASYNC transfer mode */
target->period = 0;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async");
return;
}
/*
 * Search sync_table for the entry matching the given target and
 * period value. Returns a negative value if the search fails.
 */
static int nsp32_search_period_entry(nsp32_hw_data *data,
nsp32_target *target,
unsigned char period)
{
int i;
if (target->limit_entry >= data->syncnum) {
nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!");
target->limit_entry = 0;
}
for (i = target->limit_entry; i < data->syncnum; i++) {
if (period >= data->synct[i].start_period &&
period <= data->synct[i].end_period) {
break;
}
}
/*
 * If the given period value is beyond the sync_table range,
 * return a negative value to indicate failure.
 */
if (i == data->syncnum) {
i = -1;
}
return i;
}
/*
* target <-> initiator use ASYNC transfer
*/
static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target)
{
unsigned char period = data->synct[target->limit_entry].period_num;
target->offset = ASYNC_OFFSET;
target->period = 0;
target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET);
target->ackwidth = 0;
target->sample_reg = 0;
nsp32_dbg(NSP32_DEBUG_SYNC, "set async");
}
/*
* target <-> initiator use maximum SYNC transfer
*/
static void nsp32_set_max_sync(nsp32_hw_data *data,
nsp32_target *target,
unsigned char *period,
unsigned char *offset)
{
unsigned char period_num, ackwidth;
period_num = data->synct[target->limit_entry].period_num;
*period = data->synct[target->limit_entry].start_period;
ackwidth = data->synct[target->limit_entry].ackwidth;
*offset = SYNC_OFFSET;
target->syncreg = TO_SYNCREG(period_num, *offset);
target->ackwidth = ackwidth;
target->offset = *offset;
target->sample_reg = 0; /* disable SREQ sampling */
}
/*
* target <-> initiator use entry number speed
*/
static void nsp32_set_sync_entry(nsp32_hw_data *data,
nsp32_target *target,
int entry,
unsigned char offset)
{
unsigned char period, ackwidth, sample_rate;
period = data->synct[entry].period_num;
ackwidth = data->synct[entry].ackwidth;
sample_rate = data->synct[entry].sample_rate;
target->syncreg = TO_SYNCREG(period, offset);
target->ackwidth = ackwidth;
target->offset = offset;
target->sample_reg = sample_rate | SAMPLING_ENABLE;
nsp32_dbg(NSP32_DEBUG_SYNC, "set sync");
}
/*
 * Wait until SCSI REQ reaches the asserted or negated state.
 *
 * Note: When nsp32_msgin_occur is called, we assert SCSI ACK and the
 * connected target responds by negating SCSI REQ. We have to wait
 * until SCSI REQ is negated before negating SCSI ACK to complete the
 * REQ-ACK handshake.
 */
static void nsp32_wait_req(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
int wait_time = 0;
unsigned char bus, req_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
nsp32_msg(KERN_ERR, "unknown state designation");
}
/* REQ is BIT(5) */
req_bit = (state == ASSERT ? BUSMON_REQ : 0);
do {
bus = nsp32_read1(base, SCSI_BUS_MONITOR);
if ((bus & BUSMON_REQ) == req_bit) {
nsp32_dbg(NSP32_DEBUG_WAIT,
"wait_time: %d", wait_time);
return;
}
udelay(1);
wait_time++;
} while (wait_time < REQSACK_TIMEOUT_TIME);
nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", req_bit);
}
/*
 * Wait until SCSI ACK reaches the asserted or negated state.
 */
static void nsp32_wait_sack(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
int wait_time = 0;
unsigned char bus, ack_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
nsp32_msg(KERN_ERR, "unknown state designation");
}
/* ACK is BIT(4) */
ack_bit = (state == ASSERT ? BUSMON_ACK : 0);
do {
bus = nsp32_read1(base, SCSI_BUS_MONITOR);
if ((bus & BUSMON_ACK) == ack_bit) {
nsp32_dbg(NSP32_DEBUG_WAIT,
"wait_time: %d", wait_time);
return;
}
udelay(1);
wait_time++;
} while (wait_time < REQSACK_TIMEOUT_TIME);
nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", ack_bit);
}
/*
 * assert SCSI ACK
 *
 * Note: Asserting SCSI ACK requires ACKENB=1 and AUTODIRECTION=1.
 */
static void nsp32_sack_assert(nsp32_hw_data *data)
{
unsigned int base = data->BaseAddress;
unsigned char busctrl;
busctrl = nsp32_read1(base, SCSI_BUS_CONTROL);
busctrl |= (BUSCTL_ACK | AUTODIRECTION | ACKENB);
nsp32_write1(base, SCSI_BUS_CONTROL, busctrl);
}
/*
* negate SCSI ACK
*/
static void nsp32_sack_negate(nsp32_hw_data *data)
{
unsigned int base = data->BaseAddress;
unsigned char busctrl;
busctrl = nsp32_read1(base, SCSI_BUS_CONTROL);
busctrl &= ~BUSCTL_ACK;
nsp32_write1(base, SCSI_BUS_CONTROL, busctrl);
}
/*
* Note: n_io_port is defined as 0x7f because I/O register port is
* assigned as:
* 0x800-0x8ff: memory mapped I/O port
* 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
* 0xc00-0xfff: CardBus status registers
*/
static int nsp32_detect(struct pci_dev *pdev)
{
struct Scsi_Host *host; /* registered host structure */
struct resource *res;
nsp32_hw_data *data;
int ret;
int i, j;
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
/*
* register this HBA as SCSI device
*/
host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
if (host == NULL) {
nsp32_msg (KERN_ERR, "failed to scsi register");
goto err;
}
/*
* set nsp32_hw_data
*/
data = (nsp32_hw_data *)host->hostdata;
memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data));
host->irq = data->IrqNumber;
host->io_port = data->BaseAddress;
host->unique_id = data->BaseAddress;
host->n_io_port = data->NumAddress;
host->base = (unsigned long)data->MmioAddress;
data->Host = host;
spin_lock_init(&(data->Lock));
data->cur_lunt = NULL;
data->cur_target = NULL;
/*
* Bus master transfer mode is supported currently.
*/
data->trans_method = NSP32_TRANSFER_BUSMASTER;
/*
 * Set the clock divisor to CLOCK_4 (the HBA has its own external
 * clock, divided down to 100ns/4).
 * Only CLOCK_4 has been tested so far, not CLOCK_2/PCICLK.
 */
data->clock = CLOCK_4;
/*
* Select appropriate nsp32_sync_table and set I_CLOCKDIV.
*/
switch (data->clock) {
case CLOCK_4:
/* If data->clock is CLOCK_4, then select 40M sync table. */
data->synct = nsp32_sync_table_40M;
data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
break;
case CLOCK_2:
/* If data->clock is CLOCK_2, then select 20M sync table. */
data->synct = nsp32_sync_table_20M;
data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M);
break;
case PCICLK:
/* If data->clock is PCICLK, then select pci sync table. */
data->synct = nsp32_sync_table_pci;
data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci);
break;
default:
nsp32_msg(KERN_WARNING,
"Invalid clock div is selected, set CLOCK_4.");
/* Use default value CLOCK_4 */
data->clock = CLOCK_4;
data->synct = nsp32_sync_table_40M;
data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
}
/*
* setup nsp32_lunt
*/
/*
* setup DMA
*/
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
goto scsi_unregister;
}
/*
* allocate autoparam DMA resource.
*/
data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
if (data->autoparam == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto scsi_unregister;
}
/*
* allocate scatter-gather DMA resource.
*/
data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
&(data->sg_paddr));
if (data->sg_list == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto free_autoparam;
}
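/*
 * Carve the shared SG-table DMA area into one nsp32_sglun per
 * (target, LUN) pair, recording both the kernel virtual address and
 * the bus address of each slice.
 */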
for (i = 0; i < ARRAY_SIZE(data->lunt); i++) {
for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) {
int offset = i * ARRAY_SIZE(data->lunt[0]) + j;
nsp32_lunt tmp = {
.SCpnt = NULL,
.save_datp = 0,
.msgin03 = FALSE,
.sg_num = 0,
.cur_entry = 0,
.sglun = &(data->sg_list[offset]),
.sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)),
};
data->lunt[i][j] = tmp;
}
}
/*
* setup target
*/
for (i = 0; i < ARRAY_SIZE(data->target); i++) {
nsp32_target *target = &(data->target[i]);
target->limit_entry = 0;
target->sync_flag = 0;
nsp32_set_async(data, target);
}
/*
* EEPROM check
*/
ret = nsp32_getprom_param(data);
if (ret == FALSE) {
data->resettime = 3; /* default 3 */
}
/*
* setup HBA
*/
nsp32hw_init(data);
snprintf(data->info_str, sizeof(data->info_str),
"NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x",
host->irq, host->io_port, host->n_io_port);
/*
* SCSI bus reset
*
 * Note: It's important to reset the SCSI bus in the initialization
 * phase. The NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR
 * while the system is coming up, so SCSI devices connected to the
 * HBA are left in a non-asynchronous mode. That has the merit that
 * the HBA is ready to start synchronous transfers without any
 * preparation, but it makes it difficult for us to control the
 * transfer speed. It also ties the device transfer speed to the
 * EEPROM's start-up SDTR. The NinjaSCSI-32Bi/UDE has a feature
 * whereby, if the EEPROM is set to Auto Mode, FAST-10M is selected
 * when 4 or more SCSI devices are connected; depending on that
 * behavior should be avoided. Thus, resetting the SCSI bus restores
 * all connected SCSI devices to asynchronous mode; this driver then
 * sets SDTR safely later, and we can control the transfer mode of
 * every SCSI device.
*/
nsp32_do_bus_reset(data);
ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
if (ret < 0) {
nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
"SCSI PCI controller. Interrupt: %d", host->irq);
goto free_sg_list;
}
/*
* PCI IO register
*/
res = request_region(host->io_port, host->n_io_port, "nsp32");
if (res == NULL) {
nsp32_msg(KERN_ERR,
"I/O region 0x%lx+0x%lx is already used",
data->BaseAddress, data->NumAddress);
goto free_irq;
}
ret = scsi_add_host(host, &pdev->dev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to add scsi host");
goto free_region;
}
scsi_scan_host(host);
pci_set_drvdata(pdev, host);
return 0;
free_region:
release_region(host->io_port, host->n_io_port);
free_irq:
free_irq(host->irq, data);
free_sg_list:
pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
free_autoparam:
pci_free_consistent(pdev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
scsi_unregister:
scsi_host_put(host);
err:
return 1;
}
static int nsp32_release(struct Scsi_Host *host)
{
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
if (data->autoparam) {
pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
}
if (data->sg_list) {
pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
}
if (host->irq) {
free_irq(host->irq, data);
}
if (host->io_port && host->n_io_port) {
release_region(host->io_port, host->n_io_port);
}
if (data->MmioAddress) {
iounmap(data->MmioAddress);
}
return 0;
}
static const char *nsp32_info(struct Scsi_Host *shpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)shpnt->hostdata;
return data->info_str;
}
/****************************************************************************
* error handler
*/
static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
nsp32_msg(KERN_WARNING, "abort");
if (data->cur_lunt->SCpnt == NULL) {
nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed");
return FAILED;
}
if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) {
/* reset SDTR negotiation */
data->cur_target->sync_flag = 0;
nsp32_set_async(data, data->cur_target);
}
nsp32_write2(base, TRANSFER_CONTROL, 0);
nsp32_write4(base, BM_CNT, 0); /* BM_CNT is a 32-bit register */
SCpnt->result = DID_ABORT << 16;
nsp32_scsi_done(SCpnt);
nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success");
return SUCCESS;
}
static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
spin_lock_irq(SCpnt->device->host->host_lock);
nsp32_msg(KERN_INFO, "Bus Reset");
nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
nsp32_do_bus_reset(data);
nsp32_write2(base, IRQ_CONTROL, 0);
spin_unlock_irq(SCpnt->device->host->host_lock);
return SUCCESS; /* SCSI bus reset always succeeds. */
}
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
unsigned int base = data->BaseAddress;
unsigned short intrdat;
int i;
nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");
/*
* stop all transfer
* clear TRANSFERCONTROL_BM_START
* clear counter
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
nsp32_write4(base, BM_CNT, 0);
nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
/*
* fall back to asynchronous transfer mode
* initialize SDTR negotiation flag
*/
for (i = 0; i < ARRAY_SIZE(data->target); i++) {
nsp32_target *target = &data->target[i];
target->sync_flag = 0;
nsp32_set_async(data, target);
}
/*
* reset SCSI bus
*/
nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
udelay(RESET_HOLD_TIME);
nsp32_write1(base, SCSI_BUS_CONTROL, 0);
for(i = 0; i < 5; i++) {
intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
}
data->CurrentSC = NULL;
}
static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
{
struct Scsi_Host *host = SCpnt->device->host;
unsigned int base = SCpnt->device->host->io_port;
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
nsp32_msg(KERN_INFO, "Host Reset");
nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
spin_lock_irq(SCpnt->device->host->host_lock);
nsp32hw_init(data);
nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
nsp32_do_bus_reset(data);
nsp32_write2(base, IRQ_CONTROL, 0);
spin_unlock_irq(SCpnt->device->host->host_lock);
return SUCCESS; /* Host reset always succeeds. */
}
/**************************************************************************
* EEPROM handler
*/
/*
* getting EEPROM parameter
*/
static int nsp32_getprom_param(nsp32_hw_data *data)
{
int vendor = data->pci_devid->vendor;
int device = data->pci_devid->device;
int ret, val, i;
/*
* EEPROM checking.
*/
ret = nsp32_prom_read(data, 0x7e);
if (ret != 0x55) {
nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret);
return FALSE;
}
ret = nsp32_prom_read(data, 0x7f);
if (ret != 0xaa) {
nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret);
return FALSE;
}
/*
* check EEPROM type
*/
if (vendor == PCI_VENDOR_ID_WORKBIT &&
device == PCI_DEVICE_ID_WORKBIT_STANDARD) {
ret = nsp32_getprom_c16(data);
} else if (vendor == PCI_VENDOR_ID_WORKBIT &&
device == PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC) {
ret = nsp32_getprom_at24(data);
} else if (vendor == PCI_VENDOR_ID_WORKBIT &&
device == PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO ) {
ret = nsp32_getprom_at24(data);
} else {
nsp32_msg(KERN_WARNING, "Unknown EEPROM");
ret = FALSE;
}
/* for debug : SPROM data full checking */
for (i = 0; i <= 0x1f; i++) {
val = nsp32_prom_read(data, i);
nsp32_dbg(NSP32_DEBUG_EEPROM,
"rom address 0x%x : 0x%x", i, val);
}
return ret;
}
/*
* AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
*
* ROMADDR
* 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
* 0x07 : HBA Synchronous Transfer Period
* Value 0: AutoSync, 1: Manual Setting
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Bus Termination
* Value 0: Auto[ON], 1: ON, 2: OFF
* 0x11 : Not Used? (0)
* 0x12 : Bus Reset Delay Time (0x03)
* 0x13 : Bootable CD Support
* Value 0: Disable, 1: Enable
* 0x14 : Device Scan
* Bit 7 6 5 4 3 2 1 0
* | <----------------->
* | SCSI ID: Value 0: Skip, 1: YES
* |-> Value 0: ALL scan, Value 1: Manual
* 0x15 - 0x1b : Not Used? (0)
* 0x1c : Constant? (0x01) (clock div?)
* 0x1d - 0x7c : Not Used (0xff)
* 0x7d : Not Used? (0xff)
* 0x7e : Constant (0x55), Validity signature
* 0x7f : Constant (0xaa), Validity signature
*/
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
int ret, i;
int auto_sync;
nsp32_target *target;
int entry;
/*
* Reset time which is designated by EEPROM.
*
* TODO: Not used yet.
*/
data->resettime = nsp32_prom_read(data, 0x12);
/*
* HBA Synchronous Transfer Period
*
 * Note: auto_sync = 0: auto, 1: manual. The Ninja SCSI HBA spec says
 * that if auto_sync is 0 (auto) and 3 or fewer SCSI devices are
 * connected, the transfer speed is set to ULTRA-20M; conversely, if
 * 4 or more SCSI devices are connected, the transfer speed is set to
 * FAST-10M.
 *
 * This driver breaks that rule: the number of connected SCSI devices
 * is simply ignored. If auto_sync is 0 (auto), the transfer speed is
 * forced to ULTRA-20M.
*/
ret = nsp32_prom_read(data, 0x07);
switch (ret) {
case 0:
auto_sync = TRUE;
break;
case 1:
auto_sync = FALSE;
break;
default:
nsp32_msg(KERN_WARNING,
"Unsupported Auto Sync value; falling back to auto mode.");
auto_sync = TRUE;
}
if (trans_mode == ULTRA20M_MODE) {
auto_sync = TRUE;
}
/*
* each device Synchronous Transfer Period
*/
for (i = 0; i < NSP32_HOST_SCSIID; i++) {
target = &data->target[i];
if (auto_sync == TRUE) {
target->limit_entry = 0; /* set as ULTRA20M */
} else {
ret = nsp32_prom_read(data, i);
entry = nsp32_search_period_entry(data, target, ret);
if (entry < 0) {
/* search failed... set maximum speed */
entry = 0;
}
target->limit_entry = entry;
}
}
return TRUE;
}
/*
* C16 110 (I-O Data: SC-NBD) data map:
*
* ROMADDR
* 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
* 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Transfer Mode
 * Value 0: PIO, 1: Busmaster
* 0x11 : Bus Reset Delay Time (0x00-0x20)
* 0x12 : Bus Termination
* Value 0: Disable, 1: Enable
* 0x13 - 0x19 : Disconnection
* Value 0: Disable, 1: Enable
* 0x1a - 0x7c : Not Used? (0)
* 0x7d : Not Used? (0xf8)
* 0x7e : Constant (0x55), Validity signature
* 0x7f : Constant (0xaa), Validity signature
*/
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
int ret, i;
nsp32_target *target;
int entry, val;
/*
* Reset time which is designated by EEPROM.
*
* TODO: Not used yet.
*/
data->resettime = nsp32_prom_read(data, 0x11);
/*
* each device Synchronous Transfer Period
*/
for (i = 0; i < NSP32_HOST_SCSIID; i++) {
target = &data->target[i];
ret = nsp32_prom_read(data, i);
switch (ret) {
case 0: /* 20MB/s */
val = 0x0c;
break;
case 1: /* 10MB/s */
val = 0x19;
break;
case 2: /* 5MB/s */
val = 0x32;
break;
case 3: /* ASYNC */
val = 0x00;
break;
default: /* default 20MB/s */
val = 0x0c;
break;
}
entry = nsp32_search_period_entry(data, target, val);
if (entry < 0 || trans_mode == ULTRA20M_MODE) {
/* search failed... set maximum speed */
entry = 0;
}
target->limit_entry = entry;
}
return TRUE;
}
/*
 * Atmel AT24C01A (driven at 5V) serial EEPROM routines
*/
static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
{
int i, val;
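/*
 * Bit-banged I2C "random read": a dummy write selects the word
 * address, then a repeated START plus a read transfer fetches one
 * byte from the AT24 EEPROM.
 */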
/* start condition */
nsp32_prom_start(data);
/* device address */
nsp32_prom_write_bit(data, 1); /* 1 */
nsp32_prom_write_bit(data, 0); /* 0 */
nsp32_prom_write_bit(data, 1); /* 1 */
nsp32_prom_write_bit(data, 0); /* 0 */
nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */
nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */
nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */
/* R/W: W for dummy write */
nsp32_prom_write_bit(data, 0);
/* ack */
nsp32_prom_write_bit(data, 0);
/* word address */
for (i = 7; i >= 0; i--) {
nsp32_prom_write_bit(data, ((romaddr >> i) & 1));
}
/* ack */
nsp32_prom_write_bit(data, 0);
/* start condition */
nsp32_prom_start(data);
/* device address */
nsp32_prom_write_bit(data, 1); /* 1 */
nsp32_prom_write_bit(data, 0); /* 0 */
nsp32_prom_write_bit(data, 1); /* 1 */
nsp32_prom_write_bit(data, 0); /* 0 */
nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */
nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */
nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */
/* R/W: R */
nsp32_prom_write_bit(data, 1);
/* ack */
nsp32_prom_write_bit(data, 0);
/* data... */
val = 0;
for (i = 7; i >= 0; i--) {
val += (nsp32_prom_read_bit(data) << i);
}
/* no ack */
nsp32_prom_write_bit(data, 1);
/* stop condition */
nsp32_prom_stop(data);
return val;
}
static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val)
{
int base = data->BaseAddress;
int tmp;
tmp = nsp32_index_read1(base, SERIAL_ROM_CTL);
if (val == 0) {
tmp &= ~bit;
} else {
tmp |= bit;
}
nsp32_index_write1(base, SERIAL_ROM_CTL, tmp);
udelay(10);
}
static int nsp32_prom_get(nsp32_hw_data *data, int bit)
{
int base = data->BaseAddress;
int tmp, ret;
if (bit != SDA) {
nsp32_msg(KERN_ERR, "return value is not appropriate");
return 0;
}
tmp = nsp32_index_read1(base, SERIAL_ROM_CTL) & bit;
if (tmp == 0) {
ret = 0;
} else {
ret = 1;
}
udelay(10);
return ret;
}
static void nsp32_prom_start (nsp32_hw_data *data)
{
/* start condition */
nsp32_prom_set(data, SCL, 1);
nsp32_prom_set(data, SDA, 1);
nsp32_prom_set(data, ENA, 1); /* output mode */
nsp32_prom_set(data, SDA, 0); /* SDA going 1->0 while SCL is
* held high is the start condition */
nsp32_prom_set(data, SCL, 0);
}
static void nsp32_prom_stop (nsp32_hw_data *data)
{
/* stop condition */
nsp32_prom_set(data, SCL, 1);
nsp32_prom_set(data, SDA, 0);
nsp32_prom_set(data, ENA, 1); /* output mode */
nsp32_prom_set(data, SDA, 1);
nsp32_prom_set(data, SCL, 0);
}
static void nsp32_prom_write_bit(nsp32_hw_data *data, int val)
{
/* write */
nsp32_prom_set(data, SDA, val);
nsp32_prom_set(data, SCL, 1 );
nsp32_prom_set(data, SCL, 0 );
}
static int nsp32_prom_read_bit(nsp32_hw_data *data)
{
int val;
/* read */
nsp32_prom_set(data, ENA, 0); /* input mode */
nsp32_prom_set(data, SCL, 1);
val = nsp32_prom_get(data, SDA);
nsp32_prom_set(data, SCL, 0);
nsp32_prom_set(data, ENA, 1); /* output mode */
return val;
}
/**************************************************************************
* Power Management
*/
#ifdef CONFIG_PM
/* Device suspended */
static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host);
pci_save_state (pdev);
pci_disable_device (pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
/* Device woken up */
static int nsp32_resume(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
unsigned short reg;
nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake (pdev, PCI_D0, 0);
pci_restore_state (pdev);
reg = nsp32_read2(data->BaseAddress, INDEX_REG);
nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg);
if (reg == 0xffff) {
nsp32_msg(KERN_INFO, "missing device. abort resume.");
return 0;
}
nsp32hw_init (data);
nsp32_do_bus_reset(data);
nsp32_msg(KERN_INFO, "resume success");
return 0;
}
#endif
/************************************************************************
* PCI/Cardbus probe/remove routine
*/
static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
nsp32_hw_data *data = &nsp32_data_base;
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
ret = pci_enable_device(pdev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to enable pci device");
return ret;
}
data->Pci = pdev;
data->pci_devid = id;
data->IrqNumber = pdev->irq;
data->BaseAddress = pci_resource_start(pdev, 0);
data->NumAddress = pci_resource_len (pdev, 0);
data->MmioAddress = pci_ioremap_bar(pdev, 1);
data->MmioLength = pci_resource_len (pdev, 1);
pci_set_master(pdev);
ret = nsp32_detect(pdev);
nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
pdev->irq,
data->MmioAddress, data->MmioLength,
pci_name(pdev),
nsp32_model[id->driver_data]);
nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret);
return ret;
}
static void __devexit nsp32_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
scsi_remove_host(host);
nsp32_release(host);
scsi_host_put(host);
}
static struct pci_driver nsp32_driver = {
.name = "nsp32",
.id_table = nsp32_pci_table,
.probe = nsp32_probe,
.remove = __devexit_p(nsp32_remove),
#ifdef CONFIG_PM
.suspend = nsp32_suspend,
.resume = nsp32_resume,
#endif
};
/*********************************************************************
 * Module entry point
*/
static int __init init_nsp32(void) {
nsp32_msg(KERN_INFO, "loading...");
return pci_register_driver(&nsp32_driver);
}
static void __exit exit_nsp32(void) {
nsp32_msg(KERN_INFO, "unloading...");
pci_unregister_driver(&nsp32_driver);
}
module_init(init_nsp32);
module_exit(exit_nsp32);
/* end */
| gpl-2.0 |
estiko/kernel_smartfren_d5c | arch/mn10300/kernel/smp.c | 4075 | 28249 | /* SMP support routines.
*
* Copyright (C) 2006-2008 Panasonic Corporation
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/cacheflush.h>
static unsigned long sleep_mode[NR_CPUS];
static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Debug Message function
*/
#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
/*
* Structure and data for smp_nmi_call_function().
*/
struct nmi_call_data_struct {
smp_call_func_t func;
void *info;
cpumask_t started;
cpumask_t finished;
int wait;
char size_alignment[0]
__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
static DEFINE_SPINLOCK(smp_nmi_call_lock);
static struct nmi_call_data_struct *nmi_call_data;
/*
* Data structures and variables
*/
static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */
cpumask_t cpu_boot_map; /* Bitmask of boot APs */
unsigned long start_stack[NR_CPUS - 1];
/*
* Per CPU parameters
*/
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
static int cpucount; /* The count of boot CPUs */
static cpumask_t smp_commenced_mask;
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
/*
* Function Prototypes
*/
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);
/*
* IPI Initialization interrupt definitions
*/
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_chip_disable(struct irq_data *d);
static void mn10300_ipi_chip_enable(struct irq_data *d);
static void mn10300_ipi_ack(struct irq_data *d);
static void mn10300_ipi_nop(struct irq_data *d);
static struct irq_chip mn10300_ipi_type = {
.name = "cpu_ipi",
.irq_disable = mn10300_ipi_chip_disable,
.irq_enable = mn10300_ipi_chip_enable,
.irq_ack = mn10300_ipi_ack,
.irq_eoi = mn10300_ipi_nop
};
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
static struct irqaction reschedule_ipi = {
.handler = smp_reschedule_interrupt,
.name = "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
.handler = smp_call_function_interrupt,
.name = "smp call function IPI"
};
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
.handler = smp_ipi_timer_interrupt,
.flags = IRQF_DISABLED,
.name = "smp local timer IPI"
};
#endif
/**
* init_ipi - Initialise the IPI mechanism
*/
static void init_ipi(void)
{
unsigned long flags;
u16 tmp16;
/* set up the reschedule IPI */
irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
handle_percpu_irq);
setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
mn10300_ipi_enable(RESCHEDULE_IPI);
/* set up the call function IPI */
irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
handle_percpu_irq);
setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
handle_percpu_irq);
setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif
#ifdef CONFIG_MN10300_CACHE_ENABLED
/* set up the cache flush IPI */
flags = arch_local_cli_save();
__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
mn10300_low_ipi_handler);
GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
mn10300_ipi_enable(FLUSH_CACHE_IPI);
arch_local_irq_restore(flags);
#endif
/* set up the NMI call function IPI */
flags = arch_local_cli_save();
GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
arch_local_irq_restore(flags);
/* set up the SMP boot IPI */
flags = arch_local_cli_save();
__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
mn10300_low_ipi_handler);
arch_local_irq_restore(flags);
}
/**
* mn10300_ipi_shutdown - Shut down handling of an IPI
* @irq: The IPI to be shut down.
*/
static void mn10300_ipi_shutdown(unsigned int irq)
{
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
tmp = GxICR(irq);
arch_local_irq_restore(flags);
}
/**
* mn10300_ipi_enable - Enable an IPI
* @irq: The IPI to be enabled.
*/
static void mn10300_ipi_enable(unsigned int irq)
{
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
tmp = GxICR(irq);
arch_local_irq_restore(flags);
}
static void mn10300_ipi_chip_enable(struct irq_data *d)
{
mn10300_ipi_enable(d->irq);
}
/**
* mn10300_ipi_disable - Disable an IPI
* @irq: The IPI to be disabled.
*/
static void mn10300_ipi_disable(unsigned int irq)
{
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
tmp = GxICR(irq);
GxICR(irq) = tmp & GxICR_LEVEL;
tmp = GxICR(irq);
arch_local_irq_restore(flags);
}
static void mn10300_ipi_chip_disable(struct irq_data *d)
{
mn10300_ipi_disable(d->irq);
}
/**
* mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
* @irq: The IPI to be acknowledged.
*
* Clear the interrupt detection flag for the IPI on the appropriate interrupt
* channel in the PIC.
*/
static void mn10300_ipi_ack(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
GxICR_u8(irq) = GxICR_DETECT;
tmp = GxICR(irq);
arch_local_irq_restore(flags);
}
/**
* mn10300_ipi_nop - Dummy IPI action
* @irq: The IPI to be acted upon.
*/
static void mn10300_ipi_nop(struct irq_data *d)
{
}
/**
* send_IPI_mask - Send IPIs to all CPUs in list
* @cpumask: The list of CPUs to target.
* @irq: The IPI request to be sent.
*
* Send the specified IPI to all the CPUs in the list, not waiting for them to
* finish before returning. The caller is responsible for synchronisation if
* that is needed.
*/
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
int i;
u16 tmp;
for (i = 0; i < NR_CPUS; i++) {
if (cpumask_test_cpu(i, cpumask)) {
/* send IPI */
tmp = CROSS_GxICR(irq, i);
CROSS_GxICR(irq, i) =
tmp | GxICR_REQUEST | GxICR_DETECT;
tmp = CROSS_GxICR(irq, i); /* flush write buffer */
}
}
}
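/*
 * Illustrative sketch (not part of the original file): how a caller
 * builds an explicit mask for send_IPI_mask().  The CPU numbers are
 * arbitrary; RESCHEDULE_IPI is the same vector used elsewhere here.
 */
static inline void example_kick_two_cpus(void)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	cpumask_set_cpu(0, &mask);
	cpumask_set_cpu(1, &mask);

	/* raise the reschedule IPI on CPUs 0 and 1, without waiting */
	send_IPI_mask(&mask, RESCHEDULE_IPI);
}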
/**
* send_IPI_self - Send an IPI to this CPU.
* @irq: The IPI request to be sent.
*
* Send the specified IPI to the current CPU.
*/
void send_IPI_self(int irq)
{
send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}
/**
* send_IPI_allbutself - Send IPIs to all the other CPUs.
* @irq: The IPI request to be sent.
*
* Send the specified IPI to all CPUs in the system barring the current one,
* not waiting for them to finish before returning. The caller is responsible
* for synchronisation if that is needed.
*/
void send_IPI_allbutself(int irq)
{
cpumask_t cpumask;
cpumask_copy(&cpumask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &cpumask);
send_IPI_mask(&cpumask, irq);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
BUG();
/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}
void arch_send_call_function_single_ipi(int cpu)
{
send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}
/**
* smp_send_reschedule - Send reschedule IPI to a CPU
* @cpu: The CPU to target.
*/
void smp_send_reschedule(int cpu)
{
send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}
/**
* smp_nmi_call_function - Send a call function NMI IPI to all CPUs
* @func: The function to ask to be run.
* @info: The context data to pass to that function.
* @wait: If true, wait (atomically) until function is run on all CPUs.
*
* Send a non-maskable request to all CPUs in the system, requesting them to
* run the specified function with the given context data, and, potentially, to
* wait for completion of that function on all CPUs.
*
* Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
* timeout.
*/
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
struct nmi_call_data_struct data;
unsigned long flags;
unsigned int cnt;
int cpus, ret = 0;
cpus = num_online_cpus() - 1;
if (cpus < 1)
return 0;
data.func = func;
data.info = info;
cpumask_copy(&data.started, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &data.started);
data.wait = wait;
if (wait)
data.finished = data.started;
spin_lock_irqsave(&smp_nmi_call_lock, flags);
nmi_call_data = &data;
smp_mb();
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
/* Wait for response */
if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
for (cnt = 0;
cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
!cpumask_empty(&data.started);
cnt++)
mdelay(1);
if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
for (cnt = 0;
cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
!cpumask_empty(&data.finished);
cnt++)
mdelay(1);
}
if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
ret = -ETIMEDOUT;
} else {
/* If timeout value is zero, wait until cpumask has been
* cleared */
while (!cpumask_empty(&data.started))
barrier();
if (wait)
while (!cpumask_empty(&data.finished))
barrier();
}
spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
return ret;
}
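/*
 * Minimal usage sketch (added, hypothetical helpers): every other online
 * CPU runs nmi_ping() and, because wait is non-zero, the caller spins
 * until all of them have finished.  Note that with the compile-time
 * timeout of 0 configured above, the call busy-waits rather than failing.
 */
static void nmi_ping(void *info)
{
	printk(KERN_INFO "CPU#%d answered the NMI call\n", smp_processor_id());
}

static inline void example_nmi_broadcast(void)
{
	if (smp_nmi_call_function(nmi_ping, NULL, 1) < 0)
		printk(KERN_WARNING "NMI call function timed out\n");
}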
/**
* smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
*
* Send a non-maskable request to all other CPUs in the system, instructing
* them to jump into the debugger. The caller is responsible for checking that
* the other CPUs responded to the instruction.
*
* The caller should make sure that this CPU's debugger IPI is disabled.
*/
void smp_jump_to_debugger(void)
{
if (num_online_cpus() > 1)
/* Send a message to all other CPUs */
send_IPI_allbutself(DEBUGGER_NMI_IPI);
}
/**
* stop_this_cpu - Callback to stop a CPU.
* @unused: Callback context (ignored).
*/
void stop_this_cpu(void *unused)
{
static volatile int stopflag;
unsigned long flags;
#ifdef CONFIG_GDBSTUB
/* If another CPU single-steps us while it processes smp_send_stop,
 * clear procindebug to avoid a deadlock.
 */
atomic_set(&procindebug[smp_processor_id()], 0);
#endif /* CONFIG_GDBSTUB */
flags = arch_local_cli_save();
set_cpu_online(smp_processor_id(), false);
while (!stopflag)
cpu_relax();
set_cpu_online(smp_processor_id(), true);
arch_local_irq_restore(flags);
}
/**
* smp_send_stop - Send a stop request to all CPUs.
*/
void smp_send_stop(void)
{
smp_nmi_call_function(stop_this_cpu, NULL, 0);
}
/**
* smp_reschedule_interrupt - Reschedule IPI handler
* @irq: The interrupt number.
* @dev_id: The device ID.
*
* Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
*/
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
/**
* smp_call_function_interrupt - Call function IPI handler
* @irq: The interrupt number.
* @dev_id: The device ID.
*
* Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
*/
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
/* generic_smp_call_function_interrupt(); */
generic_smp_call_function_single_interrupt();
return IRQ_HANDLED;
}
/**
* smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
*/
void smp_nmi_call_function_interrupt(void)
{
smp_call_func_t func = nmi_call_data->func;
void *info = nmi_call_data->info;
int wait = nmi_call_data->wait;
/* Notify the initiating CPU that I've grabbed the data and am about to
* execute the function
*/
smp_mb();
cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
(*func)(info);
if (wait) {
smp_mb();
cpumask_clear_cpu(smp_processor_id(),
&nmi_call_data->finished);
}
}
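/*
 * Note (added commentary): the smp_mb() calls above pair with the one in
 * smp_nmi_call_function() -- the initiator publishes nmi_call_data before
 * raising the IPI, and each receiver completes its reads of the shared
 * structure before advertising progress through the started/finished
 * masks that the initiator is polling.
 */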
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
* smp_ipi_timer_interrupt - Local timer IPI handler
* @irq: The interrupt number.
* @dev_id: The device ID.
*
* Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
*/
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
return local_timer_interrupt();
}
#endif
void __init smp_init_cpus(void)
{
int i;
for (i = 0; i < NR_CPUS; i++) {
set_cpu_possible(i, true);
set_cpu_present(i, true);
}
}
/**
* smp_cpu_init - Initialise AP in start_secondary.
*
* For this Application Processor, set up init_mm, initialise the FPU and
* configure interrupt levels 0-6.
*/
static void __init smp_cpu_init(void)
{
unsigned long flags;
int cpu_id = smp_processor_id();
u16 tmp16;
if (cpumask_test_and_set_cpu(cpu_id, &cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
for (;;)
local_irq_enable();
}
printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
/* Force FPU initialization */
clear_using_fpu(current);
GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
mn10300_ipi_enable(LOCAL_TIMER_IPI);
GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
mn10300_ipi_enable(RESCHEDULE_IPI);
#ifdef CONFIG_MN10300_CACHE_ENABLED
GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif
mn10300_ipi_shutdown(SMP_BOOT_IRQ);
/* Set up the non-maskable call function IPI */
flags = arch_local_cli_save();
GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
arch_local_irq_restore(flags);
}
/**
* smp_prepare_cpu_init - Initialise CPU in startup_secondary
*
* Configure interrupt levels 0-6 and initialise the kernel debugger's ICR.
*/
void smp_prepare_cpu_init(void)
{
int loop;
/* Set the interrupt vector registers */
IVAR0 = EXCEP_IRQ_LEVEL0;
IVAR1 = EXCEP_IRQ_LEVEL1;
IVAR2 = EXCEP_IRQ_LEVEL2;
IVAR3 = EXCEP_IRQ_LEVEL3;
IVAR4 = EXCEP_IRQ_LEVEL4;
IVAR5 = EXCEP_IRQ_LEVEL5;
IVAR6 = EXCEP_IRQ_LEVEL6;
/* Disable all interrupts and set to priority 6 (lowest) */
for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
#ifdef CONFIG_KERNEL_DEBUGGER
/* initialise the kernel debugger interrupt */
do {
unsigned long flags;
u16 tmp16;
flags = arch_local_cli_save();
GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(DEBUGGER_NMI_IPI);
arch_local_irq_restore(flags);
} while (0);
#endif
}
/**
* start_secondary - Activate a secondary CPU (AP)
* @unused: Thread parameter (ignored).
*/
int __init start_secondary(void *unused)
{
smp_cpu_init();
smp_callin();
while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
cpu_relax();
local_flush_tlb();
preempt_disable();
smp_online();
#ifdef CONFIG_GENERIC_CLOCKEVENTS
init_clockevents();
#endif
cpu_idle();
return 0;
}
/**
* smp_prepare_cpus - Boot up secondary CPUs (APs)
* @max_cpus: Maximum number of CPUs to boot.
*
* Call do_boot_cpu, and boot up APs.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int phy_id;
/* Setup boot CPU information */
smp_store_cpu_info(0);
smp_tune_scheduling();
init_ipi();
/* If SMP should be disabled, then finish */
if (max_cpus == 0) {
printk(KERN_INFO "SMP mode deactivated.\n");
goto smp_done;
}
/* Boot secondary CPUs (for which phy_id > 0) */
for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
/* Don't boot primary CPU */
if (max_cpus <= cpucount + 1)
continue;
if (phy_id != 0)
do_boot_cpu(phy_id);
set_cpu_possible(phy_id, true);
smp_show_cpu_info(phy_id);
}
smp_done:
Dprintk("Boot done.\n");
}
/**
* smp_store_cpu_info - Save a CPU's information
* @cpu: The CPU to save for.
*
* Save boot_cpu_data and loops_per_jiffy for the specified CPU.
*/
static void __init smp_store_cpu_info(int cpu)
{
struct mn10300_cpuinfo *ci = &cpu_data[cpu];
*ci = boot_cpu_data;
ci->loops_per_jiffy = loops_per_jiffy;
ci->type = CPUREV;
}
/**
* smp_tune_scheduling - Set time slice value
*
* Nothing to do here.
*/
static void __init smp_tune_scheduling(void)
{
}
/**
* do_boot_cpu: Boot up one CPU
* @phy_id: Physical ID of CPU to boot.
*
* Send an IPI to a secondary CPU to boot it. Returns 0 on success, 1
* otherwise.
*/
static int __init do_boot_cpu(int phy_id)
{
struct task_struct *idle;
unsigned long send_status, callin_status;
int timeout, cpu_id;
send_status = GxICR_REQUEST;
callin_status = 0;
timeout = 0;
cpu_id = phy_id;
cpucount++;
/* Create idle thread for this CPU */
idle = fork_idle(cpu_id);
if (IS_ERR(idle))
panic("Failed fork for CPU#%d.", cpu_id);
idle->thread.pc = (unsigned long)start_secondary;
printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
start_stack[cpu_id - 1] = idle->thread.sp;
task_thread_info(idle)->cpu = cpu_id;
/* Send boot IPI to AP */
send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
Dprintk("Waiting for send to finish...\n");
/* Wait up to 100ms for the AP to latch the boot IPI */
do {
udelay(1000);
send_status =
CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
} while (send_status == GxICR_REQUEST && timeout++ < 100);
Dprintk("Waiting for cpu_callin_map.\n");
if (send_status == 0) {
/* Allow AP to start initializing */
cpumask_set_cpu(cpu_id, &cpu_callout_map);
/* Wait for setting cpu_callin_map */
timeout = 0;
do {
udelay(1000);
callin_status = cpumask_test_cpu(cpu_id,
&cpu_callin_map);
} while (callin_status == 0 && timeout++ < 5000);
if (callin_status == 0)
Dprintk("Not responding.\n");
} else {
printk(KERN_WARNING "IPI not delivered.\n");
}
if (send_status == GxICR_REQUEST || callin_status == 0) {
cpumask_clear_cpu(cpu_id, &cpu_callout_map);
cpumask_clear_cpu(cpu_id, &cpu_callin_map);
cpumask_clear_cpu(cpu_id, &cpu_initialized);
cpucount--;
return 1;
}
return 0;
}
/**
* smp_show_cpu_info - Show SMP CPU information
* @cpu: The CPU of interest.
*/
static void __init smp_show_cpu_info(int cpu)
{
struct mn10300_cpuinfo *ci = &cpu_data[cpu];
printk(KERN_INFO
"CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
cpu,
MN10300_IOCLK / 1000000,
(MN10300_IOCLK / 10000) % 100,
ci->loops_per_jiffy / (500000 / HZ),
(ci->loops_per_jiffy / (5000 / HZ)) % 100);
}
/**
* smp_callin - Set cpu_callin_map of the current CPU ID
*/
static void __init smp_callin(void)
{
unsigned long timeout;
int cpu;
cpu = smp_processor_id();
timeout = jiffies + (2 * HZ);
if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
printk(KERN_ERR "CPU#%d already present.\n", cpu);
BUG();
}
Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
/* Wait for AP startup 2s total */
while (time_before(jiffies, timeout)) {
if (cpumask_test_cpu(cpu, &cpu_callout_map))
break;
cpu_relax();
}
if (!time_before(jiffies, timeout)) {
printk(KERN_ERR
"BUG: CPU#%d started up but did not get a callout!\n",
cpu);
BUG();
}
#ifdef CONFIG_CALIBRATE_DELAY
calibrate_delay(); /* Get our bogomips */
#endif
/* Save our processor parameters */
smp_store_cpu_info(cpu);
/* Allow the boot processor to continue */
cpumask_set_cpu(cpu, &cpu_callin_map);
}
/**
* smp_online - Set cpu_online_mask
*/
static void __init smp_online(void)
{
int cpu;
cpu = smp_processor_id();
notify_cpu_starting(cpu);
ipi_call_lock();
set_cpu_online(cpu, true);
ipi_call_unlock();
local_irq_enable();
}
/**
* smp_cpus_done - Complete the SMP boot process
* @max_cpus: Maximum CPU count.
*
* Do nothing.
*/
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
* smp_prepare_boot_cpu - Set up state for the boot processor.
*
* Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
* processor (CPU 0).
*/
void __devinit smp_prepare_boot_cpu(void)
{
cpumask_set_cpu(0, &cpu_callout_map);
cpumask_set_cpu(0, &cpu_callin_map);
current_thread_info()->cpu = 0;
}
/*
* initialize_secondary - Initialise a secondary CPU (Application Processor).
*
* Set SP register and jump to thread's PC address.
*/
void initialize_secondary(void)
{
asm volatile (
"mov %0,sp \n"
"jmp (%1) \n"
:
: "a"(current->thread.sp), "a"(current->thread.pc));
}
/**
* __cpu_up - Set smp_commenced_mask for the nominated CPU
* @cpu: The target CPU.
*/
int __devinit __cpu_up(unsigned int cpu)
{
int timeout;
#ifdef CONFIG_HOTPLUG_CPU
if (num_online_cpus() == 1)
disable_hlt();
if (sleep_mode[cpu])
run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */
cpumask_set_cpu(cpu, &smp_commenced_mask);
/* Wait 5s total for a response */
for (timeout = 0 ; timeout < 5000 ; timeout++) {
if (cpu_online(cpu))
break;
udelay(1000);
}
BUG_ON(!cpu_online(cpu));
return 0;
}
/**
* setup_profiling_timer - Set up the profiling timer
* @multiplier - The frequency multiplier to use
*
* The frequency of the profiling timer can be changed by writing a multiplier
* value into /proc/profile.
*/
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/*
* CPU hotplug routines
*/
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int cpu, ret;
for_each_present_cpu(cpu) {
ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
if (ret)
printk(KERN_WARNING
"topology_init: register_cpu %d failed (%d)\n",
cpu, ret);
}
return 0;
}
subsys_initcall(topology_init);
int __cpu_disable(void)
{
int cpu = smp_processor_id();
if (cpu == 0)
return -EBUSY;
migrate_irqs();
cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
return 0;
}
void __cpu_die(unsigned int cpu)
{
run_sleep_cpu(cpu);
if (num_online_cpus() == 1)
enable_hlt();
}
#ifdef CONFIG_MN10300_CACHE_ENABLED
static inline void hotplug_cpu_disable_cache(void)
{
int tmp;
asm volatile(
" movhu (%1),%0 \n"
" and %2,%0 \n"
" movhu %0,(%1) \n"
"1: movhu (%1),%0 \n"
" btst %3,%0 \n"
" bne 1b \n"
: "=&r"(tmp)
: "a"(&CHCTR),
"i"(~(CHCTR_ICEN | CHCTR_DCEN)),
"i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
: "memory", "cc");
}
static inline void hotplug_cpu_enable_cache(void)
{
int tmp;
asm volatile(
"movhu (%1),%0 \n"
"or %2,%0 \n"
"movhu %0,(%1) \n"
: "=&r"(tmp)
: "a"(&CHCTR),
"i"(CHCTR_ICEN | CHCTR_DCEN)
: "memory", "cc");
}
static inline void hotplug_cpu_invalidate_cache(void)
{
int tmp;
asm volatile (
"movhu (%1),%0 \n"
"or %2,%0 \n"
"movhu %0,(%1) \n"
: "=&r"(tmp)
: "a"(&CHCTR),
"i"(CHCTR_ICINV | CHCTR_DCINV)
: "cc");
}
#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache() do {} while (0)
#define hotplug_cpu_enable_cache() do {} while (0)
#define hotplug_cpu_invalidate_cache() do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */
/**
* hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
* @cpumask: List of target CPUs.
* @func: The function to call on those CPUs.
* @info: The context data for the function to be called.
* @wait: Whether to wait for the calls to complete.
*
* Non-maskably call a function on another CPU for hotplug purposes.
*
* This function must be called with maskable interrupts disabled.
*/
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
smp_call_func_t func, void *info,
int wait)
{
/*
* The address and the size of nmi_call_func_mask_data
* need to be aligned on L1_CACHE_BYTES.
*/
static struct nmi_call_data_struct nmi_call_func_mask_data
__cacheline_aligned;
unsigned long start, end;
start = (unsigned long)&nmi_call_func_mask_data;
end = start + sizeof(struct nmi_call_data_struct);
nmi_call_func_mask_data.func = func;
nmi_call_func_mask_data.info = info;
nmi_call_func_mask_data.started = cpumask;
nmi_call_func_mask_data.wait = wait;
if (wait)
nmi_call_func_mask_data.finished = cpumask;
spin_lock(&smp_nmi_call_lock);
nmi_call_data = &nmi_call_func_mask_data;
mn10300_local_dcache_flush_range(start, end);
smp_wmb();
send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);
do {
mn10300_local_dcache_inv_range(start, end);
barrier();
} while (!cpumask_empty(&nmi_call_func_mask_data.started));
if (wait) {
do {
mn10300_local_dcache_inv_range(start, end);
barrier();
} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
}
spin_unlock(&smp_nmi_call_lock);
return 0;
}
static void restart_wakeup_cpu(void)
{
unsigned int cpu = smp_processor_id();
cpumask_set_cpu(cpu, &cpu_callin_map);
local_flush_tlb();
set_cpu_online(cpu, true);
smp_wmb();
}
static void prepare_sleep_cpu(void *unused)
{
sleep_mode[smp_processor_id()] = 1;
smp_mb();
mn10300_local_dcache_flush_inv();
hotplug_cpu_disable_cache();
hotplug_cpu_invalidate_cache();
}
/* When this function is called, IE=0 and NMID=0. */
static void sleep_cpu(void *unused)
{
unsigned int cpu_id = smp_processor_id();
/*
* The CALL_FUNCTION_NMI_IPI that drives wakeup_cpu() must not be
* raised before this CPU has entered SLEEP mode.
*/
do {
smp_mb();
__sleep_cpu();
} while (sleep_mode[cpu_id]);
restart_wakeup_cpu();
}
static void run_sleep_cpu(unsigned int cpu)
{
unsigned long flags;
cpumask_t cpumask;
cpumask_copy(&cpumask, cpumask_of(cpu));
flags = arch_local_cli_save();
hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
udelay(1); /* delay for the cpu to sleep. */
arch_local_irq_restore(flags);
}
static void wakeup_cpu(void *unused)
{
hotplug_cpu_invalidate_cache();
hotplug_cpu_enable_cache();
smp_mb();
sleep_mode[smp_processor_id()] = 0;
}
static void run_wakeup_cpu(unsigned int cpu)
{
unsigned long flags;
flags = arch_local_cli_save();
#if NR_CPUS == 2
mn10300_local_dcache_flush_inv();
#else
/*
* Before waking up the cpu,
* all online cpus should stop and flush D-Cache for global data.
*/
#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
#endif
hotplug_cpu_nmi_call_function(*cpumask_of(cpu), wakeup_cpu, NULL, 1);
arch_local_irq_restore(flags);
}
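/*
 * Summary of the hotplug handshake above (commentary added for clarity):
 * __cpu_die() -> run_sleep_cpu() NMIs the dying CPU twice.  First
 * prepare_sleep_cpu() (waited on) sets sleep_mode and turns the local
 * caches off; then sleep_cpu() (not waited on) loops in __sleep_cpu()
 * until __cpu_up() -> run_wakeup_cpu() later clears sleep_mode via
 * wakeup_cpu(), at which point restart_wakeup_cpu() marks the CPU
 * online again.
 */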
#endif /* CONFIG_HOTPLUG_CPU */
| gpl-2.0 |
hsr0/android_kernel_sony_msm7x27a | drivers/bluetooth/bt3c_cs.c | 4587 | 16040 | /*
*
* Driver for the 3Com Bluetooth PCMCIA card
*
* Copyright (C) 2001-2002 Marcel Holtmann <marcel@holtmann.org>
* Jose Orlando Pereira <jop@di.uminho.pt>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
/* ======================== Module parameters ======================== */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("BT3CPCC.bin");
/* ======================== Local structures ======================== */
typedef struct bt3c_info_t {
struct pcmcia_device *p_dev;
struct hci_dev *hdev;
spinlock_t lock; /* For serializing operations */
struct sk_buff_head txq;
unsigned long tx_state;
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
} bt3c_info_t;
static int bt3c_config(struct pcmcia_device *link);
static void bt3c_release(struct pcmcia_device *link);
static void bt3c_detach(struct pcmcia_device *p_dev);
/* Transmit states */
#define XMIT_SENDING 1
#define XMIT_WAKEUP 2
#define XMIT_WAITING 8
/* Receiver states */
#define RECV_WAIT_PACKET_TYPE 0
#define RECV_WAIT_EVENT_HEADER 1
#define RECV_WAIT_ACL_HEADER 2
#define RECV_WAIT_SCO_HEADER 3
#define RECV_WAIT_DATA 4
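/* Receive state flow (summary added for clarity):
 *
 *   RECV_WAIT_PACKET_TYPE -> RECV_WAIT_{EVENT,ACL,SCO}_HEADER
 *                         -> RECV_WAIT_DATA -> frame delivered
 *
 * bt3c_receive() consumes one byte per DATA_L read and uses rx_count to
 * track how many bytes the current stage still expects.
 */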
/* ======================== Special I/O functions ======================== */
#define DATA_L 0
#define DATA_H 1
#define ADDR_L 2
#define ADDR_H 3
#define CONTROL 4
static inline void bt3c_address(unsigned int iobase, unsigned short addr)
{
outb(addr & 0xff, iobase + ADDR_L);
outb((addr >> 8) & 0xff, iobase + ADDR_H);
}
static inline void bt3c_put(unsigned int iobase, unsigned short value)
{
outb(value & 0xff, iobase + DATA_L);
outb((value >> 8) & 0xff, iobase + DATA_H);
}
static inline void bt3c_io_write(unsigned int iobase, unsigned short addr, unsigned short value)
{
bt3c_address(iobase, addr);
bt3c_put(iobase, value);
}
static inline unsigned short bt3c_get(unsigned int iobase)
{
unsigned short value = inb(iobase + DATA_L);
value |= inb(iobase + DATA_H) << 8;
return value;
}
static inline unsigned short bt3c_read(unsigned int iobase, unsigned short addr)
{
bt3c_address(iobase, addr);
return bt3c_get(iobase);
}
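/*
 * Illustrative sketch (not in the original driver) of the windowed access
 * pattern the helpers above implement: each 16-bit card register is
 * reached by latching its address through ADDR_L/ADDR_H, then moving the
 * data through DATA_L/DATA_H.  Register 0x7001 is the interrupt status
 * word used by the interrupt handler below.
 */
static inline unsigned short bt3c_sample_and_clear_status(unsigned int iobase)
{
	unsigned short stat;

	stat = bt3c_read(iobase, 0x7001);	/* latch address, read data */
	bt3c_io_write(iobase, 0x7001, 0x0000);	/* latch address, write zero */
	return stat;
}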
/* ======================== Interrupt handling ======================== */
static int bt3c_write(unsigned int iobase, int fifo_size, __u8 *buf, int len)
{
int actual = 0;
bt3c_address(iobase, 0x7080);
/* Fill FIFO with current frame */
while (actual < len) {
/* Transmit next byte */
bt3c_put(iobase, buf[actual]);
actual++;
}
bt3c_io_write(iobase, 0x7005, actual);
return actual;
}
static void bt3c_write_wakeup(bt3c_info_t *info)
{
if (!info) {
BT_ERR("Unknown device");
return;
}
if (test_and_set_bit(XMIT_SENDING, &(info->tx_state)))
return;
do {
register unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
if (!pcmcia_dev_present(info->p_dev))
break;
if (!(skb = skb_dequeue(&(info->txq)))) {
clear_bit(XMIT_SENDING, &(info->tx_state));
break;
}
/* Send frame */
len = bt3c_write(iobase, 256, skb->data, skb->len);
if (len != skb->len) {
BT_ERR("Very strange");
}
kfree_skb(skb);
info->hdev->stat.byte_tx += len;
} while (0);
}
static void bt3c_receive(bt3c_info_t *info)
{
unsigned int iobase;
int size = 0, avail;
if (!info) {
BT_ERR("Unknown device");
return;
}
iobase = info->p_dev->resource[0]->start;
avail = bt3c_read(iobase, 0x7006);
//printk("bt3c_cs: receiving %d bytes\n", avail);
bt3c_address(iobase, 0x7480);
while (size < avail) {
size++;
info->hdev->stat.byte_rx++;
/* Allocate packet */
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
BT_ERR("Can't allocate mem for new packet");
return;
}
}
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
inb(iobase + DATA_H);
//printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
switch (bt_cb(info->rx_skb)->pkt_type) {
case HCI_EVENT_PKT:
info->rx_state = RECV_WAIT_EVENT_HEADER;
info->rx_count = HCI_EVENT_HDR_SIZE;
break;
case HCI_ACLDATA_PKT:
info->rx_state = RECV_WAIT_ACL_HEADER;
info->rx_count = HCI_ACL_HDR_SIZE;
break;
case HCI_SCODATA_PKT:
info->rx_state = RECV_WAIT_SCO_HEADER;
info->rx_count = HCI_SCO_HDR_SIZE;
break;
default:
/* Unknown packet */
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
info->hdev->stat.err_rx++;
clear_bit(HCI_RUNNING, &(info->hdev->flags));
kfree_skb(info->rx_skb);
info->rx_skb = NULL;
break;
}
} else {
__u8 x = inb(iobase + DATA_L);
*skb_put(info->rx_skb, 1) = x;
inb(iobase + DATA_H);
info->rx_count--;
if (info->rx_count == 0) {
int dlen;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
switch (info->rx_state) {
case RECV_WAIT_EVENT_HEADER:
eh = hci_event_hdr(info->rx_skb);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = eh->plen;
break;
case RECV_WAIT_ACL_HEADER:
ah = hci_acl_hdr(info->rx_skb);
dlen = __le16_to_cpu(ah->dlen);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = dlen;
break;
case RECV_WAIT_SCO_HEADER:
sh = hci_sco_hdr(info->rx_skb);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = sh->dlen;
break;
case RECV_WAIT_DATA:
hci_recv_frame(info->rx_skb);
info->rx_skb = NULL;
break;
}
}
}
}
bt3c_io_write(iobase, 0x7006, 0x0000);
}
static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
{
bt3c_info_t *info = dev_inst;
unsigned int iobase;
int iir;
irqreturn_t r = IRQ_NONE;
if (!info || !info->hdev)
/* our irq handler is shared */
return IRQ_NONE;
iobase = info->p_dev->resource[0]->start;
spin_lock(&(info->lock));
iir = inb(iobase + CONTROL);
if (iir & 0x80) {
int stat = bt3c_read(iobase, 0x7001);
if ((stat & 0xff) == 0x7f) {
BT_ERR("Very strange (stat=0x%04x)", stat);
} else if ((stat & 0xff) != 0xff) {
if (stat & 0x0020) {
int status = bt3c_read(iobase, 0x7002) & 0x10;
BT_INFO("%s: Antenna %s", info->hdev->name,
status ? "out" : "in");
}
if (stat & 0x0001)
bt3c_receive(info);
if (stat & 0x0002) {
//BT_ERR("Ack (stat=0x%04x)", stat);
clear_bit(XMIT_SENDING, &(info->tx_state));
bt3c_write_wakeup(info);
}
bt3c_io_write(iobase, 0x7001, 0x0000);
outb(iir, iobase + CONTROL);
}
r = IRQ_HANDLED;
}
spin_unlock(&(info->lock));
return r;
}
/* ======================== HCI interface ======================== */
static int bt3c_hci_flush(struct hci_dev *hdev)
{
bt3c_info_t *info = (bt3c_info_t *)(hdev->driver_data);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
return 0;
}
static int bt3c_hci_open(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &(hdev->flags));
return 0;
}
static int bt3c_hci_close(struct hci_dev *hdev)
{
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
bt3c_hci_flush(hdev);
return 0;
}
static int bt3c_hci_send_frame(struct sk_buff *skb)
{
bt3c_info_t *info;
struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
unsigned long flags;
if (!hdev) {
BT_ERR("Frame for unknown HCI device (hdev=NULL)");
return -ENODEV;
}
info = (bt3c_info_t *) (hdev->driver_data);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
}
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&(info->txq), skb);
spin_lock_irqsave(&(info->lock), flags);
bt3c_write_wakeup(info);
spin_unlock_irqrestore(&(info->lock), flags);
return 0;
}
static void bt3c_hci_destruct(struct hci_dev *hdev)
{
}
static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
/* ======================== Card services HCI interaction ======================== */
static int bt3c_load_firmware(bt3c_info_t *info, const unsigned char *firmware,
int count)
{
char *ptr = (char *) firmware;
char b[9];
unsigned int iobase, size, addr, fcs, tmp;
int i, err = 0;
iobase = info->p_dev->resource[0]->start;
/* Reset */
bt3c_io_write(iobase, 0x8040, 0x0404);
bt3c_io_write(iobase, 0x8040, 0x0400);
udelay(1);
bt3c_io_write(iobase, 0x8040, 0x0404);
udelay(17);
/* Load */
while (count) {
if (ptr[0] != 'S') {
BT_ERR("Bad address in firmware");
err = -EFAULT;
goto error;
}
memset(b, 0, sizeof(b));
memcpy(b, ptr + 2, 2);
size = simple_strtoul(b, NULL, 16);
memset(b, 0, sizeof(b));
memcpy(b, ptr + 4, 8);
addr = simple_strtoul(b, NULL, 16);
memset(b, 0, sizeof(b));
memcpy(b, ptr + (size * 2) + 2, 2);
fcs = simple_strtoul(b, NULL, 16);
memset(b, 0, sizeof(b));
for (tmp = 0, i = 0; i < size; i++) {
memcpy(b, ptr + (i * 2) + 2, 2);
tmp += simple_strtol(b, NULL, 16);
}
if (((tmp + fcs) & 0xff) != 0xff) {
BT_ERR("Checksum error in firmware");
err = -EILSEQ;
goto error;
}
if (ptr[1] == '3') {
bt3c_address(iobase, addr);
memset(b, 0, sizeof(b));
for (i = 0; i < (size - 4) / 2; i++) {
memcpy(b, ptr + (i * 4) + 12, 4);
tmp = simple_strtoul(b, NULL, 16);
bt3c_put(iobase, tmp);
}
}
ptr += (size * 2) + 6;
count -= (size * 2) + 6;
}
udelay(17);
/* Boot */
bt3c_address(iobase, 0x3000);
outb(inb(iobase + CONTROL) | 0x40, iobase + CONTROL);
error:
udelay(17);
/* Clear */
bt3c_io_write(iobase, 0x7006, 0x0000);
bt3c_io_write(iobase, 0x7005, 0x0000);
bt3c_io_write(iobase, 0x7001, 0x0000);
return err;
}
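/*
 * Added commentary: the firmware image parsed above is a stream of
 * Motorola S-records.  A hypothetical 'S3' record, for illustration:
 *
 *   S3 09 00003000 AABBCCDD B8
 *   |  |  |        |        `-- ones-complement checksum byte
 *   |  |  |        `----------- payload streamed via bt3c_put()
 *   |  |  `-------------------- 32-bit load address
 *   |  `----------------------- byte count (hex, addr+data+checksum)
 *   `-------------------------- type '3': data with 32-bit address
 *
 * bt3c_load_firmware() re-derives each field with simple_strtoul() on a
 * bounce buffer, checks that the record's bytes plus the checksum sum to
 * 0xff, and only then latches the address and streams the payload to the
 * card.
 */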
static int bt3c_open(bt3c_info_t *info)
{
const struct firmware *firmware;
struct hci_dev *hdev;
int err;
spin_lock_init(&(info->lock));
skb_queue_head_init(&(info->txq));
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
info->rx_skb = NULL;
/* Initialize HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can't allocate HCI device");
return -ENOMEM;
}
info->hdev = hdev;
hdev->bus = HCI_PCCARD;
hdev->driver_data = info;
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
hdev->open = bt3c_hci_open;
hdev->close = bt3c_hci_close;
hdev->flush = bt3c_hci_flush;
hdev->send = bt3c_hci_send_frame;
hdev->destruct = bt3c_hci_destruct;
hdev->ioctl = bt3c_hci_ioctl;
hdev->owner = THIS_MODULE;
/* Load firmware */
err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
if (err < 0) {
BT_ERR("Firmware request failed");
goto error;
}
err = bt3c_load_firmware(info, firmware->data, firmware->size);
release_firmware(firmware);
if (err < 0) {
BT_ERR("Firmware loading failed");
goto error;
}
/* Timeout before it is safe to send the first HCI packet */
msleep(1000);
/* Register HCI device */
err = hci_register_dev(hdev);
if (err < 0) {
BT_ERR("Can't register HCI device");
goto error;
}
return 0;
error:
info->hdev = NULL;
hci_free_dev(hdev);
return err;
}
static int bt3c_close(bt3c_info_t *info)
{
struct hci_dev *hdev = info->hdev;
if (!hdev)
return -ENODEV;
bt3c_hci_close(hdev);
if (hci_unregister_dev(hdev) < 0)
BT_ERR("Can't unregister HCI device %s", hdev->name);
hci_free_dev(hdev);
return 0;
}
static int bt3c_probe(struct pcmcia_device *link)
{
bt3c_info_t *info;
/* Create new info device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->p_dev = link;
link->priv = info;
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
CONF_AUTO_SET_IO;
return bt3c_config(link);
}
static void bt3c_detach(struct pcmcia_device *link)
{
bt3c_info_t *info = link->priv;
bt3c_release(link);
kfree(info);
}
static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
unsigned long try = (unsigned long) priv_data;
/* bt3c_config() passes the pass number by value through priv_data;
 * first pass (try == 0): full 16-line decode, i.e. no I/O aliases */
if (try == 0)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
return -EINVAL;
p_dev->resource[0]->end = 8;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
return pcmcia_request_io(p_dev);
}
static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev,
void *priv_data)
{
static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
int j;
if (p_dev->io_lines > 3)
return -ENODEV;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[0]->end = 8;
for (j = 0; j < 5; j++) {
p_dev->resource[0]->start = base[j];
p_dev->io_lines = base[j] ? 16 : 3;
if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
}
static int bt3c_config(struct pcmcia_device *link)
{
bt3c_info_t *info = link->priv;
int i;
unsigned long try;
/* First pass: look for a config entry that looks normal.
Two tries: without IO aliases, then with aliases */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
its base address, then try to grab any standard serial port
address, and finally try to get any free port. */
if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL))
goto found_port;
BT_ERR("No usable port range found");
goto failed;
found_port:
i = pcmcia_request_irq(link, &bt3c_interrupt);
if (i != 0)
goto failed;
i = pcmcia_enable_device(link);
if (i != 0)
goto failed;
if (bt3c_open(info) != 0)
goto failed;
return 0;
failed:
bt3c_release(link);
return -ENODEV;
}
static void bt3c_release(struct pcmcia_device *link)
{
bt3c_info_t *info = link->priv;
bt3c_close(info);
pcmcia_disable_device(link);
}
static struct pcmcia_device_id bt3c_ids[] = {
PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, bt3c_ids);
static struct pcmcia_driver bt3c_driver = {
.owner = THIS_MODULE,
.name = "bt3c_cs",
.probe = bt3c_probe,
.remove = bt3c_detach,
.id_table = bt3c_ids,
};
static int __init init_bt3c_cs(void)
{
return pcmcia_register_driver(&bt3c_driver);
}
static void __exit exit_bt3c_cs(void)
{
pcmcia_unregister_driver(&bt3c_driver);
}
module_init(init_bt3c_cs);
module_exit(exit_bt3c_cs);
| gpl-2.0 |
TeamHorizon/android_kernel_samsung_hlte | drivers/scsi/bfa/bfa_ioc.c | 4843 | 138699 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
BFA_TRC_FILE(CNA, IOC);
/*
* IOC local definitions
*/
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_hb_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
* forward declarations
*/
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
* IOC state machine definitions/declarations
*/
enum ioc_event {
IOC_E_RESET = 1, /* IOC reset request */
IOC_E_ENABLE = 2, /* IOC enable request */
IOC_E_DISABLE = 3, /* IOC disable request */
IOC_E_DETACH = 4, /* driver detach cleanup */
IOC_E_ENABLED = 5, /* f/w enabled */
IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
IOC_E_DISABLED = 7, /* f/w disabled */
IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
IOC_E_HBFAIL = 9, /* heartbeat failure */
IOC_E_HWERROR = 10, /* hardware error interrupt */
IOC_E_TIMEOUT = 11, /* timeout */
IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
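/*
 * Added summary of the normal IOC bring-up path (handlers follow below):
 *
 *   uninit --RESET--> reset --ENABLE--> enabling --ENABLED--> getattr
 *          --FWRSP_GETATTR--> op
 *
 * Failures funnel into fail_retry/fail/hwfail; disable requests move the
 * IOC through disabling into disabled.
 */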
/*
* IOCPF state machine definitions/declarations
*/
#define bfa_iocpf_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_iocpf_poll_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
#define bfa_sem_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
/*
* Forward declarations for iocpf state machine
*/
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
* IOCPF state machine events
*/
enum iocpf_event {
IOCPF_E_ENABLE = 1, /* IOCPF enable request */
IOCPF_E_DISABLE = 2, /* IOCPF disable request */
IOCPF_E_STOP = 3, /* stop on driver detach */
IOCPF_E_FWREADY = 4, /* f/w initialization done */
IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
};
/*
* IOCPF states
*/
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /* IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
BFA_IOCPF_READY = 4, /* IOCPF is initialized */
BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
BFA_IOCPF_FAIL = 6, /* IOCPF failed */
BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
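/*
 * Illustrative sketch (not in the original file): the *_sm_table arrays
 * above let the driver map a state-machine function pointer back to a
 * flat enum for external reporting.  bfa_sm_to_state() is assumed here
 * from the driver's bfa_cs.h helpers; the lookup is a linear scan.
 */
static inline enum bfa_ioc_state example_ioc_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}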
/*
* IOC State Machine
*/
/*
* Beginning state. IOC uninit state.
*/
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
/*
* IOC is in uninit state.
*/
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_RESET:
bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Reset entry actions -- initialize state machine
*/
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/*
* IOC is in reset state.
*/
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
bfa_ioc_disable_comp(ioc);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
case IOC_E_PFFAILED:
/* !!! fall through !!! */
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
break;
case IOC_E_HWFAILED:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
break;
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
case IOC_E_ENABLE:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
bfa_ioc_timer_start(ioc);
bfa_ioc_send_getattr(ioc);
}
/*
* IOC configuration in progress. Timer is active.
*/
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_FWRSP_GETATTR:
bfa_ioc_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
/* !!! fall through !!! */
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
break;
case IOC_E_DISABLE:
bfa_ioc_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_ENABLE:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
bfa_ioc_hb_monitor(ioc);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
break;
case IOC_E_DISABLE:
bfa_hb_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_hb_timer_stop(ioc);
/* !!! fall through !!! */
case IOC_E_HBFAIL:
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
else
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
bfa_ioc_fail_notify(ioc);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}
/*
* IOC is being disabled
*/
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_DISABLED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
break;
case IOC_E_HWERROR:
/*
* No state change. Will move to disabled state
* after iocpf sm completes failure processing and
* moves to disabled state.
*/
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
break;
case IOC_E_HWFAILED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
bfa_ioc_disable_comp(ioc);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* IOC disable completion entry.
*/
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
bfa_ioc_disable_comp(ioc);
}
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
ioc->cbfn->disable_cbfn(ioc->bfa);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
bfa_trc(ioc, 0);
}
/*
* Hardware initialization retry.
*/
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
/*
* Initialization retry failed.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
break;
case IOC_E_HWFAILED:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
break;
case IOC_E_ENABLE:
break;
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
bfa_trc(ioc, 0);
}
/*
* IOC failure.
*/
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
break;
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
case IOC_E_HWERROR:
/*
* HB failure notification, ignore.
*/
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
bfa_trc(ioc, 0);
}
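/*
 * IOC is in an unrecoverable h/w failure state; it can only be
 * re-enabled (which reports failure) or detached.
 */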
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
break;
case IOC_E_DISABLE:
ioc->cbfn->disable_cbfn(ioc->bfa);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* IOCPF State Machine
*/
/*
* Reset entry actions -- initialize state machine
*/
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
iocpf->fw_mismatch_notified = BFA_FALSE;
iocpf->auto_recover = bfa_auto_recover;
}
/*
* Beginning state. IOC is in reset state.
*/
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_ENABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
break;
case IOCPF_E_STOP:
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Semaphore should be acquired for version check.
*/
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
struct bfi_ioc_image_hdr_s fwhdr;
u32 r32, fwstate, pgnum, pgoff, loff = 0;
int i;
/*
* Spin on init semaphore to serialize.
*/
r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
while (r32 & 0x1) {
udelay(20);
r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
}
/* h/w sem init */
fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
}
bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
}
/*
* Clear fwver hdr
*/
pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
}
bfa_trc(iocpf->ioc, fwstate);
bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
/*
* Unlock the hw semaphore. Should be here only once per boot.
*/
readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
/*
* unlock init semaphore.
*/
writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
sem_get:
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* Awaiting h/w semaphore to continue with version check.
*/
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
if (bfa_ioc_sync_start(ioc)) {
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
} else {
bfa_ioc_firmware_unlock(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_sem_timer_start(ioc);
}
} else {
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
}
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_fsm_send_event(ioc, IOC_E_DISABLED);
break;
case IOCPF_E_STOP:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Notify enable completion callback.
*/
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
/*
* Call only the first time sm enters fwmismatch state.
*/
if (iocpf->fw_mismatch_notified == BFA_FALSE)
bfa_ioc_pf_fwmismatch(iocpf->ioc);
iocpf->fw_mismatch_notified = BFA_TRUE;
bfa_iocpf_timer_start(iocpf->ioc);
}
/*
* Awaiting firmware version match.
*/
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_TIMEOUT:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
break;
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_fsm_send_event(ioc, IOC_E_DISABLED);
break;
case IOCPF_E_STOP:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Request for semaphore.
*/
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* Awaiting semaphore for h/w initialization.
*/
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_sync_complete(ioc)) {
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
} else {
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_sem_timer_start(ioc);
}
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
iocpf->poll_time = 0;
bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
/*
* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_FWREADY:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
break;
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_ioc_sync_leave(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
bfa_iocpf_timer_start(iocpf->ioc);
/*
* Enable Interrupts before sending fw IOC ENABLE cmd.
*/
iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
bfa_ioc_send_enable(iocpf->ioc);
}
/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_FWRSP_ENABLE:
bfa_iocpf_timer_stop(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
/*
* !!! fall through !!!
*/
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
if (event == IOCPF_E_TIMEOUT)
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
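/*
 * IOC PF is enabled and operational.
 */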
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
case IOCPF_E_GETATTRFAIL:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
case IOCPF_E_FAIL:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
bfa_iocpf_timer_start(iocpf->ioc);
bfa_ioc_send_disable(iocpf->ioc);
}
/*
* IOC is being disabled
*/
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
bfa_iocpf_timer_stop(ioc);
/*
* !!! fall through !!!
*/
case IOCPF_E_TIMEOUT:
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FWRSP_ENABLE:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* IOC heartbeat ack request is being removed; awaiting the h/w
* semaphore to complete disable synchronization with other functions.
*/
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_sync_leave(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_FAIL:
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* IOC disable completion entry.
*/
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_mbox_flush(iocpf->ioc);
bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_ENABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
break;
case IOCPF_E_STOP:
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* Hardware initialization failed.
*/
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_STOP:
bfa_sem_timer_stop(ioc);
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
case IOCPF_E_FAIL:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
bfa_trc(iocpf->ioc, 0);
}
/*
* Hardware initialization failed.
*/
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
case IOCPF_E_STOP:
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
/*
* Mark IOC as failed in hardware and stop firmware.
*/
bfa_ioc_lpu_stop(iocpf->ioc);
/*
* Flush any queued up mailbox requests.
*/
bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_sync_ack(ioc);
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
if (bfa_ioc_sync_complete(ioc))
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
else {
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
}
}
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
bfa_trc(iocpf->ioc, 0);
}
/*
* IOC is in failed state.
*/
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* BFA IOC private functions
*/
/*
* Notify common modules registered for notification.
*/
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
struct bfa_ioc_notify_s *notify;
struct list_head *qe;
list_for_each(qe, &ioc->notify_q) {
notify = (struct bfa_ioc_notify_s *)qe;
notify->cbfn(notify->cbarg, event);
}
}
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
ioc->cbfn->disable_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
u32 r32;
int cnt = 0;
#define BFA_SEM_SPINCNT 3000
r32 = readl(sem_reg);
while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
cnt++;
udelay(2);
r32 = readl(sem_reg);
}
if (!(r32 & 1))
return BFA_TRUE;
return BFA_FALSE;
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
u32 r32;
/*
* First read to the semaphore register will return 0, subsequent reads
* will return 1. Semaphore is released by writing 1 to the register
*/
r32 = readl(ioc->ioc_regs.ioc_sem_reg);
if (r32 == ~0) {
WARN_ON(r32 == ~0);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
return;
}
if (!(r32 & 1)) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
return;
}
bfa_sem_timer_start(ioc);
}
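/*
 * Sketch of the h/w semaphore protocol used throughout this file:
 *
 *	bfa_ioc_hw_sem_get(ioc);	-- request; IOCPF_E_SEMLOCKED on success
 *	... critical section ...
 *	writel(1, ioc->ioc_regs.ioc_sem_reg);	-- release
 *
 * A failed acquisition arms bfa_sem_timer_start(), so the request is
 * retried from bfa_iocpf_sem_timeout().
 */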
/*
* Initialize LPU local memory (aka secondary memory / SRAM)
*/
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
int i;
#define PSS_LMEM_INIT_TIME 10000
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LMEM_RESET;
pss_ctl |= __PSS_LMEM_INIT_EN;
/*
* i2c workaround 12.5khz clock
*/
pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
/*
* wait for memory initialization to be complete
*/
i = 0;
do {
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
i++;
} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
/*
* If memory initialization is not successful, IOC timeout will catch
* such failures.
*/
WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
bfa_trc(ioc, pss_ctl);
pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
/*
* Take processor out of reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LPU0_RESET;
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
/*
* Put processors in reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
* Get driver and firmware versions.
*/
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
u32 pgnum, pgoff;
u32 loff = 0;
int i;
u32 *fwsig = (u32 *) fwhdr;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
i++) {
fwsig[i] =
bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
loff += sizeof(u32);
}
}
/*
* Returns TRUE if the firmware md5 checksums are the same.
*/
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
struct bfi_ioc_image_hdr_s *drv_fwhdr;
int i;
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
bfa_trc(ioc, i);
bfa_trc(ioc, fwhdr->md5sum[i]);
bfa_trc(ioc, drv_fwhdr->md5sum[i]);
return BFA_FALSE;
}
}
bfa_trc(ioc, fwhdr->md5sum[0]);
return BFA_TRUE;
}
/*
* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
bfa_ioc_fwver_get(ioc, &fwhdr);
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
if (fwhdr.signature != drv_fwhdr->signature) {
bfa_trc(ioc, fwhdr.signature);
bfa_trc(ioc, drv_fwhdr->signature);
return BFA_FALSE;
}
if (swab32(fwhdr.bootenv) != boot_env) {
bfa_trc(ioc, fwhdr.bootenv);
bfa_trc(ioc, boot_env);
return BFA_FALSE;
}
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
* Conditionally flush any pending message from firmware at start.
*/
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
u32 r32;
r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
if (r32)
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
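/*
 * Initialize h/w and boot firmware if the currently running image is
 * not valid; otherwise reuse the running firmware or wait for the
 * in-progress initialization by the other function to complete.
 */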
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
enum bfi_ioc_state ioc_fwstate;
bfa_boolean_t fwvalid;
u32 boot_type;
u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
bfa_trc(ioc, ioc_fwstate);
boot_type = BFI_FWBOOT_TYPE_NORMAL;
boot_env = BFI_FWBOOT_ENV_OS;
/*
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
bfa_ioc_boot(ioc, boot_type, boot_env);
bfa_ioc_poll_fwinit(ioc);
return;
}
/*
* If hardware initialization is in progress (initialized by other IOC),
* just wait for an initialization completion interrupt.
*/
if (ioc_fwstate == BFI_IOC_INITING) {
bfa_ioc_poll_fwinit(ioc);
return;
}
/*
* If IOC function is disabled and firmware version is same,
* just re-enable IOC.
*
* With option ROM, the IOC must not be in operational state; with
* convergence, the IOC will be operational when the 2nd driver is
* loaded.
*/
if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
/*
* When using MSI-X any pending firmware ready event should
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
bfa_ioc_msgflush(ioc);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
/*
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, boot_type, boot_env);
bfa_ioc_poll_fwinit(ioc);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_trc(ioc, 0);
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
u32 *msgp = (u32 *) ioc_msg;
u32 i;
bfa_trc(ioc, msgp[0]);
bfa_trc(ioc, len);
WARN_ON(len > BFI_IOC_MSGLEN_MAX);
/*
* first write msg to mailbox registers
*/
for (i = 0; i < len / sizeof(u32); i++)
writel(cpu_to_le32(msgp[i]),
ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
/*
* write 1 to mailbox CMD to trigger LPU event
*/
writel(1, ioc->ioc_regs.hfn_mbox_cmd);
(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
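/*
 * Send an IOC enable request to firmware, stamped with the current time.
 */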
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = cpu_to_be32(tv.tv_sec);
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s disable_req;
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_getattr_req_s attr_req;
bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
bfa_ioc_portid(ioc));
bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
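/*
 * Heartbeat timer callback. Firmware advances the heartbeat register
 * periodically; a stalled count indicates firmware failure and
 * triggers IOC recovery.
 */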
static void
bfa_ioc_hb_check(void *cbarg)
{
struct bfa_ioc_s *ioc = cbarg;
u32 hb_count;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
bfa_ioc_recover(ioc);
return;
}
ioc->hb_count = hb_count;
bfa_ioc_mbox_poll(ioc);
bfa_hb_timer_start(ioc);
}
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
bfa_hb_timer_start(ioc);
}
/*
* Initiate a full firmware download.
*/
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 boot_env)
{
u32 *fwimg;
u32 pgnum, pgoff;
u32 loff = 0;
u32 chunkno = 0;
u32 i;
u32 asicmode;
bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
/*
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
loff += sizeof(u32);
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
/*
* Set boot type and device mode at the end.
*/
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
swab32(asicmode));
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
swab32(boot_type));
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
swab32(boot_env));
}
/*
* Update BFA configuration from firmware configuration.
*/
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_attr_s *attr = ioc->attr;
attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
attr->card_type = be32_to_cpu(attr->card_type);
attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
* Attach time initialization of mbox logic.
*/
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
int mc;
INIT_LIST_HEAD(&mod->cmd_q);
for (mc = 0; mc < BFI_MC_MAX; mc++) {
mod->mbhdlr[mc].cbfn = NULL;
mod->mbhdlr[mc].cbarg = ioc->bfa;
}
}
/*
* Mbox poll timer -- restarts any pending mailbox requests.
*/
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
u32 stat;
/*
* If no command pending, do nothing
*/
if (list_empty(&mod->cmd_q))
return;
/*
* If previous command is not yet fetched by firmware, do nothing
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat)
return;
/*
* Enqueue command to firmware.
*/
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
* Cleanup any pending requests.
*/
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
while (!list_empty(&mod->cmd_q))
bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
* Read data from SMEM to host through PCI memmap
*
* @param[in] ioc memory for IOC
* @param[in] tbuf app memory to store data from smem
* @param[in] soff smem offset
* @param[in] sz size of smem in bytes
*/
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
u32 pgnum, loff;
__be32 r32;
int i, len;
u32 *buf = tbuf;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
loff = PSS_SMEM_PGOFF(soff);
bfa_trc(ioc, pgnum);
bfa_trc(ioc, loff);
bfa_trc(ioc, sz);
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
bfa_trc(ioc, 0);
return BFA_STATUS_FAILED;
}
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32);
bfa_trc(ioc, len);
for (i = 0; i < len; i++) {
r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
/*
* release semaphore.
*/
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
}
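/*
 * Note on the page-wrap arithmetic above: smem is accessed through a
 * sliding window selected via host_page_num_fn. PSS_SMEM_PGOFF() masks
 * loff to the window size, so when an access crosses a page boundary
 * loff wraps to 0 and pgnum is incremented to re-program the window
 * before the next word is transferred. The smem write loops in
 * bfa_ioc_download_fw() and bfa_ioc_smem_clr() use the same pattern.
 */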
/*
* Clear SMEM data from host through PCI memmap
*
* @param[in] ioc memory for IOC
* @param[in] soff smem offset
* @param[in] sz size of smem in bytes
*/
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
int i, len;
u32 pgnum, loff;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
loff = PSS_SMEM_PGOFF(soff);
bfa_trc(ioc, pgnum);
bfa_trc(ioc, loff);
bfa_trc(ioc, sz);
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
bfa_trc(ioc, 0);
return BFA_STATUS_FAILED;
}
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32); /* len in words */
bfa_trc(ioc, len);
for (i = 0; i < len; i++) {
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
/*
* release semaphore.
*/
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
}
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/*
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
bfa_ioc_debug_save_ftrc(ioc);
BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
"Heart Beat of IOC has failed\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/*
* Provide enable completion callback.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Running firmware version is incompatible "
"with the driver version\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
/*
* Hold semaphore so that nobody can access the chip during init.
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
bfa_ioc_pll_init_asic(ioc);
ioc->pllinit = BFA_TRUE;
/*
* Initialize LMEM
*/
bfa_ioc_lmem_init(ioc);
/*
* release semaphore.
*/
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return BFA_STATUS_OK;
}
/*
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
return;
/*
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
} else {
writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
}
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
bfa_ioc_lpu_start(ioc);
}
/*
* Enable/disable IOC failure auto recovery.
*/
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
bfa_auto_recover = auto_recover;
}
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
return ((r32 != BFI_IOC_UNINIT) &&
(r32 != BFI_IOC_INITING) &&
(r32 != BFI_IOC_MEMTEST));
}
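/*
 * Fetch a pending firmware-to-host mailbox message, if any.
 * Returns BFA_TRUE if a message was read and acknowledged.
 */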
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
__be32 *msgp = mbmsg;
u32 r32;
int i;
r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
if ((r32 & 1) == 0)
return BFA_FALSE;
/*
* read the MBOX msg
*/
for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
i++) {
r32 = readl(ioc->ioc_regs.lpu_mbox +
i * sizeof(u32));
msgp[i] = cpu_to_be32(r32);
}
/*
* turn off mailbox interrupt by clearing mailbox status
*/
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
readl(ioc->ioc_regs.lpu_mbox_cmd);
return BFA_TRUE;
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
union bfi_ioc_i2h_msg_u *msg;
struct bfa_iocpf_s *iocpf = &ioc->iocpf;
msg = (union bfi_ioc_i2h_msg_u *) m;
bfa_ioc_stats(ioc, ioc_isrs);
switch (msg->mh.msg_id) {
case BFI_IOC_I2H_HBEAT:
break;
case BFI_IOC_I2H_ENABLE_REPLY:
ioc->port_mode = ioc->port_mode_cfg =
(enum bfa_mode_s)msg->fw_event.port_mode;
ioc->ad_cap_bm = msg->fw_event.cap_bm;
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
break;
case BFI_IOC_I2H_DISABLE_REPLY:
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
break;
case BFI_IOC_I2H_GETATTR_REPLY:
bfa_ioc_getattr_reply(ioc);
break;
default:
bfa_trc(ioc, msg->mh.msg_id);
WARN_ON(1);
}
}
/*
* IOC attach time initialization and setup.
*
* @param[in] ioc memory for IOC
* @param[in] bfa driver instance structure
*/
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
struct bfa_timer_mod_s *timer_mod)
{
ioc->bfa = bfa;
ioc->cbfn = cbfn;
ioc->timer_mod = timer_mod;
ioc->fcmode = BFA_FALSE;
ioc->pllinit = BFA_FALSE;
ioc->dbg_fwsave_once = BFA_TRUE;
ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
INIT_LIST_HEAD(&ioc->notify_q);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
* Driver detach time IOC cleanup.
*/
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
bfa_fsm_send_event(ioc, IOC_E_DETACH);
INIT_LIST_HEAD(&ioc->notify_q);
}
/*
* Setup IOC PCI properties.
*
* @param[in] pcidev PCI device information for this IOC
*/
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
enum bfi_pcifn_class clscode)
{
ioc->clscode = clscode;
ioc->pcidev = *pcidev;
/*
* Initialize IOC and device personality
*/
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
ioc->asic_mode = BFI_ASIC_MODE_FC;
switch (pcidev->device_id) {
case BFA_PCI_DEVICE_ID_FC_8G1P:
case BFA_PCI_DEVICE_ID_FC_8G2P:
ioc->asic_gen = BFI_ASIC_GEN_CB;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
break;
case BFA_PCI_DEVICE_ID_CT:
ioc->asic_gen = BFI_ASIC_GEN_CT;
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
ioc->asic_mode = BFI_ASIC_MODE_ETH;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
ioc->ad_cap_bm = BFA_CM_CNA;
break;
case BFA_PCI_DEVICE_ID_CT_FC:
ioc->asic_gen = BFI_ASIC_GEN_CT;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
break;
case BFA_PCI_DEVICE_ID_CT2:
ioc->asic_gen = BFI_ASIC_GEN_CT2;
if (clscode == BFI_PCIFN_CLASS_FC &&
pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
ioc->asic_mode = BFI_ASIC_MODE_FC16;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
} else {
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
ioc->asic_mode = BFI_ASIC_MODE_ETH;
if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
ioc->port_mode =
ioc->port_mode_cfg = BFA_MODE_CNA;
ioc->ad_cap_bm = BFA_CM_CNA;
} else {
ioc->port_mode =
ioc->port_mode_cfg = BFA_MODE_NIC;
ioc->ad_cap_bm = BFA_CM_NIC;
}
}
break;
default:
WARN_ON(1);
}
/*
* Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
*/
if (ioc->asic_gen == BFI_ASIC_GEN_CB)
bfa_ioc_set_cb_hwif(ioc);
else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
bfa_ioc_set_ct_hwif(ioc);
else {
WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
bfa_ioc_set_ct2_hwif(ioc);
bfa_ioc_ct2_poweron(ioc);
}
bfa_ioc_map_port(ioc);
bfa_ioc_reg_init(ioc);
}
/*
* Initialize IOC dma memory
*
* @param[in] dm_kva kernel virtual address of IOC dma memory
* @param[in] dm_pa physical address of IOC dma memory
*/
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
/*
* dma memory for firmware attribute
*/
ioc->attr_dma.kva = dm_kva;
ioc->attr_dma.pa = dm_pa;
ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_enables);
ioc->dbg_fwsave_once = BFA_TRUE;
bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_disables);
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
* Initialize memory for saving firmware trace. Driver must initialize
* trace memory before calling bfa_ioc_enable().
*/
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
ioc->dbg_fwsave = dbg_fwsave;
ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/*
* Register mailbox message handler functions
*
* @param[in] ioc IOC instance
* @param[in] mcfuncs message class handler functions
*/
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
int mc;
for (mc = 0; mc < BFI_MC_MAX; mc++)
mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
/*
* Register mailbox message handler function, to be called by common modules
*/
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
mod->mbhdlr[mc].cbfn = cbfn;
mod->mbhdlr[mc].cbarg = cbarg;
}
/*
* Queue a mailbox command request to firmware. If the mailbox is busy,
* the command is queued and sent later by the poll timer.
* Responsibility of caller to serialize.
*
* @param[in] ioc IOC instance
* @param[in] cmd Mailbox command
*/
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
u32 stat;
/*
* If a previous command is pending, queue new command
*/
if (!list_empty(&mod->cmd_q)) {
list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
/*
* If mailbox is busy, queue command for poll timer
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat) {
list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
/*
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
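/*
 * Typical caller pattern (a sketch; see bfa_ioc_send_fwsync() below and
 * the bfa_ablk_*() requests for real examples). The command memory must
 * remain valid until the request is handed to firmware, since cmd->qe
 * may be linked into the pending queue:
 *
 *	struct bfi_ioc_ctrl_req_s *req =
 *		(struct bfi_ioc_ctrl_req_s *) cmd->msg;
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, <msg id>, bfa_ioc_portid(ioc));
 *	... fill request fields ...
 *	bfa_ioc_mbox_queue(ioc, cmd);
 */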
/*
* Handle mailbox interrupts
*/
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfi_mbmsg_s m;
int mc;
if (bfa_ioc_msgget(ioc, &m)) {
/*
* Treat IOC message class as special.
*/
mc = m.mh.msg_class;
if (mc == BFI_MC_IOC) {
bfa_ioc_isr(ioc, &m);
return;
}
if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
return;
mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
bfa_ioc_lpu_read_stat(ioc);
/*
* Try to send pending mailbox commands
*/
bfa_ioc_mbox_poll(ioc);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_hbfails);
ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
* return true if IOC is disabled
*/
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm) \
(((__sm) == BFI_IOC_UNINIT) || \
((__sm) == BFI_IOC_INITING) || \
((__sm) == BFI_IOC_HWINIT) || \
((__sm) == BFI_IOC_DISABLED) || \
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
/*
* Check if adapter is disabled -- both IOCs should be in a disabled
* state.
*/
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
u32 ioc_state;
if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
return BFA_FALSE;
ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
}
return BFA_TRUE;
}
/*
* Reset IOC fwstate registers.
*/
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
struct bfa_adapter_attr_s *ad_attr)
{
struct bfi_ioc_attr_s *ioc_attr;
ioc_attr = ioc->attr;
bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
memcpy(&ad_attr->vpd, &ioc_attr->vpd,
sizeof(struct bfa_mfg_vpd_s));
ad_attr->nports = bfa_ioc_get_nports(ioc);
ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
bfa_ioc_get_adapter_model(ioc, ad_attr->model);
/* For now, model descr uses same model string */
bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
ad_attr->card_type = ioc_attr->card_type;
ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
ad_attr->prototype = 1;
else
ad_attr->prototype = 0;
ad_attr->pwwn = ioc->attr->pwwn;
ad_attr->mac = bfa_ioc_get_mac(ioc);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
ad_attr->asic_rev = ioc_attr->asic_rev;
bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
ad_attr->trunk_capable = (ad_attr->nports > 1) &&
!bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
return BFA_IOC_TYPE_LL;
WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
memcpy((void *)serial_num,
(void *)ioc->attr->brcd_serialnum,
BFA_ADAPTER_SERIAL_NUM_LEN);
}
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
memset((void *)fw_ver, 0, BFA_VERSION_LEN);
memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
WARN_ON(!chip_rev);
memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
chip_rev[0] = 'R';
chip_rev[1] = 'e';
chip_rev[2] = 'v';
chip_rev[3] = '-';
chip_rev[4] = ioc->attr->asic_rev;
chip_rev[5] = '\0';
}
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
memcpy(optrom_ver, ioc->attr->optrom_version,
BFA_VERSION_LEN);
}
void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
struct bfi_ioc_attr_s *ioc_attr;
WARN_ON(!model);
memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
ioc_attr = ioc->attr;
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
BFA_MFG_NAME, ioc_attr->card_type);
}
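/*
 * Return the externally visible IOC state, refining the coarse IOC sm
 * state with the detailed IOCPF state where applicable.
 */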
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
enum bfa_iocpf_state iocpf_st;
enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
ioc_st = BFA_IOC_SEMWAIT;
break;
case BFA_IOCPF_HWINIT:
ioc_st = BFA_IOC_HWINIT;
break;
case BFA_IOCPF_FWMISMATCH:
ioc_st = BFA_IOC_FWMISMATCH;
break;
case BFA_IOCPF_FAIL:
ioc_st = BFA_IOC_FAIL;
break;
case BFA_IOCPF_INITFAIL:
ioc_st = BFA_IOC_INITFAIL;
break;
default:
break;
}
}
return ioc_st;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
ioc_attr->port_mode = ioc->port_mode;
ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
ioc_attr->cap_bm = ioc->ad_cap_bm;
ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
/*
* Check the IOC type and return the appropriate MAC
*/
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
return ioc->attr->fcoe_mac;
else
return ioc->attr->mac;
}
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
mac_t m;
m = ioc->attr->mfg_mac;
if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
else
bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
bfa_ioc_pcifn(ioc));
return m;
}
/*
* Send AEN notification
*/
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
struct bfa_aen_entry_s *aen_entry;
enum bfa_ioc_type_e ioc_type;
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
ioc_type = bfa_ioc_get_type(ioc);
switch (ioc_type) {
case BFA_IOC_TYPE_FC:
aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
break;
case BFA_IOC_TYPE_FCoE:
aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
break;
case BFA_IOC_TYPE_LL:
aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
break;
default:
WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
break;
}
/* Send the AEN notification */
aen_entry->aen_data.ioc.ioc_type = ioc_type;
bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
BFA_AEN_CAT_IOC, event);
}
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
int tlen;
if (ioc->dbg_fwsave_len == 0)
return BFA_STATUS_ENOFSAVE;
tlen = *trclen;
if (tlen > ioc->dbg_fwsave_len)
tlen = ioc->dbg_fwsave_len;
memcpy(trcdata, ioc->dbg_fwsave, tlen);
*trclen = tlen;
return BFA_STATUS_OK;
}
/*
* Read the current firmware trace from firmware smem.
*/
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
int tlen;
bfa_status_t status;
bfa_trc(ioc, *trclen);
tlen = *trclen;
if (tlen > BFA_DBG_FWTRC_LEN)
tlen = BFA_DBG_FWTRC_LEN;
status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
*trclen = tlen;
return status;
}
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
struct bfa_mbox_cmd_s cmd;
struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
bfa_ioc_portid(ioc));
req->clscode = cpu_to_be16(ioc->clscode);
bfa_ioc_mbox_queue(ioc, &cmd);
}
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
u32 fwsync_iter = 1000;
bfa_ioc_send_fwsync(ioc);
/*
* After sending a fw sync mbox command wait for it to
* take effect. We will not wait for a response because
* 1. fw_sync mbox cmd doesn't have a response.
* 2. Even if we implement that, interrupts might not
* be enabled when we call this function.
* So, just keep checking if any mbox cmd is pending, and
* after waiting for a reasonable amount of time, go ahead.
* It is possible that fw has crashed and the mbox command
* is never acknowledged.
*/
while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
fwsync_iter--;
}
/*
* Dump firmware smem
*/
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
u32 *offset, int *buflen)
{
u32 loff;
int dlen;
bfa_status_t status;
u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
if (*offset >= smem_len) {
*offset = *buflen = 0;
return BFA_STATUS_EINVAL;
}
loff = *offset;
dlen = *buflen;
/*
* First smem read, sync smem before proceeding
* No need to sync before reading every chunk.
*/
if (loff == 0)
bfa_ioc_fwsync(ioc);
if ((loff + dlen) >= smem_len)
dlen = smem_len - loff;
status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
if (status != BFA_STATUS_OK) {
*offset = *buflen = 0;
return status;
}
*offset += dlen;
if (*offset >= smem_len)
*offset = 0;
*buflen = dlen;
return status;
}
/*
* Firmware statistics
*/
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
u32 loff = BFI_IOC_FWSTATS_OFF +
BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
int tlen;
bfa_status_t status;
if (ioc->stats_busy) {
bfa_trc(ioc, ioc->stats_busy);
return BFA_STATUS_DEVBUSY;
}
ioc->stats_busy = BFA_TRUE;
tlen = sizeof(struct bfa_fw_stats_s);
status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
ioc->stats_busy = BFA_FALSE;
return status;
}
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
u32 loff = BFI_IOC_FWSTATS_OFF +
BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
int tlen;
bfa_status_t status;
if (ioc->stats_busy) {
bfa_trc(ioc, ioc->stats_busy);
return BFA_STATUS_DEVBUSY;
}
ioc->stats_busy = BFA_TRUE;
tlen = sizeof(struct bfa_fw_stats_s);
status = bfa_ioc_smem_clr(ioc, loff, tlen);
ioc->stats_busy = BFA_FALSE;
return status;
}
/*
* Save firmware trace if configured.
*/
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
int tlen;
if (ioc->dbg_fwsave_once) {
ioc->dbg_fwsave_once = BFA_FALSE;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
}
}
}
/*
* Firmware failure detected. Start recovery actions.
*/
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_hbfails);
ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/*
* BFA IOC PF private functions
*/
static void
bfa_iocpf_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_trc(ioc, 0);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_ioc_hw_sem_get(ioc);
}
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, fwstate);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
bfa_iocpf_timeout(ioc);
else {
ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
bfa_iocpf_poll_timer_start(ioc);
}
}
static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_ioc_poll_fwinit(ioc);
}
/*
* bfa timer function
*/
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
struct list_head *qh = &mod->timer_q;
struct list_head *qe, *qe_next;
struct bfa_timer_s *elem;
struct list_head timedout_q;
INIT_LIST_HEAD(&timedout_q);
qe = bfa_q_next(qh);
while (qe != qh) {
qe_next = bfa_q_next(qe);
elem = (struct bfa_timer_s *) qe;
if (elem->timeout <= BFA_TIMER_FREQ) {
elem->timeout = 0;
list_del(&elem->qe);
list_add_tail(&elem->qe, &timedout_q);
} else {
elem->timeout -= BFA_TIMER_FREQ;
}
qe = qe_next; /* go to next elem */
}
/*
* Pop all the timeout entries
*/
while (!list_empty(&timedout_q)) {
bfa_q_deq(&timedout_q, &elem);
elem->timercb(elem->arg);
}
}
/*
* Should be called with lock protection
*/
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
void (*timercb) (void *), void *arg, unsigned int timeout)
{
WARN_ON(timercb == NULL);
WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
timer->timeout = timeout;
timer->timercb = timercb;
timer->arg = arg;
list_add_tail(&timer->qe, &mod->timer_q);
}
/*
* Should be called with lock protection
*/
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
WARN_ON(list_empty(&timer->qe));
list_del(&timer->qe);
}
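/*
 * Usage sketch for the timer module (both calls below must run under
 * the lock that protects mod->timer_q; timeout is expressed in the
 * units that bfa_timer_beat() decrements, i.e. BFA_TIMER_FREQ per
 * beat). my_timeout_cb and my_arg are placeholder names for
 * illustration:
 *
 *	bfa_timer_begin(mod, &timer, my_timeout_cb, my_arg, timeout);
 *	...
 *	bfa_timer_stop(&timer);	-- cancel before my_timeout_cb() fires
 *
 * my_timeout_cb(my_arg) is invoked from bfa_timer_beat() once the
 * remaining timeout reaches zero.
 */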
/*
* ASIC block related
*/
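/*
 * Byte-swap an ASIC block configuration returned by firmware; all
 * multi-byte fields arrive in big-endian order.
 */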
static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
{
struct bfa_ablk_cfg_inst_s *cfg_inst;
int i, j;
u16 be16;
u32 be32;
for (i = 0; i < BFA_ABLK_MAX; i++) {
cfg_inst = &cfg->inst[i];
for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
be16 = cfg_inst->pf_cfg[j].pers;
cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
be16 = cfg_inst->pf_cfg[j].num_qpairs;
cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
be16 = cfg_inst->pf_cfg[j].num_vectors;
cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
be32 = cfg_inst->pf_cfg[j].bw;
cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
}
}
}
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
bfa_ablk_cbfn_t cbfn;
WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
bfa_trc(ablk->ioc, msg->mh.msg_id);
switch (msg->mh.msg_id) {
case BFI_ABLK_I2H_QUERY:
if (rsp->status == BFA_STATUS_OK) {
memcpy(ablk->cfg, ablk->dma_addr.kva,
sizeof(struct bfa_ablk_cfg_s));
bfa_ablk_config_swap(ablk->cfg);
ablk->cfg = NULL;
}
break;
case BFI_ABLK_I2H_ADPT_CONFIG:
case BFI_ABLK_I2H_PORT_CONFIG:
/* update config port mode */
ablk->ioc->port_mode_cfg = rsp->port_mode;
/* !!! fall through !!! */
case BFI_ABLK_I2H_PF_DELETE:
case BFI_ABLK_I2H_PF_UPDATE:
case BFI_ABLK_I2H_OPTROM_ENABLE:
case BFI_ABLK_I2H_OPTROM_DISABLE:
/* No-op */
break;
case BFI_ABLK_I2H_PF_CREATE:
*(ablk->pcifn) = rsp->pcifn;
ablk->pcifn = NULL;
break;
default:
WARN_ON(1);
}
ablk->busy = BFA_FALSE;
if (ablk->cbfn) {
cbfn = ablk->cbfn;
ablk->cbfn = NULL;
cbfn(ablk->cbarg, rsp->status);
}
}
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
bfa_trc(ablk->ioc, event);
switch (event) {
case BFA_IOC_E_ENABLED:
WARN_ON(ablk->busy != BFA_FALSE);
break;
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
/* Fail any pending requests */
ablk->pcifn = NULL;
if (ablk->busy) {
if (ablk->cbfn)
ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
ablk->cbfn = NULL;
ablk->busy = BFA_FALSE;
}
break;
default:
WARN_ON(1);
break;
}
}
u32
bfa_ablk_meminfo(void)
{
return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}
void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
ablk->dma_addr.kva = dma_kva;
ablk->dma_addr.pa = dma_pa;
}
void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
ablk->ioc = ioc;
bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
bfa_q_qe_init(&ablk->ioc_notify);
bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_query_s *m;
WARN_ON(!ablk_cfg);
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cfg = ablk_cfg;
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
bfa_ioc_portid(ablk->ioc));
bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
u8 port, enum bfi_pcifn_class personality, int bw,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_pf_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->pcifn = pcifn;
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
bfa_ioc_portid(ablk->ioc));
m->pers = cpu_to_be16((u16)personality);
m->bw = cpu_to_be32(bw);
m->port = port;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_pf_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
bfa_ioc_portid(ablk->ioc));
m->pcifn = (u8)pcifn;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_cfg_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
bfa_ioc_portid(ablk->ioc));
m->mode = (u8)mode;
m->max_pf = (u8)max_pf;
m->max_vf = (u8)max_vf;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_cfg_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
bfa_ioc_portid(ablk->ioc));
m->port = (u8)port;
m->mode = (u8)mode;
m->max_pf = (u8)max_pf;
m->max_vf = (u8)max_vf;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_pf_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
bfa_ioc_portid(ablk->ioc));
m->pcifn = (u8)pcifn;
m->bw = cpu_to_be32(bw);
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_optrom_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
bfa_ioc_portid(ablk->ioc));
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_optrom_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
bfa_ioc_portid(ablk->ioc));
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
/*
* SFP module specific
*/
/* forward declarations */
static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
enum bfa_port_speed portspeed);
static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
bfa_trc(sfp, sfp->lock);
if (sfp->cbfn)
sfp->cbfn(sfp->cbarg, sfp->status);
sfp->lock = 0;
sfp->cbfn = NULL;
}
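/*
 * Complete a pending state query: one firmware response may satisfy
 * both an outstanding media lookup and a pending speed validation.
 */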
static void
bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
{
bfa_trc(sfp, sfp->portspeed);
if (sfp->media) {
bfa_sfp_media_get(sfp);
if (sfp->state_query_cbfn)
sfp->state_query_cbfn(sfp->state_query_cbarg,
sfp->status);
sfp->media = NULL;
}
if (sfp->portspeed) {
sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
if (sfp->state_query_cbfn)
sfp->state_query_cbfn(sfp->state_query_cbarg,
sfp->status);
sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
}
sfp->state_query_lock = 0;
sfp->state_query_cbfn = NULL;
}
/*
* IOC event handler.
*/
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
struct bfa_sfp_s *sfp = sfp_arg;
bfa_trc(sfp, event);
bfa_trc(sfp, sfp->lock);
bfa_trc(sfp, sfp->state_query_lock);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (sfp->lock) {
sfp->status = BFA_STATUS_IOC_FAILURE;
bfa_cb_sfp_show(sfp);
}
if (sfp->state_query_lock) {
sfp->status = BFA_STATUS_IOC_FAILURE;
bfa_cb_sfp_state_query(sfp);
}
break;
default:
break;
}
}
/*
* SFP's State Change Notification post to AEN
*/
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
struct bfa_aen_entry_s *aen_entry;
enum bfa_port_aen_event aen_evt = 0;
bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
((u64)rsp->event));
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
switch (rsp->event) {
case BFA_SFP_SCN_INSERTED:
aen_evt = BFA_PORT_AEN_SFP_INSERT;
break;
case BFA_SFP_SCN_REMOVED:
aen_evt = BFA_PORT_AEN_SFP_REMOVE;
break;
case BFA_SFP_SCN_FAILED:
aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
break;
case BFA_SFP_SCN_UNSUPPORT:
aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
break;
case BFA_SFP_SCN_POM:
aen_evt = BFA_PORT_AEN_SFP_POM;
aen_entry->aen_data.port.level = rsp->pomlvl;
break;
default:
bfa_trc(sfp, rsp->event);
WARN_ON(1);
}
/* Send the AEN notification */
bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
BFA_AEN_CAT_PORT, aen_evt);
}
/*
* SFP get data send
*/
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
bfa_trc(sfp, req->memtype);
/* build host command */
bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
bfa_ioc_portid(sfp->ioc));
/* send mbox cmd */
bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}
/*
* SFP is valid, read sfp data
*/
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
WARN_ON(sfp->lock != 0);
bfa_trc(sfp, sfp->state);
sfp->lock = 1;
sfp->memtype = memtype;
req->memtype = memtype;
/* Setup SG list */
bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
bfa_sfp_getdata_send(sfp);
}
/*
* SFP scn handler
*/
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
switch (rsp->event) {
case BFA_SFP_SCN_INSERTED:
sfp->state = BFA_SFP_STATE_INSERTED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_REMOVED:
sfp->state = BFA_SFP_STATE_REMOVED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_FAILED:
sfp->state = BFA_SFP_STATE_FAILED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_UNSUPPORT:
sfp->state = BFA_SFP_STATE_UNSUPPORT;
bfa_sfp_scn_aen_post(sfp, rsp);
if (!sfp->lock)
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
break;
case BFA_SFP_SCN_POM:
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_VALID:
sfp->state = BFA_SFP_STATE_VALID;
if (!sfp->lock)
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
break;
default:
bfa_trc(sfp, rsp->event);
WARN_ON(1);
}
}
/*
* SFP show complete
*/
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
if (!sfp->lock) {
/*
* receiving response after ioc failure
*/
bfa_trc(sfp, sfp->lock);
return;
}
bfa_trc(sfp, rsp->status);
if (rsp->status == BFA_STATUS_OK) {
sfp->data_valid = 1;
if (sfp->state == BFA_SFP_STATE_VALID)
sfp->status = BFA_STATUS_OK;
else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
sfp->status = BFA_STATUS_SFP_UNSUPP;
else
bfa_trc(sfp, sfp->state);
} else {
sfp->data_valid = 0;
sfp->status = rsp->status;
/* sfpshow shouldn't change sfp state */
}
bfa_trc(sfp, sfp->memtype);
if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
bfa_trc(sfp, sfp->data_valid);
if (sfp->data_valid) {
u32 size = sizeof(struct sfp_mem_s);
u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
memcpy(des, sfp->dbuf_kva, size);
}
/*
* Queue completion callback.
*/
bfa_cb_sfp_show(sfp);
} else
sfp->lock = 0;
bfa_trc(sfp, sfp->state_query_lock);
if (sfp->state_query_lock) {
sfp->state = rsp->state;
/* Complete callback */
bfa_cb_sfp_state_query(sfp);
}
}
/*
* SFP query fw sfp state
*/
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
/* Should not be doing query if not in _INIT state */
WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
WARN_ON(sfp->state_query_lock != 0);
bfa_trc(sfp, sfp->state);
sfp->state_query_lock = 1;
req->memtype = 0;
if (!sfp->lock)
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
enum bfa_defs_sfp_media_e *media = sfp->media;
*media = BFA_SFP_MEDIA_UNKNOWN;
if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
*media = BFA_SFP_MEDIA_UNSUPPORT;
else if (sfp->state == BFA_SFP_STATE_VALID) {
union sfp_xcvr_e10g_code_u e10g;
struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
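/*
 * The transmitter technology field straddles two transceiver code
 * bytes of the serial ID; reassemble it into one 9-bit value (bit
 * layout assumed to follow the SFF transceiver code definitions).
 */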
u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
(sfpmem->srlid_base.xcvr[5] >> 1);
e10g.b = sfpmem->srlid_base.xcvr[0];
bfa_trc(sfp, e10g.b);
bfa_trc(sfp, xmtr_tech);
/* check fc transmitter tech */
if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
(xmtr_tech & SFP_XMTR_TECH_CP) ||
(xmtr_tech & SFP_XMTR_TECH_CA))
*media = BFA_SFP_MEDIA_CU;
else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
(xmtr_tech & SFP_XMTR_TECH_EL_INTER))
*media = BFA_SFP_MEDIA_EL;
else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
(xmtr_tech & SFP_XMTR_TECH_LC))
*media = BFA_SFP_MEDIA_LW;
else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
(xmtr_tech & SFP_XMTR_TECH_SN) ||
(xmtr_tech & SFP_XMTR_TECH_SA))
*media = BFA_SFP_MEDIA_SW;
/* Check 10G Ethernet Compliance code */
else if (e10g.r.e10g_sr)
*media = BFA_SFP_MEDIA_SW;
else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
*media = BFA_SFP_MEDIA_LW;
else if (e10g.r.e10g_unall)
*media = BFA_SFP_MEDIA_UNKNOWN;
else
bfa_trc(sfp, 0);
} else
bfa_trc(sfp, sfp->state);
}
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
if (portspeed == BFA_PORT_SPEED_10GBPS) {
if (e10g.r.e10g_sr || e10g.r.e10g_lr)
return BFA_STATUS_OK;
else {
bfa_trc(sfp, e10g.b);
return BFA_STATUS_UNSUPP_SPEED;
}
}
if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
return BFA_STATUS_OK;
else {
bfa_trc(sfp, portspeed);
bfa_trc(sfp, fc3.b);
bfa_trc(sfp, e10g.b);
return BFA_STATUS_UNSUPP_SPEED;
}
}
/*
* SFP hmbox handler
*/
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
struct bfa_sfp_s *sfp = sfparg;
switch (msg->mh.msg_id) {
case BFI_SFP_I2H_SHOW:
bfa_sfp_show_comp(sfp, msg);
break;
case BFI_SFP_I2H_SCN:
bfa_sfp_scn(sfp, msg);
break;
default:
bfa_trc(sfp, msg->mh.msg_id);
WARN_ON(1);
}
}
/*
* Return DMA memory needed by sfp module.
*/
u32
bfa_sfp_meminfo(void)
{
return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
* Attach virtual and physical memory for SFP.
*/
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod)
{
sfp->dev = dev;
sfp->ioc = ioc;
sfp->trcmod = trcmod;
sfp->cbfn = NULL;
sfp->cbarg = NULL;
sfp->sfpmem = NULL;
sfp->lock = 0;
sfp->data_valid = 0;
sfp->state = BFA_SFP_STATE_INIT;
sfp->state_query_lock = 0;
sfp->state_query_cbfn = NULL;
sfp->state_query_cbarg = NULL;
sfp->media = NULL;
sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
sfp->is_elb = BFA_FALSE;
bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
bfa_q_qe_init(&sfp->ioc_notify);
bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}
/*
* Claim Memory for SFP
*/
void
bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
{
sfp->dbuf_kva = dm_kva;
sfp->dbuf_pa = dm_pa;
memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
* Show SFP eeprom content
*
* @param[in] sfp - bfa sfp module
*
* @param[out] sfpmem - sfp eeprom data
*
*/
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
bfa_cb_sfp_t cbfn, void *cbarg)
{
if (!bfa_ioc_is_operational(sfp->ioc)) {
bfa_trc(sfp, 0);
return BFA_STATUS_IOC_NON_OP;
}
if (sfp->lock) {
bfa_trc(sfp, 0);
return BFA_STATUS_DEVBUSY;
}
sfp->cbfn = cbfn;
sfp->cbarg = cbarg;
sfp->sfpmem = sfpmem;
bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
return BFA_STATUS_OK;
}
/*
* Return SFP Media type
*
* @param[in] sfp - bfa sfp module
*
* @param[out] media - SFP media type
*
*/
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
bfa_cb_sfp_t cbfn, void *cbarg)
{
if (!bfa_ioc_is_operational(sfp->ioc)) {
bfa_trc(sfp, 0);
return BFA_STATUS_IOC_NON_OP;
}
sfp->media = media;
if (sfp->state == BFA_SFP_STATE_INIT) {
if (sfp->state_query_lock) {
bfa_trc(sfp, 0);
return BFA_STATUS_DEVBUSY;
} else {
sfp->state_query_cbfn = cbfn;
sfp->state_query_cbarg = cbarg;
bfa_sfp_state_query(sfp);
return BFA_STATUS_SFP_NOT_READY;
}
}
bfa_sfp_media_get(sfp);
return BFA_STATUS_OK;
}
/*
* Check if user set port speed is allowed by the SFP
*
* @param[in] sfp - bfa sfp module
* @param[in] portspeed - port speed from user
*
*/
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
bfa_cb_sfp_t cbfn, void *cbarg)
{
WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
if (!bfa_ioc_is_operational(sfp->ioc))
return BFA_STATUS_IOC_NON_OP;
/* For Mezz cards, all speeds are allowed */
if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
return BFA_STATUS_OK;
/* Check SFP state */
sfp->portspeed = portspeed;
if (sfp->state == BFA_SFP_STATE_INIT) {
if (sfp->state_query_lock) {
bfa_trc(sfp, 0);
return BFA_STATUS_DEVBUSY;
} else {
sfp->state_query_cbfn = cbfn;
sfp->state_query_cbarg = cbarg;
bfa_sfp_state_query(sfp);
return BFA_STATUS_SFP_NOT_READY;
}
}
if (sfp->state == BFA_SFP_STATE_REMOVED ||
sfp->state == BFA_SFP_STATE_FAILED) {
bfa_trc(sfp, sfp->state);
return BFA_STATUS_NO_SFP_DEV;
}
if (sfp->state == BFA_SFP_STATE_INSERTED) {
bfa_trc(sfp, sfp->state);
return BFA_STATUS_DEVBUSY; /* sfp is reading data */
}
/* For eloopback, all speeds are allowed */
if (sfp->is_elb)
return BFA_STATUS_OK;
return bfa_sfp_speed_valid(sfp, portspeed);
}
/*
* Flash module specific
*/
/*
* FLASH DMA buffer should be big enough to hold both MFG block and
* asic block (64k) at the same time, and should also be 2k aligned so
* that a write segment does not cross a sector boundary.
*/
#define BFA_FLASH_SEG_SZ 2048
#define BFA_FLASH_DMA_BUF_SZ \
BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
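/*
 * Worked size: 0x10000 (64k) for the asic block plus
 * sizeof(struct bfa_mfg_block_s), rounded up to the next multiple of
 * BFA_FLASH_SEG_SZ (2k).
 */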
static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
int inst, int type)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
struct bfa_aen_entry_s *aen_entry;
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
aen_entry->aen_data.audit.partition_inst = inst;
aen_entry->aen_data.audit.partition_type = type;
/* Send the AEN notification */
bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
BFA_AEN_CAT_AUDIT, event);
}
static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
flash->op_busy = 0;
if (flash->cbfn)
flash->cbfn(flash->cbarg, flash->status);
}
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
struct bfa_flash_s *flash = cbarg;
bfa_trc(flash, event);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (flash->op_busy) {
flash->status = BFA_STATUS_IOC_FAILURE;
flash->cbfn(flash->cbarg, flash->status);
flash->op_busy = 0;
}
break;
default:
break;
}
}
/*
* Send flash attribute query request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_flash_query_send(void *cbarg)
{
struct bfa_flash_s *flash = cbarg;
struct bfi_flash_query_req_s *msg =
(struct bfi_flash_query_req_s *) flash->mb.msg;
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
bfa_ioc_portid(flash->ioc));
bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
flash->dbuf_pa);
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Send flash write request.
*
* @param[in] flash - flash structure
*/
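/*
 * Writes larger than BFA_FLASH_DMA_BUF_SZ go out in chunks: each
 * request stages the next chunk in the DMA buffer and advances
 * offset/residue, and the WRITE_RSP handler re-invokes this routine
 * until residue reaches zero. msg->last flags the final chunk.
 */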
static void
bfa_flash_write_send(struct bfa_flash_s *flash)
{
struct bfi_flash_write_req_s *msg =
(struct bfi_flash_write_req_s *) flash->mb.msg;
u32 len;
msg->type = be32_to_cpu(flash->type);
msg->instance = flash->instance;
msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
flash->residue : BFA_FLASH_DMA_BUF_SZ;
msg->length = be32_to_cpu(len);
/* indicate if it's the last msg of the whole write operation */
msg->last = (len == flash->residue) ? 1 : 0;
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
bfa_ioc_portid(flash->ioc));
bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
flash->residue -= len;
flash->offset += len;
}
/*
* Send flash read request.
*
* @param[in] cbarg - callback argument
*/
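/*
 * Reads are chunked the same way as writes: the READ_RSP handler
 * copies each chunk out of the DMA buffer and resends the request
 * until the full length has been transferred.
 */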
static void
bfa_flash_read_send(void *cbarg)
{
struct bfa_flash_s *flash = cbarg;
struct bfi_flash_read_req_s *msg =
(struct bfi_flash_read_req_s *) flash->mb.msg;
u32 len;
msg->type = be32_to_cpu(flash->type);
msg->instance = flash->instance;
msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
flash->residue : BFA_FLASH_DMA_BUF_SZ;
msg->length = be32_to_cpu(len);
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
bfa_ioc_portid(flash->ioc));
bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Send flash erase request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_flash_erase_send(void *cbarg)
{
struct bfa_flash_s *flash = cbarg;
struct bfi_flash_erase_req_s *msg =
(struct bfi_flash_erase_req_s *) flash->mb.msg;
msg->type = be32_to_cpu(flash->type);
msg->instance = flash->instance;
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
bfa_ioc_portid(flash->ioc));
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Process flash response messages upon receiving interrupts.
*
* @param[in] flasharg - flash structure
* @param[in] msg - message structure
*/
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
struct bfa_flash_s *flash = flasharg;
u32 status;
union {
struct bfi_flash_query_rsp_s *query;
struct bfi_flash_erase_rsp_s *erase;
struct bfi_flash_write_rsp_s *write;
struct bfi_flash_read_rsp_s *read;
struct bfi_flash_event_s *event;
struct bfi_mbmsg_s *msg;
} m;
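/*
 * m overlays the raw mailbox message with each possible response
 * layout; only the member matching msg_id is read.
 */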
m.msg = msg;
bfa_trc(flash, msg->mh.msg_id);
if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
/* receiving response after ioc failure */
bfa_trc(flash, 0x9999);
return;
}
switch (msg->mh.msg_id) {
case BFI_FLASH_I2H_QUERY_RSP:
status = be32_to_cpu(m.query->status);
bfa_trc(flash, status);
if (status == BFA_STATUS_OK) {
u32 i;
struct bfa_flash_attr_s *attr, *f;
attr = (struct bfa_flash_attr_s *) flash->ubuf;
f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
attr->status = be32_to_cpu(f->status);
attr->npart = be32_to_cpu(f->npart);
bfa_trc(flash, attr->status);
bfa_trc(flash, attr->npart);
for (i = 0; i < attr->npart; i++) {
attr->part[i].part_type =
be32_to_cpu(f->part[i].part_type);
attr->part[i].part_instance =
be32_to_cpu(f->part[i].part_instance);
attr->part[i].part_off =
be32_to_cpu(f->part[i].part_off);
attr->part[i].part_size =
be32_to_cpu(f->part[i].part_size);
attr->part[i].part_len =
be32_to_cpu(f->part[i].part_len);
attr->part[i].part_status =
be32_to_cpu(f->part[i].part_status);
}
}
flash->status = status;
bfa_flash_cb(flash);
break;
case BFI_FLASH_I2H_ERASE_RSP:
status = be32_to_cpu(m.erase->status);
bfa_trc(flash, status);
flash->status = status;
bfa_flash_cb(flash);
break;
case BFI_FLASH_I2H_WRITE_RSP:
status = be32_to_cpu(m.write->status);
bfa_trc(flash, status);
if (status != BFA_STATUS_OK || flash->residue == 0) {
flash->status = status;
bfa_flash_cb(flash);
} else {
bfa_trc(flash, flash->offset);
bfa_flash_write_send(flash);
}
break;
case BFI_FLASH_I2H_READ_RSP:
status = be32_to_cpu(m.read->status);
bfa_trc(flash, status);
if (status != BFA_STATUS_OK) {
flash->status = status;
bfa_flash_cb(flash);
} else {
u32 len = be32_to_cpu(m.read->length);
bfa_trc(flash, flash->offset);
bfa_trc(flash, len);
memcpy(flash->ubuf + flash->offset,
flash->dbuf_kva, len);
flash->residue -= len;
flash->offset += len;
if (flash->residue == 0) {
flash->status = status;
bfa_flash_cb(flash);
} else
bfa_flash_read_send(flash);
}
break;
case BFI_FLASH_I2H_BOOT_VER_RSP:
break;
case BFI_FLASH_I2H_EVENT:
status = be32_to_cpu(m.event->status);
bfa_trc(flash, status);
if (status == BFA_STATUS_BAD_FWCFG)
bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
else if (status == BFA_STATUS_INVALID_VENDOR) {
u32 param;
param = be32_to_cpu(m.event->param);
bfa_trc(flash, param);
bfa_ioc_aen_post(flash->ioc,
BFA_IOC_AEN_INVALID_VENDOR);
}
break;
default:
WARN_ON(1);
}
}
/*
* Flash memory info API.
*
* @param[in] mincfg - minimal cfg variable
*/
u32
bfa_flash_meminfo(bfa_boolean_t mincfg)
{
/* min driver doesn't need flash */
if (mincfg)
return 0;
return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Flash attach API.
*
* @param[in] flash - flash structure
* @param[in] ioc - ioc structure
* @param[in] dev - device structure
* @param[in] trcmod - trace module
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
flash->ioc = ioc;
flash->trcmod = trcmod;
flash->cbfn = NULL;
flash->cbarg = NULL;
flash->op_busy = 0;
bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
bfa_q_qe_init(&flash->ioc_notify);
bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
/* min driver doesn't need flash */
if (mincfg) {
flash->dbuf_kva = NULL;
flash->dbuf_pa = 0;
}
}
/*
* Claim memory for flash
*
* @param[in] flash - flash structure
* @param[in] dm_kva - pointer to virtual memory address
* @param[in] dm_pa - physical memory address
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
bfa_boolean_t mincfg)
{
if (mincfg)
return;
flash->dbuf_kva = dm_kva;
flash->dbuf_pa = dm_pa;
memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Get flash attribute.
*
* @param[in] flash - flash structure
* @param[in] attr - flash attribute structure
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->ubuf = (u8 *) attr;
bfa_flash_query_send(flash);
return BFA_STATUS_OK;
}
/*
* Erase flash partition.
*
* @param[in] flash - flash structure
* @param[in] type - flash partition type
* @param[in] instance - flash partition instance
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
bfa_trc(flash, type);
bfa_trc(flash, instance);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->type = type;
flash->instance = instance;
bfa_flash_erase_send(flash);
bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
instance, type);
return BFA_STATUS_OK;
}
/*
* Update flash partition.
*
* @param[in] flash - flash structure
* @param[in] type - flash partition type
* @param[in] instance - flash partition instance
* @param[in] buf - update data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to the partition starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
u8 instance, void *buf, u32 len, u32 offset,
bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
bfa_trc(flash, type);
bfa_trc(flash, instance);
bfa_trc(flash, len);
bfa_trc(flash, offset);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
/*
* 'len' must be on a word (4-byte) boundary
* 'offset' must be on a sector (16kb) boundary
*/
if (!len || (len & 0x03) || (offset & 0x00003FFF))
return BFA_STATUS_FLASH_BAD_LEN;
if (type == BFA_FLASH_PART_MFG)
return BFA_STATUS_EINVAL;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->type = type;
flash->instance = instance;
flash->residue = len;
flash->offset = 0;
flash->addr_off = offset;
flash->ubuf = buf;
bfa_flash_write_send(flash);
return BFA_STATUS_OK;
}
/*
* Read flash partition.
*
* @param[in] flash - flash structure
* @param[in] type - flash partition type
* @param[in] instance - flash partition instance
* @param[in] buf - read data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to the partition starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
u8 instance, void *buf, u32 len, u32 offset,
bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
bfa_trc(flash, type);
bfa_trc(flash, instance);
bfa_trc(flash, len);
bfa_trc(flash, offset);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
/*
* 'len' must be on a word (4-byte) boundary
* 'offset' must be on a sector (16kb) boundary
*/
if (!len || (len & 0x03) || (offset & 0x00003FFF))
return BFA_STATUS_FLASH_BAD_LEN;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->type = type;
flash->instance = instance;
flash->residue = len;
flash->offset = 0;
flash->addr_off = offset;
flash->ubuf = buf;
bfa_flash_read_send(flash);
return BFA_STATUS_OK;
}
/*
* DIAG module specific
*/
#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
/* IOC event handler */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
struct bfa_diag_s *diag = diag_arg;
bfa_trc(diag, event);
bfa_trc(diag, diag->block);
bfa_trc(diag, diag->fwping.lock);
bfa_trc(diag, diag->tsensor.lock);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (diag->fwping.lock) {
diag->fwping.status = BFA_STATUS_IOC_FAILURE;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
}
if (diag->tsensor.lock) {
diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
diag->tsensor.cbfn(diag->tsensor.cbarg,
diag->tsensor.status);
diag->tsensor.lock = 0;
}
if (diag->block) {
if (diag->timer_active) {
bfa_timer_stop(&diag->timer);
diag->timer_active = 0;
}
diag->status = BFA_STATUS_IOC_FAILURE;
diag->cbfn(diag->cbarg, diag->status);
diag->block = 0;
}
break;
default:
break;
}
}
static void
bfa_diag_memtest_done(void *cbarg)
{
struct bfa_diag_s *diag = cbarg;
struct bfa_ioc_s *ioc = diag->ioc;
struct bfa_diag_memtest_result *res = diag->result;
u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
u32 pgnum, pgoff, i;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
sizeof(u32)); i++) {
/* read test result from smem */
*((u32 *) res + i) =
bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
loff += sizeof(u32);
}
/* Reset IOC fwstates to BFI_IOC_UNINIT */
bfa_ioc_reset_fwstate(ioc);
res->status = swab32(res->status);
bfa_trc(diag, res->status);
if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
diag->status = BFA_STATUS_OK;
else {
diag->status = BFA_STATUS_MEMTEST_FAILED;
res->addr = swab32(res->addr);
res->exp = swab32(res->exp);
res->act = swab32(res->act);
res->err_status = swab32(res->err_status);
res->err_status1 = swab32(res->err_status1);
res->err_addr = swab32(res->err_addr);
bfa_trc(diag, res->addr);
bfa_trc(diag, res->exp);
bfa_trc(diag, res->act);
bfa_trc(diag, res->err_status);
bfa_trc(diag, res->err_status1);
bfa_trc(diag, res->err_addr);
}
diag->timer_active = 0;
diag->cbfn(diag->cbarg, diag->status);
diag->block = 0;
}
/*
* Firmware ping
*/
/*
* Perform DMA test directly
*/
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
struct bfi_diag_fwping_req_s *fwping_req;
u32 i;
bfa_trc(diag, diag->fwping.dbuf_pa);
/* fill DMA area with pattern */
for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
/* Fill mbox msg */
fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
/* Setup SG list */
bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
diag->fwping.dbuf_pa);
/* Set up dma count */
fwping_req->count = cpu_to_be32(diag->fwping.count);
/* Set up data pattern */
fwping_req->data = diag->fwping.data;
/* build host command */
bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
bfa_ioc_portid(diag->ioc));
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
static void
diag_fwping_comp(struct bfa_diag_s *diag,
struct bfi_diag_fwping_rsp_s *diag_rsp)
{
u32 rsp_data = diag_rsp->data;
u8 rsp_dma_status = diag_rsp->dma_status;
bfa_trc(diag, rsp_data);
bfa_trc(diag, rsp_dma_status);
if (rsp_dma_status == BFA_STATUS_OK) {
u32 i, pat;
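/*
 * Firmware appears to invert the data pattern on every ping loop,
 * so after an odd count the DMA buffer should hold the complement.
 */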
pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
diag->fwping.data;
/* Check mbox data */
if (diag->fwping.data != rsp_data) {
bfa_trc(diag, rsp_data);
diag->fwping.result->dmastatus =
BFA_STATUS_DATACORRUPTED;
diag->fwping.status = BFA_STATUS_DATACORRUPTED;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
return;
}
/* Check dma pattern */
for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
bfa_trc(diag, i);
bfa_trc(diag, pat);
bfa_trc(diag,
*((u32 *)diag->fwping.dbuf_kva + i));
diag->fwping.result->dmastatus =
BFA_STATUS_DATACORRUPTED;
diag->fwping.status = BFA_STATUS_DATACORRUPTED;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
return;
}
}
diag->fwping.result->dmastatus = BFA_STATUS_OK;
diag->fwping.status = BFA_STATUS_OK;
diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
diag->fwping.lock = 0;
} else {
diag->fwping.status = BFA_STATUS_HDMA_FAILED;
diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
diag->fwping.lock = 0;
}
}
/*
* Temperature Sensor
*/
static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
struct bfi_diag_ts_req_s *msg;
msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
bfa_trc(diag, msg->temp);
/* build host command */
bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
bfa_ioc_portid(diag->ioc));
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
if (!diag->tsensor.lock) {
/* receiving response after ioc failure */
bfa_trc(diag, diag->tsensor.lock);
return;
}
/*
* The ASIC junction tempsensor is a register read operation;
* it will always return OK
*/
diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
diag->tsensor.temp->ts_junc = rsp->ts_junc;
diag->tsensor.temp->ts_brd = rsp->ts_brd;
diag->tsensor.temp->status = BFA_STATUS_OK;
if (rsp->ts_brd) {
if (rsp->status == BFA_STATUS_OK) {
diag->tsensor.temp->brd_temp =
be16_to_cpu(rsp->brd_temp);
} else {
bfa_trc(diag, rsp->status);
diag->tsensor.temp->brd_temp = 0;
diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
}
}
bfa_trc(diag, rsp->ts_junc);
bfa_trc(diag, rsp->temp);
bfa_trc(diag, rsp->ts_brd);
bfa_trc(diag, rsp->brd_temp);
diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
diag->tsensor.lock = 0;
}
/*
* LED Test command
*/
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
struct bfi_diag_ledtest_req_s *msg;
msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
/* build host command */
bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
bfa_ioc_portid(diag->ioc));
/*
* convert the freq from N blinks per 10 sec to
* crossbow ontime value. We do it here because division is needed
*/
if (ledtest->freq)
ledtest->freq = 500 / ledtest->freq;
if (ledtest->freq == 0)
ledtest->freq = 1;
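/* e.g. 50 blinks per 10 sec maps to an ontime of 500/50 = 10; 0 is clamped to 1 */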
bfa_trc(diag, ledtest->freq);
msg->cmd = (u8) ledtest->cmd;
msg->color = (u8) ledtest->color;
msg->portid = bfa_ioc_portid(diag->ioc);
msg->led = ledtest->led;
msg->freq = cpu_to_be16(ledtest->freq);
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}
static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
bfa_trc(diag, diag->ledtest.lock);
diag->ledtest.lock = BFA_FALSE;
/* no bfa_cb_queue is needed because driver is not waiting */
}
/*
* Port beaconing
*/
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
struct bfi_diag_portbeacon_req_s *msg;
msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
/* build host command */
bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
bfa_ioc_portid(diag->ioc));
msg->beacon = beacon;
msg->period = cpu_to_be32(sec);
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}
static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
bfa_trc(diag, diag->beacon.state);
diag->beacon.state = BFA_FALSE;
if (diag->cbfn_beacon)
diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}
/*
* Diag hmbox handler
*/
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
struct bfa_diag_s *diag = diagarg;
switch (msg->mh.msg_id) {
case BFI_DIAG_I2H_PORTBEACON:
diag_portbeacon_comp(diag);
break;
case BFI_DIAG_I2H_FWPING:
diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
break;
case BFI_DIAG_I2H_TEMPSENSOR:
diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
break;
case BFI_DIAG_I2H_LEDTEST:
diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
break;
default:
bfa_trc(diag, msg->mh.msg_id);
WARN_ON(1);
}
}
/*
* Gen RAM Test
*
* @param[in] *diag - diag data struct
* @param[in] *memtest - mem test params input from upper layer
* @param[in] pattern - mem test pattern
* @param[in] *result - mem test result
* @param[in] cbfn - mem test callback function
* @param[in] cbarg - callback function arg
*
* @param[out]
*/
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
u32 pattern, struct bfa_diag_memtest_result *result,
bfa_cb_diag_t cbfn, void *cbarg)
{
u32 memtest_tov;
bfa_trc(diag, pattern);
if (!bfa_ioc_adapter_is_disabled(diag->ioc))
return BFA_STATUS_ADAPTER_ENABLED;
/* check to see if there is another destructive diag cmd running */
if (diag->block) {
bfa_trc(diag, diag->block);
return BFA_STATUS_DEVBUSY;
} else
diag->block = 1;
diag->result = result;
diag->cbfn = cbfn;
diag->cbarg = cbarg;
/* download memtest code and take LPU0 out of reset */
bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
bfa_diag_memtest_done, diag, memtest_tov);
diag->timer_active = 1;
return BFA_STATUS_OK;
}
/*
* DIAG firmware ping command
*
* @param[in] *diag - diag data struct
* @param[in] cnt - dma loop count for testing PCIE
* @param[in] data - data pattern to pass in fw
* @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
* @param[in] cbfn - callback function
* @param[in] *cbarg - callback function arg
*
* @param[out]
*/
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
void *cbarg)
{
bfa_trc(diag, cnt);
bfa_trc(diag, data);
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
return BFA_STATUS_CMD_NOTSUPP;
/* check to see if there is another destructive diag cmd running */
if (diag->block || diag->fwping.lock) {
bfa_trc(diag, diag->block);
bfa_trc(diag, diag->fwping.lock);
return BFA_STATUS_DEVBUSY;
}
/* Initialization */
diag->fwping.lock = 1;
diag->fwping.cbfn = cbfn;
diag->fwping.cbarg = cbarg;
diag->fwping.result = result;
diag->fwping.data = data;
diag->fwping.count = cnt;
/* Init test results */
diag->fwping.result->data = 0;
diag->fwping.result->status = BFA_STATUS_OK;
/* kick off the first ping */
diag_fwping_send(diag);
return BFA_STATUS_OK;
}
/*
* Read Temperature Sensor
*
* @param[in] *diag - diag data struct
* @param[in] *result - pointer to bfa_diag_temp_t data struct
* @param[in] cbfn - callback function
* @param[in] *cbarg - callback function arg
*
* @param[out]
*/
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
struct bfa_diag_results_tempsensor_s *result,
bfa_cb_diag_t cbfn, void *cbarg)
{
/* check to see if there is a destructive diag cmd running */
if (diag->block || diag->tsensor.lock) {
bfa_trc(diag, diag->block);
bfa_trc(diag, diag->tsensor.lock);
return BFA_STATUS_DEVBUSY;
}
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
/* Init diag mod params */
diag->tsensor.lock = 1;
diag->tsensor.temp = result;
diag->tsensor.cbfn = cbfn;
diag->tsensor.cbarg = cbarg;
/* Send msg to fw */
diag_tempsensor_send(diag);
return BFA_STATUS_OK;
}
/*
* LED Test command
*
* @param[in] *diag - diag data struct
* @param[in] *ledtest - pointer to ledtest data structure
*
* @param[out]
*/
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
bfa_trc(diag, ledtest->cmd);
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
if (diag->beacon.state)
return BFA_STATUS_BEACON_ON;
if (diag->ledtest.lock)
return BFA_STATUS_LEDTEST_OP;
/* Send msg to fw */
diag->ledtest.lock = BFA_TRUE;
diag_ledtest_send(diag, ledtest);
return BFA_STATUS_OK;
}
/*
* Port beaconing command
*
* @param[in] *diag - diag data struct
* @param[in] beacon - port beaconing 1:ON 0:OFF
* @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
* @param[in] sec - beaconing duration in seconds
*
* @param[out]
*/
bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
bfa_boolean_t link_e2e_beacon, uint32_t sec)
{
bfa_trc(diag, beacon);
bfa_trc(diag, link_e2e_beacon);
bfa_trc(diag, sec);
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
if (diag->ledtest.lock)
return BFA_STATUS_LEDTEST_OP;
if (diag->beacon.state && beacon) /* beacon already on */
return BFA_STATUS_BEACON_ON;
diag->beacon.state = beacon;
diag->beacon.link_e2e = link_e2e_beacon;
if (diag->cbfn_beacon)
diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
/* Send msg to fw */
diag_portbeacon_send(diag, beacon, sec);
return BFA_STATUS_OK;
}
/*
* Return DMA memory needed by diag module.
*/
u32
bfa_diag_meminfo(void)
{
return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Attach virtual and physical memory for Diag.
*/
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
diag->dev = dev;
diag->ioc = ioc;
diag->trcmod = trcmod;
diag->block = 0;
diag->cbfn = NULL;
diag->cbarg = NULL;
diag->result = NULL;
diag->cbfn_beacon = cbfn_beacon;
bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
bfa_q_qe_init(&diag->ioc_notify);
bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
diag->fwping.dbuf_kva = dm_kva;
diag->fwping.dbuf_pa = dm_pa;
memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
/*
* PHY module specific
*/
#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
static void
bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
int i, m = sz >> 2;
for (i = 0; i < m; i++)
obuf[i] = be32_to_cpu(ibuf[i]);
}
static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}
static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
struct bfa_phy_s *phy = cbarg;
bfa_trc(phy, event);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (phy->op_busy) {
phy->status = BFA_STATUS_IOC_FAILURE;
phy->cbfn(phy->cbarg, phy->status);
phy->op_busy = 0;
}
break;
default:
break;
}
}
/*
* Send phy attribute query request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_query_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_query_req_s *msg =
(struct bfi_phy_query_req_s *) phy->mb.msg;
msg->instance = phy->instance;
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
* Send phy write request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_write_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_write_req_s *msg =
(struct bfi_phy_write_req_s *) phy->mb.msg;
u32 len;
u16 *buf, *dbuf;
int i, sz;
msg->instance = phy->instance;
msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
phy->residue : BFA_PHY_DMA_BUF_SZ;
msg->length = cpu_to_be32(len);
/* indicate if it's the last msg of the whole write operation */
msg->last = (len == phy->residue) ? 1 : 0;
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
buf = (u16 *) (phy->ubuf + phy->offset);
dbuf = (u16 *)phy->dbuf_kva;
sz = len >> 1;
for (i = 0; i < sz; i++)
/* stage the next chunk into the DMA buffer, byte-swapped */
dbuf[i] = cpu_to_be16(buf[i]);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
phy->residue -= len;
phy->offset += len;
}
/*
* Send phy read request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_read_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_read_req_s *msg =
(struct bfi_phy_read_req_s *) phy->mb.msg;
u32 len;
msg->instance = phy->instance;
msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
phy->residue : BFA_PHY_DMA_BUF_SZ;
msg->length = cpu_to_be32(len);
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
* Send phy stats request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_stats_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_stats_req_s *msg =
(struct bfi_phy_stats_req_s *) phy->mb.msg;
msg->instance = phy->instance;
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
* Phy memory info API.
*
* @param[in] mincfg - minimal cfg variable
*/
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
/* min driver doesn't need phy */
if (mincfg)
return 0;
return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Phy attach API.
*
* @param[in] phy - phy structure
* @param[in] ioc - ioc structure
* @param[in] dev - device structure
* @param[in] trcmod - trace module
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
phy->ioc = ioc;
phy->trcmod = trcmod;
phy->cbfn = NULL;
phy->cbarg = NULL;
phy->op_busy = 0;
bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
bfa_q_qe_init(&phy->ioc_notify);
bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
/* min driver doesn't need phy */
if (mincfg) {
phy->dbuf_kva = NULL;
phy->dbuf_pa = 0;
}
}
/*
* Claim memory for phy
*
* @param[in] phy - phy structure
* @param[in] dm_kva - pointer to virtual memory address
* @param[in] dm_pa - physical memory address
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
bfa_boolean_t mincfg)
{
if (mincfg)
return;
phy->dbuf_kva = dm_kva;
phy->dbuf_pa = dm_pa;
memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
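/*
 * The phy semaphore status register reads nonzero while another
 * function holds the phy lock, so any nonzero value means busy.
 */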
bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s *ioc)
{
void __iomem *rb;
rb = bfa_ioc_bar0(ioc);
return readl(rb + BFA_PHY_LOCK_STATUS);
}
/*
* Get phy attribute.
*
* @param[in] phy - phy structure
* @param[in] attr - phy attribute structure
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
bfa_trc(phy, instance);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->ubuf = (uint8_t *) attr;
bfa_phy_query_send(phy);
return BFA_STATUS_OK;
}
/*
* Get phy stats.
*
* @param[in] phy - phy structure
* @param[in] instance - phy image instance
* @param[in] stats - pointer to phy stats
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
struct bfa_phy_stats_s *stats,
bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
bfa_trc(phy, instance);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->ubuf = (u8 *) stats;
bfa_phy_stats_send(phy);
return BFA_STATUS_OK;
}
/*
* Update phy image.
*
* @param[in] phy - phy structure
* @param[in] instance - phy image instance
* @param[in] buf - update data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
void *buf, u32 len, u32 offset,
bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
bfa_trc(phy, instance);
bfa_trc(phy, len);
bfa_trc(phy, offset);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
/* 'len' must be on a word (4-byte) boundary */
if (!len || (len & 0x03))
return BFA_STATUS_FAILED;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->residue = len;
phy->offset = 0;
phy->addr_off = offset;
phy->ubuf = buf;
bfa_phy_write_send(phy);
return BFA_STATUS_OK;
}
/*
* Read phy image.
*
* @param[in] phy - phy structure
* @param[in] instance - phy image instance
* @param[in] buf - read data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
void *buf, u32 len, u32 offset,
bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
bfa_trc(phy, instance);
bfa_trc(phy, len);
bfa_trc(phy, offset);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
/* 'len' must be on a word (4-byte) boundary */
if (!len || (len & 0x03))
return BFA_STATUS_FAILED;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->residue = len;
phy->offset = 0;
phy->addr_off = offset;
phy->ubuf = buf;
bfa_phy_read_send(phy);
return BFA_STATUS_OK;
}
/*
* Process phy response messages upon receiving interrupts.
*
* @param[in] phyarg - phy structure
* @param[in] msg - message structure
*/
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
struct bfa_phy_s *phy = phyarg;
u32 status;
union {
struct bfi_phy_query_rsp_s *query;
struct bfi_phy_stats_rsp_s *stats;
struct bfi_phy_write_rsp_s *write;
struct bfi_phy_read_rsp_s *read;
struct bfi_mbmsg_s *msg;
} m;
m.msg = msg;
bfa_trc(phy, msg->mh.msg_id);
if (!phy->op_busy) {
/* receiving response after ioc failure */
bfa_trc(phy, 0x9999);
return;
}
switch (msg->mh.msg_id) {
case BFI_PHY_I2H_QUERY_RSP:
status = be32_to_cpu(m.query->status);
bfa_trc(phy, status);
if (status == BFA_STATUS_OK) {
struct bfa_phy_attr_s *attr =
(struct bfa_phy_attr_s *) phy->ubuf;
bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
sizeof(struct bfa_phy_attr_s));
bfa_trc(phy, attr->status);
bfa_trc(phy, attr->length);
}
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
break;
case BFI_PHY_I2H_STATS_RSP:
status = be32_to_cpu(m.stats->status);
bfa_trc(phy, status);
if (status == BFA_STATUS_OK) {
struct bfa_phy_stats_s *stats =
(struct bfa_phy_stats_s *) phy->ubuf;
bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
sizeof(struct bfa_phy_stats_s));
bfa_trc(phy, stats->status);
}
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
break;
case BFI_PHY_I2H_WRITE_RSP:
status = be32_to_cpu(m.write->status);
bfa_trc(phy, status);
if (status != BFA_STATUS_OK || phy->residue == 0) {
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
} else {
bfa_trc(phy, phy->offset);
bfa_phy_write_send(phy);
}
break;
case BFI_PHY_I2H_READ_RSP:
status = be32_to_cpu(m.read->status);
bfa_trc(phy, status);
if (status != BFA_STATUS_OK) {
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
} else {
u32 len = be32_to_cpu(m.read->length);
u16 *buf = (u16 *)(phy->ubuf + phy->offset);
u16 *dbuf = (u16 *)phy->dbuf_kva;
int i, sz = len >> 1;
bfa_trc(phy, phy->offset);
bfa_trc(phy, len);
for (i = 0; i < sz; i++)
buf[i] = be16_to_cpu(dbuf[i]);
phy->residue -= len;
phy->offset += len;
if (phy->residue == 0) {
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
} else
bfa_phy_read_send(phy);
}
break;
default:
WARN_ON(1);
}
}
/*
* DCONF module specific
*/
BFA_MODULE(dconf);
/*
* DCONF state machine events
*/
enum bfa_dconf_event {
BFA_DCONF_SM_INIT = 1, /* dconf Init */
BFA_DCONF_SM_FLASH_COMP = 2, /* flash read/write completed */
BFA_DCONF_SM_WR = 3, /* binding change, map */
BFA_DCONF_SM_TIMEOUT = 4, /* timer expiry */
BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
};
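/*
 * Typical flow: uninit --INIT--> flash_read --FLASH_COMP--> ready
 * --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready. An EXIT
 * from dirty or sync passes through final_sync before uninit.
 */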
/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
/*
* Beginning state of the dconf module, waiting for an event to start.
*/
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_status_t bfa_status;
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_INIT:
if (dconf->min_cfg) {
bfa_trc(dconf->bfa, dconf->min_cfg);
bfa_fsm_send_event(&dconf->bfa->iocfc,
IOCFC_E_DCONF_DONE);
return;
}
bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
BFA_FLASH_PART_DRV, dconf->instance,
dconf->dconf,
sizeof(struct bfa_dconf_s), 0,
bfa_dconf_init_cb, dconf->bfa);
if (bfa_status != BFA_STATUS_OK) {
bfa_timer_stop(&dconf->timer);
bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
return;
}
break;
case BFA_DCONF_SM_EXIT:
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
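/* fall through */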
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_WR:
case BFA_DCONF_SM_FLASH_COMP:
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Read flash for dconf entries and invoke the driver callback once done.
*/
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
break;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
break;
case BFA_DCONF_SM_EXIT:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_IOCDISABLE:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* DCONF Module is in ready state. Has completed the initialization.
*/
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_WR:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_INIT:
case BFA_DCONF_SM_IOCDISABLE:
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Entries are dirty; write them back to the flash.
*/
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
bfa_dconf_flash_write(dconf);
break;
case BFA_DCONF_SM_WR:
bfa_timer_stop(&dconf->timer);
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
break;
case BFA_DCONF_SM_EXIT:
bfa_timer_stop(&dconf->timer);
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
bfa_dconf_flash_write(dconf);
break;
case BFA_DCONF_SM_FLASH_COMP:
break;
case BFA_DCONF_SM_IOCDISABLE:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Sync the dconf entries to the flash.
*/
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
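/* fall through */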
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_FLASH_COMP:
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
break;
case BFA_DCONF_SM_WR:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
break;
case BFA_DCONF_SM_IOCDISABLE:
bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_INIT:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_IOCDISABLE:
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Compute memory needed by the DRV_CFG module and register it in meminfo.
*/
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa)
{
struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
if (cfg->drvcfg.min_cfg)
bfa_mem_kva_setup(meminfo, dconf_kva,
sizeof(struct bfa_dconf_hdr_s));
else
bfa_mem_kva_setup(meminfo, dconf_kva,
sizeof(struct bfa_dconf_s));
}
static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
dconf->bfad = bfad;
dconf->bfa = bfa;
dconf->instance = bfa->ioc.port_id;
bfa_trc(bfa, dconf->instance);
dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
if (cfg->drvcfg.min_cfg) {
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
dconf->min_cfg = BFA_TRUE;
} else {
dconf->min_cfg = BFA_FALSE;
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
}
bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}
static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
struct bfa_s *bfa = arg;
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
if (status == BFA_STATUS_OK) {
bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
dconf->dconf->hdr.version = BFI_DCONF_VERSION;
}
bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
void
bfa_dconf_modinit(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}
static void
bfa_dconf_start(struct bfa_s *bfa)
{
}
static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}
static void
bfa_dconf_timer(void *cbarg)
{
struct bfa_dconf_mod_s *dconf = cbarg;
bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}
static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}
static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
bfa_status_t bfa_status;
bfa_trc(dconf->bfa, 0);
bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
BFA_FLASH_PART_DRV, dconf->instance,
dconf->dconf, sizeof(struct bfa_dconf_s), 0,
bfa_dconf_cbfn, dconf);
WARN_ON(bfa_status);
bfa_trc(dconf->bfa, bfa_status);
return bfa_status;
}
bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_trc(dconf->bfa, 0);
if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
return BFA_STATUS_FAILED;
if (dconf->min_cfg) {
bfa_trc(dconf->bfa, dconf->min_cfg);
return BFA_STATUS_FAILED;
}
bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
return BFA_STATUS_OK;
}
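/*
 * Usage sketch (assumed caller; only bfa_dconf_update() and the
 * accessors used below are defined in this file). A configuration
 * path mutates the in-memory dconf and then schedules a flash sync:
 *
 *	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 *	if (bfa_dconf_read_data_valid(bfa)) {
 *		dconf->dconf->hdr.version = BFI_DCONF_VERSION;
 *		(void) bfa_dconf_update(bfa);	(fails if iocdown/min_cfg)
 *	}
 */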
static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
struct bfa_dconf_mod_s *dconf = arg;
WARN_ON(status);
bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}
void
bfa_dconf_modexit(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}
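/*
 * Module lifecycle, sketched from the entry points above:
 *
 *	bfa_dconf_modinit(bfa);		BFA_DCONF_SM_INIT: read from flash
 *	bfa_dconf_update(bfa);		mark dirty, timed write-back
 *	bfa_dconf_modexit(bfa);		BFA_DCONF_SM_EXIT: final sync, then
 *					IOCFC_E_DCONF_DONE to iocfc
 */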
| gpl-2.0 |
NuriJ/sony_kernel_msm8960 | drivers/scsi/bfa/bfa_ioc.c | 4843 | 138699 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
BFA_TRC_FILE(CNA, IOC);
/*
* IOC local definitions
*/
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_hb_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
* forward declarations
*/
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
* IOC state machine definitions/declarations
*/
enum ioc_event {
IOC_E_RESET = 1, /* IOC reset request */
IOC_E_ENABLE = 2, /* IOC enable request */
IOC_E_DISABLE = 3, /* IOC disable request */
IOC_E_DETACH = 4, /* driver detach cleanup */
IOC_E_ENABLED = 5, /* f/w enabled */
IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
IOC_E_DISABLED = 7, /* f/w disabled */
IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
IOC_E_HBFAIL = 9, /* heartbeat failure */
IOC_E_HWERROR = 10, /* hardware error interrupt */
IOC_E_TIMEOUT = 11, /* timeout */
IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
/*
* IOCPF state machine definitions/declarations
*/
#define bfa_iocpf_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_iocpf_poll_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
#define bfa_sem_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
/*
* Forward declarations for the iocpf state machine
*/
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
* IOCPF state machine events
*/
enum iocpf_event {
IOCPF_E_ENABLE = 1, /* IOCPF enable request */
IOCPF_E_DISABLE = 2, /* IOCPF disable request */
IOCPF_E_STOP = 3, /* stop on driver detach */
IOCPF_E_FWREADY = 4, /* f/w initialization done */
IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
};
/*
* IOCPF states
*/
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /* IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
BFA_IOCPF_READY = 4, /* IOCPF is initialized */
BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
BFA_IOCPF_FAIL = 6, /* IOCPF failed */
BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
* IOC State Machine
*/
/*
* Beginning state. IOC uninit state.
*/
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
/*
* IOC is in uninit state.
*/
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_RESET:
bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Reset entry actions -- initialize state machine
*/
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/*
* IOC is in reset state.
*/
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
bfa_ioc_disable_comp(ioc);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
case IOC_E_PFFAILED:
/* !!! fall through !!! */
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
break;
case IOC_E_HWFAILED:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
break;
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
case IOC_E_ENABLE:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
bfa_ioc_timer_start(ioc);
bfa_ioc_send_getattr(ioc);
}
/*
* IOC configuration in progress. Timer is active.
*/
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_FWRSP_GETATTR:
bfa_ioc_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
/* !!! fall through !!! */
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
break;
case IOC_E_DISABLE:
bfa_ioc_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_ENABLE:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
bfa_ioc_hb_monitor(ioc);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
break;
case IOC_E_DISABLE:
bfa_hb_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_hb_timer_stop(ioc);
/* !!! fall through !!! */
case IOC_E_HBFAIL:
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
else
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
bfa_ioc_fail_notify(ioc);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}
/*
* IOC is being disabled
*/
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_DISABLED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
break;
case IOC_E_HWERROR:
/*
* No state change. Will move to disabled state
* after iocpf sm completes failure processing and
* moves to disabled state.
*/
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
break;
case IOC_E_HWFAILED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
bfa_ioc_disable_comp(ioc);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* IOC disable completion entry.
*/
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
bfa_ioc_disable_comp(ioc);
}
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
ioc->cbfn->disable_cbfn(ioc->bfa);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
bfa_trc(ioc, 0);
}
/*
* Hardware initialization retry.
*/
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLED:
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
/*
* Initialization retry failed.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
break;
case IOC_E_HWFAILED:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
break;
case IOC_E_ENABLE:
break;
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
bfa_trc(ioc, 0);
}
/*
* IOC failure.
*/
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
break;
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
break;
case IOC_E_HWERROR:
/*
* HB failure notification, ignore.
*/
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
bfa_trc(ioc, 0);
}
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
bfa_trc(ioc, event);
switch (event) {
case IOC_E_ENABLE:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
break;
case IOC_E_DISABLE:
ioc->cbfn->disable_cbfn(ioc->bfa);
break;
case IOC_E_DETACH:
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
default:
bfa_sm_fault(ioc, event);
}
}
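/*
 * The normal bring-up path, as a sketch of the handlers above (events
 * on the arrows come from the driver, the IOCPF machine and firmware
 * responses):
 *
 *	uninit   --IOC_E_RESET-->	  reset
 *	reset    --IOC_E_ENABLE-->	  enabling
 *	enabling --IOC_E_ENABLED-->	  getattr
 *	getattr  --IOC_E_FWRSP_GETATTR--> op
 */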
/*
* IOCPF State Machine
*/
/*
* Reset entry actions -- initialize state machine
*/
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
iocpf->fw_mismatch_notified = BFA_FALSE;
iocpf->auto_recover = bfa_auto_recover;
}
/*
* Beginning state. IOC is in reset state.
*/
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_ENABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
break;
case IOCPF_E_STOP:
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Semaphore should be acquired for version check.
*/
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
struct bfi_ioc_image_hdr_s fwhdr;
u32 r32, fwstate, pgnum, pgoff, loff = 0;
int i;
/*
* Spin on init semaphore to serialize.
*/
r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
while (r32 & 0x1) {
udelay(20);
r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
}
/* h/w sem init */
fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
}
bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
}
/*
* Clear fwver hdr
*/
pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
}
bfa_trc(iocpf->ioc, fwstate);
bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
/*
* Unlock the hw semaphore. Should be here only once per boot.
*/
readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
/*
* unlock init semaphore.
*/
writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
sem_get:
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* Awaiting h/w semaphore to continue with version check.
*/
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
if (bfa_ioc_sync_start(ioc)) {
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
} else {
bfa_ioc_firmware_unlock(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_sem_timer_start(ioc);
}
} else {
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
}
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_fsm_send_event(ioc, IOC_E_DISABLED);
break;
case IOCPF_E_STOP:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Notify enable completion callback.
*/
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
/*
* Call only the first time sm enters fwmismatch state.
*/
if (iocpf->fw_mismatch_notified == BFA_FALSE)
bfa_ioc_pf_fwmismatch(iocpf->ioc);
iocpf->fw_mismatch_notified = BFA_TRUE;
bfa_iocpf_timer_start(iocpf->ioc);
}
/*
* Awaiting firmware version match.
*/
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_TIMEOUT:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
break;
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_fsm_send_event(ioc, IOC_E_DISABLED);
break;
case IOCPF_E_STOP:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* Request for semaphore.
*/
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* Awaiting semaphore for h/w initialization.
*/
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_sync_complete(ioc)) {
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
} else {
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_sem_timer_start(ioc);
}
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
iocpf->poll_time = 0;
bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
/*
* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_FWREADY:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
break;
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_ioc_sync_leave(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
bfa_iocpf_timer_start(iocpf->ioc);
/*
* Enable Interrupts before sending fw IOC ENABLE cmd.
*/
iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
bfa_ioc_send_enable(iocpf->ioc);
}
/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_FWRSP_ENABLE:
bfa_iocpf_timer_stop(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
/*
* !!! fall through !!!
*/
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
if (event == IOCPF_E_TIMEOUT)
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
case IOCPF_E_GETATTRFAIL:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
case IOCPF_E_FAIL:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
bfa_iocpf_timer_start(iocpf->ioc);
bfa_ioc_send_disable(iocpf->ioc);
}
/*
* IOC is being disabled
*/
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
bfa_iocpf_timer_stop(ioc);
/*
* !!! fall through !!!
*/
case IOCPF_E_TIMEOUT:
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FWRSP_ENABLE:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* IOC hb ack request is being removed.
*/
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_sync_leave(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_FAIL:
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* IOC disable completion entry.
*/
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_mbox_flush(iocpf->ioc);
bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_ENABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
break;
case IOCPF_E_STOP:
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
bfa_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
* Hardware initialization failed.
*/
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_STOP:
bfa_sem_timer_stop(ioc);
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
case IOCPF_E_FAIL:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
bfa_trc(iocpf->ioc, 0);
}
/*
* Hardware initialization failed.
*/
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
case IOCPF_E_STOP:
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
/*
* Mark IOC as failed in hardware and stop firmware.
*/
bfa_ioc_lpu_stop(iocpf->ioc);
/*
* Flush any queued up mailbox requests.
*/
bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_sync_ack(ioc);
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
if (bfa_ioc_sync_complete(ioc))
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
else {
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
}
}
break;
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
break;
default:
bfa_sm_fault(ioc, event);
}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
bfa_trc(iocpf->ioc, 0);
}
/*
* IOC is in failed state.
*/
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
switch (event) {
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
bfa_sm_fault(ioc, event);
}
}
/*
* BFA IOC private functions
*/
/*
* Notify common modules registered for notification.
*/
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
struct bfa_ioc_notify_s *notify;
struct list_head *qe;
list_for_each(qe, &ioc->notify_q) {
notify = (struct bfa_ioc_notify_s *)qe;
notify->cbfn(notify->cbarg, event);
}
}
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
ioc->cbfn->disable_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
u32 r32;
int cnt = 0;
#define BFA_SEM_SPINCNT 3000
r32 = readl(sem_reg);
while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
cnt++;
udelay(2);
r32 = readl(sem_reg);
}
if (!(r32 & 1))
return BFA_TRUE;
return BFA_FALSE;
}
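/*
 * Usage sketch: this spinning acquire is paired with an explicit
 * release, mirroring bfa_ioc_smem_read() and bfa_ioc_pll_init() below
 * (the readl before the release follows the existing callers):
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == BFA_TRUE) {
 *		... serialized section ...
 *		readl(ioc->ioc_regs.ioc_init_sem_reg);
 *		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */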
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
u32 r32;
/*
* First read to the semaphore register will return 0, subsequent reads
* will return 1. Semaphore is released by writing 1 to the register
*/
r32 = readl(ioc->ioc_regs.ioc_sem_reg);
if (r32 == ~0) {
WARN_ON(1);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
return;
}
if (!(r32 & 1)) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
return;
}
bfa_sem_timer_start(ioc);
}
/*
* Initialize LPU local memory (aka secondary memory / SRAM)
*/
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
int i;
#define PSS_LMEM_INIT_TIME 10000
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LMEM_RESET;
pss_ctl |= __PSS_LMEM_INIT_EN;
/*
* i2c workaround 12.5khz clock
*/
pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
/*
* wait for memory initialization to be complete
*/
i = 0;
do {
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
i++;
} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
/*
* If memory initialization is not successful, IOC timeout will catch
* such failures.
*/
WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
bfa_trc(ioc, pss_ctl);
pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
/*
* Take processor out of reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LPU0_RESET;
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
/*
* Put processors in reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
* Get driver and firmware versions.
*/
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
u32 pgnum, pgoff;
u32 loff = 0;
int i;
u32 *fwsig = (u32 *) fwhdr;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
i++) {
fwsig[i] =
bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
loff += sizeof(u32);
}
}
/*
* Returns TRUE if the given firmware header's MD5 checksum matches the
* driver's firmware image.
*/
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
struct bfi_ioc_image_hdr_s *drv_fwhdr;
int i;
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
bfa_trc(ioc, i);
bfa_trc(ioc, fwhdr->md5sum[i]);
bfa_trc(ioc, drv_fwhdr->md5sum[i]);
return BFA_FALSE;
}
}
bfa_trc(ioc, fwhdr->md5sum[0]);
return BFA_TRUE;
}
/*
* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
bfa_ioc_fwver_get(ioc, &fwhdr);
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
if (fwhdr.signature != drv_fwhdr->signature) {
bfa_trc(ioc, fwhdr.signature);
bfa_trc(ioc, drv_fwhdr->signature);
return BFA_FALSE;
}
if (swab32(fwhdr.bootenv) != boot_env) {
bfa_trc(ioc, fwhdr.bootenv);
bfa_trc(ioc, boot_env);
return BFA_FALSE;
}
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
* Conditionally flush any pending message from firmware at start.
*/
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
u32 r32;
r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
if (r32)
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
enum bfi_ioc_state ioc_fwstate;
bfa_boolean_t fwvalid;
u32 boot_type;
u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
bfa_trc(ioc, ioc_fwstate);
boot_type = BFI_FWBOOT_TYPE_NORMAL;
boot_env = BFI_FWBOOT_ENV_OS;
/*
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
bfa_ioc_boot(ioc, boot_type, boot_env);
bfa_ioc_poll_fwinit(ioc);
return;
}
/*
* If hardware initialization is in progress (initialized by other IOC),
* just wait for an initialization completion interrupt.
*/
if (ioc_fwstate == BFI_IOC_INITING) {
bfa_ioc_poll_fwinit(ioc);
return;
}
/*
* If IOC function is disabled and firmware version is same,
* just re-enable IOC.
*
* If option rom, IOC must not be in operational state. With
* convergence, IOC will be in operational state when 2nd driver
* is loaded.
*/
if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
/*
* When using MSI-X any pending firmware ready event should
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
bfa_ioc_msgflush(ioc);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
/*
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, boot_type, boot_env);
bfa_ioc_poll_fwinit(ioc);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_trc(ioc, 0);
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
u32 *msgp = (u32 *) ioc_msg;
u32 i;
bfa_trc(ioc, msgp[0]);
bfa_trc(ioc, len);
WARN_ON(len > BFI_IOC_MSGLEN_MAX);
/*
* first write msg to mailbox registers
*/
for (i = 0; i < len / sizeof(u32); i++)
writel(cpu_to_le32(msgp[i]),
ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
/*
* write 1 to mailbox CMD to trigger LPU event
*/
writel(1, ioc->ioc_regs.hfn_mbox_cmd);
(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s disable_req;
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_getattr_req_s attr_req;
bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
bfa_ioc_portid(ioc));
bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
struct bfa_ioc_s *ioc = cbarg;
u32 hb_count;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
bfa_ioc_recover(ioc);
return;
}
ioc->hb_count = hb_count;
bfa_ioc_mbox_poll(ioc);
bfa_hb_timer_start(ioc);
}
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
bfa_hb_timer_start(ioc);
}
/*
* Initiate a full firmware download.
*/
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 boot_env)
{
u32 *fwimg;
u32 pgnum, pgoff;
u32 loff = 0;
u32 chunkno = 0;
u32 i;
u32 asicmode;
bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
/*
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
loff += sizeof(u32);
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
/*
* Set boot type and device mode at the end.
*/
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
swab32(asicmode));
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
swab32(boot_type));
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
swab32(boot_env));
}
/*
* Update BFA configuration from firmware configuration.
*/
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_attr_s *attr = ioc->attr;
attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
attr->card_type = be32_to_cpu(attr->card_type);
attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
* Attach time initialization of mbox logic.
*/
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
int mc;
INIT_LIST_HEAD(&mod->cmd_q);
for (mc = 0; mc < BFI_MC_MAX; mc++) {
mod->mbhdlr[mc].cbfn = NULL;
mod->mbhdlr[mc].cbarg = ioc->bfa;
}
}
/*
* Mbox poll timer -- restarts any pending mailbox requests.
*/
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
u32 stat;
/*
* If no command pending, do nothing
*/
if (list_empty(&mod->cmd_q))
return;
/*
* If previous command is not yet fetched by firmware, do nothing
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat)
return;
/*
* Enqueue command to firmware.
*/
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
* Cleanup any pending requests.
*/
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
while (!list_empty(&mod->cmd_q))
bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
* Read data from SMEM to host through PCI memmap
*
* @param[in] ioc memory for IOC
* @param[in] tbuf app memory to store data from smem
* @param[in] soff smem offset
* @param[in] sz number of bytes to read from smem
*/
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
u32 pgnum, loff;
__be32 r32;
int i, len;
u32 *buf = tbuf;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
loff = PSS_SMEM_PGOFF(soff);
bfa_trc(ioc, pgnum);
bfa_trc(ioc, loff);
bfa_trc(ioc, sz);
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
bfa_trc(ioc, 0);
return BFA_STATUS_FAILED;
}
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32); /* len in words */
bfa_trc(ioc, len);
for (i = 0; i < len; i++) {
r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
/*
* release semaphore.
*/
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
}
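/*
 * Usage sketch (the trace offset/length pairing is assumed from the
 * BFA_DBG_FWTRC_* macros used elsewhere in this file): pulling the
 * saved firmware trace of function 0 into a host buffer.
 *
 *	u32 trc[BFA_DBG_FWTRC_LEN / sizeof(u32)];
 *	if (bfa_ioc_smem_read(ioc, trc, BFA_DBG_FWTRC_OFF(0),
 *			BFA_DBG_FWTRC_LEN) != BFA_STATUS_OK)
 *		return;		(init semaphore was busy)
 */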
/*
* Clear SMEM data from host through PCI memmap
*
* @param[in] ioc memory for IOC
* @param[in] soff smem offset
* @param[in] sz number of bytes to clear in smem
*/
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
int i, len;
u32 pgnum, loff;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
loff = PSS_SMEM_PGOFF(soff);
bfa_trc(ioc, pgnum);
bfa_trc(ioc, loff);
bfa_trc(ioc, sz);
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
bfa_trc(ioc, 0);
return BFA_STATUS_FAILED;
}
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32); /* len in words */
bfa_trc(ioc, len);
for (i = 0; i < len; i++) {
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
if (loff == 0) {
pgnum++;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
}
}
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
/*
* release semaphore.
*/
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
}
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/*
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
bfa_ioc_debug_save_ftrc(ioc);
BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
"Heart Beat of IOC has failed\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/*
* Provide enable completion callback.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Running firmware version is incompatible "
"with the driver version\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
/*
* Hold semaphore so that nobody can access the chip during init.
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
bfa_ioc_pll_init_asic(ioc);
ioc->pllinit = BFA_TRUE;
/*
* Initialize LMEM
*/
bfa_ioc_lmem_init(ioc);
/*
* release semaphore.
*/
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return BFA_STATUS_OK;
}
/*
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
return;
/*
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
} else {
writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
}
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
bfa_ioc_lpu_start(ioc);
}
/*
* Enable/disable IOC failure auto recovery.
*/
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
bfa_auto_recover = auto_recover;
}
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
return ((r32 != BFI_IOC_UNINIT) &&
(r32 != BFI_IOC_INITING) &&
(r32 != BFI_IOC_MEMTEST));
}
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
__be32 *msgp = mbmsg;
u32 r32;
int i;
r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
if ((r32 & 1) == 0)
return BFA_FALSE;
/*
* read the MBOX msg
*/
for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
i++) {
r32 = readl(ioc->ioc_regs.lpu_mbox +
i * sizeof(u32));
msgp[i] = cpu_to_be32(r32);
}
/*
* turn off mailbox interrupt by clearing mailbox status
*/
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
readl(ioc->ioc_regs.lpu_mbox_cmd);
return BFA_TRUE;
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
union bfi_ioc_i2h_msg_u *msg;
struct bfa_iocpf_s *iocpf = &ioc->iocpf;
msg = (union bfi_ioc_i2h_msg_u *) m;
bfa_ioc_stats(ioc, ioc_isrs);
switch (msg->mh.msg_id) {
case BFI_IOC_I2H_HBEAT:
break;
case BFI_IOC_I2H_ENABLE_REPLY:
ioc->port_mode = ioc->port_mode_cfg =
(enum bfa_mode_s)msg->fw_event.port_mode;
ioc->ad_cap_bm = msg->fw_event.cap_bm;
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
break;
case BFI_IOC_I2H_DISABLE_REPLY:
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
break;
case BFI_IOC_I2H_GETATTR_REPLY:
bfa_ioc_getattr_reply(ioc);
break;
default:
bfa_trc(ioc, msg->mh.msg_id);
WARN_ON(1);
}
}
/*
* IOC attach time initialization and setup.
*
* @param[in] ioc memory for IOC
* @param[in] bfa driver instance structure
*/
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
struct bfa_timer_mod_s *timer_mod)
{
ioc->bfa = bfa;
ioc->cbfn = cbfn;
ioc->timer_mod = timer_mod;
ioc->fcmode = BFA_FALSE;
ioc->pllinit = BFA_FALSE;
ioc->dbg_fwsave_once = BFA_TRUE;
ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
INIT_LIST_HEAD(&ioc->notify_q);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
* Driver detach time IOC cleanup.
*/
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
bfa_fsm_send_event(ioc, IOC_E_DETACH);
INIT_LIST_HEAD(&ioc->notify_q);
}
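/*
 * One plausible bring-up ordering (a sketch; the exact sequence is
 * owned by the bfad/iocfc layers and is not shown in this file):
 *
 *	bfa_ioc_attach(ioc, bfa, &cbfn, timer_mod);
 *	bfa_ioc_pci_init(ioc, pcidev, clscode);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_debug_memclaim(ioc, fwsave_buf);
 *	bfa_ioc_enable(ioc);	(completion via cbfn->enable_cbfn())
 *	...
 *	bfa_ioc_disable(ioc);
 *	bfa_ioc_detach(ioc);
 */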
/*
* Setup IOC PCI properties.
*
* @param[in] pcidev PCI device information for this IOC
*/
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
enum bfi_pcifn_class clscode)
{
ioc->clscode = clscode;
ioc->pcidev = *pcidev;
/*
* Initialize IOC and device personality
*/
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
ioc->asic_mode = BFI_ASIC_MODE_FC;
switch (pcidev->device_id) {
case BFA_PCI_DEVICE_ID_FC_8G1P:
case BFA_PCI_DEVICE_ID_FC_8G2P:
ioc->asic_gen = BFI_ASIC_GEN_CB;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
break;
case BFA_PCI_DEVICE_ID_CT:
ioc->asic_gen = BFI_ASIC_GEN_CT;
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
ioc->asic_mode = BFI_ASIC_MODE_ETH;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
ioc->ad_cap_bm = BFA_CM_CNA;
break;
case BFA_PCI_DEVICE_ID_CT_FC:
ioc->asic_gen = BFI_ASIC_GEN_CT;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
break;
case BFA_PCI_DEVICE_ID_CT2:
ioc->asic_gen = BFI_ASIC_GEN_CT2;
if (clscode == BFI_PCIFN_CLASS_FC &&
pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
ioc->asic_mode = BFI_ASIC_MODE_FC16;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
} else {
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
ioc->asic_mode = BFI_ASIC_MODE_ETH;
if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
ioc->port_mode =
ioc->port_mode_cfg = BFA_MODE_CNA;
ioc->ad_cap_bm = BFA_CM_CNA;
} else {
ioc->port_mode =
ioc->port_mode_cfg = BFA_MODE_NIC;
ioc->ad_cap_bm = BFA_CM_NIC;
}
}
break;
default:
WARN_ON(1);
}
/*
* Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
*/
if (ioc->asic_gen == BFI_ASIC_GEN_CB)
bfa_ioc_set_cb_hwif(ioc);
else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
bfa_ioc_set_ct_hwif(ioc);
else {
WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
bfa_ioc_set_ct2_hwif(ioc);
bfa_ioc_ct2_poweron(ioc);
}
bfa_ioc_map_port(ioc);
bfa_ioc_reg_init(ioc);
}
/*
* Initialize IOC dma memory
*
* @param[in] dm_kva kernel virtual address of IOC dma memory
* @param[in] dm_pa physical address of IOC dma memory
*/
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
/*
* dma memory for firmware attribute
*/
ioc->attr_dma.kva = dm_kva;
ioc->attr_dma.pa = dm_pa;
ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_enables);
ioc->dbg_fwsave_once = BFA_TRUE;
bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_disables);
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
* Initialize memory for saving firmware trace. Driver must initialize
* trace memory before calling bfa_ioc_enable().
*/
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
ioc->dbg_fwsave = dbg_fwsave;
ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
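/*
 * Sketch (the fwsave buffer is assumed; it is sized by the same
 * BFA_DBG_FWTRC_LEN used above):
 *
 *	static u8 ioc_fwsave[BFA_DBG_FWTRC_LEN];
 *	bfa_ioc_debug_memclaim(ioc, ioc_fwsave);
 *	bfa_ioc_enable(ioc);
 */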
/*
* Register mailbox message handler functions
*
* @param[in] ioc IOC instance
* @param[in] mcfuncs message class handler functions
*/
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
int mc;
for (mc = 0; mc < BFI_MC_MAX; mc++)
mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
/*
* Register mailbox message handler function, to be called by common modules
*/
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
mod->mbhdlr[mc].cbfn = cbfn;
mod->mbhdlr[mc].cbarg = cbarg;
}
/*
* Queue a mailbox command request to firmware. If the mailbox is busy,
* the command is queued and sent later by the poll timer. It is the
* caller's responsibility to serialize requests.
*
* @param[in] ioc IOC instance
* @param[in] cmd Mailbox command
*/
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
u32 stat;
/*
* If a previous command is pending, queue new command
*/
if (!list_empty(&mod->cmd_q)) {
list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
/*
* If mailbox is busy, queue command for poll timer
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat) {
list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
/*
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
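/*
 * Caller-side sketch (the message layout mirrors bfa_ioc_send_enable()
 * above; my_module->iocmd is hypothetical caller-owned storage that
 * must stay valid until the command is sent):
 *
 *	struct bfa_mbox_cmd_s *cmd = &my_module->iocmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *)cmd->msg;
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *			bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, cmd);
 */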
/*
* Handle mailbox interrupts
*/
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfi_mbmsg_s m;
int mc;
if (bfa_ioc_msgget(ioc, &m)) {
/*
* Treat IOC message class as special.
*/
mc = m.mh.msg_class;
if (mc == BFI_MC_IOC) {
bfa_ioc_isr(ioc, &m);
return;
}
if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
return;
mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
bfa_ioc_lpu_read_stat(ioc);
/*
* Try to send pending mailbox commands
*/
bfa_ioc_mbox_poll(ioc);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_hbfails);
ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
* return true if IOC is disabled
*/
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm) \
(((__sm) == BFI_IOC_UNINIT) || \
((__sm) == BFI_IOC_INITING) || \
((__sm) == BFI_IOC_HWINIT) || \
((__sm) == BFI_IOC_DISABLED) || \
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
/*
* Check if adapter is disabled -- both IOCs should be in a disabled
* state.
*/
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
u32 ioc_state;
if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
return BFA_FALSE;
ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
}
return BFA_TRUE;
}
/*
* Reset IOC fwstate registers.
*/
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
struct bfa_adapter_attr_s *ad_attr)
{
struct bfi_ioc_attr_s *ioc_attr;
ioc_attr = ioc->attr;
bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
memcpy(&ad_attr->vpd, &ioc_attr->vpd,
sizeof(struct bfa_mfg_vpd_s));
ad_attr->nports = bfa_ioc_get_nports(ioc);
ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
bfa_ioc_get_adapter_model(ioc, ad_attr->model);
/* For now, model descr uses same model string */
bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
ad_attr->card_type = ioc_attr->card_type;
ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
ad_attr->prototype = 1;
else
ad_attr->prototype = 0;
ad_attr->pwwn = ioc->attr->pwwn;
ad_attr->mac = bfa_ioc_get_mac(ioc);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
ad_attr->asic_rev = ioc_attr->asic_rev;
bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
ad_attr->trunk_capable = (ad_attr->nports > 1) &&
!bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
return BFA_IOC_TYPE_LL;
WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
memcpy((void *)serial_num,
(void *)ioc->attr->brcd_serialnum,
BFA_ADAPTER_SERIAL_NUM_LEN);
}
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
memset((void *)fw_ver, 0, BFA_VERSION_LEN);
memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
WARN_ON(!chip_rev);
memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
snprintf(chip_rev, BFA_IOC_CHIP_REV_LEN, "Rev-%c",
ioc->attr->asic_rev);
}
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
memcpy(optrom_ver, ioc->attr->optrom_version,
BFA_VERSION_LEN);
}
void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
struct bfi_ioc_attr_s *ioc_attr;
WARN_ON(!model);
memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
ioc_attr = ioc->attr;
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
enum bfa_iocpf_state iocpf_st;
enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
ioc_st = BFA_IOC_SEMWAIT;
break;
case BFA_IOCPF_HWINIT:
ioc_st = BFA_IOC_HWINIT;
break;
case BFA_IOCPF_FWMISMATCH:
ioc_st = BFA_IOC_FWMISMATCH;
break;
case BFA_IOCPF_FAIL:
ioc_st = BFA_IOC_FAIL;
break;
case BFA_IOCPF_INITFAIL:
ioc_st = BFA_IOC_INITFAIL;
break;
default:
break;
}
}
return ioc_st;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
ioc_attr->port_mode = ioc->port_mode;
ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
ioc_attr->cap_bm = ioc->ad_cap_bm;
ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
/*
* Check the IOC type and return the appropriate MAC
*/
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
return ioc->attr->fcoe_mac;
else
return ioc->attr->mac;
}
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
mac_t m;
m = ioc->attr->mfg_mac;
if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
else
bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
bfa_ioc_pcifn(ioc));
return m;
}
/*
* Send AEN notification
*/
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
struct bfa_aen_entry_s *aen_entry;
enum bfa_ioc_type_e ioc_type;
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
ioc_type = bfa_ioc_get_type(ioc);
switch (ioc_type) {
case BFA_IOC_TYPE_FC:
aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
break;
case BFA_IOC_TYPE_FCoE:
aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
break;
case BFA_IOC_TYPE_LL:
aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
break;
default:
WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
break;
}
/* Send the AEN notification */
aen_entry->aen_data.ioc.ioc_type = ioc_type;
bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
BFA_AEN_CAT_IOC, event);
}
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
int tlen;
if (ioc->dbg_fwsave_len == 0)
return BFA_STATUS_ENOFSAVE;
tlen = *trclen;
if (tlen > ioc->dbg_fwsave_len)
tlen = ioc->dbg_fwsave_len;
memcpy(trcdata, ioc->dbg_fwsave, tlen);
*trclen = tlen;
return BFA_STATUS_OK;
}
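/*
* Usage sketch (illustrative only; the buffer name, its size and the
* consume() helper are hypothetical). *trclen is in/out: it carries
* the caller's buffer size in and the number of saved-trace bytes
* copied back out.
*
* char buf[BFA_DBG_FWTRC_LEN];
* int len = sizeof(buf);
* if (bfa_ioc_debug_fwsave(ioc, buf, &len) == BFA_STATUS_OK)
* consume(buf, len);
*/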
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
int tlen;
bfa_status_t status;
bfa_trc(ioc, *trclen);
tlen = *trclen;
if (tlen > BFA_DBG_FWTRC_LEN)
tlen = BFA_DBG_FWTRC_LEN;
status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
*trclen = tlen;
return status;
}
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
struct bfa_mbox_cmd_s cmd;
struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
bfa_ioc_portid(ioc));
req->clscode = cpu_to_be16(ioc->clscode);
bfa_ioc_mbox_queue(ioc, &cmd);
}
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
u32 fwsync_iter = 1000;
bfa_ioc_send_fwsync(ioc);
/*
* After sending a fw sync mbox command wait for it to
* take effect. We will not wait for a response because
* 1. fw_sync mbox cmd doesn't have a response.
* 2. Even if we implement that, interrupts might not
* be enabled when we call this function.
* So, just keep checking whether any mbox cmd is still pending;
* the loop below simply spins (no delay) for a bounded number
* of iterations, then goes ahead.
* It is possible that fw has crashed and the mbox command
* is never acknowledged.
*/
while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
fwsync_iter--;
}
/*
* Dump firmware smem
*/
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
u32 *offset, int *buflen)
{
u32 loff;
int dlen;
bfa_status_t status;
u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
if (*offset >= smem_len) {
*offset = *buflen = 0;
return BFA_STATUS_EINVAL;
}
loff = *offset;
dlen = *buflen;
/*
* First smem read, sync smem before proceeding
* No need to sync before reading every chunk.
*/
if (loff == 0)
bfa_ioc_fwsync(ioc);
if ((loff + dlen) >= smem_len)
dlen = smem_len - loff;
status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
if (status != BFA_STATUS_OK) {
*offset = *buflen = 0;
return status;
}
*offset += dlen;
if (*offset >= smem_len)
*offset = 0;
*buflen = dlen;
return status;
}
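/*
* Illustrative caller loop (a sketch, not driver code; CHUNK_SZ,
* chunk and emit_chunk() are hypothetical). *offset acts as a resume
* cookie: it advances by the number of bytes returned and wraps back
* to 0 once the whole smem has been dumped.
*
* u32 off = 0;
* int len;
* do {
* len = CHUNK_SZ;
* if (bfa_ioc_debug_fwcore(ioc, chunk, &off, &len) != BFA_STATUS_OK)
* break;
* emit_chunk(chunk, len);
* } while (off != 0);
*/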
/*
* Firmware statistics
*/
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
u32 loff = BFI_IOC_FWSTATS_OFF +
BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
int tlen;
bfa_status_t status;
if (ioc->stats_busy) {
bfa_trc(ioc, ioc->stats_busy);
return BFA_STATUS_DEVBUSY;
}
ioc->stats_busy = BFA_TRUE;
tlen = sizeof(struct bfa_fw_stats_s);
status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
ioc->stats_busy = BFA_FALSE;
return status;
}
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
u32 loff = BFI_IOC_FWSTATS_OFF +
BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
int tlen;
bfa_status_t status;
if (ioc->stats_busy) {
bfa_trc(ioc, ioc->stats_busy);
return BFA_STATUS_DEVBUSY;
}
ioc->stats_busy = BFA_TRUE;
tlen = sizeof(struct bfa_fw_stats_s);
status = bfa_ioc_smem_clr(ioc, loff, tlen);
ioc->stats_busy = BFA_FALSE;
return status;
}
/*
* Save firmware trace if configured.
*/
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
int tlen;
if (ioc->dbg_fwsave_once) {
ioc->dbg_fwsave_once = BFA_FALSE;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
}
}
}
/*
* Firmware failure detected. Start recovery actions.
*/
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_hbfails);
ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/*
* BFA IOC PF private functions
*/
static void
bfa_iocpf_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_trc(ioc, 0);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_ioc_hw_sem_get(ioc);
}
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, fwstate);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
bfa_iocpf_timeout(ioc);
} else {
ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
bfa_iocpf_poll_timer_start(ioc);
}
}
static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_ioc_poll_fwinit(ioc);
}
/*
* bfa timer function
*/
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
struct list_head *qh = &mod->timer_q;
struct list_head *qe, *qe_next;
struct bfa_timer_s *elem;
struct list_head timedout_q;
INIT_LIST_HEAD(&timedout_q);
qe = bfa_q_next(qh);
while (qe != qh) {
qe_next = bfa_q_next(qe);
elem = (struct bfa_timer_s *) qe;
if (elem->timeout <= BFA_TIMER_FREQ) {
elem->timeout = 0;
list_del(&elem->qe);
list_add_tail(&elem->qe, &timedout_q);
} else {
elem->timeout -= BFA_TIMER_FREQ;
}
qe = qe_next; /* go to next elem */
}
/*
* Pop all the timeout entries
*/
while (!list_empty(&timedout_q)) {
bfa_q_deq(&timedout_q, &elem);
elem->timercb(elem->arg);
}
}
/*
* Should be called with lock protection
*/
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
void (*timercb) (void *), void *arg, unsigned int timeout)
{
WARN_ON(timercb == NULL);
WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
timer->timeout = timeout;
timer->timercb = timercb;
timer->arg = arg;
list_add_tail(&timer->qe, &mod->timer_q);
}
/*
* Should be called with lock protection
*/
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
WARN_ON(list_empty(&timer->qe));
list_del(&timer->qe);
}
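/*
* Usage sketch for the timer module (illustrative; my_cb, my_arg and
* tmr are hypothetical). Timeouts are soft: they only expire when the
* driver drives bfa_timer_beat() periodically, nominally every
* BFA_TIMER_FREQ milliseconds, under the same lock as begin/stop.
*
* bfa_timer_begin(mod, &tmr, my_cb, my_arg, 2 * BFA_TIMER_FREQ);
* ... each periodic tick calls bfa_timer_beat(mod) ...
* bfa_timer_stop(&tmr); only if the timer has not fired yet
*/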
/*
* ASIC block related
*/
static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
{
struct bfa_ablk_cfg_inst_s *cfg_inst;
int i, j;
u16 be16;
u32 be32;
for (i = 0; i < BFA_ABLK_MAX; i++) {
cfg_inst = &cfg->inst[i];
for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
be16 = cfg_inst->pf_cfg[j].pers;
cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
be16 = cfg_inst->pf_cfg[j].num_qpairs;
cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
be16 = cfg_inst->pf_cfg[j].num_vectors;
cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
be32 = cfg_inst->pf_cfg[j].bw;
cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
}
}
}
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
bfa_ablk_cbfn_t cbfn;
WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
bfa_trc(ablk->ioc, msg->mh.msg_id);
switch (msg->mh.msg_id) {
case BFI_ABLK_I2H_QUERY:
if (rsp->status == BFA_STATUS_OK) {
memcpy(ablk->cfg, ablk->dma_addr.kva,
sizeof(struct bfa_ablk_cfg_s));
bfa_ablk_config_swap(ablk->cfg);
ablk->cfg = NULL;
}
break;
case BFI_ABLK_I2H_ADPT_CONFIG:
case BFI_ABLK_I2H_PORT_CONFIG:
/* update config port mode */
ablk->ioc->port_mode_cfg = rsp->port_mode;
/* fall through */
case BFI_ABLK_I2H_PF_DELETE:
case BFI_ABLK_I2H_PF_UPDATE:
case BFI_ABLK_I2H_OPTROM_ENABLE:
case BFI_ABLK_I2H_OPTROM_DISABLE:
/* No-op */
break;
case BFI_ABLK_I2H_PF_CREATE:
*(ablk->pcifn) = rsp->pcifn;
ablk->pcifn = NULL;
break;
default:
WARN_ON(1);
}
ablk->busy = BFA_FALSE;
if (ablk->cbfn) {
cbfn = ablk->cbfn;
ablk->cbfn = NULL;
cbfn(ablk->cbarg, rsp->status);
}
}
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
bfa_trc(ablk->ioc, event);
switch (event) {
case BFA_IOC_E_ENABLED:
WARN_ON(ablk->busy != BFA_FALSE);
break;
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
/* Fail any pending requests */
ablk->pcifn = NULL;
if (ablk->busy) {
if (ablk->cbfn)
ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
ablk->cbfn = NULL;
ablk->busy = BFA_FALSE;
}
break;
default:
WARN_ON(1);
break;
}
}
u32
bfa_ablk_meminfo(void)
{
return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}
void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
ablk->dma_addr.kva = dma_kva;
ablk->dma_addr.pa = dma_pa;
}
void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
ablk->ioc = ioc;
bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
bfa_q_qe_init(&ablk->ioc_notify);
bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_query_s *m;
WARN_ON(!ablk_cfg);
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cfg = ablk_cfg;
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
bfa_ioc_portid(ablk->ioc));
bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
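/*
* All ablk requests below follow the same asynchronous mailbox
* pattern sketched here (illustrative; my_done and my_arg are
* hypothetical). A BFA_STATUS_OK return only means the command was
* queued; the final status is delivered to the callback from
* bfa_ablk_isr().
*
* rc = bfa_ablk_query(ablk, &cfg, my_done, my_arg);
* if (rc != BFA_STATUS_OK)
* handle the synchronous failure (IOC down or ablk busy);
* otherwise my_done(my_arg, status) runs on completion.
*/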
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
u8 port, enum bfi_pcifn_class personality, int bw,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_pf_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->pcifn = pcifn;
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
bfa_ioc_portid(ablk->ioc));
m->pers = cpu_to_be16((u16)personality);
m->bw = cpu_to_be32(bw);
m->port = port;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_pf_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
bfa_ioc_portid(ablk->ioc));
m->pcifn = (u8)pcifn;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_cfg_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
bfa_ioc_portid(ablk->ioc));
m->mode = (u8)mode;
m->max_pf = (u8)max_pf;
m->max_vf = (u8)max_vf;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_cfg_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
bfa_ioc_portid(ablk->ioc));
m->port = (u8)port;
m->mode = (u8)mode;
m->max_pf = (u8)max_pf;
m->max_vf = (u8)max_vf;
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_pf_req_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
bfa_ioc_portid(ablk->ioc));
m->pcifn = (u8)pcifn;
m->bw = cpu_to_be32(bw);
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_optrom_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
bfa_ioc_portid(ablk->ioc));
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
struct bfi_ablk_h2i_optrom_s *m;
if (!bfa_ioc_is_operational(ablk->ioc)) {
bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
return BFA_STATUS_IOC_FAILURE;
}
if (ablk->busy) {
bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
return BFA_STATUS_DEVBUSY;
}
ablk->cbfn = cbfn;
ablk->cbarg = cbarg;
ablk->busy = BFA_TRUE;
m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
bfa_ioc_portid(ablk->ioc));
bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
return BFA_STATUS_OK;
}
/*
* SFP module specific
*/
/* forward declarations */
static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
enum bfa_port_speed portspeed);
static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
bfa_trc(sfp, sfp->lock);
if (sfp->cbfn)
sfp->cbfn(sfp->cbarg, sfp->status);
sfp->lock = 0;
sfp->cbfn = NULL;
}
static void
bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
{
bfa_trc(sfp, sfp->portspeed);
if (sfp->media) {
bfa_sfp_media_get(sfp);
if (sfp->state_query_cbfn)
sfp->state_query_cbfn(sfp->state_query_cbarg,
sfp->status);
sfp->media = NULL;
}
if (sfp->portspeed) {
sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
if (sfp->state_query_cbfn)
sfp->state_query_cbfn(sfp->state_query_cbarg,
sfp->status);
sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
}
sfp->state_query_lock = 0;
sfp->state_query_cbfn = NULL;
}
/*
* IOC event handler.
*/
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
struct bfa_sfp_s *sfp = sfp_arg;
bfa_trc(sfp, event);
bfa_trc(sfp, sfp->lock);
bfa_trc(sfp, sfp->state_query_lock);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (sfp->lock) {
sfp->status = BFA_STATUS_IOC_FAILURE;
bfa_cb_sfp_show(sfp);
}
if (sfp->state_query_lock) {
sfp->status = BFA_STATUS_IOC_FAILURE;
bfa_cb_sfp_state_query(sfp);
}
break;
default:
break;
}
}
/*
* SFP's State Change Notification post to AEN
*/
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
struct bfa_aen_entry_s *aen_entry;
enum bfa_port_aen_event aen_evt = 0;
bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
((u64)rsp->event));
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
switch (rsp->event) {
case BFA_SFP_SCN_INSERTED:
aen_evt = BFA_PORT_AEN_SFP_INSERT;
break;
case BFA_SFP_SCN_REMOVED:
aen_evt = BFA_PORT_AEN_SFP_REMOVE;
break;
case BFA_SFP_SCN_FAILED:
aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
break;
case BFA_SFP_SCN_UNSUPPORT:
aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
break;
case BFA_SFP_SCN_POM:
aen_evt = BFA_PORT_AEN_SFP_POM;
aen_entry->aen_data.port.level = rsp->pomlvl;
break;
default:
bfa_trc(sfp, rsp->event);
WARN_ON(1);
}
/* Send the AEN notification */
bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
BFA_AEN_CAT_PORT, aen_evt);
}
/*
* SFP get data send
*/
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
bfa_trc(sfp, req->memtype);
/* build host command */
bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
bfa_ioc_portid(sfp->ioc));
/* send mbox cmd */
bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}
/*
* SFP is valid, read sfp data
*/
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
WARN_ON(sfp->lock != 0);
bfa_trc(sfp, sfp->state);
sfp->lock = 1;
sfp->memtype = memtype;
req->memtype = memtype;
/* Setup SG list */
bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
bfa_sfp_getdata_send(sfp);
}
/*
* SFP scn handler
*/
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
switch (rsp->event) {
case BFA_SFP_SCN_INSERTED:
sfp->state = BFA_SFP_STATE_INSERTED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_REMOVED:
sfp->state = BFA_SFP_STATE_REMOVED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_FAILED:
sfp->state = BFA_SFP_STATE_FAILED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_UNSUPPORT:
sfp->state = BFA_SFP_STATE_UNSUPPORT;
bfa_sfp_scn_aen_post(sfp, rsp);
if (!sfp->lock)
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
break;
case BFA_SFP_SCN_POM:
bfa_sfp_scn_aen_post(sfp, rsp);
break;
case BFA_SFP_SCN_VALID:
sfp->state = BFA_SFP_STATE_VALID;
if (!sfp->lock)
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
break;
default:
bfa_trc(sfp, rsp->event);
WARN_ON(1);
}
}
/*
* SFP show complete
*/
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
if (!sfp->lock) {
/*
* receiving response after ioc failure
*/
bfa_trc(sfp, sfp->lock);
return;
}
bfa_trc(sfp, rsp->status);
if (rsp->status == BFA_STATUS_OK) {
sfp->data_valid = 1;
if (sfp->state == BFA_SFP_STATE_VALID)
sfp->status = BFA_STATUS_OK;
else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
sfp->status = BFA_STATUS_SFP_UNSUPP;
else
bfa_trc(sfp, sfp->state);
} else {
sfp->data_valid = 0;
sfp->status = rsp->status;
/* sfpshow shouldn't change sfp state */
}
bfa_trc(sfp, sfp->memtype);
if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
bfa_trc(sfp, sfp->data_valid);
if (sfp->data_valid) {
u32 size = sizeof(struct sfp_mem_s);
u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
memcpy(des, sfp->dbuf_kva, size);
}
/*
* Queue completion callback.
*/
bfa_cb_sfp_show(sfp);
} else
sfp->lock = 0;
bfa_trc(sfp, sfp->state_query_lock);
if (sfp->state_query_lock) {
sfp->state = rsp->state;
/* Complete callback */
bfa_cb_sfp_state_query(sfp);
}
}
/*
* SFP query fw sfp state
*/
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
/* Should not be doing query if not in _INIT state */
WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
WARN_ON(sfp->state_query_lock != 0);
bfa_trc(sfp, sfp->state);
sfp->state_query_lock = 1;
req->memtype = 0;
if (!sfp->lock)
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
enum bfa_defs_sfp_media_e *media = sfp->media;
*media = BFA_SFP_MEDIA_UNKNOWN;
if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
*media = BFA_SFP_MEDIA_UNSUPPORT;
else if (sfp->state == BFA_SFP_STATE_VALID) {
union sfp_xcvr_e10g_code_u e10g;
struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
(sfpmem->srlid_base.xcvr[5] >> 1);
e10g.b = sfpmem->srlid_base.xcvr[0];
bfa_trc(sfp, e10g.b);
bfa_trc(sfp, xmtr_tech);
/* check fc transmitter tech */
if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
(xmtr_tech & SFP_XMTR_TECH_CP) ||
(xmtr_tech & SFP_XMTR_TECH_CA))
*media = BFA_SFP_MEDIA_CU;
else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
(xmtr_tech & SFP_XMTR_TECH_EL_INTER))
*media = BFA_SFP_MEDIA_EL;
else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
(xmtr_tech & SFP_XMTR_TECH_LC))
*media = BFA_SFP_MEDIA_LW;
else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
(xmtr_tech & SFP_XMTR_TECH_SN) ||
(xmtr_tech & SFP_XMTR_TECH_SA))
*media = BFA_SFP_MEDIA_SW;
/* Check 10G Ethernet compliance code */
else if (e10g.r.e10g_sr)
*media = BFA_SFP_MEDIA_SW;
else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
*media = BFA_SFP_MEDIA_LW;
else if (e10g.r.e10g_unall)
*media = BFA_SFP_MEDIA_UNKNOWN;
else
bfa_trc(sfp, 0);
} else
bfa_trc(sfp, sfp->state);
}
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
if (portspeed == BFA_PORT_SPEED_10GBPS) {
if (e10g.r.e10g_sr || e10g.r.e10g_lr)
return BFA_STATUS_OK;
else {
bfa_trc(sfp, e10g.b);
return BFA_STATUS_UNSUPP_SPEED;
}
}
if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
return BFA_STATUS_OK;
else {
bfa_trc(sfp, portspeed);
bfa_trc(sfp, fc3.b);
bfa_trc(sfp, e10g.b);
return BFA_STATUS_UNSUPP_SPEED;
}
}
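/*
* Worked example (illustrative): for portspeed == BFA_PORT_SPEED_8GBPS
* the check reduces to the fc3.r.mb800 capability bit read from the
* transceiver serial ID data -- BFA_STATUS_OK if the SFP advertises
* 800 MB/s, BFA_STATUS_UNSUPP_SPEED otherwise. 10G is special-cased
* against the 10G Ethernet compliance codes instead.
*/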
/*
* SFP hmbox handler
*/
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
struct bfa_sfp_s *sfp = sfparg;
switch (msg->mh.msg_id) {
case BFI_SFP_I2H_SHOW:
bfa_sfp_show_comp(sfp, msg);
break;
case BFI_SFP_I2H_SCN:
bfa_sfp_scn(sfp, msg);
break;
default:
bfa_trc(sfp, msg->mh.msg_id);
WARN_ON(1);
}
}
/*
* Return DMA memory needed by sfp module.
*/
u32
bfa_sfp_meminfo(void)
{
return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
* Attach virtual and physical memory for SFP.
*/
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod)
{
sfp->dev = dev;
sfp->ioc = ioc;
sfp->trcmod = trcmod;
sfp->cbfn = NULL;
sfp->cbarg = NULL;
sfp->sfpmem = NULL;
sfp->lock = 0;
sfp->data_valid = 0;
sfp->state = BFA_SFP_STATE_INIT;
sfp->state_query_lock = 0;
sfp->state_query_cbfn = NULL;
sfp->state_query_cbarg = NULL;
sfp->media = NULL;
sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
sfp->is_elb = BFA_FALSE;
bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
bfa_q_qe_init(&sfp->ioc_notify);
bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}
/*
* Claim Memory for SFP
*/
void
bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
{
sfp->dbuf_kva = dm_kva;
sfp->dbuf_pa = dm_pa;
memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
* Show SFP eeprom content
*
* @param[in] sfp - bfa sfp module
*
* @param[out] sfpmem - sfp eeprom data
*
*/
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
bfa_cb_sfp_t cbfn, void *cbarg)
{
if (!bfa_ioc_is_operational(sfp->ioc)) {
bfa_trc(sfp, 0);
return BFA_STATUS_IOC_NON_OP;
}
if (sfp->lock) {
bfa_trc(sfp, 0);
return BFA_STATUS_DEVBUSY;
}
sfp->cbfn = cbfn;
sfp->cbarg = cbarg;
sfp->sfpmem = sfpmem;
bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
return BFA_STATUS_OK;
}
/*
* Return SFP Media type
*
* @param[in] sfp - bfa sfp module
*
* @param[out] media - sfp media type returned to the caller
*
*/
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
bfa_cb_sfp_t cbfn, void *cbarg)
{
if (!bfa_ioc_is_operational(sfp->ioc)) {
bfa_trc(sfp, 0);
return BFA_STATUS_IOC_NON_OP;
}
sfp->media = media;
if (sfp->state == BFA_SFP_STATE_INIT) {
if (sfp->state_query_lock) {
bfa_trc(sfp, 0);
return BFA_STATUS_DEVBUSY;
} else {
sfp->state_query_cbfn = cbfn;
sfp->state_query_cbarg = cbarg;
bfa_sfp_state_query(sfp);
return BFA_STATUS_SFP_NOT_READY;
}
}
bfa_sfp_media_get(sfp);
return BFA_STATUS_OK;
}
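/*
* Usage sketch (illustrative; my_cb and my_arg are hypothetical).
* While the SFP state is still being queried the call returns
* BFA_STATUS_SFP_NOT_READY and *media is filled in later, just before
* my_cb runs.
*
* rc = bfa_sfp_media(sfp, &media, my_cb, my_arg);
* if (rc == BFA_STATUS_OK)
* media is valid immediately;
* else if (rc == BFA_STATUS_SFP_NOT_READY)
* wait for my_cb before reading media;
*/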
/*
* Check if user set port speed is allowed by the SFP
*
* @param[in] sfp - bfa sfp module
* @param[in] portspeed - port speed from user
*
*/
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
bfa_cb_sfp_t cbfn, void *cbarg)
{
WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
if (!bfa_ioc_is_operational(sfp->ioc))
return BFA_STATUS_IOC_NON_OP;
/* For Mezz cards, all speeds are allowed */
if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
return BFA_STATUS_OK;
/* Check SFP state */
sfp->portspeed = portspeed;
if (sfp->state == BFA_SFP_STATE_INIT) {
if (sfp->state_query_lock) {
bfa_trc(sfp, 0);
return BFA_STATUS_DEVBUSY;
} else {
sfp->state_query_cbfn = cbfn;
sfp->state_query_cbarg = cbarg;
bfa_sfp_state_query(sfp);
return BFA_STATUS_SFP_NOT_READY;
}
}
if (sfp->state == BFA_SFP_STATE_REMOVED ||
sfp->state == BFA_SFP_STATE_FAILED) {
bfa_trc(sfp, sfp->state);
return BFA_STATUS_NO_SFP_DEV;
}
if (sfp->state == BFA_SFP_STATE_INSERTED) {
bfa_trc(sfp, sfp->state);
return BFA_STATUS_DEVBUSY; /* sfp is reading data */
}
/* For eloopback, all speeds are allowed */
if (sfp->is_elb)
return BFA_STATUS_OK;
return bfa_sfp_speed_valid(sfp, portspeed);
}
/*
* Flash module specific
*/
/*
* The flash DMA buffer must be big enough to hold both the MFG block
* and the ASIC block (64KB) at the same time, and must be 2KB aligned
* so that a write segment never crosses a sector boundary.
*/
#define BFA_FLASH_SEG_SZ 2048
#define BFA_FLASH_DMA_BUF_SZ \
BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
int inst, int type)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
struct bfa_aen_entry_s *aen_entry;
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
aen_entry->aen_data.audit.partition_inst = inst;
aen_entry->aen_data.audit.partition_type = type;
/* Send the AEN notification */
bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
BFA_AEN_CAT_AUDIT, event);
}
static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
flash->op_busy = 0;
if (flash->cbfn)
flash->cbfn(flash->cbarg, flash->status);
}
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
struct bfa_flash_s *flash = cbarg;
bfa_trc(flash, event);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (flash->op_busy) {
flash->status = BFA_STATUS_IOC_FAILURE;
flash->cbfn(flash->cbarg, flash->status);
flash->op_busy = 0;
}
break;
default:
break;
}
}
/*
* Send flash attribute query request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_flash_query_send(void *cbarg)
{
struct bfa_flash_s *flash = cbarg;
struct bfi_flash_query_req_s *msg =
(struct bfi_flash_query_req_s *) flash->mb.msg;
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
bfa_ioc_portid(flash->ioc));
bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
flash->dbuf_pa);
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Send flash write request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_flash_write_send(struct bfa_flash_s *flash)
{
struct bfi_flash_write_req_s *msg =
(struct bfi_flash_write_req_s *) flash->mb.msg;
u32 len;
msg->type = cpu_to_be32(flash->type);
msg->instance = flash->instance;
msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
flash->residue : BFA_FLASH_DMA_BUF_SZ;
msg->length = cpu_to_be32(len);
/* indicate if it's the last msg of the whole write operation */
msg->last = (len == flash->residue) ? 1 : 0;
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
bfa_ioc_portid(flash->ioc));
bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
flash->residue -= len;
flash->offset += len;
}
/*
* Send flash read request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_flash_read_send(void *cbarg)
{
struct bfa_flash_s *flash = cbarg;
struct bfi_flash_read_req_s *msg =
(struct bfi_flash_read_req_s *) flash->mb.msg;
u32 len;
msg->type = cpu_to_be32(flash->type);
msg->instance = flash->instance;
msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
flash->residue : BFA_FLASH_DMA_BUF_SZ;
msg->length = cpu_to_be32(len);
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
bfa_ioc_portid(flash->ioc));
bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Send flash erase request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_flash_erase_send(void *cbarg)
{
struct bfa_flash_s *flash = cbarg;
struct bfi_flash_erase_req_s *msg =
(struct bfi_flash_erase_req_s *) flash->mb.msg;
msg->type = cpu_to_be32(flash->type);
msg->instance = flash->instance;
bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
bfa_ioc_portid(flash->ioc));
bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
* Process flash response messages upon receiving interrupts.
*
* @param[in] flasharg - flash structure
* @param[in] msg - message structure
*/
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
struct bfa_flash_s *flash = flasharg;
u32 status;
union {
struct bfi_flash_query_rsp_s *query;
struct bfi_flash_erase_rsp_s *erase;
struct bfi_flash_write_rsp_s *write;
struct bfi_flash_read_rsp_s *read;
struct bfi_flash_event_s *event;
struct bfi_mbmsg_s *msg;
} m;
m.msg = msg;
bfa_trc(flash, msg->mh.msg_id);
if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
/* receiving response after ioc failure */
bfa_trc(flash, 0x9999);
return;
}
switch (msg->mh.msg_id) {
case BFI_FLASH_I2H_QUERY_RSP:
status = be32_to_cpu(m.query->status);
bfa_trc(flash, status);
if (status == BFA_STATUS_OK) {
u32 i;
struct bfa_flash_attr_s *attr, *f;
attr = (struct bfa_flash_attr_s *) flash->ubuf;
f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
attr->status = be32_to_cpu(f->status);
attr->npart = be32_to_cpu(f->npart);
bfa_trc(flash, attr->status);
bfa_trc(flash, attr->npart);
for (i = 0; i < attr->npart; i++) {
attr->part[i].part_type =
be32_to_cpu(f->part[i].part_type);
attr->part[i].part_instance =
be32_to_cpu(f->part[i].part_instance);
attr->part[i].part_off =
be32_to_cpu(f->part[i].part_off);
attr->part[i].part_size =
be32_to_cpu(f->part[i].part_size);
attr->part[i].part_len =
be32_to_cpu(f->part[i].part_len);
attr->part[i].part_status =
be32_to_cpu(f->part[i].part_status);
}
}
flash->status = status;
bfa_flash_cb(flash);
break;
case BFI_FLASH_I2H_ERASE_RSP:
status = be32_to_cpu(m.erase->status);
bfa_trc(flash, status);
flash->status = status;
bfa_flash_cb(flash);
break;
case BFI_FLASH_I2H_WRITE_RSP:
status = be32_to_cpu(m.write->status);
bfa_trc(flash, status);
if (status != BFA_STATUS_OK || flash->residue == 0) {
flash->status = status;
bfa_flash_cb(flash);
} else {
bfa_trc(flash, flash->offset);
bfa_flash_write_send(flash);
}
break;
case BFI_FLASH_I2H_READ_RSP:
status = be32_to_cpu(m.read->status);
bfa_trc(flash, status);
if (status != BFA_STATUS_OK) {
flash->status = status;
bfa_flash_cb(flash);
} else {
u32 len = be32_to_cpu(m.read->length);
bfa_trc(flash, flash->offset);
bfa_trc(flash, len);
memcpy(flash->ubuf + flash->offset,
flash->dbuf_kva, len);
flash->residue -= len;
flash->offset += len;
if (flash->residue == 0) {
flash->status = status;
bfa_flash_cb(flash);
} else
bfa_flash_read_send(flash);
}
break;
case BFI_FLASH_I2H_BOOT_VER_RSP:
break;
case BFI_FLASH_I2H_EVENT:
status = be32_to_cpu(m.event->status);
bfa_trc(flash, status);
if (status == BFA_STATUS_BAD_FWCFG)
bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
else if (status == BFA_STATUS_INVALID_VENDOR) {
u32 param;
param = be32_to_cpu(m.event->param);
bfa_trc(flash, param);
bfa_ioc_aen_post(flash->ioc,
BFA_IOC_AEN_INVALID_VENDOR);
}
break;
default:
WARN_ON(1);
}
}
/*
* Flash memory info API.
*
* @param[in] mincfg - minimal cfg variable
*/
u32
bfa_flash_meminfo(bfa_boolean_t mincfg)
{
/* min driver doesn't need flash */
if (mincfg)
return 0;
return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Flash attach API.
*
* @param[in] flash - flash structure
* @param[in] ioc - ioc structure
* @param[in] dev - device structure
* @param[in] trcmod - trace module
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
flash->ioc = ioc;
flash->trcmod = trcmod;
flash->cbfn = NULL;
flash->cbarg = NULL;
flash->op_busy = 0;
bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
bfa_q_qe_init(&flash->ioc_notify);
bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
/* min driver doesn't need flash */
if (mincfg) {
flash->dbuf_kva = NULL;
flash->dbuf_pa = 0;
}
}
/*
* Claim memory for flash
*
* @param[in] flash - flash structure
* @param[in] dm_kva - pointer to virtual memory address
* @param[in] dm_pa - physical memory address
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
bfa_boolean_t mincfg)
{
if (mincfg)
return;
flash->dbuf_kva = dm_kva;
flash->dbuf_pa = dm_pa;
memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
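/*
* DMA setup sketch (illustrative only; dma_kva and dma_pa stand for a
* coherent allocation made by the caller). The caller sizes the
* region with bfa_flash_meminfo() and hands it over with
* bfa_flash_memclaim(); the sfp, diag and phy modules use the same
* meminfo/memclaim pairing.
*
* u32 sz = bfa_flash_meminfo(mincfg);
* bfa_flash_memclaim(flash, dma_kva, dma_pa, mincfg);
* dma_kva += sz; dma_pa += sz; advance past the claimed chunk
*/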
/*
* Get flash attribute.
*
* @param[in] flash - flash structure
* @param[in] attr - flash attribute structure
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->ubuf = (u8 *) attr;
bfa_flash_query_send(flash);
return BFA_STATUS_OK;
}
/*
* Erase flash partition.
*
* @param[in] flash - flash structure
* @param[in] type - flash partition type
* @param[in] instance - flash partition instance
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
bfa_trc(flash, type);
bfa_trc(flash, instance);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->type = type;
flash->instance = instance;
bfa_flash_erase_send(flash);
bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
instance, type);
return BFA_STATUS_OK;
}
/*
* Update flash partition.
*
* @param[in] flash - flash structure
* @param[in] type - flash partition type
* @param[in] instance - flash partition instance
* @param[in] buf - update data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to the partition starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
u8 instance, void *buf, u32 len, u32 offset,
bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
bfa_trc(flash, type);
bfa_trc(flash, instance);
bfa_trc(flash, len);
bfa_trc(flash, offset);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
/*
* 'len' must be a multiple of 4 (word aligned) and
* 'offset' must lie on a sector (16KB) boundary
*/
if (!len || (len & 0x03) || (offset & 0x00003FFF))
return BFA_STATUS_FLASH_BAD_LEN;
if (type == BFA_FLASH_PART_MFG)
return BFA_STATUS_EINVAL;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->type = type;
flash->instance = instance;
flash->residue = len;
flash->offset = 0;
flash->addr_off = offset;
flash->ubuf = buf;
bfa_flash_write_send(flash);
return BFA_STATUS_OK;
}
/*
* Read flash partition.
*
* @param[in] flash - flash structure
* @param[in] type - flash partition type
* @param[in] instance - flash partition instance
* @param[in] buf - read data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to the partition starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
u8 instance, void *buf, u32 len, u32 offset,
bfa_cb_flash_t cbfn, void *cbarg)
{
bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
bfa_trc(flash, type);
bfa_trc(flash, instance);
bfa_trc(flash, len);
bfa_trc(flash, offset);
if (!bfa_ioc_is_operational(flash->ioc))
return BFA_STATUS_IOC_NON_OP;
/*
* 'len' must be a multiple of 4 (word aligned) and
* 'offset' must lie on a sector (16KB) boundary
*/
if (!len || (len & 0x03) || (offset & 0x00003FFF))
return BFA_STATUS_FLASH_BAD_LEN;
if (flash->op_busy) {
bfa_trc(flash, flash->op_busy);
return BFA_STATUS_DEVBUSY;
}
flash->op_busy = 1;
flash->cbfn = cbfn;
flash->cbarg = cbarg;
flash->type = type;
flash->instance = instance;
flash->residue = len;
flash->offset = 0;
flash->addr_off = offset;
flash->ubuf = buf;
bfa_flash_read_send(flash);
return BFA_STATUS_OK;
}
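/*
* Caller sketch (illustrative; partition choice, buffer and sizes are
* hypothetical). 'len' must be a multiple of 4 and 'offset' a
* multiple of 16KB, or the request fails with BFA_STATUS_FLASH_BAD_LEN
* before anything is queued; transfers larger than
* BFA_FLASH_DMA_BUF_SZ are split into chunks by the send routines.
*
* rc = bfa_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
* buf, 0x8000, 0, my_cb, my_arg);
*/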
/*
* DIAG module specific
*/
#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
/* IOC event handler */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
struct bfa_diag_s *diag = diag_arg;
bfa_trc(diag, event);
bfa_trc(diag, diag->block);
bfa_trc(diag, diag->fwping.lock);
bfa_trc(diag, diag->tsensor.lock);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (diag->fwping.lock) {
diag->fwping.status = BFA_STATUS_IOC_FAILURE;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
}
if (diag->tsensor.lock) {
diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
diag->tsensor.cbfn(diag->tsensor.cbarg,
diag->tsensor.status);
diag->tsensor.lock = 0;
}
if (diag->block) {
if (diag->timer_active) {
bfa_timer_stop(&diag->timer);
diag->timer_active = 0;
}
diag->status = BFA_STATUS_IOC_FAILURE;
diag->cbfn(diag->cbarg, diag->status);
diag->block = 0;
}
break;
default:
break;
}
}
static void
bfa_diag_memtest_done(void *cbarg)
{
struct bfa_diag_s *diag = cbarg;
struct bfa_ioc_s *ioc = diag->ioc;
struct bfa_diag_memtest_result *res = diag->result;
u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
u32 pgnum, pgoff, i;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
sizeof(u32)); i++) {
/* read test result from smem */
*((u32 *) res + i) =
bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
loff += sizeof(u32);
}
/* Reset IOC fwstates to BFI_IOC_UNINIT */
bfa_ioc_reset_fwstate(ioc);
res->status = swab32(res->status);
bfa_trc(diag, res->status);
if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
diag->status = BFA_STATUS_OK;
else {
diag->status = BFA_STATUS_MEMTEST_FAILED;
res->addr = swab32(res->addr);
res->exp = swab32(res->exp);
res->act = swab32(res->act);
res->err_status = swab32(res->err_status);
res->err_status1 = swab32(res->err_status1);
res->err_addr = swab32(res->err_addr);
bfa_trc(diag, res->addr);
bfa_trc(diag, res->exp);
bfa_trc(diag, res->act);
bfa_trc(diag, res->err_status);
bfa_trc(diag, res->err_status1);
bfa_trc(diag, res->err_addr);
}
diag->timer_active = 0;
diag->cbfn(diag->cbarg, diag->status);
diag->block = 0;
}
/*
* Firmware ping
*/
/*
* Perform DMA test directly
*/
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
struct bfi_diag_fwping_req_s *fwping_req;
u32 i;
bfa_trc(diag, diag->fwping.dbuf_pa);
/* fill DMA area with pattern */
for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
/* Fill mbox msg */
fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
/* Setup SG list */
bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
diag->fwping.dbuf_pa);
/* Set up dma count */
fwping_req->count = cpu_to_be32(diag->fwping.count);
/* Set up data pattern */
fwping_req->data = diag->fwping.data;
/* build host command */
bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
bfa_ioc_portid(diag->ioc));
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
static void
diag_fwping_comp(struct bfa_diag_s *diag,
struct bfi_diag_fwping_rsp_s *diag_rsp)
{
u32 rsp_data = diag_rsp->data;
u8 rsp_dma_status = diag_rsp->dma_status;
bfa_trc(diag, rsp_data);
bfa_trc(diag, rsp_dma_status);
if (rsp_dma_status == BFA_STATUS_OK) {
u32 i, pat;
pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
diag->fwping.data;
/* Check mbox data */
if (diag->fwping.data != rsp_data) {
bfa_trc(diag, rsp_data);
diag->fwping.result->dmastatus =
BFA_STATUS_DATACORRUPTED;
diag->fwping.status = BFA_STATUS_DATACORRUPTED;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
return;
}
/* Check dma pattern */
for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
bfa_trc(diag, i);
bfa_trc(diag, pat);
bfa_trc(diag,
*((u32 *)diag->fwping.dbuf_kva + i));
diag->fwping.result->dmastatus =
BFA_STATUS_DATACORRUPTED;
diag->fwping.status = BFA_STATUS_DATACORRUPTED;
diag->fwping.cbfn(diag->fwping.cbarg,
diag->fwping.status);
diag->fwping.lock = 0;
return;
}
}
diag->fwping.result->dmastatus = BFA_STATUS_OK;
diag->fwping.status = BFA_STATUS_OK;
diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
diag->fwping.lock = 0;
} else {
diag->fwping.status = BFA_STATUS_HDMA_FAILED;
diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
diag->fwping.lock = 0;
}
}
/*
* Temperature Sensor
*/
static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
struct bfi_diag_ts_req_s *msg;
msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
bfa_trc(diag, msg->temp);
/* build host command */
bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
bfa_ioc_portid(diag->ioc));
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
if (!diag->tsensor.lock) {
/* receiving response after ioc failure */
bfa_trc(diag, diag->tsensor.lock);
return;
}
/*
* ASIC junction tempsensor is a register read operation;
* it always returns OK
*/
diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
diag->tsensor.temp->ts_junc = rsp->ts_junc;
diag->tsensor.temp->ts_brd = rsp->ts_brd;
diag->tsensor.temp->status = BFA_STATUS_OK;
if (rsp->ts_brd) {
if (rsp->status == BFA_STATUS_OK) {
diag->tsensor.temp->brd_temp =
be16_to_cpu(rsp->brd_temp);
} else {
bfa_trc(diag, rsp->status);
diag->tsensor.temp->brd_temp = 0;
diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
}
}
bfa_trc(diag, rsp->ts_junc);
bfa_trc(diag, rsp->temp);
bfa_trc(diag, rsp->ts_brd);
bfa_trc(diag, rsp->brd_temp);
diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
diag->tsensor.lock = 0;
}
/*
* LED Test command
*/
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
struct bfi_diag_ledtest_req_s *msg;
msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
/* build host command */
bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
bfa_ioc_portid(diag->ioc));
/*
* convert the freq from N blinks per 10 sec to the crossbow
* ontime value; we do it here because a division is needed
*/
if (ledtest->freq)
ledtest->freq = 500 / ledtest->freq;
if (ledtest->freq == 0)
ledtest->freq = 1;
bfa_trc(diag, ledtest->freq);
/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
msg->cmd = (u8) ledtest->cmd;
msg->color = (u8) ledtest->color;
msg->portid = bfa_ioc_portid(diag->ioc);
msg->led = ledtest->led;
msg->freq = cpu_to_be16(ledtest->freq);
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}
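/*
* Worked conversion example (illustrative): a request of 10 blinks
* per 10 seconds becomes an ontime value of 500 / 10 = 50, and a
* request of 0 is clamped to 1 so the firmware always receives a
* non-zero period.
*/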
static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
bfa_trc(diag, diag->ledtest.lock);
diag->ledtest.lock = BFA_FALSE;
/* no bfa_cb_queue is needed because driver is not waiting */
}
/*
* Port beaconing
*/
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
struct bfi_diag_portbeacon_req_s *msg;
msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
/* build host command */
bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
bfa_ioc_portid(diag->ioc));
msg->beacon = beacon;
msg->period = cpu_to_be32(sec);
/* send mbox cmd */
bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}
static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
bfa_trc(diag, diag->beacon.state);
diag->beacon.state = BFA_FALSE;
if (diag->cbfn_beacon)
diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}
/*
* Diag hmbox handler
*/
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
struct bfa_diag_s *diag = diagarg;
switch (msg->mh.msg_id) {
case BFI_DIAG_I2H_PORTBEACON:
diag_portbeacon_comp(diag);
break;
case BFI_DIAG_I2H_FWPING:
diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
break;
case BFI_DIAG_I2H_TEMPSENSOR:
diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
break;
case BFI_DIAG_I2H_LEDTEST:
diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
break;
default:
bfa_trc(diag, msg->mh.msg_id);
WARN_ON(1);
}
}
/*
* Gen RAM Test
*
* @param[in] *diag - diag data struct
* @param[in] *memtest - mem test params input from upper layer
* @param[in] pattern - mem test pattern
* @param[in] *result - mem test result
* @param[in] cbfn - mem test callback function
* @param[in] cbarg - callback function arg
*
* @param[out]
*/
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
u32 pattern, struct bfa_diag_memtest_result *result,
bfa_cb_diag_t cbfn, void *cbarg)
{
u32 memtest_tov;
bfa_trc(diag, pattern);
if (!bfa_ioc_adapter_is_disabled(diag->ioc))
return BFA_STATUS_ADAPTER_ENABLED;
/* check to see if there is another destructive diag cmd running */
if (diag->block) {
bfa_trc(diag, diag->block);
return BFA_STATUS_DEVBUSY;
} else
diag->block = 1;
diag->result = result;
diag->cbfn = cbfn;
diag->cbarg = cbarg;
/* download memtest code and take LPU0 out of reset */
bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
bfa_diag_memtest_done, diag, memtest_tov);
diag->timer_active = 1;
return BFA_STATUS_OK;
}
/*
* DIAG firmware ping command
*
* @param[in] *diag - diag data struct
* @param[in] cnt - dma loop count for testing PCIE
* @param[in] data - data pattern to pass in fw
* @param[in] *result - ptr to bfa_diag_results_fwping data struct
* @param[in] cbfn - callback function
* @param[in] *cbarg - callback function arg
*
* @param[out]
*/
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
void *cbarg)
{
bfa_trc(diag, cnt);
bfa_trc(diag, data);
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
if (bfa_asic_id_ct2(bfa_ioc_devid(diag->ioc)) &&
(diag->ioc->clscode == BFI_PCIFN_CLASS_ETH))
return BFA_STATUS_CMD_NOTSUPP;
/* check to see if there is another destructive diag cmd running */
if (diag->block || diag->fwping.lock) {
bfa_trc(diag, diag->block);
bfa_trc(diag, diag->fwping.lock);
return BFA_STATUS_DEVBUSY;
}
/* Initialization */
diag->fwping.lock = 1;
diag->fwping.cbfn = cbfn;
diag->fwping.cbarg = cbarg;
diag->fwping.result = result;
diag->fwping.data = data;
diag->fwping.count = cnt;
/* Init test results */
diag->fwping.result->data = 0;
diag->fwping.result->status = BFA_STATUS_OK;
/* kick off the first ping */
diag_fwping_send(diag);
return BFA_STATUS_OK;
}
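/*
* Usage sketch (illustrative; loop count, pattern, my_cb and my_arg
* are hypothetical). The firmware loops 'cnt' times over a
* BFI_DIAG_DMA_BUF_SZ pattern buffer; the DMA status and the
* data-compare result are reported through the callback.
*
* rc = bfa_diag_fwping(diag, 10, 0xA5A5A5A5, &result, my_cb, my_arg);
*/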
/*
* Read Temperature Sensor
*
* @param[in] *diag - diag data struct
* @param[in] *result - ptr to bfa_diag_results_tempsensor_s data struct
* @param[in] cbfn - callback function
* @param[in] *cbarg - callback function arg
*
* @param[out]
*/
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
struct bfa_diag_results_tempsensor_s *result,
bfa_cb_diag_t cbfn, void *cbarg)
{
/* check to see if there is a destructive diag cmd running */
if (diag->block || diag->tsensor.lock) {
bfa_trc(diag, diag->block);
bfa_trc(diag, diag->tsensor.lock);
return BFA_STATUS_DEVBUSY;
}
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
/* Init diag mod params */
diag->tsensor.lock = 1;
diag->tsensor.temp = result;
diag->tsensor.cbfn = cbfn;
diag->tsensor.cbarg = cbarg;
/* Send msg to fw */
diag_tempsensor_send(diag);
return BFA_STATUS_OK;
}
/*
* LED Test command
*
* @param[in] *diag - diag data struct
* @param[in] *ledtest - ptr to ledtest data structure
*
* @param[out]
*/
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
bfa_trc(diag, ledtest->cmd);
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
if (diag->beacon.state)
return BFA_STATUS_BEACON_ON;
if (diag->ledtest.lock)
return BFA_STATUS_LEDTEST_OP;
/* Send msg to fw */
diag->ledtest.lock = BFA_TRUE;
diag_ledtest_send(diag, ledtest);
return BFA_STATUS_OK;
}
/*
* Port beaconing command
*
* @param[in] *diag - diag data struct
* @param[in] beacon - port beaconing 1:ON 0:OFF
* @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
* @param[in] sec - beaconing duration in seconds
*
* @param[out]
*/
bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
bfa_boolean_t link_e2e_beacon, uint32_t sec)
{
bfa_trc(diag, beacon);
bfa_trc(diag, link_e2e_beacon);
bfa_trc(diag, sec);
if (!bfa_ioc_is_operational(diag->ioc))
return BFA_STATUS_IOC_NON_OP;
if (diag->ledtest.lock)
return BFA_STATUS_LEDTEST_OP;
if (diag->beacon.state && beacon) /* beacon already on */
return BFA_STATUS_BEACON_ON;
diag->beacon.state = beacon;
diag->beacon.link_e2e = link_e2e_beacon;
if (diag->cbfn_beacon)
diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
/* Send msg to fw */
diag_portbeacon_send(diag, beacon, sec);
return BFA_STATUS_OK;
}
/*
* Return DMA memory needed by diag module.
*/
u32
bfa_diag_meminfo(void)
{
return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Attach virtual and physical memory for Diag.
*/
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
diag->dev = dev;
diag->ioc = ioc;
diag->trcmod = trcmod;
diag->block = 0;
diag->cbfn = NULL;
diag->cbarg = NULL;
diag->result = NULL;
diag->cbfn_beacon = cbfn_beacon;
bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
bfa_q_qe_init(&diag->ioc_notify);
bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
diag->fwping.dbuf_kva = dm_kva;
diag->fwping.dbuf_pa = dm_pa;
memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
/*
* PHY module specific
*/
#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
static void
bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
int i, m = sz >> 2;
for (i = 0; i < m; i++)
obuf[i] = be32_to_cpu(ibuf[i]);
}
static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}
static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
struct bfa_phy_s *phy = cbarg;
bfa_trc(phy, event);
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (phy->op_busy) {
phy->status = BFA_STATUS_IOC_FAILURE;
phy->cbfn(phy->cbarg, phy->status);
phy->op_busy = 0;
}
break;
default:
break;
}
}
/*
* Send phy attribute query request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_query_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_query_req_s *msg =
(struct bfi_phy_query_req_s *) phy->mb.msg;
msg->instance = phy->instance;
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
* Send phy write request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_write_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_write_req_s *msg =
(struct bfi_phy_write_req_s *) phy->mb.msg;
u32 len;
u16 *buf, *dbuf;
int i, sz;
msg->instance = phy->instance;
msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
phy->residue : BFA_PHY_DMA_BUF_SZ;
msg->length = cpu_to_be32(len);
/* indicate if it's the last msg of the whole write operation */
msg->last = (len == phy->residue) ? 1 : 0;
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
buf = (u16 *) (phy->ubuf + phy->offset);
dbuf = (u16 *)phy->dbuf_kva;
sz = len >> 1;
/* copy the user data into the DMA buffer for the firmware to fetch */
for (i = 0; i < sz; i++)
dbuf[i] = cpu_to_be16(buf[i]);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
phy->residue -= len;
phy->offset += len;
}
/*
* Send phy read request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_read_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_read_req_s *msg =
(struct bfi_phy_read_req_s *) phy->mb.msg;
u32 len;
msg->instance = phy->instance;
msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
phy->residue : BFA_PHY_DMA_BUF_SZ;
msg->length = cpu_to_be32(len);
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
* Send phy stats request.
*
* @param[in] cbarg - callback argument
*/
static void
bfa_phy_stats_send(void *cbarg)
{
struct bfa_phy_s *phy = cbarg;
struct bfi_phy_stats_req_s *msg =
(struct bfi_phy_stats_req_s *) phy->mb.msg;
msg->instance = phy->instance;
bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
bfa_ioc_portid(phy->ioc));
bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
* Phy memory info API.
*
* @param[in] mincfg - minimal cfg variable
*/
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
/* min driver doesn't need phy */
if (mincfg)
return 0;
return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
* Phy attach API.
*
* @param[in] phy - phy structure
* @param[in] ioc - ioc structure
* @param[in] dev - device structure
* @param[in] trcmod - trace module
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
phy->ioc = ioc;
phy->trcmod = trcmod;
phy->cbfn = NULL;
phy->cbarg = NULL;
phy->op_busy = 0;
bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
bfa_q_qe_init(&phy->ioc_notify);
bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
/* min driver doesn't need phy */
if (mincfg) {
phy->dbuf_kva = NULL;
phy->dbuf_pa = 0;
}
}
/*
* Claim memory for phy
*
* @param[in] phy - phy structure
* @param[in] dm_kva - pointer to virtual memory address
* @param[in] dm_pa - physical memory address
* @param[in] mincfg - minimal cfg variable
*/
void
bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
bfa_boolean_t mincfg)
{
if (mincfg)
return;
phy->dbuf_kva = dm_kva;
phy->dbuf_pa = dm_pa;
memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s *ioc)
{
void __iomem *rb;
rb = bfa_ioc_bar0(ioc);
return readl(rb + BFA_PHY_LOCK_STATUS);
}
/*
* Get phy attribute.
*
* @param[in] phy - phy structure
* @param[in] attr - phy attribute structure
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
bfa_trc(phy, instance);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->ubuf = (uint8_t *) attr;
bfa_phy_query_send(phy);
return BFA_STATUS_OK;
}
/*
* Get phy stats.
*
* @param[in] phy - phy structure
* @param[in] instance - phy image instance
* @param[in] stats - pointer to phy stats
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
struct bfa_phy_stats_s *stats,
bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
bfa_trc(phy, instance);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->ubuf = (u8 *) stats;
bfa_phy_stats_send(phy);
return BFA_STATUS_OK;
}
/*
* Update phy image.
*
* @param[in] phy - phy structure
* @param[in] instance - phy image instance
* @param[in] buf - update data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
void *buf, u32 len, u32 offset,
bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
bfa_trc(phy, instance);
bfa_trc(phy, len);
bfa_trc(phy, offset);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
/* 'len' must be on a word (4-byte) boundary */
if (!len || (len & 0x03))
return BFA_STATUS_FAILED;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->residue = len;
phy->offset = 0;
phy->addr_off = offset;
phy->ubuf = buf;
bfa_phy_write_send(phy);
return BFA_STATUS_OK;
}
/*
* Read phy image.
*
* @param[in] phy - phy structure
* @param[in] instance - phy image instance
* @param[in] buf - read data buffer
* @param[in] len - data buffer length
* @param[in] offset - offset relative to starting address
* @param[in] cbfn - callback function
* @param[in] cbarg - callback argument
*
* Return status.
*/
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
void *buf, u32 len, u32 offset,
bfa_cb_phy_t cbfn, void *cbarg)
{
bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
bfa_trc(phy, instance);
bfa_trc(phy, len);
bfa_trc(phy, offset);
if (!bfa_phy_present(phy))
return BFA_STATUS_PHY_NOT_PRESENT;
if (!bfa_ioc_is_operational(phy->ioc))
return BFA_STATUS_IOC_NON_OP;
/* 'len' must be on a word (4-byte) boundary */
if (!len || (len & 0x03))
return BFA_STATUS_FAILED;
if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
bfa_trc(phy, phy->op_busy);
return BFA_STATUS_DEVBUSY;
}
phy->op_busy = 1;
phy->cbfn = cbfn;
phy->cbarg = cbarg;
phy->instance = instance;
phy->residue = len;
phy->offset = 0;
phy->addr_off = offset;
phy->ubuf = buf;
bfa_phy_read_send(phy);
return BFA_STATUS_OK;
}
/*
* Process phy response messages upon receiving interrupts.
*
* @param[in] phyarg - phy structure
* @param[in] msg - message structure
*/
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
struct bfa_phy_s *phy = phyarg;
u32 status;
union {
struct bfi_phy_query_rsp_s *query;
struct bfi_phy_stats_rsp_s *stats;
struct bfi_phy_write_rsp_s *write;
struct bfi_phy_read_rsp_s *read;
struct bfi_mbmsg_s *msg;
} m;
m.msg = msg;
bfa_trc(phy, msg->mh.msg_id);
if (!phy->op_busy) {
/* receiving response after ioc failure */
bfa_trc(phy, 0x9999);
return;
}
switch (msg->mh.msg_id) {
case BFI_PHY_I2H_QUERY_RSP:
status = be32_to_cpu(m.query->status);
bfa_trc(phy, status);
if (status == BFA_STATUS_OK) {
struct bfa_phy_attr_s *attr =
(struct bfa_phy_attr_s *) phy->ubuf;
bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
sizeof(struct bfa_phy_attr_s));
bfa_trc(phy, attr->status);
bfa_trc(phy, attr->length);
}
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
break;
case BFI_PHY_I2H_STATS_RSP:
status = be32_to_cpu(m.stats->status);
bfa_trc(phy, status);
if (status == BFA_STATUS_OK) {
struct bfa_phy_stats_s *stats =
(struct bfa_phy_stats_s *) phy->ubuf;
bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
sizeof(struct bfa_phy_stats_s));
bfa_trc(phy, stats->status);
}
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
break;
case BFI_PHY_I2H_WRITE_RSP:
status = be32_to_cpu(m.write->status);
bfa_trc(phy, status);
if (status != BFA_STATUS_OK || phy->residue == 0) {
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
} else {
bfa_trc(phy, phy->offset);
bfa_phy_write_send(phy);
}
break;
case BFI_PHY_I2H_READ_RSP:
status = be32_to_cpu(m.read->status);
bfa_trc(phy, status);
if (status != BFA_STATUS_OK) {
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
} else {
u32 len = be32_to_cpu(m.read->length);
u16 *buf = (u16 *)(phy->ubuf + phy->offset);
u16 *dbuf = (u16 *)phy->dbuf_kva;
int i, sz = len >> 1;
bfa_trc(phy, phy->offset);
bfa_trc(phy, len);
for (i = 0; i < sz; i++)
buf[i] = be16_to_cpu(dbuf[i]);
phy->residue -= len;
phy->offset += len;
if (phy->residue == 0) {
phy->status = status;
phy->op_busy = 0;
if (phy->cbfn)
phy->cbfn(phy->cbarg, phy->status);
} else
bfa_phy_read_send(phy);
}
break;
default:
WARN_ON(1);
}
}
/*
* DCONF module specific
*/
BFA_MODULE(dconf);
/*
* DCONF state machine events
*/
enum bfa_dconf_event {
BFA_DCONF_SM_INIT = 1, /* dconf Init */
BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
BFA_DCONF_SM_WR = 3, /* binding change, map */
BFA_DCONF_SM_TIMEOUT = 4, /* timer expiry */
BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
};
/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
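/*
 * A sketch of the main transitions implemented by the handlers below
 * (events in caps, flash I/O noted in parentheses; minor arcs such as
 * EXIT back to uninit are omitted for brevity):
 *
 *	uninit --INIT--> flash_read (start timer, flash read)
 *	flash_read --FLASH_COMP/TIMEOUT--> ready
 *	ready --WR--> dirty (start timer)
 *	dirty --TIMEOUT--> sync (flash write)
 *	dirty --EXIT--> final_sync (flash write)
 *	sync --FLASH_COMP--> ready, --WR--> dirty, --EXIT--> final_sync
 *	dirty/sync --IOCDISABLE--> iocdown_dirty
 *	iocdown_dirty --INIT--> dirty
 *	final_sync --FLASH_COMP/TIMEOUT--> uninit
 */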
/*
* Beginning state of the dconf module, waiting for an event to start.
*/
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_status_t bfa_status;
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_INIT:
if (dconf->min_cfg) {
bfa_trc(dconf->bfa, dconf->min_cfg);
bfa_fsm_send_event(&dconf->bfa->iocfc,
IOCFC_E_DCONF_DONE);
return;
}
bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
BFA_FLASH_PART_DRV, dconf->instance,
dconf->dconf,
sizeof(struct bfa_dconf_s), 0,
bfa_dconf_init_cb, dconf->bfa);
if (bfa_status != BFA_STATUS_OK) {
bfa_timer_stop(&dconf->timer);
bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
return;
}
break;
case BFA_DCONF_SM_EXIT:
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
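/* fall through */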
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_WR:
case BFA_DCONF_SM_FLASH_COMP:
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Read the dconf entries from flash and invoke the driver callback once done.
*/
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
break;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
break;
case BFA_DCONF_SM_EXIT:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_IOCDISABLE:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* DCONF module is in the ready state; initialization has completed.
*/
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_WR:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_INIT:
case BFA_DCONF_SM_IOCDISABLE:
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Entries are dirty; write them back to flash.
*/
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
bfa_dconf_flash_write(dconf);
break;
case BFA_DCONF_SM_WR:
bfa_timer_stop(&dconf->timer);
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
break;
case BFA_DCONF_SM_EXIT:
bfa_timer_stop(&dconf->timer);
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
bfa_dconf_flash_write(dconf);
break;
case BFA_DCONF_SM_FLASH_COMP:
break;
case BFA_DCONF_SM_IOCDISABLE:
bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Sync the dconf entries to the flash.
*/
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
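/* fall through */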
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_FLASH_COMP:
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
break;
case BFA_DCONF_SM_WR:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
break;
case BFA_DCONF_SM_IOCDISABLE:
bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event)
{
bfa_trc(dconf->bfa, event);
switch (event) {
case BFA_DCONF_SM_INIT:
bfa_timer_start(dconf->bfa, &dconf->timer,
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_IOCDISABLE:
break;
default:
bfa_sm_fault(dconf->bfa, event);
}
}
/*
* Compute the memory needed by the DRV_CFG module and record it in meminfo.
*/
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa)
{
struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
if (cfg->drvcfg.min_cfg)
bfa_mem_kva_setup(meminfo, dconf_kva,
sizeof(struct bfa_dconf_hdr_s));
else
bfa_mem_kva_setup(meminfo, dconf_kva,
sizeof(struct bfa_dconf_s));
}
static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
dconf->bfad = bfad;
dconf->bfa = bfa;
dconf->instance = bfa->ioc.port_id;
bfa_trc(bfa, dconf->instance);
dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
if (cfg->drvcfg.min_cfg) {
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
dconf->min_cfg = BFA_TRUE;
} else {
dconf->min_cfg = BFA_FALSE;
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
}
bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}
static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
struct bfa_s *bfa = arg;
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
if (status == BFA_STATUS_OK) {
bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
dconf->dconf->hdr.version = BFI_DCONF_VERSION;
}
bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
void
bfa_dconf_modinit(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}
static void
bfa_dconf_start(struct bfa_s *bfa)
{
}
static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}
static void
bfa_dconf_timer(void *cbarg)
{
struct bfa_dconf_mod_s *dconf = cbarg;
bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}
static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}
static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
bfa_status_t bfa_status;
bfa_trc(dconf->bfa, 0);
bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
BFA_FLASH_PART_DRV, dconf->instance,
dconf->dconf, sizeof(struct bfa_dconf_s), 0,
bfa_dconf_cbfn, dconf);
if (bfa_status != BFA_STATUS_OK)
WARN_ON(bfa_status);
bfa_trc(dconf->bfa, bfa_status);
return bfa_status;
}
bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_trc(dconf->bfa, 0);
if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
return BFA_STATUS_FAILED;
if (dconf->min_cfg) {
bfa_trc(dconf->bfa, dconf->min_cfg);
return BFA_STATUS_FAILED;
}
bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
return BFA_STATUS_OK;
}
static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
struct bfa_dconf_mod_s *dconf = arg;
WARN_ON(status);
bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}
void
bfa_dconf_modexit(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}
| gpl-2.0 |
ennarr/linux-kernel | arch/cris/arch-v10/kernel/shadows.c | 13803 | 1040 | /*
* Various shadow registers. Defines for these are in include/asm-etrax100/io.h
*/
/* Shadows for internal Etrax-registers */
unsigned long genconfig_shadow;
unsigned long gen_config_ii_shadow;
unsigned long port_g_data_shadow;
unsigned char port_pa_dir_shadow;
unsigned char port_pa_data_shadow;
unsigned char port_pb_i2c_shadow;
unsigned char port_pb_config_shadow;
unsigned char port_pb_dir_shadow;
unsigned char port_pb_data_shadow;
unsigned long r_timer_ctrl_shadow;
/* Shadows for external I/O port registers.
* These are only usable if there actually IS a latch connected
* to the corresponding external chip-select pin.
*
* A common usage is that CSP0 controls LEDs and CSP4 video chips.
*/
unsigned long port_cse1_shadow;
unsigned long port_csp0_shadow;
unsigned long port_csp4_shadow;
/* Corresponding addresses for the ports.
* These are initialized in arch/cris/mm/init.c using ioremap.
*/
volatile unsigned long *port_cse1_addr;
volatile unsigned long *port_csp0_addr;
volatile unsigned long *port_csp4_addr;
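/* A hypothetical usage sketch (R_PORT_PA_DATA stands in for the real
 * register macro from asm/io.h): because the hardware register is
 * write-only, callers update the shadow and then write out the whole
 * value in a single store:
 *
 *	port_pa_data_shadow |= 1 << bit;
 *	*R_PORT_PA_DATA = port_pa_data_shadow;
 */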
| gpl-2.0 |
malvira/lpc31xx | drivers/usb/storage/transport.c | 236 | 43129 | /* Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
*
* Developed with the assistance of:
* (c) 2000 David L. Brown, Jr. (usb-storage@davidb.org)
* (c) 2000 Stephen J. Gowdy (SGowdy@lbl.gov)
* (c) 2002 Alan Stern <stern@rowland.org>
*
* Initial work by:
* (c) 1999 Michael Gee (michael@linuxspecific.com)
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/usb/quirks.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "scsiglue.h"
#include "debug.h"
#include <linux/blkdev.h>
#include "../../scsi/sd.h"
/***********************************************************************
* Data transfer routines
***********************************************************************/
/*
* This is subtle, so pay attention:
* ---------------------------------
* We're very concerned about races with a command abort. Hanging this code
* is a sure fire way to hang the kernel. (Note that this discussion applies
* only to transactions resulting from a scsi queued-command, since only
* these transactions are subject to a scsi abort. Other transactions, such
* as those occurring during device-specific initialization, must be handled
* by a separate code path.)
*
* The abort function (usb_storage_command_abort() in scsiglue.c) first
* sets the machine state and the ABORTING bit in us->dflags to prevent
* new URBs from being submitted. It then calls usb_stor_stop_transport()
* below, which atomically tests-and-clears the URB_ACTIVE bit in us->dflags
* to see if the current_urb needs to be stopped. Likewise, the SG_ACTIVE
* bit is tested to see if the current_sg scatter-gather request needs to be
* stopped. The timeout callback routine does much the same thing.
*
* When a disconnect occurs, the DISCONNECTING bit in us->dflags is set to
* prevent new URBs from being submitted, and usb_stor_stop_transport() is
* called to stop any ongoing requests.
*
* The submit function first verifies that the submitting is allowed
* (neither ABORTING nor DISCONNECTING bits are set) and that the submit
* completes without errors, and only then sets the URB_ACTIVE bit. This
* prevents the stop_transport() function from trying to cancel the URB
* while the submit call is underway. Next, the submit function must test
* the flags to see if an abort or disconnect occurred during the submission
* or before the URB_ACTIVE bit was set. If so, it's essential to cancel
* the URB if it hasn't been cancelled already (i.e., if the URB_ACTIVE bit
* is still set). Either way, the function must then wait for the URB to
* finish. Note that the URB can still be in progress even after a call to
* usb_unlink_urb() returns.
*
* The idea is that (1) once the ABORTING or DISCONNECTING bit is set,
* either the stop_transport() function or the submitting function
* is guaranteed to call usb_unlink_urb() for an active URB,
* and (2) test_and_clear_bit() prevents usb_unlink_urb() from being
* called more than once or from being called during usb_submit_urb().
*/
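/*
 * In outline (a sketch, not the literal code below), the interlock is:
 *
 *	submit path:
 *		usb_submit_urb();
 *		set_bit(URB_ACTIVE);
 *		if (ABORTING set && test_and_clear_bit(URB_ACTIVE))
 *			usb_unlink_urb();
 *		wait for completion;
 *
 *	stop/abort path:
 *		set ABORTING;
 *		if (test_and_clear_bit(URB_ACTIVE))
 *			usb_unlink_urb();
 *
 * Because test_and_clear_bit() is atomic, at most one of the two
 * paths issues the unlink for any given URB.
 */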
/* This is the completion handler which will wake us up when an URB
* completes.
*/
static void usb_stor_blocking_completion(struct urb *urb)
{
struct completion *urb_done_ptr = urb->context;
complete(urb_done_ptr);
}
/* This is the common part of the URB message submission code
*
* All URBs from the usb-storage driver involved in handling a queued scsi
* command _must_ pass through this function (or something like it) for the
* abort mechanisms to work properly.
*/
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
struct completion urb_done;
long timeleft;
int status;
/* don't submit URBs during abort processing */
if (test_bit(US_FLIDX_ABORTING, &us->dflags))
return -EIO;
/* set up data structures for the wakeup system */
init_completion(&urb_done);
/* fill the common fields in the URB */
us->current_urb->context = &urb_done;
us->current_urb->transfer_flags = 0;
/* we assume that if transfer_buffer isn't us->iobuf then it
* hasn't been mapped for DMA. Yes, this is clunky, but it's
* easier than always having the caller tell us whether the
* transfer buffer has already been mapped. */
if (us->current_urb->transfer_buffer == us->iobuf)
us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
us->current_urb->transfer_dma = us->iobuf_dma;
/* submit the URB */
status = usb_submit_urb(us->current_urb, GFP_NOIO);
if (status) {
/* something went wrong */
return status;
}
/* since the URB has been submitted successfully, it's now okay
* to cancel it */
set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);
/* did an abort occur during the submission? */
if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {
/* cancel the URB, if it hasn't been cancelled already */
if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
US_DEBUGP("-- cancelling URB\n");
usb_unlink_urb(us->current_urb);
}
}
/* wait for the completion of the URB */
timeleft = wait_for_completion_interruptible_timeout(
&urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);
clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);
if (timeleft <= 0) {
US_DEBUGP("%s -- cancelling URB\n",
timeleft == 0 ? "Timeout" : "Signal");
usb_kill_urb(us->current_urb);
}
/* return the URB status */
return us->current_urb->status;
}
/*
* Transfer one control message, with timeouts, and allowing early
* termination. Return codes are usual -Exxx, *not* USB_STOR_XFER_xxx.
*/
int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
{
int status;
US_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
__func__, request, requesttype,
value, index, size);
/* fill in the devrequest structure */
us->cr->bRequestType = requesttype;
us->cr->bRequest = request;
us->cr->wValue = cpu_to_le16(value);
us->cr->wIndex = cpu_to_le16(index);
us->cr->wLength = cpu_to_le16(size);
/* fill and submit the URB */
usb_fill_control_urb(us->current_urb, us->pusb_dev, pipe,
(unsigned char*) us->cr, data, size,
usb_stor_blocking_completion, NULL);
status = usb_stor_msg_common(us, timeout);
/* return the actual length of the data transferred if no error */
if (status == 0)
status = us->current_urb->actual_length;
return status;
}
EXPORT_SYMBOL_GPL(usb_stor_control_msg);
/* This is a version of usb_clear_halt() that allows early termination and
* doesn't read the status from the device -- this is because some devices
* crash their internal firmware when the status is requested after a halt.
*
* A definitive list of these 'bad' devices is too difficult to maintain or
* make complete enough to be useful. This problem was first observed on the
* Hagiwara FlashGate DUAL unit. However, bus traces reveal that neither
* MacOS nor Windows checks the status after clearing a halt.
*
* Since many vendors in this space limit their testing to interoperability
* with these two OSes, specification violations like this one are common.
*/
int usb_stor_clear_halt(struct us_data *us, unsigned int pipe)
{
int result;
int endp = usb_pipeendpoint(pipe);
if (usb_pipein (pipe))
endp |= USB_DIR_IN;
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, endp,
NULL, 0, 3*HZ);
if (result >= 0)
usb_reset_endpoint(us->pusb_dev, endp);
US_DEBUGP("%s: result = %d\n", __func__, result);
return result;
}
EXPORT_SYMBOL_GPL(usb_stor_clear_halt);
/*
* Interpret the results of a URB transfer
*
* This function prints appropriate debugging messages, clears halts on
* non-control endpoints, and translates the status to the corresponding
* USB_STOR_XFER_xxx return code.
*/
static int interpret_urb_result(struct us_data *us, unsigned int pipe,
unsigned int length, int result, unsigned int partial)
{
US_DEBUGP("Status code %d; transferred %u/%u\n",
result, partial, length);
switch (result) {
/* no error code; did we send all the data? */
case 0:
if (partial != length) {
US_DEBUGP("-- short transfer\n");
return USB_STOR_XFER_SHORT;
}
US_DEBUGP("-- transfer complete\n");
return USB_STOR_XFER_GOOD;
/* stalled */
case -EPIPE:
/* for control endpoints, (used by CB[I]) a stall indicates
* a failed command */
if (usb_pipecontrol(pipe)) {
US_DEBUGP("-- stall on control pipe\n");
return USB_STOR_XFER_STALLED;
}
/* for other sorts of endpoint, clear the stall */
US_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
if (usb_stor_clear_halt(us, pipe) < 0)
return USB_STOR_XFER_ERROR;
return USB_STOR_XFER_STALLED;
/* babble - the device tried to send more than we wanted to read */
case -EOVERFLOW:
US_DEBUGP("-- babble\n");
return USB_STOR_XFER_LONG;
/* the transfer was cancelled by abort, disconnect, or timeout */
case -ECONNRESET:
US_DEBUGP("-- transfer cancelled\n");
return USB_STOR_XFER_ERROR;
/* short scatter-gather read transfer */
case -EREMOTEIO:
US_DEBUGP("-- short read transfer\n");
return USB_STOR_XFER_SHORT;
/* abort or disconnect in progress */
case -EIO:
US_DEBUGP("-- abort or disconnect in progress\n");
return USB_STOR_XFER_ERROR;
/* the catch-all error case */
default:
US_DEBUGP("-- unknown error\n");
return USB_STOR_XFER_ERROR;
}
}
/*
* Transfer one control message, without timeouts, but allowing early
* termination. Return codes are USB_STOR_XFER_xxx.
*/
int usb_stor_ctrl_transfer(struct us_data *us, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size)
{
int result;
US_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
__func__, request, requesttype,
value, index, size);
/* fill in the devrequest structure */
us->cr->bRequestType = requesttype;
us->cr->bRequest = request;
us->cr->wValue = cpu_to_le16(value);
us->cr->wIndex = cpu_to_le16(index);
us->cr->wLength = cpu_to_le16(size);
/* fill and submit the URB */
usb_fill_control_urb(us->current_urb, us->pusb_dev, pipe,
(unsigned char*) us->cr, data, size,
usb_stor_blocking_completion, NULL);
result = usb_stor_msg_common(us, 0);
return interpret_urb_result(us, pipe, size, result,
us->current_urb->actual_length);
}
EXPORT_SYMBOL_GPL(usb_stor_ctrl_transfer);
/*
* Receive one interrupt buffer, without timeouts, but allowing early
* termination. Return codes are USB_STOR_XFER_xxx.
*
* This routine always uses us->recv_intr_pipe as the pipe and
* us->ep_bInterval as the interrupt interval.
*/
static int usb_stor_intr_transfer(struct us_data *us, void *buf,
unsigned int length)
{
int result;
unsigned int pipe = us->recv_intr_pipe;
unsigned int maxp;
US_DEBUGP("%s: xfer %u bytes\n", __func__, length);
/* calculate the max packet size */
maxp = usb_maxpacket(us->pusb_dev, pipe, usb_pipeout(pipe));
if (maxp > length)
maxp = length;
/* fill and submit the URB */
usb_fill_int_urb(us->current_urb, us->pusb_dev, pipe, buf,
maxp, usb_stor_blocking_completion, NULL,
us->ep_bInterval);
result = usb_stor_msg_common(us, 0);
return interpret_urb_result(us, pipe, length, result,
us->current_urb->actual_length);
}
/*
* Transfer one buffer via bulk pipe, without timeouts, but allowing early
* termination. Return codes are USB_STOR_XFER_xxx. If the bulk pipe
* stalls during the transfer, the halt is automatically cleared.
*/
int usb_stor_bulk_transfer_buf(struct us_data *us, unsigned int pipe,
void *buf, unsigned int length, unsigned int *act_len)
{
int result;
US_DEBUGP("%s: xfer %u bytes\n", __func__, length);
/* fill and submit the URB */
usb_fill_bulk_urb(us->current_urb, us->pusb_dev, pipe, buf, length,
usb_stor_blocking_completion, NULL);
result = usb_stor_msg_common(us, 0);
/* store the actual length of the data transferred */
if (act_len)
*act_len = us->current_urb->actual_length;
return interpret_urb_result(us, pipe, length, result,
us->current_urb->actual_length);
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_buf);
/*
* Transfer a scatter-gather list via bulk transfer
*
* This function does basically the same thing as usb_stor_bulk_transfer_buf()
* above, but it uses the usbcore scatter-gather library.
*/
static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
struct scatterlist *sg, int num_sg, unsigned int length,
unsigned int *act_len)
{
int result;
/* don't submit s-g requests during abort processing */
if (test_bit(US_FLIDX_ABORTING, &us->dflags))
return USB_STOR_XFER_ERROR;
/* initialize the scatter-gather request block */
US_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
length, num_sg);
result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
sg, num_sg, length, GFP_NOIO);
if (result) {
US_DEBUGP("usb_sg_init returned %d\n", result);
return USB_STOR_XFER_ERROR;
}
/* since the block has been initialized successfully, it's now
* okay to cancel it */
set_bit(US_FLIDX_SG_ACTIVE, &us->dflags);
/* did an abort occur during the submission? */
if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {
/* cancel the request, if it hasn't been cancelled already */
if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
US_DEBUGP("-- cancelling sg request\n");
usb_sg_cancel(&us->current_sg);
}
}
/* wait for the completion of the transfer */
usb_sg_wait(&us->current_sg);
clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags);
result = us->current_sg.status;
if (act_len)
*act_len = us->current_sg.bytes;
return interpret_urb_result(us, pipe, length, result,
us->current_sg.bytes);
}
/*
* Commonly used function: transfer a complete command
* via usb_stor_bulk_transfer_sglist() above and set the cmnd residue.
*/
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
struct scsi_cmnd* srb)
{
unsigned int partial;
int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
scsi_sg_count(srb), scsi_bufflen(srb),
&partial);
scsi_set_resid(srb, scsi_bufflen(srb) - partial);
return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_srb);
/*
* Transfer an entire SCSI command's worth of data payload over the bulk
* pipe.
*
* Note that this uses usb_stor_bulk_transfer_buf() and
* usb_stor_bulk_transfer_sglist() to achieve its goals --
* this function simply determines whether we're going to use
* scatter-gather or not, and acts appropriately.
*/
int usb_stor_bulk_transfer_sg(struct us_data* us, unsigned int pipe,
void *buf, unsigned int length_left, int use_sg, int *residual)
{
int result;
unsigned int partial;
/* are we scatter-gathering? */
if (use_sg) {
/* use the usb core scatter-gather primitives */
result = usb_stor_bulk_transfer_sglist(us, pipe,
(struct scatterlist *) buf, use_sg,
length_left, &partial);
length_left -= partial;
} else {
/* no scatter-gather, just make the request */
result = usb_stor_bulk_transfer_buf(us, pipe, buf,
length_left, &partial);
length_left -= partial;
}
/* store the residual and return the error code */
if (residual)
*residual = length_left;
return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_sg);
/***********************************************************************
* Transport routines
***********************************************************************/
/* So many devices report their capacity incorrectly that this
* routine was written to counteract some of the resulting
* problems.
*/
static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
{
struct gendisk *disk;
struct scsi_disk *sdkp;
u32 sector;
/* To Report "Medium Error: Record Not Found */
static unsigned char record_not_found[18] = {
[0] = 0x70, /* current error */
[2] = MEDIUM_ERROR, /* = 0x03 */
[7] = 0x0a, /* additional length */
[12] = 0x14 /* Record Not Found */
};
/* If last-sector problems can't occur, whether because the
* capacity was already decremented or because the device is
* known to report the correct capacity, then we don't need
* to do anything.
*/
if (!us->use_last_sector_hacks)
return;
/* Was this command a READ(10) or a WRITE(10)? */
if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
goto done;
/* Did this command access the last sector? */
sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
(srb->cmnd[4] << 8) | (srb->cmnd[5]);
disk = srb->request->rq_disk;
if (!disk)
goto done;
sdkp = scsi_disk(disk);
if (!sdkp)
goto done;
if (sector + 1 != sdkp->capacity)
goto done;
if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
/* The command succeeded. We know this device doesn't
* have the last-sector bug, so stop checking it.
*/
us->use_last_sector_hacks = 0;
} else {
/* The command failed. Allow up to 3 retries in case this
* is some normal sort of failure. After that, assume the
* capacity is wrong and we're trying to access the sector
* beyond the end. Replace the result code and sense data
* with values that will cause the SCSI core to fail the
* command immediately, instead of going into an infinite
* (or even just a very long) retry loop.
*/
if (++us->last_sector_retries < 3)
return;
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer, record_not_found,
sizeof(record_not_found));
}
done:
/* Don't reset the retry counter for TEST UNIT READY commands,
* because they get issued after device resets which might be
* caused by a failed last-sector access.
*/
if (srb->cmnd[0] != TEST_UNIT_READY)
us->last_sector_retries = 0;
}
/* Invoke the transport and basic error-handling/recovery methods
*
* This is used by the protocol layers to actually send the message to
* the device and receive the response.
*/
void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int need_auto_sense;
int result;
/* send the command to the transport layer */
scsi_set_resid(srb, 0);
result = us->transport(srb, us);
/* if the command gets aborted by the higher layers, we need to
* short-circuit all other processing
*/
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
US_DEBUGP("-- command was aborted\n");
srb->result = DID_ABORT << 16;
goto Handle_Errors;
}
/* if there is a transport error, reset and don't auto-sense */
if (result == USB_STOR_TRANSPORT_ERROR) {
US_DEBUGP("-- transport indicates error, resetting\n");
srb->result = DID_ERROR << 16;
goto Handle_Errors;
}
/* if the transport provided its own sense data, don't auto-sense */
if (result == USB_STOR_TRANSPORT_NO_SENSE) {
srb->result = SAM_STAT_CHECK_CONDITION;
last_sector_hacks(us, srb);
return;
}
srb->result = SAM_STAT_GOOD;
/* Determine if we need to auto-sense
*
* I normally don't use a flag like this, but it's almost impossible
* to understand what's going on here if I don't.
*/
need_auto_sense = 0;
/*
* If we're running the CB transport, which is incapable
* of determining status on its own, we will auto-sense
* unless the operation involved a data-in transfer. Devices
* can signal most data-in errors by stalling the bulk-in pipe.
*/
if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_DPCM_USB) &&
srb->sc_data_direction != DMA_FROM_DEVICE) {
US_DEBUGP("-- CB transport device requiring auto-sense\n");
need_auto_sense = 1;
}
/*
* If we have a failure, we're going to do a REQUEST_SENSE
* automatically. Note that we differentiate between a command
* "failure" and an "error" in the transport mechanism.
*/
if (result == USB_STOR_TRANSPORT_FAILED) {
US_DEBUGP("-- transport indicates command failure\n");
need_auto_sense = 1;
}
/*
* Determine if this device is SAT by seeing if the
* command executed successfully. Otherwise we'll have
* to wait for at least one CHECK_CONDITION to determine
* SANE_SENSE support
*/
if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
result == USB_STOR_TRANSPORT_GOOD &&
!(us->fflags & US_FL_SANE_SENSE) &&
!(us->fflags & US_FL_BAD_SENSE) &&
!(srb->cmnd[2] & 0x20))) {
US_DEBUGP("-- SAT supported, increasing auto-sense\n");
us->fflags |= US_FL_SANE_SENSE;
}
/*
* A short transfer on a command where we don't expect it
* is unusual, but it doesn't mean we need to auto-sense.
*/
if ((scsi_get_resid(srb) > 0) &&
!((srb->cmnd[0] == REQUEST_SENSE) ||
(srb->cmnd[0] == INQUIRY) ||
(srb->cmnd[0] == MODE_SENSE) ||
(srb->cmnd[0] == LOG_SENSE) ||
(srb->cmnd[0] == MODE_SENSE_10))) {
US_DEBUGP("-- unexpectedly short transfer\n");
}
/* Now, if we need to do the auto-sense, let's do it */
if (need_auto_sense) {
int temp_result;
struct scsi_eh_save ses;
int sense_size = US_SENSE_SIZE;
struct scsi_sense_hdr sshdr;
const u8 *scdd;
u8 fm_ili;
/* device supports and needs bigger sense buffer */
if (us->fflags & US_FL_SANE_SENSE)
sense_size = ~0;
Retry_Sense:
US_DEBUGP("Issuing auto-REQUEST_SENSE\n");
scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);
/* FIXME: we must do the protocol translation here */
if (us->subclass == USB_SC_RBC || us->subclass == USB_SC_SCSI ||
us->subclass == USB_SC_CYP_ATACB)
srb->cmd_len = 6;
else
srb->cmd_len = 12;
/* issue the auto-sense command */
scsi_set_resid(srb, 0);
temp_result = us->transport(us->srb, us);
/* let's clean up right away */
scsi_eh_restore_cmnd(srb, &ses);
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
US_DEBUGP("-- auto-sense aborted\n");
srb->result = DID_ABORT << 16;
/* If SANE_SENSE caused this problem, disable it */
if (sense_size != US_SENSE_SIZE) {
us->fflags &= ~US_FL_SANE_SENSE;
us->fflags |= US_FL_BAD_SENSE;
}
goto Handle_Errors;
}
/* Some devices claim to support a larger sense buffer but fail
* when we try to request it. When a transport failure happens
* using US_FL_SANE_SENSE, we always retry with a standard
* (small) sense request. This fixes some USB GSM modems.
*/
if (temp_result == USB_STOR_TRANSPORT_FAILED &&
sense_size != US_SENSE_SIZE) {
US_DEBUGP("-- auto-sense failure, retry small sense\n");
sense_size = US_SENSE_SIZE;
us->fflags &= ~US_FL_SANE_SENSE;
us->fflags |= US_FL_BAD_SENSE;
goto Retry_Sense;
}
/* Other failures */
if (temp_result != USB_STOR_TRANSPORT_GOOD) {
US_DEBUGP("-- auto-sense failure\n");
/* we skip the reset if this happens to be a
* multi-target device, since failure of an
* auto-sense is perfectly valid
*/
srb->result = DID_ERROR << 16;
if (!(us->fflags & US_FL_SCM_MULT_TARG))
goto Handle_Errors;
return;
}
/* If the sense data returned is larger than 18-bytes then we
* assume this device supports requesting more in the future.
* The response code must be 70h through 73h inclusive.
*/
if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
!(us->fflags & US_FL_SANE_SENSE) &&
!(us->fflags & US_FL_BAD_SENSE) &&
(srb->sense_buffer[0] & 0x7C) == 0x70) {
US_DEBUGP("-- SANE_SENSE support enabled\n");
us->fflags |= US_FL_SANE_SENSE;
/* Indicate to the user that we truncated their sense
* because we didn't know it supported larger sense.
*/
US_DEBUGP("-- Sense data truncated to %i from %i\n",
US_SENSE_SIZE,
srb->sense_buffer[7] + 8);
srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
}
scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
US_DEBUGP("-- Result from auto-sense is %d\n", temp_result);
US_DEBUGP("-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
sshdr.response_code, sshdr.sense_key,
sshdr.asc, sshdr.ascq);
#ifdef CONFIG_USB_STORAGE_DEBUG
usb_stor_show_sense(sshdr.sense_key, sshdr.asc, sshdr.ascq);
#endif
/* set the result so the higher layers expect this data */
srb->result = SAM_STAT_CHECK_CONDITION;
scdd = scsi_sense_desc_find(srb->sense_buffer,
SCSI_SENSE_BUFFERSIZE, 4);
fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;
/* We often get empty sense data. This could indicate that
* everything worked or that there was an unspecified
* problem. We have to decide which.
*/
if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
fm_ili == 0) {
/* If things are really okay, then let's show that.
* Zero out the sense buffer so the higher layers
* won't realize we did an unsolicited auto-sense.
*/
if (result == USB_STOR_TRANSPORT_GOOD) {
srb->result = SAM_STAT_GOOD;
srb->sense_buffer[0] = 0x0;
/* If there was a problem, report an unspecified
* hardware error to prevent the higher layers from
* entering an infinite retry loop.
*/
} else {
srb->result = DID_ERROR << 16;
if ((sshdr.response_code & 0x72) == 0x72)
srb->sense_buffer[1] = HARDWARE_ERROR;
else
srb->sense_buffer[2] = HARDWARE_ERROR;
}
}
}
/*
* Some devices don't work or return incorrect data the first
* time they get a READ(10) command, or for the first READ(10)
* after a media change. If the INITIAL_READ10 flag is set,
* keep track of whether READ(10) commands succeed. If the
* previous one succeeded and this one failed, set the REDO_READ10
* flag to force a retry.
*/
if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
srb->cmnd[0] == READ_10)) {
if (srb->result == SAM_STAT_GOOD) {
set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
} else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
set_bit(US_FLIDX_REDO_READ10, &us->dflags);
}
/*
* Next, if the REDO_READ10 flag is set, return a result
* code that will cause the SCSI core to retry the READ(10)
* command immediately.
*/
if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
srb->result = DID_IMM_RETRY << 16;
srb->sense_buffer[0] = 0;
}
}
/* Did we transfer less than the minimum amount required? */
if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
srb->result = DID_ERROR << 16;
last_sector_hacks(us, srb);
return;
/* Error and abort processing: try to resynchronize with the device
* by issuing a port reset. If that fails, try a class-specific
* device reset. */
Handle_Errors:
/* Set the RESETTING bit, and clear the ABORTING bit so that
* the reset may proceed. */
scsi_lock(us_to_host(us));
set_bit(US_FLIDX_RESETTING, &us->dflags);
clear_bit(US_FLIDX_ABORTING, &us->dflags);
scsi_unlock(us_to_host(us));
/* We must release the device lock because the pre_reset routine
* will want to acquire it. */
mutex_unlock(&us->dev_mutex);
result = usb_stor_port_reset(us);
mutex_lock(&us->dev_mutex);
if (result < 0) {
scsi_lock(us_to_host(us));
usb_stor_report_device_reset(us);
scsi_unlock(us_to_host(us));
us->transport_reset(us);
}
clear_bit(US_FLIDX_RESETTING, &us->dflags);
last_sector_hacks(us, srb);
}
/* Stop the current URB transfer */
void usb_stor_stop_transport(struct us_data *us)
{
US_DEBUGP("%s called\n", __func__);
/* If the state machine is blocked waiting for an URB,
* let's wake it up. The test_and_clear_bit() call
* guarantees that if a URB has just been submitted,
* it won't be cancelled more than once. */
if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
US_DEBUGP("-- cancelling URB\n");
usb_unlink_urb(us->current_urb);
}
/* If we are waiting for a scatter-gather operation, cancel it. */
if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
US_DEBUGP("-- cancelling sg request\n");
usb_sg_cancel(&us->current_sg);
}
}
/*
* Control/Bulk and Control/Bulk/Interrupt transport
*/
int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
{
unsigned int transfer_length = scsi_bufflen(srb);
unsigned int pipe = 0;
int result;
/* COMMAND STAGE */
/* let's send the command via the control pipe */
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
us->ifnum, srb->cmnd, srb->cmd_len);
/* check the return code for the command */
US_DEBUGP("Call to usb_stor_ctrl_transfer() returned %d\n", result);
/* if we stalled the command, it means command failed */
if (result == USB_STOR_XFER_STALLED) {
return USB_STOR_TRANSPORT_FAILED;
}
/* Uh oh... serious problem here */
if (result != USB_STOR_XFER_GOOD) {
return USB_STOR_TRANSPORT_ERROR;
}
/* DATA STAGE */
/* transfer the data payload for this command, if one exists*/
if (transfer_length) {
pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
result = usb_stor_bulk_srb(us, pipe, srb);
US_DEBUGP("CBI data stage result is 0x%x\n", result);
/* if we stalled the data transfer it means command failed */
if (result == USB_STOR_XFER_STALLED)
return USB_STOR_TRANSPORT_FAILED;
if (result > USB_STOR_XFER_STALLED)
return USB_STOR_TRANSPORT_ERROR;
}
/* STATUS STAGE */
/* NOTE: CB does not have a status stage. Silly, I know. So
* we have to catch this at a higher level.
*/
if (us->protocol != USB_PR_CBI)
return USB_STOR_TRANSPORT_GOOD;
result = usb_stor_intr_transfer(us, us->iobuf, 2);
US_DEBUGP("Got interrupt data (0x%x, 0x%x)\n",
us->iobuf[0], us->iobuf[1]);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* UFI gives us ASC and ASCQ, like a request sense
*
* REQUEST_SENSE and INQUIRY don't affect the sense data on UFI
* devices, so we ignore the information for those commands. Note
* that this means we could be ignoring a real error on these
* commands, but that can't be helped.
*/
if (us->subclass == USB_SC_UFI) {
if (srb->cmnd[0] == REQUEST_SENSE ||
srb->cmnd[0] == INQUIRY)
return USB_STOR_TRANSPORT_GOOD;
if (us->iobuf[0])
goto Failed;
return USB_STOR_TRANSPORT_GOOD;
}
/* If not UFI, we interpret the data as a result code
* The first byte should always be a 0x0.
*
* Some bogus devices don't follow that rule. They stuff the ASC
* into the first byte -- so if it's non-zero, call it a failure.
*/
if (us->iobuf[0]) {
US_DEBUGP("CBI IRQ data showed reserved bType 0x%x\n",
us->iobuf[0]);
goto Failed;
}
/* The second byte & 0x0F should be 0x0 for good, otherwise error */
switch (us->iobuf[1] & 0x0F) {
case 0x00:
return USB_STOR_TRANSPORT_GOOD;
case 0x01:
goto Failed;
}
return USB_STOR_TRANSPORT_ERROR;
/* the CBI spec requires that the bulk pipe must be cleared
* following any data-in/out command failure (section 2.4.3.1.3)
*/
Failed:
if (pipe)
usb_stor_clear_halt(us, pipe);
return USB_STOR_TRANSPORT_FAILED;
}
EXPORT_SYMBOL_GPL(usb_stor_CB_transport);
/*
* Bulk only transport
*/
/* Determine the maximum supported LUN */
int usb_stor_Bulk_max_lun(struct us_data *us)
{
int result;
/* issue the command */
us->iobuf[0] = 0;
result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
US_BULK_GET_MAX_LUN,
USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE,
0, us->ifnum, us->iobuf, 1, 10*HZ);
US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
/* if we have a successful request, return the result */
if (result > 0)
return us->iobuf[0];
/*
* Some devices don't like GetMaxLUN. They may STALL the control
* pipe, they may return a zero-length result, they may do nothing at
* all and timeout, or they may fail in even more bizarrely creative
* ways. In these cases the best approach is to use the default
* value: only one LUN.
*/
return 0;
}
int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
unsigned int transfer_length = scsi_bufflen(srb);
unsigned int residue;
int result;
int fake_sense = 0;
unsigned int cswlen;
unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
/* Take care of BULK32 devices; set extra byte to 0 */
if (unlikely(us->fflags & US_FL_BULK32)) {
cbwlen = 32;
us->iobuf[31] = 0;
}
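/* For reference, the 31-byte CBW populated below follows the layout
 * in the Bulk-Only Transport spec (a summary, not a normative table):
 *
 *	offset	size	field
 *	0	4	dCBWSignature ('USBC', little-endian)
 *	4	4	dCBWTag
 *	8	4	dCBWDataTransferLength
 *	12	1	bmCBWFlags (bit 7 set = data-in)
 *	13	1	bCBWLUN
 *	14	1	bCBWCBLength
 *	15	16	CBWCB (the command block itself)
 */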
/* set up the command wrapper */
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = cpu_to_le32(transfer_length);
bcb->Flags = srb->sc_data_direction == DMA_FROM_DEVICE ? 1 << 7 : 0;
bcb->Tag = ++us->tag;
bcb->Lun = srb->device->lun;
if (us->fflags & US_FL_SCM_MULT_TARG)
bcb->Lun |= srb->device->id << 4;
bcb->Length = srb->cmd_len;
/* copy the command payload */
memset(bcb->CDB, 0, sizeof(bcb->CDB));
memcpy(bcb->CDB, srb->cmnd, bcb->Length);
/* send it to out endpoint */
US_DEBUGP("Bulk Command S 0x%x T 0x%x L %d F %d Trg %d LUN %d CL %d\n",
le32_to_cpu(bcb->Signature), bcb->Tag,
le32_to_cpu(bcb->DataTransferLength), bcb->Flags,
(bcb->Lun >> 4), (bcb->Lun & 0x0F),
bcb->Length);
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
bcb, cbwlen, NULL);
US_DEBUGP("Bulk command transfer result=%d\n", result);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* DATA STAGE */
/* send/receive data payload, if there is any */
/* Some USB-IDE converter chips need a 100us delay between the
* command phase and the data phase. Some devices need a little
* more than that, probably because of clock rate inaccuracies. */
if (unlikely(us->fflags & US_FL_GO_SLOW))
udelay(125);
if (transfer_length) {
unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
result = usb_stor_bulk_srb(us, pipe, srb);
US_DEBUGP("Bulk data transfer result 0x%x\n", result);
if (result == USB_STOR_XFER_ERROR)
return USB_STOR_TRANSPORT_ERROR;
/* If the device tried to send back more data than the
* amount requested, the spec requires us to transfer
* the CSW anyway. Since there's no point retrying the
* command, we'll return fake sense data indicating
* Illegal Request, Invalid Field in CDB.
*/
if (result == USB_STOR_XFER_LONG)
fake_sense = 1;
}
/* See flow chart on pg 15 of the Bulk Only Transport spec for
* an explanation of how this code works.
*/
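/* For reference, the 13-byte CSW read below follows this layout
 * (again a summary of the spec, not a normative table):
 *
 *	offset	size	field
 *	0	4	dCSWSignature ('USBS', little-endian)
 *	4	4	dCSWTag (must match the CBW tag)
 *	8	4	dCSWDataResidue
 *	12	1	bCSWStatus (0 = pass, 1 = fail, 2 = phase error)
 */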
/* get CSW for device status */
US_DEBUGP("Attempting to get CSW...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
/* Some broken devices add unnecessary zero-length packets to the
* end of their data transfers. Such packets show up as 0-length
* CSWs. If we encounter such a thing, try to read the CSW again.
*/
if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
US_DEBUGP("Received 0-length CSW; retrying...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
}
/* did the attempt to read the CSW fail? */
if (result == USB_STOR_XFER_STALLED) {
/* get the status again */
US_DEBUGP("Attempting to get CSW (2nd try)...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, NULL);
}
/* if we still have a failure at this point, we're in trouble */
US_DEBUGP("Bulk status result = %d\n", result);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* check bulk status */
residue = le32_to_cpu(bcs->Residue);
US_DEBUGP("Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
le32_to_cpu(bcs->Signature), bcs->Tag,
residue, bcs->Status);
if (!(bcs->Tag == us->tag || (us->fflags & US_FL_BULK_IGNORE_TAG)) ||
bcs->Status > US_BULK_STAT_PHASE) {
US_DEBUGP("Bulk logical error\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* Some broken devices report odd signatures, so we do not check them
* for validity against the spec. We store the first one we see,
* and check subsequent transfers for validity against this signature.
*/
if (!us->bcs_signature) {
us->bcs_signature = bcs->Signature;
if (us->bcs_signature != cpu_to_le32(US_BULK_CS_SIGN))
US_DEBUGP("Learnt BCS signature 0x%08X\n",
le32_to_cpu(us->bcs_signature));
} else if (bcs->Signature != us->bcs_signature) {
US_DEBUGP("Signature mismatch: got %08X, expecting %08X\n",
le32_to_cpu(bcs->Signature),
le32_to_cpu(us->bcs_signature));
return USB_STOR_TRANSPORT_ERROR;
}
/* try to compute the actual residue, based on how much data
* was really transferred and what the device tells us */
if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
/* Heuristically detect devices that generate bogus residues
* by seeing what happens with INQUIRY and READ CAPACITY
* commands.
*/
if (bcs->Status == US_BULK_STAT_OK &&
scsi_get_resid(srb) == 0 &&
((srb->cmnd[0] == INQUIRY &&
transfer_length == 36) ||
(srb->cmnd[0] == READ_CAPACITY &&
transfer_length == 8))) {
us->fflags |= US_FL_IGNORE_RESIDUE;
} else {
residue = min(residue, transfer_length);
scsi_set_resid(srb, max(scsi_get_resid(srb),
(int) residue));
}
}
/* based on the status code, we report good or bad */
switch (bcs->Status) {
case US_BULK_STAT_OK:
/* device babbled -- return fake sense data */
if (fake_sense) {
memcpy(srb->sense_buffer,
usb_stor_sense_invalidCDB,
sizeof(usb_stor_sense_invalidCDB));
return USB_STOR_TRANSPORT_NO_SENSE;
}
/* command good -- note that data could be short */
return USB_STOR_TRANSPORT_GOOD;
case US_BULK_STAT_FAIL:
/* command failed */
return USB_STOR_TRANSPORT_FAILED;
case US_BULK_STAT_PHASE:
/* phase error -- note that a transport reset will be
* invoked by the invoke_transport() function
*/
return USB_STOR_TRANSPORT_ERROR;
}
/* we should never get here, but if we do, we're in trouble */
return USB_STOR_TRANSPORT_ERROR;
}
EXPORT_SYMBOL_GPL(usb_stor_Bulk_transport);
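/* For reference, the 13-byte CSW parsed above (signature/tag checks,
 * residue, status) has the following shape; this mirrors the bulk_cs_wrap
 * definition in the transport header and is repeated here only as an
 * editor's illustration:
 *
 * struct bulk_cs_wrap {
 * __le32 Signature; // should be 'USBS' (0x53425355)
 * __u32 Tag; // must match the CBW's tag
 * __le32 Residue; // amount of data not transferred
 * __u8 Status; // 0 = pass, 1 = fail, 2 = phase error
 * };
 */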
/***********************************************************************
* Reset routines
***********************************************************************/
/* This is the common part of the device reset code.
*
* It's handy that every transport mechanism uses the control endpoint for
* resets.
*
* Basically, we send a reset with a 5-second timeout, so we don't get
* jammed attempting to do the reset.
*/
static int usb_stor_reset_common(struct us_data *us,
u8 request, u8 requesttype,
u16 value, u16 index, void *data, u16 size)
{
int result;
int result2;
if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
US_DEBUGP("No reset during disconnect\n");
return -EIO;
}
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
request, requesttype, value, index, data, size,
5*HZ);
if (result < 0) {
US_DEBUGP("Soft reset failed: %d\n", result);
return result;
}
/* Give the device some time to recover from the reset,
* but don't delay disconnect processing. */
wait_event_interruptible_timeout(us->delay_wait,
test_bit(US_FLIDX_DISCONNECTING, &us->dflags),
HZ*6);
if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
US_DEBUGP("Reset interrupted by disconnect\n");
return -EIO;
}
US_DEBUGP("Soft reset: clearing bulk-in endpoint halt\n");
result = usb_stor_clear_halt(us, us->recv_bulk_pipe);
US_DEBUGP("Soft reset: clearing bulk-out endpoint halt\n");
result2 = usb_stor_clear_halt(us, us->send_bulk_pipe);
/* return a result code based on the result of the clear-halts */
if (result >= 0)
result = result2;
if (result < 0)
US_DEBUGP("Soft reset failed\n");
else
US_DEBUGP("Soft reset done\n");
return result;
}
/* This issues a CB[I] Reset to the device in question
*/
#define CB_RESET_CMD_SIZE 12
int usb_stor_CB_reset(struct us_data *us)
{
US_DEBUGP("%s called\n", __func__);
memset(us->iobuf, 0xFF, CB_RESET_CMD_SIZE);
us->iobuf[0] = SEND_DIAGNOSTIC;
us->iobuf[1] = 4;
return usb_stor_reset_common(us, US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, us->ifnum, us->iobuf, CB_RESET_CMD_SIZE);
}
EXPORT_SYMBOL_GPL(usb_stor_CB_reset);
/* This issues a Bulk-only Reset to the device in question, including
* clearing the subsequent endpoint halts that may occur.
*/
int usb_stor_Bulk_reset(struct us_data *us)
{
US_DEBUGP("%s called\n", __func__);
return usb_stor_reset_common(us, US_BULK_RESET_REQUEST,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, us->ifnum, NULL, 0);
}
EXPORT_SYMBOL_GPL(usb_stor_Bulk_reset);
/* Issue a USB port reset to the device. The caller must not hold
* us->dev_mutex.
*/
int usb_stor_port_reset(struct us_data *us)
{
int result;
/* For these devices we must use the class-specific method */
if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS)
return -EPERM;
result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
if (result < 0)
US_DEBUGP("unable to lock device for reset: %d\n", result);
else {
/* Were we disconnected while waiting for the lock? */
if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
result = -EIO;
US_DEBUGP("No reset during disconnect\n");
} else {
result = usb_reset_device(us->pusb_dev);
US_DEBUGP("usb_reset_device returns %d\n",
result);
}
usb_unlock_device(us->pusb_dev);
}
return result;
}
| gpl-2.0 |
tjstyle/FIH-Kernel | drivers/net/wireless/ath/ath5k/pcu.c | 492 | 31096 | /*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org>
* Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
* Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*********************************\
* Protocol Control Unit Functions *
\*********************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"
/*******************\
* Generic functions *
\*******************/
/**
* ath5k_hw_set_opmode - Set PCU operating mode
*
* @ah: The &struct ath5k_hw
*
* Initialize PCU for the various operating modes (AP/STA etc)
*
* NOTE: ah->ah_op_mode must be set before calling this.
*/
int ath5k_hw_set_opmode(struct ath5k_hw *ah)
{
u32 pcu_reg, beacon_reg, low_id, high_id;
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
| AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
(AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
beacon_reg = 0;
ATH5K_TRACE(ah->ah_sc);
switch (ah->ah_op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_ADHOC;
if (ah->ah_version == AR5K_AR5210)
pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
else
AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_AP;
if (ah->ah_version == AR5K_AR5210)
pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
else
AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
break;
case NL80211_IFTYPE_STATION:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_PWR_SV : 0);
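/* fall through */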
case NL80211_IFTYPE_MONITOR:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_NO_PSPOLL : 0);
break;
default:
return -EINVAL;
}
/*
* Set PCU registers
*/
low_id = AR5K_LOW_ID(ah->ah_sta_id);
high_id = AR5K_HIGH_ID(ah->ah_sta_id);
ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
/*
* Set Beacon Control Register on 5210
*/
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
return 0;
}
/**
* ath5k_hw_update - Update mib counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
* @stats: The &struct ieee80211_low_level_stats we use to track
* statistics on the driver
*
* Reads MIB counters from PCU and updates sw statistics. Must be
* called after a MIB interrupt.
*/
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
struct ieee80211_low_level_stats *stats)
{
ATH5K_TRACE(ah->ah_sc);
/* Read-And-Clear */
stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
/* XXX: Should we use this to track beacon count?
* We read it anyway to clear the register */
ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
/* Reset profile count registers on 5212*/
if (ah->ah_version == AR5K_AR5212) {
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
}
/* TODO: Handle ANI stats */
}
/**
* ath5k_hw_set_ack_bitrate - set bitrate for ACKs
*
* @ah: The &struct ath5k_hw
* @high: Flag to determine if we want to use a high transmission rate
* for ACKs or not
*
* If the high flag is set, we tell hw to use a set of control rates based on
* the current transmission rate (check out the control_rates array inside reset.c).
* If not, hw just uses the lowest rate available for the current modulation
* scheme being used (1Mbit for CCK and 6Mbits for OFDM).
*/
void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
{
if (ah->ah_version != AR5K_AR5212)
return;
else {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (high)
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
/******************\
* ACK/CTS Timeouts *
\******************/
/**
* ath5k_hw_get_ack_timeout - Get ACK timeout from PCU in usec
*
* @ah: The &struct ath5k_hw
*/
unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
}
/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
ah->ah_turbo) <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
ath5k_hw_htoclock(timeout, ah->ah_turbo));
return 0;
}
/**
* ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
*
* @ah: The &struct ath5k_hw
*/
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
}
/**
* ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
ah->ah_turbo) <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
ath5k_hw_htoclock(timeout, ah->ah_turbo));
return 0;
}
/****************\
* BSSID handling *
\****************/
/**
* ath5k_hw_get_lladdr - Get station id
*
* @ah: The &struct ath5k_hw
* @mac: The card's mac address
*
* Initialize ah->ah_sta_id using the mac address provided
* (just a memcpy).
*
* TODO: Remove it once we merge ath5k_softc and ath5k_hw
*/
void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
{
ATH5K_TRACE(ah->ah_sc);
memcpy(mac, ah->ah_sta_id, ETH_ALEN);
}
/**
* ath5k_hw_set_lladdr - Set station id
*
* @ah: The &struct ath5k_hw
* @mac: The card's mac address
*
* Set station id on hw using the provided mac address
*/
int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
u32 low_id, high_id;
u32 pcu_reg;
ATH5K_TRACE(ah->ah_sc);
/* Set new station ID */
memcpy(ah->ah_sta_id, mac, ETH_ALEN);
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
low_id = AR5K_LOW_ID(mac);
high_id = AR5K_HIGH_ID(mac);
ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
return 0;
}
/**
* ath5k_hw_set_associd - Set BSSID for association
*
* @ah: The &struct ath5k_hw
* @bssid: BSSID
* @assoc_id: Assoc id
*
* Sets the BSSID which triggers the "SME Join" operation
*/
void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
{
u32 low_id, high_id;
u16 tim_offset = 0;
/*
* Set simple BSSID mask on 5212
*/
if (ah->ah_version == AR5K_AR5212) {
ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_bssid_mask),
AR5K_BSS_IDM0);
ath5k_hw_reg_write(ah, AR5K_HIGH_ID(ah->ah_bssid_mask),
AR5K_BSS_IDM1);
}
/*
* Set BSSID which triggers the "SME Join" operation
*/
low_id = AR5K_LOW_ID(bssid);
high_id = AR5K_HIGH_ID(bssid);
ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);
if (assoc_id == 0) {
ath5k_hw_disable_pspoll(ah);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
tim_offset ? tim_offset + 4 : 0);
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
/**
* ath5k_hw_set_bssid_mask - filter out bssids we listen
*
* @ah: the &struct ath5k_hw
* @mask: the bssid_mask, a u8 array of size ETH_ALEN
*
* BSSID masking is a method used by AR5212 and newer hardware to inform PCU
* which bits of the interface's MAC address should be looked at when trying
* to decide which packets to ACK. In station mode and AP mode with a single
* BSS every bit matters since we lock to only one BSS. In AP mode with
* multiple BSSes (virtual interfaces) not every bit matters because hw must
* accept frames for all BSSes and so we tweak some bits of our mac address
* in order to have multiple BSSes.
*
* NOTE: This is a simple filter and does *not* filter out all
* relevant frames. Some frames that are not for us might get ACKed from us
* by PCU because they just match the mask.
*
* When handling multiple BSSes you can get the BSSID mask by computing the
* AND of ~(MAC XOR BSSID) over all BSSIDs we handle.
*
* When you do this you are essentially computing the common bits of all your
* BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
* the MAC address to obtain the relevant bits and compare the result with
* (frame's BSSID & mask) to see if they match.
*/
/*
* Simple example: on your card you have two BSSes you have created with
* BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
* There is another BSSID-03 but you are not part of it. For simplicity's sake,
* assuming only 4 bits for a mac address and for BSSIDs you can then have:
*
* \
* MAC: 0001 |
* BSSID-01: 0100 | --> Belongs to us
* BSSID-02: 1001 |
* /
* -------------------
* BSSID-03: 0110 | --> External
* -------------------
*
* Our bssid_mask would then be:
*
* On loop iteration for BSSID-01:
* ~(0001 ^ 0100) -> ~(0101)
* -> 1010
* bssid_mask = 1010
*
* On loop iteration for BSSID-02:
* bssid_mask &= ~(0001 ^ 1001)
* bssid_mask = (1010) & ~(0001 ^ 1001)
* bssid_mask = (1010) & ~(1001)
* bssid_mask = (1010) & (0110)
* bssid_mask = 0010
*
* A bssid_mask of 0010 means "only pay attention to the second least
* significant bit". This is because it's the only bit common
* amongst the MAC and all BSSIDs we support. To find out what the real
* common bit is we can simply "&" the bssid_mask now with any BSSID we have
* or our MAC address (we assume the hardware uses the MAC address).
*
* Now, suppose there's an incoming frame for BSSID-03:
*
* IFRAME-01: 0110
*
* An easy eye-inspection of this should already tell you that this frame
* will not pass our check. This is because the bssid_mask tells the
* hardware to only look at the second least significant bit and the
* common bit amongst the MAC and BSSIDs is 0; this frame has the 2nd LSB
* as 1, which does not match 0.
*
* So with IFRAME-01 we *assume* the hardware will do:
*
* allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
* --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
* --> allow = (0010) == 0000 ? 1 : 0;
* --> allow = 0
*
* Let's now test a frame that should work:
*
* IFRAME-02: 0001 (we should allow)
*
* allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
* --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
* --> allow = (0000) == (0000)
* --> allow = 1
*
* Other examples:
*
* IFRAME-03: 0100 --> allowed
* IFRAME-04: 1001 --> allowed
* IFRAME-05: 1101 --> allowed but it's not for us!
*
*/
int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
u32 low_id, high_id;
ATH5K_TRACE(ah->ah_sc);
/* Cache bssid mask so that we can restore it
* on reset */
memcpy(ah->ah_bssid_mask, mask, ETH_ALEN);
if (ah->ah_version == AR5K_AR5212) {
low_id = AR5K_LOW_ID(mask);
high_id = AR5K_HIGH_ID(mask);
ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
return 0;
}
return -EIO;
}
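#if 0
/*
 * Editor's sketch (not part of the driver): compute the mask described
 * above by AND-folding ~(MAC ^ BSSID) over every BSSID we serve. The
 * helper name and its callers are hypothetical.
 */
static void ath5k_calc_bssid_mask(const u8 *mac,
		const u8 bssids[][ETH_ALEN], int nbssids, u8 *mask)
{
	int i, j;
	/* start from "every bit matters" and clear the bits that differ */
	memset(mask, 0xff, ETH_ALEN);
	for (i = 0; i < nbssids; i++)
		for (j = 0; j < ETH_ALEN; j++)
			mask[j] &= ~(mac[j] ^ bssids[i][j]);
}
#endif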
/************\
* RX Control *
\************/
/**
* ath5k_hw_start_rx_pcu - Start RX engine
*
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
* TODO: Init ANI here
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_stop_rx_pcu - Stop RX engine
*
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*
* TODO: Detach ANI here
*/
void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/*
* Set multicast filter
*/
void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ATH5K_TRACE(ah->ah_sc);
/* Set the multicast filter */
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/*
* Set multicast filter by index
*/
int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
{
ATH5K_TRACE(ah->ah_sc);
if (index >= 64)
return -EINVAL;
else if (index >= 32)
AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
(1 << (index - 32)));
else
AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
return 0;
}
/*
* Clear Multicast filter by index
*/
int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
{
ATH5K_TRACE(ah->ah_sc);
if (index >= 64)
return -EINVAL;
else if (index >= 32)
AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
(1 << (index - 32)));
else
AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
return 0;
}
/**
* ath5k_hw_get_rx_filter - Get current rx filter
*
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
* phy error filter registers. RX filter is used
* to set the allowed frame types that PCU will accept
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
ATH5K_TRACE(ah->ah_sc);
filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
/*Radar detection for 5212*/
if (ah->ah_version == AR5K_AR5212) {
data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
if (data & AR5K_PHY_ERR_FIL_RADAR)
filter |= AR5K_RX_FILTER_RADARERR;
if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
filter |= AR5K_RX_FILTER_PHYERR;
}
return filter;
}
/**
* ath5k_hw_set_rx_filter - Set rx filter
*
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
* Sets RX filter register and also handles PHY error filter
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
ATH5K_TRACE(ah->ah_sc);
/* Set PHY error filter register on 5212*/
if (ah->ah_version == AR5K_AR5212) {
if (filter & AR5K_RX_FILTER_RADARERR)
data |= AR5K_PHY_ERR_FIL_RADAR;
if (filter & AR5K_RX_FILTER_PHYERR)
data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
}
/*
* The AR5210 uses promiscuous mode to detect radar activity
*/
if (ah->ah_version == AR5K_AR5210 &&
(filter & AR5K_RX_FILTER_RADARERR)) {
filter &= ~AR5K_RX_FILTER_RADARERR;
filter |= AR5K_RX_FILTER_PROM;
}
/*Zero length DMA (phy error reporting) */
if (data)
AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
/*Write RX Filter register*/
ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
/*Write PHY error filter register on 5212*/
if (ah->ah_version == AR5K_AR5212)
ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
}
/****************\
* Beacon control *
\****************/
/**
* ath5k_hw_get_tsf32 - Get a 32bit TSF
*
* @ah: The &struct ath5k_hw
*
* Returns lower 32 bits of current TSF
*/
u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
}
/**
* ath5k_hw_get_tsf64 - Get the full 64bit TSF
*
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
ATH5K_TRACE(ah->ah_sc);
return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
}
/**
* ath5k_hw_set_tsf64 - Set a new 64bit TSF
*
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ATH5K_TRACE(ah->ah_sc);
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
* ath5k_hw_reset_tsf - Force a TSF reset
*
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
ATH5K_TRACE(ah->ah_sc);
val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
/*
* Each write to the RESET_TSF bit toggles a hardware internal
* signal to reset TSF, but if left high it will cause a TSF reset
* on the next chip reset as well. Thus we always write the value
* twice to clear the signal.
*/
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
/*
* Initialize beacon timers
*/
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
ATH5K_TRACE(ah->ah_sc);
/*
* Set the additional timers by mode
*/
switch (ah->ah_op_mode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
* timer and timer2 as next CFP duration start
* timer. Both in 1/8TUs. */
/* TODO: PCF handling */
if (ah->ah_version == AR5K_AR5210) {
timer1 = 0xffffffff;
timer2 = 0xffffffff;
} else {
timer1 = 0x0000ffff;
timer2 = 0x0007ffff;
}
/* Mark associated AP as PCF incapable for now */
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF);
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
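/* fall through */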
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
* software beacon alert. Both in 1/8TUs. */
timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
break;
}
/* Timer3 marks the end of our ATIM window;
* a zero-length window is not allowed because
* we'll get no beacons */
timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);
/*
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
/* Force a TSF reset if requested and enable beacons */
if (interval & AR5K_BEACON_RESET_TSF)
ath5k_hw_reset_tsf(ah);
ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
AR5K_BEACON_ENABLE),
AR5K_BEACON);
/* Flush any pending BMISS interrupts on ISR by
* performing a clear-on-write operation on the PISR
* register for the BMISS bit (writing a bit on
* ISR toggles a reset for that bit and leaves
* the rest of the bits intact) */
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
else
ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
/* TODO: Set enhanced sleep registers on AR5212
* based on vif->bss_conf params, until then
* disable power save reporting.*/
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
}
#if 0
/*
* Set beacon timers
*/
int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
const struct ath5k_beacon_state *state)
{
u32 cfp_period, next_cfp, dtim, interval, next_beacon;
/*
* TODO: should be changed through *state
* review struct ath5k_beacon_state struct
*
* XXX: These are used for the cfp period below; are they
* ok? Is it O.K. for tsf here to be 0 or should we use
* get_tsf ?
*/
u32 dtim_count = 0; /* XXX */
u32 cfp_count = 0; /* XXX */
u32 tsf = 0; /* XXX */
ATH5K_TRACE(ah->ah_sc);
/* Return on an invalid beacon state */
if (state->bs_interval < 1)
return -EINVAL;
interval = state->bs_interval;
dtim = state->bs_dtim_period;
/*
* PCF support?
*/
if (state->bs_cfp_period > 0) {
/*
* Enable PCF mode and set the CFP
* (Contention Free Period) and timer registers
*/
cfp_period = state->bs_cfp_period * state->bs_dtim_period *
state->bs_interval;
next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
state->bs_interval;
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_DEFAULT_ANTENNA |
AR5K_STA_ID1_PCF);
ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
AR5K_CFP_DUR);
ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
next_cfp)) << 3, AR5K_TIMER2);
} else {
/* Disable PCF mode */
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_DEFAULT_ANTENNA |
AR5K_STA_ID1_PCF);
}
/*
* Enable the beacon timer register
*/
ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
/*
* Start the beacon timers
*/
ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
AR5K_BEACON_PERIOD), AR5K_BEACON);
/*
* Write new beacon miss threshold, if it appears to be valid
* XXX: Figure out right values for min <= bs_bmiss_threshold <= max
* and return if it's not in range. We can test this by writing the
* largest value and reading it back to see which bits register.
*/
AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
state->bs_bmiss_threshold);
/*
* Set sleep control register
* XXX: Didn't find this in the 5210 code, but since this register
* exists also in ar5k's 5210 headers I leave it as common code.
*/
AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
(state->bs_sleep_duration - 3) << 3);
/*
* Set enhanced sleep registers on 5212
*/
if (ah->ah_version == AR5K_AR5212) {
if (state->bs_sleep_duration > state->bs_interval &&
roundup(state->bs_sleep_duration, interval) ==
state->bs_sleep_duration)
interval = state->bs_sleep_duration;
if (state->bs_sleep_duration > dtim && (dtim == 0 ||
roundup(state->bs_sleep_duration, dtim) ==
state->bs_sleep_duration))
dtim = state->bs_sleep_duration;
if (interval > dtim)
return -EINVAL;
next_beacon = interval == dtim ? state->bs_next_dtim :
state->bs_next_beacon;
ath5k_hw_reg_write(ah,
AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
AR5K_SLEEP0_NEXT_DTIM) |
AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
AR5K_SLEEP0_ENH_SLEEP_EN |
AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
AR5K_SLEEP1_NEXT_TIM) |
AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
ath5k_hw_reg_write(ah,
AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
}
return 0;
}
/*
* Reset beacon timers
*/
void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
/*
* Disable beacon timer
*/
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
/*
* Disable some beacon register values
*/
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
}
/*
* Wait for beacon queue to finish
*/
int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
{
unsigned int i;
int ret;
ATH5K_TRACE(ah->ah_sc);
/* 5210 doesn't have QCU*/
if (ah->ah_version == AR5K_AR5210) {
/*
* Wait for beacon queue to finish by checking
* Control Register and Beacon Status Register.
*/
for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
||
!(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
break;
udelay(10);
}
/* Timeout... */
if (i <= 0) {
/*
* Re-schedule the beacon queue
*/
ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
AR5K_BCR);
return -EIO;
}
ret = 0;
} else {
/*5211/5212*/
ret = ath5k_hw_register_timeout(ah,
AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
AR5K_QCU_STS_FRMPENDCNT, 0, false);
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
return -EIO;
}
return ret;
}
#endif
/*********************\
* Key table functions *
\*********************/
/*
* Reset a key entry on the table
*/
int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
{
unsigned int i, type;
u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
/* Reset the associated MIC entry, located at
* offset (entry + 64), if TKIP is enabled */
if (type == AR5K_KEYTABLE_TYPE_TKIP) {
AR5K_ASSERT_ENTRY(micentry, AR5K_KEYTABLE_SIZE);
for (i = 0; i < AR5K_KEYCACHE_SIZE / 2 ; i++)
ath5k_hw_reg_write(ah, 0,
AR5K_KEYTABLE_OFF(micentry, i));
}
/*
* Set NULL encryption on AR5212+
*
* Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
* AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
*
* Note2: Windows driver (ndiswrapper) sets this to
* 0x00000714 instead of 0x00000007
*/
if (ah->ah_version >= AR5K_AR5211) {
ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
AR5K_KEYTABLE_TYPE(entry));
if (type == AR5K_KEYTABLE_TYPE_TKIP) {
ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
AR5K_KEYTABLE_TYPE(micentry));
}
}
return 0;
}
/*
* Check if a table entry is valid
*/
int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
{
ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
/* Check the validation flag at the end of the entry */
return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
AR5K_KEYTABLE_VALID;
}
static
int ath5k_keycache_type(const struct ieee80211_key_conf *key)
{
switch (key->alg) {
case ALG_TKIP:
return AR5K_KEYTABLE_TYPE_TKIP;
case ALG_CCMP:
return AR5K_KEYTABLE_TYPE_CCM;
case ALG_WEP:
if (key->keylen == WLAN_KEY_LEN_WEP40)
return AR5K_KEYTABLE_TYPE_40;
else if (key->keylen == WLAN_KEY_LEN_WEP104)
return AR5K_KEYTABLE_TYPE_104;
return -EINVAL;
default:
return -EINVAL;
}
return -EINVAL;
}
/*
* Set a key entry on the table
*/
int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
const struct ieee80211_key_conf *key, const u8 *mac)
{
unsigned int i;
int keylen;
__le32 key_v[5] = {};
__le32 key0 = 0, key1 = 0;
__le32 *rxmic, *txmic;
int keytype;
u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
bool is_tkip;
const u8 *key_ptr;
ATH5K_TRACE(ah->ah_sc);
is_tkip = (key->alg == ALG_TKIP);
/*
* key->keylen comes in from mac80211 in bytes.
* TKIP is 128 bit + 128 bit mic
*/
keylen = (is_tkip) ? (128 / 8) : key->keylen;
if (entry > AR5K_KEYTABLE_SIZE ||
(is_tkip && micentry > AR5K_KEYTABLE_SIZE))
return -EOPNOTSUPP;
if (unlikely(keylen > 16))
return -EOPNOTSUPP;
keytype = ath5k_keycache_type(key);
if (keytype < 0)
return keytype;
/*
* each key block is 6 bytes wide, written as pairs of
* alternating 32 and 16 bit le values.
*/
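/*
 * Editor's illustration: for a 13-byte (104-bit) WEP key k[0..12]
 * the loop below yields
 *	key_v[0] = k[0..3]   key_v[1] = k[4..5]
 *	key_v[2] = k[6..9]   key_v[3] = k[10..11]
 * and the tail memcpy() stores the remaining k[12] into key_v[4].
 */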
key_ptr = key->key;
for (i = 0; keylen >= 6; keylen -= 6) {
memcpy(&key_v[i], key_ptr, 6);
i += 2;
key_ptr += 6;
}
if (keylen)
memcpy(&key_v[i], key_ptr, keylen);
/* intentionally corrupt key until mic is installed */
if (is_tkip) {
key0 = key_v[0] = ~key_v[0];
key1 = key_v[1] = ~key_v[1];
}
for (i = 0; i < ARRAY_SIZE(key_v); i++)
ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
AR5K_KEYTABLE_OFF(entry, i));
ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
if (is_tkip) {
/* Install rx/tx MIC */
rxmic = (__le32 *) &key->key[16];
txmic = (__le32 *) &key->key[24];
if (ah->ah_combined_mic) {
key_v[0] = rxmic[0];
key_v[1] = cpu_to_le32(le32_to_cpu(txmic[0]) >> 16);
key_v[2] = rxmic[1];
key_v[3] = cpu_to_le32(le32_to_cpu(txmic[0]) & 0xffff);
key_v[4] = txmic[1];
} else {
key_v[0] = rxmic[0];
key_v[1] = 0;
key_v[2] = rxmic[1];
key_v[3] = 0;
key_v[4] = 0;
}
for (i = 0; i < ARRAY_SIZE(key_v); i++)
ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
AR5K_KEYTABLE_OFF(micentry, i));
ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
AR5K_KEYTABLE_TYPE(micentry));
ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC0(micentry));
ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC1(micentry));
/* restore first 2 words of key */
ath5k_hw_reg_write(ah, le32_to_cpu(~key0),
AR5K_KEYTABLE_OFF(entry, 0));
ath5k_hw_reg_write(ah, le32_to_cpu(~key1),
AR5K_KEYTABLE_OFF(entry, 1));
}
return ath5k_hw_set_key_lladdr(ah, entry, mac);
}
int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
{
u32 low_id, high_id;
ATH5K_TRACE(ah->ah_sc);
/* Invalid entry (key table overflow) */
AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
/* MAC may be NULL if it's a broadcast key. In this case there's no need
* to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
if (!mac) {
low_id = 0xffffffff;
high_id = 0xffff | AR5K_KEYTABLE_VALID;
} else {
low_id = AR5K_LOW_ID(mac);
high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
}
ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
return 0;
}
| gpl-2.0 |
schnitzeltony/linux | drivers/clocksource/bcm_kona_timer.c | 492 | 5394 | /*
* Copyright (C) 2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach/time.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define KONA_GPTIMER_STCS_OFFSET 0x00000000
#define KONA_GPTIMER_STCLO_OFFSET 0x00000004
#define KONA_GPTIMER_STCHI_OFFSET 0x00000008
#define KONA_GPTIMER_STCM0_OFFSET 0x0000000C
#define KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT 0
#define KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT 4
struct kona_bcm_timers {
int tmr_irq;
void __iomem *tmr_regs;
};
static struct kona_bcm_timers timers;
static u32 arch_timer_rate;
/*
* We use the peripheral timers for system tick, the cpu global timer for
* profile tick
*/
static void kona_timer_disable_and_clear(void __iomem *base)
{
uint32_t reg;
/*
* clear and disable interrupts
* We are using compare/match register 0 for our system interrupts
*/
reg = readl(base + KONA_GPTIMER_STCS_OFFSET);
/* Clear compare (0) interrupt */
reg |= 1 << KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT;
/* disable compare */
reg &= ~(1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
writel(reg, base + KONA_GPTIMER_STCS_OFFSET);
}
static void
kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
int loop_limit = 4;
/*
* Read 64-bit free running counter
* 1. Read hi-word
* 2. Read low-word
* 3. Read hi-word again
* 4.1
* if new hi-word is not equal to previously read hi-word, then
* start from #1
* 4.2
* if new hi-word is equal to previously read hi-word then stop.
*/
while (--loop_limit) {
*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
}
if (!loop_limit) {
pr_err("bcm_kona_timer: getting counter failed.\n");
pr_err(" Timer will be impacted\n");
}
return;
}
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
/*
* timer (0) is disabled by the timer interrupt already
* so, here we reload the next event value and re-enable
* the timer.
*
* This way, we are potentially losing the time between
* timer-interrupt->set_next_event. CPU local timers, when
* they come in, should get rid of the skew.
*/
uint32_t lsw, msw;
uint32_t reg;
kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
/* Load the "next" event tick value */
writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
/* Enable compare */
reg = readl(timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
reg |= (1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
writel(reg, timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
return 0;
}
static int kona_timer_shutdown(struct clock_event_device *evt)
{
kona_timer_disable_and_clear(timers.tmr_regs);
return 0;
}
static struct clock_event_device kona_clockevent_timer = {
.name = "timer 1",
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = kona_timer_set_next_event,
.set_state_shutdown = kona_timer_shutdown,
.tick_resume = kona_timer_shutdown,
};
static void __init kona_timer_clockevents_init(void)
{
kona_clockevent_timer.cpumask = cpumask_of(0);
clockevents_config_and_register(&kona_clockevent_timer,
arch_timer_rate, 6, 0xffffffff);
}
static irqreturn_t kona_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &kona_clockevent_timer;
kona_timer_disable_and_clear(timers.tmr_regs);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct irqaction kona_timer_irq = {
.name = "Kona Timer Tick",
.flags = IRQF_TIMER,
.handler = kona_timer_interrupt,
};
static void __init kona_timer_init(struct device_node *node)
{
u32 freq;
struct clk *external_clk;
if (!of_device_is_available(node)) {
pr_info("Kona Timer v1 marked as disabled in device tree\n");
return;
}
external_clk = of_clk_get_by_name(node, NULL);
if (!IS_ERR(external_clk)) {
arch_timer_rate = clk_get_rate(external_clk);
clk_prepare_enable(external_clk);
} else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
arch_timer_rate = freq;
} else {
pr_err("Kona Timer v1 unable to determine clock-frequency");
return;
}
/* Setup IRQ numbers */
timers.tmr_irq = irq_of_parse_and_map(node, 0);
/* Setup IO addresses */
timers.tmr_regs = of_iomap(node, 0);
kona_timer_disable_and_clear(timers.tmr_regs);
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
}
CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
/*
* bcm,kona-timer is deprecated by brcm,kona-timer
* being kept here for driver compatibility
*/
CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init);
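/*
 * Editor's illustration: a device tree node this driver would bind to,
 * matching the probing above (clock via of_clk_get_by_name() or a
 * "clock-frequency" fallback, one IRQ, one register window). The
 * address, interrupt specifier and clock phandle are hypothetical.
 *
 *	timer@35006000 {
 *		compatible = "brcm,kona-timer";
 *		reg = <0x35006000 0x1000>;
 *		interrupts = <0x0 7 0x4>;
 *		clocks = <&hub_timer_clk>;
 *	};
 */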
| gpl-2.0 |
s0be/kernel_htc_msm7227 | drivers/staging/msm/mdp4_overlay.c | 748 | 31256 | /* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
#include <msm_mdp.h>
#include <linux/file.h>
#include "android_pmem.h"
#include <linux/major.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
struct mdp4_overlay_ctrl {
struct mdp4_overlay_pipe plist[MDP4_MAX_OVERLAY_PIPE];
struct mdp4_overlay_pipe *stage[MDP4_MAX_MIXER][MDP4_MAX_STAGE];
} mdp4_overlay_db;
static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc)
{
uint32 dma2_cfg_reg;
dma2_cfg_reg = DMA_DITHER_EN;
if (mfd->fb_imgType == MDP_BGR_565)
dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
else
dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
if (mfd->panel_info.bpp == 18) {
dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
} else if (mfd->panel_info.bpp == 16) {
dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
} else {
dma2_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
}
if (lcdc)
dma2_cfg_reg |= DMA_PACK_ALIGN_MSB;
/* dma2 config register */
MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
}
void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe)
{
/* dma_p source */
MDP_OUTP(MDP_BASE + 0x90004,
(pipe->src_height << 16 | pipe->src_width));
MDP_OUTP(MDP_BASE + 0x90008, pipe->srcp0_addr);
MDP_OUTP(MDP_BASE + 0x9000c, pipe->srcp0_ystride);
/* dma_p dest */
MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
}
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
#define MDP4_VG_PHASE_STEP_SHIFT 29
static int mdp4_leading_0(uint32 num)
{
uint32 bit = 0x80000000;
int i;
for (i = 0; i < 32; i++) {
if (bit & num)
return i;
bit >>= 1;
}
return i;
}
static uint32 mdp4_scale_phase_step(int f_num, uint32 src, uint32 dst)
{
uint32 val;
int n;
n = mdp4_leading_0(src);
if (n > f_num)
n = f_num;
val = src << n; /* maximize shift to reduce loss of resolution */
val /= dst;
if (n < f_num) {
n = f_num - n;
val <<= n;
}
return val;
}
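/*
 * Editor's note (illustrative): with f_num = 29 the result is src/dst in
 * 3.29 fixed point, so MDP4_VG_PHASE_STEP_DEFAULT (1 << 29) encodes 1.0.
 * For example, src = 2, dst = 1: the 30 leading zeros of src are capped
 * at 29, giving val = (2 << 29) / 1 = 0x40000000, i.e. 2.0, a 2x
 * downscale step. Shifting up before the divide keeps as much precision
 * as the 32-bit intermediate allows.
 */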
static void mdp4_scale_setup(struct mdp4_overlay_pipe *pipe)
{
pipe->phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
pipe->phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
if (pipe->dst_h && pipe->src_h != pipe->dst_h) {
if (pipe->dst_h >= pipe->src_h * 8) /* too much */
return;
pipe->op_mode |= MDP4_OP_SCALEY_EN;
if (pipe->pipe_type == OVERLAY_TYPE_VG) {
if (pipe->dst_h <= (pipe->src_h / 4))
pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
else
pipe->op_mode |= MDP4_OP_SCALEY_FIR;
}
pipe->phasey_step = mdp4_scale_phase_step(29,
pipe->src_h, pipe->dst_h);
}
if (pipe->dst_w && pipe->src_w != pipe->dst_w) {
if (pipe->dst_w >= pipe->src_w * 8) /* too much */
return;
pipe->op_mode |= MDP4_OP_SCALEX_EN;
if (pipe->pipe_type == OVERLAY_TYPE_VG) {
if (pipe->dst_w <= (pipe->src_w / 4))
pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
else
pipe->op_mode |= MDP4_OP_SCALEY_FIR;
}
pipe->phasex_step = mdp4_scale_phase_step(29,
pipe->src_w, pipe->dst_w);
}
}
void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe)
{
char *rgb_base;
uint32 src_size, src_xy, dst_size, dst_xy;
uint32 format, pattern;
rgb_base = MDP_BASE + MDP4_RGB_BASE;
rgb_base += (MDP4_RGB_OFF * pipe->pipe_num);
src_size = ((pipe->src_h << 16) | pipe->src_w);
src_xy = ((pipe->src_y << 16) | pipe->src_x);
dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
format = mdp4_overlay_format(pipe);
pattern = mdp4_overlay_unpack_pattern(pipe);
pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
mdp4_scale_setup(pipe);
outpdw(rgb_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
outpdw(rgb_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
outpdw(rgb_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
outpdw(rgb_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
outpdw(rgb_base + 0x0010, pipe->srcp0_addr);
outpdw(rgb_base + 0x0040, pipe->srcp0_ystride);
outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(rgb_base + 0x005c, pipe->phasex_step);
outpdw(rgb_base + 0x0060, pipe->phasey_step);
/* 16 bytes-burst x 3 req <= 48 bytes */
outpdw(rgb_base + 0x1004, 0xc2); /* MDP_RGB_FETCH_CFG */
}
void mdp4_overlay_vg_setup(struct mdp4_overlay_pipe *pipe)
{
char *vg_base;
uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
uint32 format, pattern;
vg_base = MDP_BASE + MDP4_VIDEO_BASE;
vg_base += (MDP4_VIDEO_OFF * pipe->pipe_num);
frame_size = ((pipe->src_height << 16) | pipe->src_width);
src_size = ((pipe->src_h << 16) | pipe->src_w);
src_xy = ((pipe->src_y << 16) | pipe->src_x);
dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
format = mdp4_overlay_format(pipe);
pattern = mdp4_overlay_unpack_pattern(pipe);
pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR |
MDP4_OP_IGC_LUT_EN);
mdp4_scale_setup(pipe);
outpdw(vg_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
outpdw(vg_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
outpdw(vg_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
outpdw(vg_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
outpdw(vg_base + 0x0048, frame_size); /* TILE frame size */
/* luma component plane */
outpdw(vg_base + 0x0010, pipe->srcp0_addr);
/* chroma component plane */
outpdw(vg_base + 0x0014, pipe->srcp1_addr);
outpdw(vg_base + 0x0040,
pipe->srcp1_ystride << 16 | pipe->srcp0_ystride);
outpdw(vg_base + 0x0050, format); /* MDP_RGB_SRC_FORMAT */
outpdw(vg_base + 0x0054, pattern); /* MDP_RGB_SRC_UNPACK_PATTERN */
outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(vg_base + 0x005c, pipe->phasex_step);
outpdw(vg_base + 0x0060, pipe->phasey_step);
if (pipe->op_mode & MDP4_OP_DITHER_EN) {
outpdw(vg_base + 0x0068,
pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
}
/* 16 bytes-burst x 3 req <= 48 bytes */
outpdw(vg_base + 0x1004, 0xc2); /* MDP_VG_FETCH_CFG */
}
int mdp4_overlay_format2type(uint32 format)
{
switch (format) {
case MDP_RGB_565:
case MDP_RGB_888:
case MDP_BGR_565:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_BGRA_8888:
return OVERLAY_TYPE_RGB;
case MDP_YCRYCB_H2V1:
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_TILE:
case MDP_Y_CRCB_H2V2_TILE:
return OVERLAY_TYPE_VG;
default:
return -ERANGE;
}
}
#define C3_ALPHA 3 /* alpha */
#define C2_R_Cr 2 /* R/Cr */
#define C1_B_Cb 1 /* B/Cb */
#define C0_G_Y 0 /* G/luma */
int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe)
{
switch (pipe->src_format) {
case MDP_RGB_565:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 1; /* R, 5 bits */
pipe->b_bit = 1; /* B, 5 bits */
pipe->g_bit = 2; /* G, 6 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
pipe->bpp = 2; /* 2 bpp */
break;
case MDP_RGB_888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
pipe->bpp = 3; /* 3 bpp */
break;
case MDP_BGR_565:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 1; /* R, 5 bits */
pipe->b_bit = 1; /* B, 5 bits */
pipe->g_bit = 2; /* G, 6 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element2 = C1_B_Cb; /* B */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C2_R_Cr; /* R */
pipe->bpp = 2; /* 2 bpp */
break;
case MDP_ARGB_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 4 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 1;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C3_ALPHA; /* alpha */
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
pipe->bpp = 4; /* 4 bpp */
break;
case MDP_RGBA_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 4 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 1;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C2_R_Cr; /* R */
pipe->element2 = C0_G_Y; /* G */
pipe->element1 = C1_B_Cb; /* B */
pipe->element0 = C3_ALPHA; /* alpha */
pipe->bpp = 4; /* 4 bpp */
break;
case MDP_BGRA_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 4 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 1;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C1_B_Cb; /* B */
pipe->element2 = C0_G_Y; /* G */
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C3_ALPHA; /* alpha */
pipe->bpp = 4; /* 4 bpp */
break;
case MDP_YCRYCB_H2V1:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0; /* alpha, 4 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C0_G_Y; /* G */
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
pipe->bpp = 2; /* 2 bpp */
pipe->chroma_sample = MDP4_CHROMA_H2V1;
break;
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 1; /* 2 */
pipe->element3 = C0_G_Y; /* not used */
pipe->element2 = C0_G_Y; /* not used */
if (pipe->src_format == MDP_Y_CRCB_H2V1) {
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C1_B_Cb; /* B */
pipe->chroma_sample = MDP4_CHROMA_H2V1;
} else if (pipe->src_format == MDP_Y_CBCR_H2V1) {
pipe->element1 = C1_B_Cb; /* B */
pipe->element0 = C2_R_Cr; /* R */
pipe->chroma_sample = MDP4_CHROMA_H2V1;
} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C1_B_Cb; /* B */
pipe->chroma_sample = MDP4_CHROMA_420;
} else if (pipe->src_format == MDP_Y_CBCR_H2V2) {
pipe->element1 = C1_B_Cb; /* B */
pipe->element0 = C2_R_Cr; /* R */
pipe->chroma_sample = MDP4_CHROMA_420;
}
pipe->bpp = 2; /* 2 bpp */
break;
case MDP_Y_CBCR_H2V2_TILE:
case MDP_Y_CRCB_H2V2_TILE:
pipe->frame_format = MDP4_FRAME_FORMAT_VIDEO_SUPERTILE;
pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 1; /* 2 */
pipe->element3 = C0_G_Y; /* not used */
pipe->element2 = C0_G_Y; /* not used */
if (pipe->src_format == MDP_Y_CRCB_H2V2_TILE) {
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C1_B_Cb; /* B */
pipe->chroma_sample = MDP4_CHROMA_420;
} else if (pipe->src_format == MDP_Y_CBCR_H2V2_TILE) {
pipe->element1 = C1_B_Cb; /* B */
pipe->element0 = C2_R_Cr; /* R */
pipe->chroma_sample = MDP4_CHROMA_420;
}
pipe->bpp = 2; /* 2 bpp */
break;
default:
/* not likely */
return -ERANGE;
}
return 0;
}
/*
* color_key_convert: output with 12 bits color key
*/
static uint32 color_key_convert(int start, int num, uint32 color)
{
uint32 data;
data = (color >> start) & ((1 << num) - 1);
if (num == 5)
data = (data << 7) + (data << 2) + (data >> 3);
else if (num == 6)
data = (data << 6) + data;
else /* 8 bits */
data = (data << 4) + (data >> 4);
return data;
}
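/*
 * Editor's illustration: the shifts replicate the source bits out to 12.
 * For a 5-bit component 0b10101 (0x15):
 *	(0x15 << 7) + (0x15 << 2) + (0x15 >> 3) = 0xa80 + 0x54 + 0x2 = 0xad6,
 * which is the pattern 10101 10101 10, the 5 source bits repeated until
 * all 12 bits are filled.
 */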
void transp_color_key(int format, uint32 transp,
uint32 *c0, uint32 *c1, uint32 *c2)
{
int b_start, g_start, r_start;
int b_num, g_num, r_num;
switch (format) {
case MDP_RGB_565:
b_start = 0;
g_start = 5;
r_start = 11;
r_num = 5;
g_num = 6;
b_num = 5;
break;
case MDP_RGB_888:
case MDP_XRGB_8888:
case MDP_ARGB_8888:
b_start = 0;
g_start = 8;
r_start = 16;
r_num = 8;
g_num = 8;
b_num = 8;
break;
case MDP_BGR_565:
b_start = 11;
g_start = 5;
r_start = 0;
r_num = 5;
g_num = 6;
b_num = 5;
break;
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V1:
b_start = 8;
g_start = 16;
r_start = 0;
r_num = 8;
g_num = 8;
b_num = 8;
break;
case MDP_Y_CRCB_H2V2:
case MDP_Y_CRCB_H2V1:
b_start = 0;
g_start = 16;
r_start = 8;
r_num = 8;
g_num = 8;
b_num = 8;
break;
default:
b_start = 0;
g_start = 8;
r_start = 16;
r_num = 8;
g_num = 8;
b_num = 8;
break;
}
*c0 = color_key_convert(g_start, g_num, transp);
*c1 = color_key_convert(b_start, b_num, transp);
*c2 = color_key_convert(r_start, r_num, transp);
}
uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe)
{
uint32 format;
format = 0;
if (pipe->solid_fill)
format |= MDP4_FORMAT_SOLID_FILL;
if (pipe->unpack_align_msb)
format |= MDP4_FORMAT_UNPACK_ALIGN_MSB;
if (pipe->unpack_tight)
format |= MDP4_FORMAT_UNPACK_TIGHT;
if (pipe->alpha_enable)
format |= MDP4_FORMAT_ALPHA_ENABLE;
format |= (pipe->unpack_count << 13);
format |= ((pipe->bpp - 1) << 9);
format |= (pipe->a_bit << 6);
format |= (pipe->r_bit << 4);
format |= (pipe->b_bit << 2);
format |= pipe->g_bit;
format |= (pipe->frame_format << 29);
if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
/* video/graphic */
format |= (pipe->fetch_plane << 19);
format |= (pipe->chroma_site << 28);
format |= (pipe->chroma_sample << 26);
}
return format;
}
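/*
 * Editor's illustration: for MDP_RGB_565 as configured by
 * mdp4_overlay_format2pipe() above, the word packs unpack_count = 2 at
 * bit 13, (bpp - 1) = 1 at bit 9, the a/r/b/g widths (0, 1, 1, 2) at
 * bits 6/4/2/0, plus MDP4_FORMAT_UNPACK_TIGHT; frame_format = LINEAR
 * leaves bits 31:29 zero and no pseudo-planar bits are set.
 */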
uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe)
{
return (pipe->element3 << 24) | (pipe->element2 << 16) |
(pipe->element1 << 8) | pipe->element0;
}
void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe)
{
uint32 data;
char *overlay_base;
if (pipe->mixer_num == MDP4_MIXER1)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
/* MDP_OVERLAYPROC_CFG */
outpdw(overlay_base + 0x0004, 0x01); /* directout */
data = pipe->src_height;
data <<= 16;
data |= pipe->src_width;
outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
outpdw(overlay_base + 0x000c, pipe->srcp0_addr);
outpdw(overlay_base + 0x0010, pipe->srcp0_ystride);
outpdw(overlay_base + 0x0014, 0x4); /* GC_LUT_EN, 888 */
}
int mdp4_overlay_active(int mixer)
{
uint32 data, mask, i;
int p1, p2;
data = inpdw(MDP_BASE + 0x10100);
p1 = 0;
p2 = 0;
for (i = 0; i < 8; i++) {
mask = data & 0x0f;
if (mask) {
if (mask <= 4)
p1++;
else
p2++;
}
data >>= 4;
}
if (mixer)
return p2;
else
return p1;
}
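/*
 * Editor's note on the register decoded above: MDP_LAYERMIXER_IN_CFG
 * holds one 4-bit stage field per pipe, with the VG pipes in the lower
 * nibbles and the RGB pipes above them (see mdp4_mixer_stage_up()
 * below). Mixer1 stage values are offset by 8, which is why a nibble
 * value of 4 or less is counted against mixer0 and anything larger
 * against mixer1.
 */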
void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe)
{
uint32 data, mask, snum, stage, mixer;
stage = pipe->mixer_stage;
mixer = pipe->mixer_num;
/* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
data = inpdw(MDP_BASE + 0x10100);
if (mixer == MDP4_MIXER1)
stage += 8;
if (pipe->pipe_type == OVERLAY_TYPE_VG) {/* VG1 and VG2 */
snum = 0;
snum += (4 * pipe->pipe_num);
} else {
snum = 8;
snum += (4 * pipe->pipe_num); /* RGB1 and RGB2 */
}
mask = 0x0f;
mask <<= snum;
stage <<= snum;
data &= ~mask; /* clear old bits */
data |= stage;
outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
data = inpdw(MDP_BASE + 0x10100);
ctrl->stage[pipe->mixer_num][pipe->mixer_stage] = pipe; /* keep it */
}
void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe)
{
uint32 data, mask, snum, stage, mixer;
stage = pipe->mixer_stage;
mixer = pipe->mixer_num;
if (pipe != ctrl->stage[mixer][stage]) /* not running */
return;
/* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
data = inpdw(MDP_BASE + 0x10100);
if (mixer == MDP4_MIXER1)
stage += 8;
if (pipe->pipe_type == OVERLAY_TYPE_VG) {/* VG1 and VG2 */
snum = 0;
snum += (4 * pipe->pipe_num);
} else {
snum = 8;
snum += (4 * pipe->pipe_num); /* RGB1 and RGB2 */
}
mask = 0x0f;
mask <<= snum;
data &= ~mask; /* clear old bits */
outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
data = inpdw(MDP_BASE + 0x10100);
ctrl->stage[pipe->mixer_num][pipe->mixer_stage] = NULL; /* clear it */
}
void mdp4_mixer_blend_setup(struct mdp4_overlay_pipe *pipe)
{
unsigned char *overlay_base;
uint32 c0, c1, c2, blend_op;
int off;
if (pipe->mixer_num) /* mixer number, /dev/fb0, /dev/fb1 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
/* stage 0 to stage 2 */
off = 0x20 * (pipe->mixer_stage - MDP4_MIXER_STAGE0);
blend_op = 0;
if (pipe->alpha_enable) /* ARGB */
blend_op = MDP4_BLEND_FG_ALPHA_FG_PIXEL |
MDP4_BLEND_BG_ALPHA_FG_PIXEL;
else
blend_op = (MDP4_BLEND_BG_ALPHA_BG_CONST |
MDP4_BLEND_FG_ALPHA_FG_CONST);
if (pipe->alpha_enable == 0) { /* not ARGB */
if (pipe->is_fg) {
outpdw(overlay_base + off + 0x108, pipe->alpha);
outpdw(overlay_base + off + 0x10c, 0xff - pipe->alpha);
} else {
outpdw(overlay_base + off + 0x108, 0xff - pipe->alpha);
outpdw(overlay_base + off + 0x10c, pipe->alpha);
}
}
if (pipe->transp != MDP_TRANSP_NOP) {
transp_color_key(pipe->src_format, pipe->transp, &c0, &c1, &c2);
if (pipe->is_fg) {
blend_op |= MDP4_BLEND_FG_TRANSP_EN; /* Fg blocked */
/* lower limit */
if (c0 > 0x10)
c0 -= 0x10;
if (c1 > 0x10)
c1 -= 0x10;
if (c2 > 0x10)
c2 -= 0x10;
outpdw(overlay_base + off + 0x110,
(c1 << 16 | c0));/* low */
outpdw(overlay_base + off + 0x114, c2);/* low */
/* upper limit */
if ((c0 + 0x20) < 0x0fff)
c0 += 0x20;
else
c0 = 0x0fff;
if ((c1 + 0x20) < 0x0fff)
c1 += 0x20;
else
c1 = 0x0fff;
if ((c2 + 0x20) < 0x0fff)
c2 += 0x20;
else
c2 = 0x0fff;
outpdw(overlay_base + off + 0x118,
(c1 << 16 | c0));/* high */
outpdw(overlay_base + off + 0x11c, c2);/* high */
} else {
blend_op |= MDP4_BLEND_BG_TRANSP_EN; /* bg blocked */
/* lower limit */
if (c0 > 0x10)
c0 -= 0x10;
if (c1 > 0x10)
c1 -= 0x10;
if (c2 > 0x10)
c2 -= 0x10;
outpdw(overlay_base + 0x180,
(c1 << 16 | c0));/* low */
outpdw(overlay_base + 0x184, c2);/* low */
/* upper limit */
if ((c0 + 0x20) < 0x0fff)
c0 += 0x20;
else
c0 = 0x0fff;
if ((c1 + 0x20) < 0x0fff)
c1 += 0x20;
else
c1 = 0x0fff;
if ((c2 + 0x20) < 0x0fff)
c2 += 0x20;
else
c2 = 0x0fff;
outpdw(overlay_base + 0x188,
(c1 << 16 | c0));/* high */
outpdw(overlay_base + 0x18c, c2);/* high */
}
}
outpdw(overlay_base + off + 0x104, blend_op);
}
void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all)
{
uint32 bits = 0;
if (pipe->mixer_num == MDP4_MIXER1)
bits |= 0x02;
else
bits |= 0x01;
if (all) {
if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
if (pipe->pipe_num == OVERLAY_PIPE_RGB2)
bits |= 0x20;
else
bits |= 0x10;
} else {
if (pipe->pipe_num == OVERLAY_PIPE_VG2)
bits |= 0x08;
else
bits |= 0x04;
}
}
outpdw(MDP_BASE + 0x18000, bits); /* MDP_OVERLAY_REG_FLUSH */
while (inpdw(MDP_BASE + 0x18000) & bits) /* self clear when complete */
;
}
struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx)
{
struct mdp4_overlay_pipe *pipe;
if (ndx <= 0 || ndx > MDP4_MAX_OVERLAY_PIPE)
return NULL;
pipe = &ctrl->plist[ndx - 1]; /* ndx start from 1 */
if (pipe->pipe_ndx == 0)
return NULL;
return pipe;
}
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(void)
{
int i;
struct mdp4_overlay_pipe *pipe;
pipe = &ctrl->plist[0];
for (i = 0; i < MDP4_MAX_OVERLAY_PIPE; i++) {
if (pipe->pipe_ndx == 0) {
pipe->pipe_ndx = i + 1; /* start from 1 */
init_completion(&pipe->comp);
printk(KERN_INFO "mdp4_overlay_pipe_alloc: pipe=%x ndx=%d\n",
(int)pipe, pipe->pipe_ndx);
return pipe;
}
pipe++;
}
return NULL;
}
void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe)
{
printk(KERN_INFO "mdp4_overlay_pipe_free: pipe=%x ndx=%d\n",
(int)pipe, pipe->pipe_ndx);
memset(pipe, 0, sizeof(*pipe));
}
static int get_pipe_num(int ptype, int stage)
{
if (ptype == OVERLAY_TYPE_RGB) {
if (stage == MDP4_MIXER_STAGE_BASE)
return OVERLAY_PIPE_RGB1;
else
return OVERLAY_PIPE_RGB2;
} else {
if (stage == MDP4_MIXER_STAGE0)
return OVERLAY_PIPE_VG1;
else
return OVERLAY_PIPE_VG2;
}
}
int mdp4_overlay_req_check(uint32 id, uint32 z_order, uint32 mixer)
{
struct mdp4_overlay_pipe *pipe;
pipe = ctrl->stage[mixer][z_order];
if (pipe == NULL)
return 0;
if (pipe->pipe_ndx == id) /* same req, recycle */
return 0;
return -EPERM;
}
static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer,
struct mdp4_overlay_pipe **ppipe)
{
struct mdp4_overlay_pipe *pipe;
int ret, ptype;
if (mixer >= MDP4_MAX_MIXER) {
printk(KERN_ERR "mpd_overlay_req2pipe: mixer out of range!\n");
return -ERANGE;
}
if (req->z_order < 0 || req->z_order > 2) {
printk(KERN_ERR "mpd_overlay_req2pipe: z_order=%d out of range!\n",
req->z_order);
return -ERANGE;
}
if (req->src_rect.h == 0 || req->src_rect.w == 0) {
printk(KERN_ERR "mpd_overlay_req2pipe: src img of zero size!\n");
return -EINVAL;
}
ret = mdp4_overlay_req_check(req->id, req->z_order, mixer);
if (ret < 0)
return ret;
ptype = mdp4_overlay_format2type(req->src.format);
if (ptype < 0)
return ptype;
if (req->id == MSMFB_NEW_REQUEST) /* new request */
pipe = mdp4_overlay_pipe_alloc();
else
pipe = mdp4_overlay_ndx2pipe(req->id);
if (pipe == NULL)
return -ENOMEM;
pipe->src_format = req->src.format;
ret = mdp4_overlay_format2pipe(pipe);
if (ret < 0)
return ret;
/*
* base layer == 1, reserved for frame buffer
* zorder 0 == stage 0 == 2
* zorder 1 == stage 1 == 3
* zorder 2 == stage 2 == 4
*/
if (req->id == MSMFB_NEW_REQUEST) { /* new request */
pipe->mixer_stage = req->z_order + MDP4_MIXER_STAGE0;
pipe->pipe_type = ptype;
pipe->pipe_num = get_pipe_num(ptype, pipe->mixer_stage);
printk(KERN_INFO "mpd4_overlay_req2pipe: zorder=%d pipe_num=%d\n",
req->z_order, pipe->pipe_num);
}
pipe->src_width = req->src.width & 0x07ff; /* source img width */
pipe->src_height = req->src.height & 0x07ff; /* source img height */
pipe->src_h = req->src_rect.h & 0x07ff;
pipe->src_w = req->src_rect.w & 0x07ff;
pipe->src_y = req->src_rect.y & 0x07ff;
pipe->src_x = req->src_rect.x & 0x07ff;
pipe->dst_h = req->dst_rect.h & 0x07ff;
pipe->dst_w = req->dst_rect.w & 0x07ff;
pipe->dst_y = req->dst_rect.y & 0x07ff;
pipe->dst_x = req->dst_rect.x & 0x07ff;
if (req->flags & MDP_FLIP_LR)
pipe->op_mode |= MDP4_OP_FLIP_LR;
if (req->flags & MDP_FLIP_UD)
pipe->op_mode |= MDP4_OP_FLIP_UD;
if (req->flags & MDP_DITHER)
pipe->op_mode |= MDP4_OP_DITHER_EN;
if (req->flags & MDP_DEINTERLACE)
pipe->op_mode |= MDP4_OP_DEINT_ODD_REF;
pipe->is_fg = req->is_fg;/* control alpha and color key */
pipe->alpha = req->alpha & 0x0ff;
pipe->transp = req->transp_mask;
*ppipe = pipe;
return 0;
}
int get_img(struct msmfb_data *img, struct fb_info *info,
unsigned long *start, unsigned long *len, struct file **pp_file)
{
int put_needed, ret = 0;
struct file *file;
#ifdef CONFIG_ANDROID_PMEM
unsigned long vstart;
#endif
#ifdef CONFIG_ANDROID_PMEM
if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file))
return 0;
#endif
file = fget_light(img->memory_id, &put_needed);
if (file == NULL)
return -1;
if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
*start = info->fix.smem_start;
*len = info->fix.smem_len;
*pp_file = file;
} else {
ret = -1;
fput_light(file, put_needed);
}
return ret;
}
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
{
struct mdp4_overlay_pipe *pipe;
pipe = mdp4_overlay_ndx2pipe(req->id);
if (pipe == NULL)
return -ENODEV;
*req = pipe->req_data;
return 0;
}
int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int ret, mixer;
struct mdp4_overlay_pipe *pipe;
int lcdc;
if (mfd == NULL)
return -ENODEV;
if (req->src.format == MDP_FB_FORMAT)
req->src.format = mfd->fb_imgType;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
mixer = info->node; /* minor number of char device */
ret = mdp4_overlay_req2pipe(req, mixer, &pipe);
if (ret < 0) {
mutex_unlock(&mfd->dma->ov_mutex);
return ret;
}
lcdc = inpdw(MDP_BASE + 0xc0000);
if (lcdc == 0) { /* mddi */
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
}
/* return id back to user */
req->id = pipe->pipe_ndx; /* pipe_ndx start from 1 */
pipe->req_data = *req; /* keep original req */
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
int mdp4_overlay_unset(struct fb_info *info, int ndx)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdp4_overlay_pipe *pipe;
int lcdc;
if (mfd == NULL)
return -ENODEV;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
pipe = mdp4_overlay_ndx2pipe(ndx);
if (pipe == NULL) {
mutex_unlock(&mfd->dma->ov_mutex);
return -ENODEV;
}
lcdc = inpdw(MDP_BASE + 0xc0000);
mdp4_mixer_stage_down(pipe);
if (lcdc == 0) { /* mddi */
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
if (lcdc) /* LCDC mode */
mdp4_overlay_reg_flush(pipe, 0);
mdp4_overlay_pipe_free(pipe);
if (lcdc == 0) { /* mddi */
mdp4_mddi_overlay_restore();
}
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
struct tile_desc {
uint32 width; /* tile's width */
uint32 height; /* tile's height */
uint32 row_tile_w; /* tiles per row's width */
uint32 row_tile_h; /* tiles per row's height */
};
void tile_samsung(struct tile_desc *tp)
{
/*
* each row of a samsung tile group consists of two tiles in height
* and two tiles in width, which means the width should align to
* 64 x 2 bytes and the height to 32 x 2 bytes. the video decoder
* generates two tiles in width but only one tile in height, so the
* height ends up aligned to 32 x 1 bytes.
*/
tp->width = 64; /* 64 bytes */
tp->row_tile_w = 2; /* 2 tiles per row's width */
tp->height = 32; /* 32 bytes */
tp->row_tile_h = 1; /* 1 tile per row's height */
}
uint32 tile_mem_size(struct mdp4_overlay_pipe *pipe, struct tile_desc *tp)
{
uint32 tile_w, tile_h;
uint32 row_num_w, row_num_h;
tile_w = tp->width * tp->row_tile_w;
tile_h = tp->height * tp->row_tile_h;
row_num_w = (pipe->src_width + tile_w - 1) / tile_w;
row_num_h = (pipe->src_height + tile_h - 1) / tile_h;
return row_num_w * row_num_h * tile_w * tile_h;
}
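/*
* Worked example: for a hypothetical 1280x720 source with the tile
* geometry above (tile_w = 64 * 2 = 128, tile_h = 32 * 1 = 32):
* row_num_w = (1280 + 127) / 128 = 10, row_num_h = (720 + 31) / 32 = 23,
* so tile_mem_size() returns 10 * 23 * 128 * 32 = 942080 bytes.
*/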
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req,
struct file **pp_src_file)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msmfb_data *img;
struct mdp4_overlay_pipe *pipe;
ulong start, addr;
ulong len = 0;
struct file *p_src_file = NULL;
int lcdc;
if (mfd == NULL)
return -ENODEV;
pipe = mdp4_overlay_ndx2pipe(req->id);
if (pipe == NULL)
return -ENODEV;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
img = &req->data;
get_img(img, info, &start, &len, &p_src_file);
if (len == 0) {
mutex_unlock(&mfd->dma->ov_mutex);
printk(KERN_ERR "mdp_overlay_play: could not retrieve"
" image from memory\n");
return -1;
}
*pp_src_file = p_src_file;
addr = start + img->offset;
pipe->srcp0_addr = addr;
pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
if (pipe->frame_format == MDP4_FRAME_FORMAT_VIDEO_SUPERTILE) {
struct tile_desc tile;
tile_samsung(&tile);
pipe->srcp1_addr = addr + tile_mem_size(pipe, &tile);
} else
pipe->srcp1_addr = addr +
pipe->src_width * pipe->src_height;
pipe->srcp0_ystride = pipe->src_width;
pipe->srcp1_ystride = pipe->src_width;
}
lcdc = inpdw(MDP_BASE + 0xc0000);
lcdc &= 0x01; /* LCDC mode */
if (pipe->pipe_type == OVERLAY_TYPE_VG)
mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
else
mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
mdp4_mixer_blend_setup(pipe);
mdp4_mixer_stage_up(pipe);
if (lcdc) { /* LCDC mode */
mdp4_overlay_reg_flush(pipe, 1);
}
if (lcdc) { /* LCDC mode */
if (pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) { /* done */
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
}
if (lcdc == 0) { /* MDDI mode */
#ifdef MDP4_NONBLOCKING
if (mfd->panel_power_on)
#else
if (!mfd->dma->busy && mfd->panel_power_on)
#endif
mdp4_mddi_overlay_kickoff(mfd, pipe);
}
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
| gpl-2.0 |
VentureROM-L/android_kernel_moto_shamu | drivers/staging/android/sw_sync.c | 1004 | 5966 | /*
* drivers/base/sw_sync.c
*
* Copyright (C) 2012 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/sw_sync.h>
static int sw_sync_cmp(u32 a, u32 b)
{
if (a == b)
return 0;
return ((s32)a - (s32)b) < 0 ? -1 : 1;
}
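/* The signed subtraction keeps ordering correct across u32 wraparound.
* A sketch of the behaviour: sw_sync_cmp(5, 3) returns 1, while
* sw_sync_cmp(0xfffffffe, 2) returns -1, so a point just before the
* wrap still compares as earlier than one just after it.
*/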
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
{
struct sw_sync_pt *pt;
pt = (struct sw_sync_pt *)
sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
pt->value = value;
return (struct sync_pt *)pt;
}
EXPORT_SYMBOL(sw_sync_pt_create);
static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
{
struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
struct sw_sync_timeline *obj =
(struct sw_sync_timeline *)sync_pt->parent;
return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
}
static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
{
struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
struct sw_sync_timeline *obj =
(struct sw_sync_timeline *)sync_pt->parent;
return sw_sync_cmp(obj->value, pt->value) >= 0;
}
static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
{
struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
return sw_sync_cmp(pt_a->value, pt_b->value);
}
static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
void *data, int size)
{
struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
if (size < sizeof(pt->value))
return -ENOMEM;
memcpy(data, &pt->value, sizeof(pt->value));
return sizeof(pt->value);
}
static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
char *str, int size)
{
struct sw_sync_timeline *timeline =
(struct sw_sync_timeline *)sync_timeline;
snprintf(str, size, "%d", timeline->value);
}
static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
char *str, int size)
{
struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
snprintf(str, size, "%d", pt->value);
}
static struct sync_timeline_ops sw_sync_timeline_ops = {
.driver_name = "sw_sync",
.dup = sw_sync_pt_dup,
.has_signaled = sw_sync_pt_has_signaled,
.compare = sw_sync_pt_compare,
.fill_driver_data = sw_sync_fill_driver_data,
.timeline_value_str = sw_sync_timeline_value_str,
.pt_value_str = sw_sync_pt_value_str,
};
struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
{
struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
sync_timeline_create(&sw_sync_timeline_ops,
sizeof(struct sw_sync_timeline),
name);
return obj;
}
EXPORT_SYMBOL(sw_sync_timeline_create);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
{
obj->value += inc;
sync_timeline_signal(&obj->obj);
}
EXPORT_SYMBOL(sw_sync_timeline_inc);
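/* Example in-kernel usage (a sketch, not taken from a real driver):
*
* struct sw_sync_timeline *tl = sw_sync_timeline_create("mydrv");
* struct sync_pt *pt = sw_sync_pt_create(tl, 1);
* ... wrap pt in a fence and hand it to a consumer ...
* sw_sync_timeline_inc(tl, 1); pt now reports signaled
*/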
#ifdef CONFIG_SW_SYNC_USER
/* *WARNING*
*
* improper use of this can result in deadlocking kernel drivers from userspace.
*/
/* opening sw_sync creates a new sync obj */
static int sw_sync_open(struct inode *inode, struct file *file)
{
struct sw_sync_timeline *obj;
char task_comm[TASK_COMM_LEN];
get_task_comm(task_comm, current);
obj = sw_sync_timeline_create(task_comm);
if (obj == NULL)
return -ENOMEM;
file->private_data = obj;
return 0;
}
static int sw_sync_release(struct inode *inode, struct file *file)
{
struct sw_sync_timeline *obj = file->private_data;
sync_timeline_destroy(&obj->obj);
return 0;
}
static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
{
int fd = get_unused_fd();
int err;
struct sync_pt *pt;
struct sync_fence *fence;
struct sw_sync_create_fence_data data;
if (fd < 0)
return fd;
if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
err = -EFAULT;
goto err;
}
pt = sw_sync_pt_create(obj, data.value);
if (pt == NULL) {
err = -ENOMEM;
goto err;
}
data.name[sizeof(data.name) - 1] = '\0';
fence = sync_fence_create(data.name, pt);
if (fence == NULL) {
sync_pt_free(pt);
err = -ENOMEM;
goto err;
}
data.fence = fd;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
sync_fence_put(fence);
err = -EFAULT;
goto err;
}
sync_fence_install(fence, fd);
return 0;
err:
put_unused_fd(fd);
return err;
}
static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
{
u32 value;
if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
return -EFAULT;
sw_sync_timeline_inc(obj, value);
return 0;
}
static long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct sw_sync_timeline *obj = file->private_data;
switch (cmd) {
case SW_SYNC_IOC_CREATE_FENCE:
return sw_sync_ioctl_create_fence(obj, arg);
case SW_SYNC_IOC_INC:
return sw_sync_ioctl_inc(obj, arg);
default:
return -ENOTTY;
}
}
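/* Example userspace usage (a sketch; assumes the ioctl ABI above and
* omits error handling):
*
* struct sw_sync_create_fence_data data = { .value = 1 };
* __u32 inc = 1;
* int tl = open("/dev/sw_sync", O_RDWR);
* strcpy(data.name, "example");
* ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data); data.fence is an fd
* ioctl(tl, SW_SYNC_IOC_INC, &inc); signals the fence created above
*/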
static const struct file_operations sw_sync_fops = {
.owner = THIS_MODULE,
.open = sw_sync_open,
.release = sw_sync_release,
.unlocked_ioctl = sw_sync_ioctl,
.compat_ioctl = sw_sync_ioctl,
};
static struct miscdevice sw_sync_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "sw_sync",
.fops = &sw_sync_fops,
};
static int __init sw_sync_device_init(void)
{
return misc_register(&sw_sync_dev);
}
static void __exit sw_sync_device_remove(void)
{
misc_deregister(&sw_sync_dev);
}
module_init(sw_sync_device_init);
module_exit(sw_sync_device_remove);
#endif /* CONFIG_SW_SYNC_USER */
| gpl-2.0 |
javelinanddart/ElementalX-m9 | net/rmnet_data/rmnet_data_main.c | 1260 | 1812 | /*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* RMNET Data generic framework
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include "rmnet_data_private.h"
#include "rmnet_data_config.h"
#include "rmnet_data_vnd.h"
/* ***************** Trace Points ******************************************* */
#define CREATE_TRACE_POINTS
#include "rmnet_data_trace.h"
/* ***************** Module Parameters ************************************** */
unsigned int rmnet_data_log_level = RMNET_LOG_LVL_ERR | RMNET_LOG_LVL_HI;
module_param(rmnet_data_log_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rmnet_data_log_level, "Logging level");
unsigned int rmnet_data_log_module_mask;
module_param(rmnet_data_log_module_mask, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rmnet_data_log_module_mask, "Logging module mask");
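/* Example (a sketch; the values are arbitrary): both knobs can be set
* at load time, e.g.
* insmod rmnet_data.ko rmnet_data_log_level=3 rmnet_data_log_module_mask=1
* or changed later via /sys/module/rmnet_data/parameters/.
*/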
/* ***************** Startup/Shutdown *************************************** */
/**
* rmnet_init() - Module initialization
*
* todo: check for (and init) startup errors
*/
static int __init rmnet_init(void)
{
rmnet_config_init();
rmnet_vnd_init();
LOGL("%s", "RMNET Data driver loaded successfully");
return 0;
}
static void __exit rmnet_exit(void)
{
rmnet_config_exit();
rmnet_vnd_exit();
}
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Jairus980/kernel_hltexx | drivers/usb/musb/cppi_dma.c | 1516 | 45227 | /*
* Copyright (C) 2005-2006 by Texas Instruments
*
* This file implements a DMA interface using TI's CPPI DMA.
* For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
* The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
/* CPPI DMA status 7-mar-2006:
*
* - See musb_{host,gadget}.c for more info
*
* - Correct RX DMA generally forces the engine into irq-per-packet mode,
* which can easily saturate the CPU under non-mass-storage loads.
*
* NOTES 24-aug-2006 (2.6.18-rc4):
*
* - peripheral RXDMA wedged in a test with packets of length 512/512/1.
* evidently after the 1 byte packet was received and acked, the queue
* of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
* and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
* 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
* of its next (512 byte) packet. IRQ issues?
*
* REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
* evidently also directly update the RX and TX CSRs ... so audit all
* host and peripheral side DMA code to avoid CSR access after DMA has
* been started.
*/
/* REVISIT now we can avoid preallocating these descriptors; or
* more simply, switch to a global freelist not per-channel ones.
* Note: at full speed, 64 descriptors == 4K bulk data.
*/
#define NUM_TXCHAN_BD 64
#define NUM_RXCHAN_BD 64
static inline void cpu_drain_writebuffer(void)
{
wmb();
#ifdef CONFIG_CPU_ARM926T
/* REVISIT this "should not be needed",
* but lack of it sure seemed to hurt ...
*/
asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
struct cppi_descriptor *bd = c->freelist;
if (bd)
c->freelist = bd->next;
return bd;
}
static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
if (!bd)
return;
bd->next = c->freelist;
c->freelist = bd;
}
/*
* Start DMA controller
*
* Initialize the DMA controller as necessary.
*/
/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
musb_writel(&rx->rx_skipbytes, 0, 0);
musb_writel(&rx->rx_head, 0, 0);
musb_writel(&rx->rx_sop, 0, 0);
musb_writel(&rx->rx_current, 0, 0);
musb_writel(&rx->rx_buf_current, 0, 0);
musb_writel(&rx->rx_len_len, 0, 0);
musb_writel(&rx->rx_cnt_cnt, 0, 0);
}
/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
musb_writel(&tx->tx_head, 0, 0);
musb_writel(&tx->tx_buf, 0, 0);
musb_writel(&tx->tx_current, 0, 0);
musb_writel(&tx->tx_buf_current, 0, 0);
musb_writel(&tx->tx_info, 0, 0);
musb_writel(&tx->tx_rem_len, 0, 0);
/* musb_writel(&tx->tx_dummy, 0, 0); */
musb_writel(&tx->tx_complete, 0, ptr);
}
static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
int j;
/* initialize channel fields */
c->head = NULL;
c->tail = NULL;
c->last_processed = NULL;
c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = cppi;
c->is_rndis = 0;
c->freelist = NULL;
/* build the BD Free list for the channel */
for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
struct cppi_descriptor *bd;
dma_addr_t dma;
bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
bd->dma = dma;
cppi_bd_free(c, bd);
}
}
static int cppi_channel_abort(struct dma_channel *);
static void cppi_pool_free(struct cppi_channel *c)
{
struct cppi *cppi = c->controller;
struct cppi_descriptor *bd;
(void) cppi_channel_abort(&c->channel);
c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = NULL;
/* free all its bds */
bd = c->last_processed;
do {
if (bd)
dma_pool_free(cppi->pool, bd, bd->dma);
bd = cppi_bd_alloc(c);
} while (bd);
c->last_processed = NULL;
}
static int __init cppi_controller_start(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
int i;
controller = container_of(c, struct cppi, controller);
/* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
controller->tx[i].transmit = true;
controller->tx[i].index = i;
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
controller->rx[i].transmit = false;
controller->rx[i].index = i;
}
/* setup BD list on a per channel basis */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
cppi_pool_init(controller, controller->tx + i);
for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
cppi_pool_init(controller, controller->rx + i);
tibase = controller->tibase;
INIT_LIST_HEAD(&controller->tx_complete);
/* initialise tx/rx channel head pointers to zero */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
struct cppi_channel *tx_ch = controller->tx + i;
struct cppi_tx_stateram __iomem *tx;
INIT_LIST_HEAD(&tx_ch->tx_complete);
tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
tx_ch->state_ram = tx;
cppi_reset_tx(tx, 0);
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
struct cppi_channel *rx_ch = controller->rx + i;
struct cppi_rx_stateram __iomem *rx;
INIT_LIST_HEAD(&rx_ch->tx_complete);
rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
rx_ch->state_ram = rx;
cppi_reset_rx(rx);
}
/* enable individual cppi channels */
musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
/* enable tx/rx CPPI control */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
/* disable RNDIS mode, also host rx RNDIS autorequest */
musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
return 0;
}
/*
* Stop DMA controller
*
* De-Init the DMA controller as necessary.
*/
static int cppi_controller_stop(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
int i;
struct musb *musb;
controller = container_of(c, struct cppi, controller);
musb = controller->musb;
tibase = controller->tibase;
/* DISABLE INDIVIDUAL CHANNEL Interrupts */
musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].last_processed = NULL;
cppi_pool_free(controller->tx + i);
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
cppi_pool_free(controller->rx + i);
/* In the Tx case proper teardown is supported. We resort to disabling
* Tx/Rx CPPI only after cleanup of the Tx channels, since TX CPPI
* cannot be disabled before TX teardown is complete.
*/
/* disable tx/rx cppi */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
return 0;
}
/* While dma channel is allocated, we only want the core irqs active
* for fault reports, otherwise we'd get irqs that we don't care about.
* Except for TX irqs, where dma done != fifo empty and reusable ...
*
* NOTE: docs don't say either way, but irq masking **enables** irqs.
*
* REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
*/
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}
static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
/*
* Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
* each transfer direction of a non-control endpoint, so allocating
* (and deallocating) is mostly a way to notice bad housekeeping on
* the software side. We assume the irqs are always active.
*/
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *ep, u8 transmit)
{
struct cppi *controller;
u8 index;
struct cppi_channel *cppi_ch;
void __iomem *tibase;
struct musb *musb;
controller = container_of(c, struct cppi, controller);
tibase = controller->tibase;
musb = controller->musb;
/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
index = ep->epnum - 1;
/* return the corresponding CPPI Channel Handle, and
* probably disable the non-CPPI irq until we need it.
*/
if (transmit) {
if (index >= ARRAY_SIZE(controller->tx)) {
dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
return NULL;
}
cppi_ch = controller->tx + index;
} else {
if (index >= ARRAY_SIZE(controller->rx)) {
dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
return NULL;
}
cppi_ch = controller->rx + index;
core_rxirq_disable(tibase, ep->epnum);
}
/* REVISIT make this an error later once the same driver code works
* with the other DMA engine too
*/
if (cppi_ch->hw_ep)
dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
cppi_ch->channel.max_len = 0x7fffffff;
dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
}
/* Release a CPPI Channel. */
static void cppi_channel_release(struct dma_channel *channel)
{
struct cppi_channel *c;
void __iomem *tibase;
/* REVISIT: for paranoia, check state and abort if needed... */
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
dev_dbg(c->controller->musb->controller,
"releasing idle DMA channel %p\n", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
/* for now, leave its cppi IRQ enabled (we won't trigger it) */
c->hw_ep = NULL;
channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
void __iomem *base = c->controller->mregs;
struct cppi_rx_stateram __iomem *rx = c->state_ram;
musb_ep_select(base, c->index + 1);
dev_dbg(c->controller->musb->controller,
"RX DMA%d%s: %d left, csr %04x, "
"%08x H%08x S%08x C%08x, "
"B%08x L%08x %08x .. %08x"
"\n",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
musb_readw(c->hw_ep->regs, MUSB_RXCSR),
musb_readl(&rx->rx_skipbytes, 0),
musb_readl(&rx->rx_head, 0),
musb_readl(&rx->rx_sop, 0),
musb_readl(&rx->rx_current, 0),
musb_readl(&rx->rx_buf_current, 0),
musb_readl(&rx->rx_len_len, 0),
musb_readl(&rx->rx_cnt_cnt, 0),
musb_readl(&rx->rx_complete, 0)
);
}
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
void __iomem *base = c->controller->mregs;
struct cppi_tx_stateram __iomem *tx = c->state_ram;
musb_ep_select(base, c->index + 1);
dev_dbg(c->controller->musb->controller,
"TX DMA%d%s: csr %04x, "
"H%08x S%08x C%08x %08x, "
"F%08x L%08x .. %08x"
"\n",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
musb_readl(&tx->tx_head, 0),
musb_readl(&tx->tx_buf, 0),
musb_readl(&tx->tx_current, 0),
musb_readl(&tx->tx_buf_current, 0),
musb_readl(&tx->tx_info, 0),
musb_readl(&tx->tx_rem_len, 0),
/* dummy/unused word 6 */
musb_readl(&tx->tx_complete, 0)
);
}
/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
void __iomem *tibase, int is_rndis)
{
/* we may need to change the rndis flag for this cppi channel */
if (c->is_rndis != is_rndis) {
u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
u32 temp = 1 << (c->index);
if (is_rx)
temp <<= 16;
if (is_rndis)
value |= temp;
else
value &= ~temp;
musb_writel(tibase, DAVINCI_RNDIS_REG, value);
c->is_rndis = is_rndis;
}
}
#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
pr_debug("RXBD/%s %08x: "
"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
tag, bd->dma,
bd->hw_next, bd->hw_bufp, bd->hw_off_len,
bd->hw_options);
}
#endif
static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
struct cppi_descriptor *bd;
if (!_dbg_level(level))
return;
cppi_dump_rx(level, rx, tag);
if (rx->last_processed)
cppi_dump_rxbd("last", rx->last_processed);
for (bd = rx->head; bd; bd = bd->next)
cppi_dump_rxbd("active", bd);
#endif
}
/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
* so we won't ever use it (see "CPPI RX Woes" below).
*/
static inline int cppi_autoreq_update(struct cppi_channel *rx,
void __iomem *tibase, int onepacket, unsigned n_bds)
{
u32 val;
#ifdef RNDIS_RX_IS_USABLE
u32 tmp;
/* assert(is_host_active(musb)) */
/* start from "AutoReq never" */
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
val = tmp & ~((0x3) << (rx->index * 2));
/* HCD arranged reqpkt for packet #1. we arrange int
* for all but the last one, maybe in two segments.
*/
if (!onepacket) {
#if 0
/* use two segments, autoreq "all" then the last "never" */
val |= ((0x3) << (rx->index * 2));
n_bds--;
#else
/* one segment, autoreq "all-but-last" */
val |= ((0x1) << (rx->index * 2));
#endif
}
if (val != tmp) {
int n = 100;
/* make sure that autoreq is updated before continuing */
musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
do {
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
if (tmp == val)
break;
cpu_relax();
} while (n-- > 0);
}
#endif
/* REQPKT is turned off after each segment */
if (n_bds && rx->channel.actual_len) {
void __iomem *regs = rx->hw_ep->regs;
val = musb_readw(regs, MUSB_RXCSR);
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
/* flush writebuffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
return n_bds;
}
/* Buffer enqueuing Logic:
*
* - RX builds new queues each time, to help handle routine "early
* termination" cases (faults, including errors and short reads)
* more correctly.
*
* - for now, TX reuses the same queue of BDs every time
*
* REVISIT long term, we want a normal dynamic model.
* ... the goal will be to append to the
* existing queue, processing completed "dma buffers" (segments) on the fly.
*
* Otherwise we force an IRQ latency between requests, which slows us a lot
* (especially in "transparent" dma). Unfortunately that model seems to be
* inherent in the DMA model from the Mentor code, except in the rare case
* of transfers big enough (~128+ KB) that we could append "middle" segments
* in the TX paths. (RX can't do this, see below.)
*
* That's true even in the CPPI- friendly iso case, where most urbs have
* several small segments provided in a group and where the "packet at a time"
* "transparent" DMA model is always correct, even on the RX side.
*/
/*
* CPPI TX:
* ========
* TX is a lot more reasonable than RX; it doesn't need to run in
* irq-per-packet mode very often. RNDIS mode seems to behave well too
* (except for how it handles the exactly-N-packets case). Building a
* txdma queue with multiple requests (urb or usb_request) looks
* like it would work ... but fault handling would need much testing.
*
* The main issue with TX mode RNDIS relates to transfer lengths that
* are an exact multiple of the packet length. It appears that there's
* a hiccup in that case (maybe the DMA completes before the ZLP gets
* written?) boiling down to not being able to rely on CPPI writing any
* terminating zero length packet before the next transfer is written.
* So that's punted to PIO; better yet, gadget drivers can avoid it.
*
* Plus, there's allegedly an undocumented constraint that rndis transfer
* length be a multiple of 64 bytes ... but the chip doesn't act that
* way, and we really don't _want_ that behavior anyway.
*
* On TX, "transparent" mode works ... although experiments have shown
* problems trying to use the SOP/EOP bits in different USB packets.
*
* REVISIT try to handle terminating zero length packets using CPPI
* instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
* links avoid that issue by forcing them to avoid zlps.)
*/
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
unsigned maxpacket = tx->maxpacket;
dma_addr_t addr = tx->buf_dma + tx->offset;
size_t length = tx->buf_len - tx->offset;
struct cppi_descriptor *bd;
unsigned n_bds;
unsigned i;
struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
int rndis;
/* TX can use the CPPI "rndis" mode, where we can probably fit this
* transfer in one BD and one IRQ. The only time we would NOT want
* to use it is when hardware constraints prevent it, or if we'd
* trigger the "send a ZLP?" confusion.
*/
rndis = (maxpacket & 0x3f) == 0
&& length > maxpacket
&& length < 0xffff
&& (length % maxpacket) != 0;
if (rndis) {
maxpacket = length;
n_bds = 1;
} else {
n_bds = length / maxpacket;
if (!length || (length % maxpacket))
n_bds++;
n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
length = min(n_bds * maxpacket, length);
}
dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
n_bds,
(unsigned long long)addr, length);
cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
/* assuming here that channel_program is called during
* transfer initiation ... current code maintains state
* for one outstanding request only (no queues, not even
* the implicit ones of an iso urb).
*/
bd = tx->freelist;
tx->head = bd;
tx->last_processed = NULL;
/* FIXME use BD pool like RX side does, and just queue
* the minimum number for this request.
*/
/* Prepare queue of BDs first, then hand it to hardware.
* All BDs except maybe the last should be of full packet
* size; for RNDIS there _is_ only that last packet.
*/
for (i = 0; i < n_bds; ) {
if (++i < n_bds && bd->next)
bd->hw_next = bd->next->dma;
else
bd->hw_next = 0;
bd->hw_bufp = tx->buf_dma + tx->offset;
/* FIXME set EOP only on the last packet,
* SOP only on the first ... avoid IRQs
*/
if ((tx->offset + maxpacket) <= tx->buf_len) {
tx->offset += maxpacket;
bd->hw_off_len = maxpacket;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | maxpacket;
} else {
/* only this one may be a partial USB Packet */
u32 partial_len;
partial_len = tx->buf_len - tx->offset;
tx->offset = tx->buf_len;
bd->hw_off_len = partial_len;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | partial_len;
if (partial_len == 0)
bd->hw_options |= CPPI_ZERO_SET;
}
dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
/* update the last BD enqueued to the list */
tx->tail = bd;
bd = bd->next;
}
/* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer();
/* Write to the HeadPtr in state RAM to trigger */
musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
cppi_dump_tx(5, tx, "/S");
}
/*
* CPPI RX Woes:
* =============
* Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
* packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
* (Full speed transfers have similar scenarios.)
*
* The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
* and the next packet goes into a buffer that's queued later; while (b) fills
* the buffer with 1024 bytes. How to do that with CPPI?
*
* - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
* (b) loses **BADLY** because nothing (!) happens when that second packet
* fills the buffer, much less when a third one arrives. (Which makes this
* not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
* is optional, and it's fine if peripherals -- not hosts! -- pad messages
* out to end-of-buffer. Standard PCI host controller DMA descriptors
* implement that mode by default ... which is no accident.)
*
* - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
* converse problems: (b) is handled right, but (a) loses badly. CPPI RX
* ignores SOP/EOP markings and processes both of those BDs; so both packets
* are loaded into the buffer (with a 212 byte gap between them), and the next
* buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
* are intended as outputs for RX queues, not inputs...)
*
* - A variant of "transparent" mode -- one BD at a time -- is the only way to
* reliably make both cases work, with software handling both cases correctly
* and at the significant penalty of needing an IRQ per packet. (The lack of
* I/O overlap can be slightly ameliorated by enabling double buffering.)
*
* So how to get rid of IRQ-per-packet? The transparent multi-BD case could
* be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
* (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
* with guaranteed driver level fault recovery and scrubbing out what's left
* of that garbaged datastream.
*
* But there seems to be no way to identify the cases where CPPI RNDIS mode
* is appropriate -- which do NOT include RNDIS host drivers, but do include
* the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
* So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
* that applies best on the peripheral side (and which could fail rudely).
*
* Leaving only "transparent" mode; we avoid multi-bd modes in almost all
* cases other than mass storage class. Otherwise we're correct but slow,
* since CPPI penalizes our need for a "true RNDIS" default mode.
*/
/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
*
* IFF
* (a) peripheral mode ... since rndis peripherals could pad their
* writes to hosts, causing i/o failure; or we'd have to cope with
* a largely unknowable variety of host side protocol variants
* (b) and short reads are NOT errors ... since full reads would
* cause those same i/o failures
* (c) and read length is
* - less than 64KB (max per cppi descriptor)
* - not a multiple of 4096 (g_zero default, full reads typical)
* - N (>1) packets long, ditto (full reads not EXPECTED)
* THEN
* try rx rndis mode
*
* Cost of heuristic failing: RXDMA wedges at the end of transfers that
* fill out the whole buffer. Buggy host side usb network drivers could
* trigger that, but "in the field" such bugs seem to be all but unknown.
*
* So this module parameter lets the heuristic be disabled. When using
* gadgetfs, the heuristic will probably need to be disabled.
*/
static bool cppi_rx_rndis = true;
module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
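/* Worked example of the heuristic (numbers hypothetical): with
* maxpacket = 512, a peripheral-mode read of 3072 bytes qualifies --
* longer than one packet, under 64 KB, not a multiple of 4096, and an
* exact multiple of 512 -- so the whole transfer fits one RNDIS-mode
* BD. A 4096 byte read fails the not-a-multiple-of-4096 test and
* stays in irq-per-packet "transparent" mode.
*/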
/**
* cppi_next_rx_segment - dma read for the next chunk of a buffer
* @musb: the controller
* @rx: dma channel
* @onepacket: true unless caller treats short reads as errors, and
* performs fault recovery above usbcore.
* Context: controller irqlocked
*
* See above notes about why we can't use multi-BD RX queues except in
* rare cases (mass storage class), and can never use the hardware "rndis"
* mode (since it's not a "true" RNDIS mode) with complete safety.
*
* It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
* code to recover from corrupted datastreams after each short transfer.
*/
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
unsigned maxpacket = rx->maxpacket;
dma_addr_t addr = rx->buf_dma + rx->offset;
size_t length = rx->buf_len - rx->offset;
struct cppi_descriptor *bd, *tail;
unsigned n_bds;
unsigned i;
void __iomem *tibase = musb->ctrl_base;
int is_rndis = 0;
struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
if (onepacket) {
/* almost every USB driver, host or peripheral side */
n_bds = 1;
/* maybe apply the heuristic above */
if (cppi_rx_rndis
&& is_peripheral_active(musb)
&& length > maxpacket
&& (length & ~0xffff) == 0
&& (length & 0x0fff) != 0
&& (length & (maxpacket - 1)) == 0) {
maxpacket = length;
is_rndis = 1;
}
} else {
/* virtually nothing except mass storage class */
if (length > 0xffff) {
n_bds = 0xffff / maxpacket;
length = n_bds * maxpacket;
} else {
n_bds = length / maxpacket;
if (length % maxpacket)
n_bds++;
}
if (n_bds == 1)
onepacket = 1;
else
n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
}
/* In host mode, autorequest logic can generate some IN tokens; it's
* tricky since we can't leave REQPKT set in RXCSR after the transfer
* finishes. So: multipacket transfers involve two or more segments.
* And always at least two IRQs ... RNDIS mode is not an option.
*/
if (is_host_active(musb))
n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
length = min(n_bds * maxpacket, length);
dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
"dma 0x%llx len %u %u/%u\n",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
: "multipacket",
n_bds,
musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff,
(unsigned long long)addr, length,
rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets
*/
bd = cppi_bd_alloc(rx);
rx->head = bd;
/* Build BDs for all packets in this segment */
for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
u32 bd_len;
if (i) {
bd = cppi_bd_alloc(rx);
if (!bd)
break;
tail->next = bd;
tail->hw_next = bd->dma;
}
bd->hw_next = 0;
/* all but the last packet will be maxpacket size */
if (maxpacket < length)
bd_len = maxpacket;
else
bd_len = length;
bd->hw_bufp = addr;
addr += bd_len;
rx->offset += bd_len;
bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
bd->buflen = bd_len;
bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
length -= bd_len;
}
/* we always expect at least one reusable BD! */
if (!tail) {
WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
return;
} else if (i < n_bds)
WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
tail->next = NULL;
tail->hw_next = 0;
bd = rx->head;
rx->tail = tail;
/* short reads and other faults should terminate this entire
* dma segment. we want one "dma packet" per dma segment, not
* one per USB packet, terminating the whole queue at once...
* NOTE that current hardware seems to ignore SOP and EOP.
*/
bd->hw_options |= CPPI_SOP_SET;
tail->hw_options |= CPPI_EOP_SET;
#ifdef CONFIG_USB_MUSB_DEBUG
if (_dbg_level(5)) {
struct cppi_descriptor *d;
for (d = rx->head; d; d = d->next)
cppi_dump_rxbd("S", d);
}
#endif
/* in case the preceding transfer left some state... */
tail = rx->last_processed;
if (tail) {
tail->next = bd;
tail->hw_next = bd->dma;
}
core_rxirq_enable(tibase, rx->index + 1);
/* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer();
/* REVISIT specs say to write this AFTER the BUFCNT register
* below ... but that loses badly.
*/
musb_writel(&rx_ram->rx_head, 0, bd->dma);
/* bufferCount must be at least 3, and zeroes on completion
* unless it underflows below zero, or stops at two, or keeps
* growing ... grr.
*/
i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (!i)
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2);
else if (n_bds > (i - 3))
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds - (i - 3));
i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (i < (2 + n_bds)) {
dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
rx->index, i, n_bds);
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2);
}
cppi_dump_rx(4, rx, "/S");
}
/**
* cppi_channel_program - program channel for data transfer
* @ch: the channel
* @maxpacket: max packet size
* @mode: For RX, 1 unless the usb protocol driver promised to treat
* all short reads as errors and kick in high level fault recovery.
* For TX, ignored because of RNDIS mode races/glitches.
* @dma_addr: dma address of buffer
* @len: length of buffer
* Context: controller irqlocked
*/
static int cppi_channel_program(struct dma_channel *ch,
u16 maxpacket, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct cppi_channel *cppi_ch;
struct cppi *controller;
struct musb *musb;
cppi_ch = container_of(ch, struct cppi_channel, channel);
controller = cppi_ch->controller;
musb = controller->musb;
switch (ch->status) {
case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT:
/* fault irq handler should have handled cleanup */
WARNING("%cX DMA%d not cleaned up after abort!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_BUSY:
WARNING("program active channel? %cX DMA%d\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_UNKNOWN:
dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* FALLTHROUGH */
case MUSB_DMA_STATUS_FREE:
break;
}
ch->status = MUSB_DMA_STATUS_BUSY;
/* set transfer parameters, then queue up its first segment */
cppi_ch->buf_dma = dma_addr;
cppi_ch->offset = 0;
cppi_ch->maxpacket = maxpacket;
cppi_ch->buf_len = len;
cppi_ch->channel.actual_len = 0;
/* TX channel? or RX? */
if (cppi_ch->transmit)
cppi_next_tx_segment(musb, cppi_ch);
else
cppi_next_rx_segment(musb, cppi_ch, mode);
return true;
}
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
struct cppi_channel *rx = &cppi->rx[ch];
struct cppi_rx_stateram __iomem *state = rx->state_ram;
struct cppi_descriptor *bd;
struct cppi_descriptor *last = rx->last_processed;
bool completed = false;
bool acked = false;
int i;
dma_addr_t safe2ack;
void __iomem *regs = rx->hw_ep->regs;
struct musb *musb = cppi->musb;
cppi_dump_rx(6, rx, "/K");
bd = last ? last->next : rx->head;
if (!bd)
return false;
/* run through all completed BDs */
for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
i++, bd = bd->next) {
u16 len;
/* catch latest BD writes from CPPI */
rmb();
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
"off.len %08x opt.len %08x (%d)\n",
(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
/* actual packet received length */
if ((bd->hw_options & CPPI_SOP_SET) && !completed)
len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
else
len = 0;
if (bd->hw_options & CPPI_EOQ_MASK)
completed = true;
if (!completed && len < bd->buflen) {
/* NOTE: when we get a short packet, RXCSR_H_REQPKT
* must have been cleared, and no more DMA packets may be
* active in the queue... TI docs didn't say, but
* CPPI ignores those BDs even though OWN is still set.
*/
completed = true;
dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
len, bd->buflen,
rx->channel.actual_len);
}
/* If we got here, we expect to ack at least one BD; meanwhile
* CPPI may be completing other BDs while we scan this list...
*
* RACE: we can notice OWN cleared before CPPI raises the
* matching irq by writing that BD as the completion pointer.
* In such cases, stop scanning and wait for the irq, avoiding
* lost acks and states where BD ownership is unclear.
*/
if (bd->dma == safe2ack) {
musb_writel(&state->rx_complete, 0, safe2ack);
safe2ack = musb_readl(&state->rx_complete, 0);
acked = true;
if (bd->dma == safe2ack)
safe2ack = 0;
}
rx->channel.actual_len += len;
cppi_bd_free(rx, last);
last = bd;
/* stop scanning on end-of-segment */
if (bd->hw_next == 0)
completed = true;
}
rx->last_processed = last;
/* dma abort, lost ack, or ... */
if (!acked && last) {
int csr;
if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
musb_writel(&state->rx_complete, 0, safe2ack);
if (safe2ack == 0) {
cppi_bd_free(rx, last);
rx->last_processed = NULL;
/* if we land here on the host side, H_REQPKT will
* be clear and we need to restart the queue...
*/
WARN_ON(rx->head);
}
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
rx->index,
rx->head, rx->tail,
rx->last_processed
? (unsigned long long)
rx->last_processed->dma
: 0,
completed ? ", completed" : "",
csr);
cppi_dump_rxq(4, "/what?", rx);
}
}
if (!completed) {
int csr;
rx->head = bd;
/* REVISIT seems like "autoreq all but EOP" doesn't...
* setting it here "should" be racey, but seems to work
*/
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
if (is_host_active(cppi->musb)
&& bd
&& !(csr & MUSB_RXCSR_H_REQPKT)) {
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(regs, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | csr);
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
}
} else {
rx->head = NULL;
rx->tail = NULL;
}
cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
return completed;
}
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
struct musb *musb = dev_id;
struct cppi *cppi;
void __iomem *tibase;
struct musb_hw_ep *hw_ep = NULL;
u32 rx, tx;
int i, index;
unsigned long uninitialized_var(flags);
cppi = container_of(musb->dma_controller, struct cppi, controller);
if (cppi->irq)
spin_lock_irqsave(&musb->lock, flags);
tibase = musb->ctrl_base;
tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
if (!tx && !rx) {
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_NONE;
}
dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
struct cppi_channel *tx_ch;
struct cppi_tx_stateram __iomem *tx_ram;
bool completed = false;
struct cppi_descriptor *bd;
if (!(tx & 1))
continue;
tx_ch = cppi->tx + index;
tx_ram = tx_ch->state_ram;
/* FIXME need a cppi_tx_scan() routine, which
* can also be called from abort code
*/
cppi_dump_tx(5, tx_ch, "/E");
bd = tx_ch->head;
/*
* If head is NULL, this could mean there is an abort interrupt
* that needs to be acknowledged.
*/
if (NULL == bd) {
dev_dbg(musb->controller, "null BD\n");
musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
/* run through all completed BDs */
for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
i++, bd = bd->next) {
u16 len;
/* catch latest BD writes from CPPI */
rmb();
if (bd->hw_options & CPPI_OWN_SET)
break;
dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
tx_ch->channel.actual_len += len;
tx_ch->last_processed = bd;
/* write completion register to acknowledge
* processing of completed BDs, and possibly
* release the IRQ; EOQ might not be set ...
*
* REVISIT use the same ack strategy as rx
*
* REVISIT have observed bit 18 set; huh??
*/
/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
musb_writel(&tx_ram->tx_complete, 0, bd->dma);
/* stop scanning on end-of-segment */
if (bd->hw_next == 0)
completed = true;
}
/* on end of segment, maybe go to next one */
if (completed) {
/* cppi_dump_tx(4, tx_ch, "/complete"); */
/* transfer more, or report completion */
if (tx_ch->offset >= tx_ch->buf_len) {
tx_ch->head = NULL;
tx_ch->tail = NULL;
tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = tx_ch->hw_ep;
musb_dma_completion(musb, index + 1, 1);
} else {
/* Bigger transfer than we could fit in
* that first batch of descriptors...
*/
cppi_next_tx_segment(musb, tx_ch);
}
} else
tx_ch->head = bd;
}
/* Start processing the RX block */
for (index = 0; rx; rx = rx >> 1, index++) {
if (rx & 1) {
struct cppi_channel *rx_ch;
rx_ch = cppi->rx + index;
/* let incomplete dma segments finish */
if (!cppi_rx_scan(cppi, index))
continue;
/* start another dma segment if needed */
if (rx_ch->channel.actual_len != rx_ch->buf_len
&& rx_ch->channel.actual_len
== rx_ch->offset) {
cppi_next_rx_segment(musb, rx_ch, 1);
continue;
}
/* all segments completed! */
rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = rx_ch->hw_ep;
core_rxirq_disable(tibase, index + 1);
musb_dma_completion(musb, index + 1, 0);
}
}
/* write to CPPI EOI register to re-enable interrupts */
musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cppi_interrupt);
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
struct cppi *controller;
struct device *dev = musb->controller;
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq_byname(pdev, "dma");
controller = kzalloc(sizeof *controller, GFP_KERNEL);
if (!controller)
return NULL;
controller->mregs = mregs;
controller->tibase = mregs - DAVINCI_BASE_OFFSET;
controller->musb = musb;
controller->controller.start = cppi_controller_start;
controller->controller.stop = cppi_controller_stop;
controller->controller.channel_alloc = cppi_channel_allocate;
controller->controller.channel_release = cppi_channel_release;
controller->controller.channel_program = cppi_channel_program;
controller->controller.channel_abort = cppi_channel_abort;
/* NOTE: allocating from on-chip SRAM would give the least
* contention for memory access, if that ever matters here.
*/
/* setup BufferPool */
controller->pool = dma_pool_create("cppi",
controller->musb->controller,
sizeof(struct cppi_descriptor),
CPPI_DESCRIPTOR_ALIGN, 0);
if (!controller->pool) {
kfree(controller);
return NULL;
}
if (irq > 0) {
if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
return NULL;
}
controller->irq = irq;
}
return &controller->controller;
}
/*
* Destroy a previously-instantiated DMA controller.
*/
void dma_controller_destroy(struct dma_controller *c)
{
struct cppi *cppi;
cppi = container_of(c, struct cppi, controller);
if (cppi->irq)
free_irq(cppi->irq, cppi->musb);
/* assert: caller stopped the controller first */
dma_pool_destroy(cppi->pool);
kfree(cppi);
}
/*
* Context: controller irqlocked, endpoint selected
*/
static int cppi_channel_abort(struct dma_channel *channel)
{
struct cppi_channel *cppi_ch;
struct cppi *controller;
void __iomem *mbase;
void __iomem *tibase;
void __iomem *regs;
u32 value;
struct cppi_descriptor *queue;
cppi_ch = container_of(channel, struct cppi_channel, channel);
controller = cppi_ch->controller;
switch (channel->status) {
case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT:
/* from RX or TX fault irq handler */
case MUSB_DMA_STATUS_BUSY:
/* the hardware needs shutting down */
regs = cppi_ch->hw_ep->regs;
break;
case MUSB_DMA_STATUS_UNKNOWN:
case MUSB_DMA_STATUS_FREE:
return 0;
default:
return -EINVAL;
}
if (!cppi_ch->transmit && cppi_ch->head)
cppi_dump_rxq(3, "/abort", cppi_ch);
mbase = controller->mregs;
tibase = controller->tibase;
queue = cppi_ch->head;
cppi_ch->head = NULL;
cppi_ch->tail = NULL;
/* REVISIT should rely on caller having done this,
* and caller should rely on us not changing it.
* peripheral code is safe ... check host too.
*/
musb_ep_select(mbase, cppi_ch->index + 1);
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
/* teardown DMA engine then usb core */
do {
value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
} while (!(value & CPPI_TEAR_READY));
musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
tx_ram = cppi_ch->state_ram;
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
* an appropriate status code.
*/
value = musb_readw(regs, MUSB_TXCSR);
value &= ~MUSB_TXCSR_DMAENAB;
value |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
/*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
* 2. Wait for abort interrupt and then put the channel in
* compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
cppi_ch->head = NULL;
musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
* as the RX side ... this does no cleanup at all!
*/
} else /* RX */ {
u16 csr;
/* NOTE: docs don't guarantee any of this works ... we
* expect that if the usb core stops telling the cppi core
* to pull more data from it, then it'll be safe to flush
* current RX DMA state iff any pending fifo transfer is done.
*/
core_rxirq_disable(tibase, cppi_ch->index + 1);
/* for host, ensure ReqPkt is never set again */
if (is_host_active(cppi_ch->controller->musb)) {
value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
value &= ~((0x3) << (cppi_ch->index * 2));
musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
}
csr = musb_readw(regs, MUSB_RXCSR);
/* for host, clear (just) ReqPkt at end of current packet(s) */
if (is_host_active(cppi_ch->controller->musb)) {
csr |= MUSB_RXCSR_H_WZC_BITS;
csr &= ~MUSB_RXCSR_H_REQPKT;
} else
csr |= MUSB_RXCSR_P_WZC_BITS;
/* clear dma enable */
csr &= ~(MUSB_RXCSR_DMAENAB);
musb_writew(regs, MUSB_RXCSR, csr);
csr = musb_readw(regs, MUSB_RXCSR);
/* Quiesce: wait for current dma to finish (if not cleanup).
* We can't use bit zero of stateram->rx_sop, since that
* refers to an entire "DMA packet" not just emptying the
* current fifo. Most segments need multiple usb packets.
*/
if (channel->status == MUSB_DMA_STATUS_BUSY)
udelay(50);
/* scan the current list, reporting any data that was
* transferred and acking any IRQ
*/
cppi_rx_scan(controller, cppi_ch->index);
/* clobber the existing state once it's idle
*
* NOTE: arguably, we should also wait for all the other
* RX channels to quiesce (how??) and then temporarily
* disable RXCPPI_CTRL_REG ... but it seems that we can
* rely on the controller restarting from state ram, with
* only RXCPPI_BUFCNT state being bogus. BUFCNT will
* correct itself after the next DMA transfer though.
*
* REVISIT does using rndis mode change that?
*/
cppi_reset_rx(cppi_ch->state_ram);
/* next DMA request _should_ load cppi head ptr */
/* ... we don't "free" that list, only mutate it in place. */
cppi_dump_rx(5, cppi_ch, " (done abort)");
/* clean up previously pending bds */
cppi_bd_free(cppi_ch, cppi_ch->last_processed);
cppi_ch->last_processed = NULL;
while (queue) {
struct cppi_descriptor *tmp = queue->next;
cppi_bd_free(cppi_ch, queue);
queue = tmp;
}
}
channel->status = MUSB_DMA_STATUS_FREE;
cppi_ch->buf_dma = 0;
cppi_ch->offset = 0;
cppi_ch->buf_len = 0;
cppi_ch->maxpacket = 0;
return 0;
}
/* TBD Queries:
*
* Power Management ... probably turn off cppi during suspend, restart;
* check state ram? Clocking is presumably shared with usb core.
*/
| gpl-2.0 |
LeMaker/linux-actions | drivers/net/ethernet/davicom/dm9000.c | 2028 | 39498 | /*
* Davicom DM9000 Fast Ethernet driver for Linux.
* Copyright (C) 1997 Sten Wang
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
*
* Additional updates, Copyright:
* Ben Dooks <ben@simtec.co.uk>
* Sascha Hauer <s.hauer@pengutronix.de>
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "dm9000.h"
/* Board/System/Debug information/definition ---------------- */
#define DM9000_PHY 0x40 /* PHY address 0x01 */
#define CARDNAME "dm9000"
#define DRV_VERSION "1.31"
/*
* Transmit timeout, default 5 seconds.
*/
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
/*
* Debug messages level
*/
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
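/* Usage sketch (illustrative, not from the original source): both knobs
 * can be given at module load time, e.g.
 *
 *   insmod dm9000.ko watchdog=3000 debug=2
 *
 * and since "debug" is registered with mode 0644 it can also be changed
 * at runtime through /sys/module/dm9000/parameters/debug, while
 * "watchdog" (mode 0400) is read-only after load.
 */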
/* DM9000 register address locking.
*
* The DM9000 uses an address register to control where data written
* to the data register goes. This means that the address register
* must be preserved over interrupts or similar calls.
*
* During interrupt and other critical calls, a spinlock is used to
* protect the system, but the calls themselves save the address
* in the address register in case they are interrupting another
* access to the device.
*
* For general accesses a lock is provided so that calls which are
* allowed to sleep are serialised so that the address register does
* not need to be saved. This lock also serves to serialise access
* to the EEPROM and PHY access registers which are shared between
* these two devices.
*/
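/* Illustrative sketch (an assumption for clarity, not code from this
 * driver): the save/do/restore pattern described above, for a
 * hypothetical indexed read where io_addr selects a register and
 * io_data carries its value:
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);      -- remember current selection
 *	writeb(reg, db->io_addr);           -- select our register
 *	val = readb(db->io_data);           -- perform the access
 *	writeb(reg_save, db->io_addr);      -- restore previous selection
 *	spin_unlock_irqrestore(&db->lock, flags);
 *
 * dm9000_phy_read() and dm9000_interrupt() below follow exactly this
 * shape.
 */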
/* The driver supports the original DM9000E, and now the two newer
* devices, DM9000A and DM9000B.
*/
enum dm9000_type {
TYPE_DM9000E, /* original DM9000 */
TYPE_DM9000A,
TYPE_DM9000B
};
/* Structure/enum declaration ------------------------------- */
typedef struct board_info {
void __iomem *io_addr; /* Register I/O base address */
void __iomem *io_data; /* Data I/O address */
u16 irq; /* IRQ */
u16 tx_pkt_cnt;
u16 queue_pkt_len;
u16 queue_start_addr;
u16 queue_ip_summed;
u16 dbug_cnt;
u8 io_mode; /* 0:word, 2:byte */
u8 phy_addr;
u8 imr_all;
unsigned int flags;
unsigned int in_suspend :1;
unsigned int wake_supported :1;
enum dm9000_type type;
void (*inblk)(void __iomem *port, void *data, int length);
void (*outblk)(void __iomem *port, void *data, int length);
void (*dumpblk)(void __iomem *port, int length);
struct device *dev; /* parent device */
struct resource *addr_res; /* resources found */
struct resource *data_res;
struct resource *addr_req; /* resources requested */
struct resource *data_req;
struct resource *irq_res;
int irq_wake;
struct mutex addr_lock; /* phy and eeprom access lock */
struct delayed_work phy_poll;
struct net_device *ndev;
spinlock_t lock;
struct mii_if_info mii;
u32 msg_enable;
u32 wake_state;
int ip_summed;
} board_info_t;
/* debug code */
#define dm9000_dbg(db, lev, msg...) do { \
if ((lev) < debug) { \
dev_dbg(db->dev, msg); \
} \
} while (0)
static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
return netdev_priv(dev);
}
/* DM9000 network board routine ---------------------------- */
static void
dm9000_reset(board_info_t * db)
{
dev_dbg(db->dev, "resetting device\n");
/* RESET device */
writeb(DM9000_NCR, db->io_addr);
udelay(200);
writeb(NCR_RST, db->io_data);
udelay(200);
}
/*
* Read a byte from I/O port
*/
static u8
ior(board_info_t * db, int reg)
{
writeb(reg, db->io_addr);
return readb(db->io_data);
}
/*
* Write a byte to I/O port
*/
static void
iow(board_info_t * db, int reg, int value)
{
writeb(reg, db->io_addr);
writeb(value, db->io_data);
}
/* routines for sending block to chip */
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
iowrite8_rep(reg, data, count);
}
static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
iowrite16_rep(reg, data, (count+1) >> 1);
}
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
iowrite32_rep(reg, data, (count+3) >> 2);
}
/* input block from chip to memory */
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
ioread8_rep(reg, data, count);
}
static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
ioread16_rep(reg, data, (count+1) >> 1);
}
static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
ioread32_rep(reg, data, (count+3) >> 2);
}
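/* Note on the rounding above: the 16- and 32-bit variants turn a byte
 * count into a whole number of bus transfers, e.g. (count + 3) >> 2
 * rounds 5 bytes up to two 32-bit accesses; up to 3 trailing bytes
 * beyond 'count' are transferred, so the caller's buffer must allow
 * for the rounded-up length.
 */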
/* dump block from chip to null */
static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
int i;
int tmp;
for (i = 0; i < count; i++)
tmp = readb(reg);
}
static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
int i;
int tmp;
count = (count + 1) >> 1;
for (i = 0; i < count; i++)
tmp = readw(reg);
}
static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
int i;
int tmp;
count = (count + 3) >> 2;
for (i = 0; i < count; i++)
tmp = readl(reg);
}
/*
* Sleep, either by using msleep() or if we are suspending, then
* use mdelay() to sleep.
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
if (db->in_suspend)
mdelay(ms);
else
msleep(ms);
}
/* Read a word from phyxcer */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned int reg_save;
int ret;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Issue phyxcer read command */
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait read complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
/* The read data is kept in REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
/* Write a word to phyxcer */
static void
dm9000_phy_write(struct net_device *dev,
int phyaddr_unused, int reg, int value)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Fill the written data into REG_0D & REG_0E */
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
/* Issue phyxcer write command */
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait write complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/* dm9000_set_io
*
* select the specified set of io routines to use with the
* device
*/
static void dm9000_set_io(struct board_info *db, int byte_width)
{
/* use the size of the data resource to work out what IO
* routines we want to use
*/
switch (byte_width) {
case 1:
db->dumpblk = dm9000_dumpblk_8bit;
db->outblk = dm9000_outblk_8bit;
db->inblk = dm9000_inblk_8bit;
break;
case 3:
dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
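/* fall through */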
case 2:
db->dumpblk = dm9000_dumpblk_16bit;
db->outblk = dm9000_outblk_16bit;
db->inblk = dm9000_inblk_16bit;
break;
case 4:
default:
db->dumpblk = dm9000_dumpblk_32bit;
db->outblk = dm9000_outblk_32bit;
db->inblk = dm9000_inblk_32bit;
break;
}
}
static void dm9000_schedule_poll(board_info_t *db)
{
if (db->type == TYPE_DM9000E)
schedule_delayed_work(&db->phy_poll, HZ * 2);
}
static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
board_info_t *dm = to_dm9000_board(dev);
if (!netif_running(dev))
return -EINVAL;
return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}
static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&db->lock, flags);
ret = ior(db, reg);
spin_unlock_irqrestore(&db->lock, flags);
return ret;
}
static int dm9000_wait_eeprom(board_info_t *db)
{
unsigned int status;
int timeout = 8; /* wait max 8msec */
/* The DM9000 data sheets say we should be able to
* poll the ERRE bit in EPCR to wait for the EEPROM
* operation. From testing several chips, this bit
* does not seem to work.
*
* We attempt to use the bit, but fall back to the
* timeout (which is why we do not return an error
* on expiry) to say that the EEPROM operation has
* completed.
*/
while (1) {
status = dm9000_read_locked(db, DM9000_EPCR);
if ((status & EPCR_ERRE) == 0)
break;
msleep(1);
if (timeout-- < 0) {
dev_dbg(db->dev, "timeout waiting EEPROM\n");
break;
}
}
return 0;
}
/*
* Read a word of data from the EEPROM
*/
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
unsigned long flags;
if (db->flags & DM9000_PLATF_NO_EEPROM) {
to[0] = 0xff;
to[1] = 0xff;
return;
}
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
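/* latch the word offset and kick off the read; completion is polled
 * outside the spinlock by dm9000_wait_eeprom()
 */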
iow(db, DM9000_EPAR, offset);
iow(db, DM9000_EPCR, EPCR_ERPRR);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_wait_eeprom(db);
/* delay for at least 150us */
msleep(1);
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPCR, 0x0);
to[0] = ior(db, DM9000_EPDRL);
to[1] = ior(db, DM9000_EPDRH);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/*
* Write a word of data to the SROM (EEPROM)
*/
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
unsigned long flags;
if (db->flags & DM9000_PLATF_NO_EEPROM)
return;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPAR, offset);
iow(db, DM9000_EPDRH, data[1]);
iow(db, DM9000_EPDRL, data[0]);
iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_wait_eeprom(db);
mdelay(1); /* wait at least 150uS to clear */
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPCR, 0);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/* ethtool ops */
static void dm9000_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
board_info_t *dm = to_dm9000_board(dev);
strlcpy(info->driver, CARDNAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
static u32 dm9000_get_msglevel(struct net_device *dev)
{
board_info_t *dm = to_dm9000_board(dev);
return dm->msg_enable;
}
static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
board_info_t *dm = to_dm9000_board(dev);
dm->msg_enable = value;
}
static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
board_info_t *dm = to_dm9000_board(dev);
mii_ethtool_gset(&dm->mii, cmd);
return 0;
}
static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
board_info_t *dm = to_dm9000_board(dev);
return mii_ethtool_sset(&dm->mii, cmd);
}
static int dm9000_nway_reset(struct net_device *dev)
{
board_info_t *dm = to_dm9000_board(dev);
return mii_nway_restart(&dm->mii);
}
static int dm9000_set_features(struct net_device *dev,
netdev_features_t features)
{
board_info_t *dm = to_dm9000_board(dev);
netdev_features_t changed = dev->features ^ features;
unsigned long flags;
if (!(changed & NETIF_F_RXCSUM))
return 0;
spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
spin_unlock_irqrestore(&dm->lock, flags);
return 0;
}
static u32 dm9000_get_link(struct net_device *dev)
{
board_info_t *dm = to_dm9000_board(dev);
u32 ret;
if (dm->flags & DM9000_PLATF_EXT_PHY)
ret = mii_link_ok(&dm->mii);
else
ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
return ret;
}
#define DM_EEPROM_MAGIC (0x444D394B)
static int dm9000_get_eeprom_len(struct net_device *dev)
{
return 128;
}
static int dm9000_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee, u8 *data)
{
board_info_t *dm = to_dm9000_board(dev);
int offset = ee->offset;
int len = ee->len;
int i;
/* EEPROM access is aligned to two bytes */
if ((len & 1) != 0 || (offset & 1) != 0)
return -EINVAL;
if (dm->flags & DM9000_PLATF_NO_EEPROM)
return -ENOENT;
ee->magic = DM_EEPROM_MAGIC;
for (i = 0; i < len; i += 2)
dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
return 0;
}
static int dm9000_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee, u8 *data)
{
board_info_t *dm = to_dm9000_board(dev);
int offset = ee->offset;
int len = ee->len;
int done;
/* EEPROM access is done in 16-bit words; unaligned offsets and
 * lengths are handled in the loop below */
if (dm->flags & DM9000_PLATF_NO_EEPROM)
return -ENOENT;
if (ee->magic != DM_EEPROM_MAGIC)
return -EINVAL;
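/* Walk the buffer in 16-bit EEPROM words: an unaligned first or last
 * byte is handled with a read-modify-write of the word containing it
 * (consuming one byte), while aligned spans are written two bytes at
 * a time.
 */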
while (len > 0) {
if (len & 1 || offset & 1) {
int which = offset & 1;
u8 tmp[2];
dm9000_read_eeprom(dm, offset / 2, tmp);
tmp[which] = *data;
dm9000_write_eeprom(dm, offset / 2, tmp);
done = 1;
} else {
dm9000_write_eeprom(dm, offset / 2, data);
done = 2;
}
data += done;
offset += done;
len -= done;
}
return 0;
}
static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
board_info_t *dm = to_dm9000_board(dev);
memset(w, 0, sizeof(struct ethtool_wolinfo));
/* note, we could probably support wake-phy too */
w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
w->wolopts = dm->wake_state;
}
static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
board_info_t *dm = to_dm9000_board(dev);
unsigned long flags;
u32 opts = w->wolopts;
u32 wcr = 0;
if (!dm->wake_supported)
return -EOPNOTSUPP;
if (opts & ~WAKE_MAGIC)
return -EINVAL;
if (opts & WAKE_MAGIC)
wcr |= WCR_MAGICEN;
mutex_lock(&dm->addr_lock);
spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_WCR, wcr);
spin_unlock_irqrestore(&dm->lock, flags);
mutex_unlock(&dm->addr_lock);
if (dm->wake_state != opts) {
/* change in wol state, update IRQ state */
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
dm->wake_state = opts;
return 0;
}
static const struct ethtool_ops dm9000_ethtool_ops = {
.get_drvinfo = dm9000_get_drvinfo,
.get_settings = dm9000_get_settings,
.set_settings = dm9000_set_settings,
.get_msglevel = dm9000_get_msglevel,
.set_msglevel = dm9000_set_msglevel,
.nway_reset = dm9000_nway_reset,
.get_link = dm9000_get_link,
.get_wol = dm9000_get_wol,
.set_wol = dm9000_set_wol,
.get_eeprom_len = dm9000_get_eeprom_len,
.get_eeprom = dm9000_get_eeprom,
.set_eeprom = dm9000_set_eeprom,
};
static void dm9000_show_carrier(board_info_t *db,
unsigned carrier, unsigned nsr)
{
struct net_device *ndev = db->ndev;
unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
if (carrier)
dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
(ncr & NCR_FDX) ? "full" : "half");
else
dev_info(db->dev, "%s: link down\n", ndev->name);
}
static void
dm9000_poll_work(struct work_struct *w)
{
struct delayed_work *dw = to_delayed_work(w);
board_info_t *db = container_of(dw, board_info_t, phy_poll);
struct net_device *ndev = db->ndev;
if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
!(db->flags & DM9000_PLATF_EXT_PHY)) {
unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
unsigned new_carrier;
new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
if (old_carrier != new_carrier) {
if (netif_msg_link(db))
dm9000_show_carrier(db, new_carrier, nsr);
if (!new_carrier)
netif_carrier_off(ndev);
else
netif_carrier_on(ndev);
}
} else
mii_check_media(&db->mii, netif_msg_link(db), 0);
if (netif_running(ndev))
dm9000_schedule_poll(db);
}
/* dm9000_release_board
*
* release a board, and any mapped resources
*/
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
/* unmap our resources */
iounmap(db->io_addr);
iounmap(db->io_data);
/* release the resources */
release_resource(db->data_req);
kfree(db->data_req);
release_resource(db->addr_req);
kfree(db->addr_req);
}
static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
switch (type) {
case TYPE_DM9000E: return 'e';
case TYPE_DM9000A: return 'a';
case TYPE_DM9000B: return 'b';
}
return '?';
}
/*
* Set DM9000 multicast address
*/
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct netdev_hw_addr *ha;
int i, oft;
u32 hash_val;
u16 hash_table[4];
u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
dm9000_dbg(db, 1, "entering %s\n", __func__);
for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
iow(db, oft, dev->dev_addr[i]);
/* Clear Hash Table */
for (i = 0; i < 4; i++)
hash_table[i] = 0x0;
/* broadcast address */
hash_table[3] = 0x8000;
if (dev->flags & IFF_PROMISC)
rcr |= RCR_PRMSC;
if (dev->flags & IFF_ALLMULTI)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
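/* hash_val is the low 6 bits of the little-endian CRC of the MAC
 * address: bits [5:4] select one of the four 16-bit table words
 * (hash_val / 16) and bits [3:0] select the bit within that word
 * (hash_val % 16).
 */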
netdev_for_each_mc_addr(ha, dev) {
hash_val = ether_crc_le(6, ha->addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
/* Write the hash table to MAC MD table */
for (i = 0, oft = DM9000_MAR; i < 4; i++) {
iow(db, oft++, hash_table[i]);
iow(db, oft++, hash_table[i] >> 8);
}
iow(db, DM9000_RCR, rcr);
}
static void
dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&db->lock, flags);
dm9000_hash_table_unlocked(dev);
spin_unlock_irqrestore(&db->lock, flags);
}
/*
* Initialize dm9000 board
*/
static void
dm9000_init_dm9000(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
unsigned int imr;
unsigned int ncr;
dm9000_dbg(db, 1, "entering %s\n", __func__);
/* I/O mode */
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
/* Checksum mode */
if (dev->hw_features & NETIF_F_RXCSUM)
iow(db, DM9000_RCSR,
(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* If WoL is supported, always set NCR_WAKEEN; with it cleared the
 * chip simply drops wake events. Fine-grained masking is already
 * provided by the wake-mask in DM9000_WCR */
if (db->wake_supported)
ncr |= NCR_WAKEEN;
iow(db, DM9000_NCR, ncr);
/* Program operating register */
iow(db, DM9000_TCR, 0); /* TX Polling clear */
iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
iow(db, DM9000_FCR, 0xff); /* Flow Control */
iow(db, DM9000_SMCR, 0); /* Special Mode */
/* clear TX status */
iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
/* Set address filter table */
dm9000_hash_table_unlocked(dev);
imr = IMR_PAR | IMR_PTM | IMR_PRM;
if (db->type != TYPE_DM9000E)
imr |= IMR_LNKCHNG;
db->imr_all = imr;
/* Enable TX/RX interrupt mask */
iow(db, DM9000_IMR, imr);
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
dev->trans_start = jiffies;
}
/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
u8 reg_save;
unsigned long flags;
/* Save previous register address */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
dm9000_init_dm9000(dev);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
}
static void dm9000_send_packet(struct net_device *dev,
int ip_summed,
u16 pkt_len)
{
board_info_t *dm = to_dm9000_board(dev);
/* The DM9000 is not smart enough to leave fragmented packets alone. */
if (dm->ip_summed != ip_summed) {
if (ip_summed == CHECKSUM_NONE)
iow(dm, DM9000_TCCR, 0);
else
iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
dm->ip_summed = ip_summed;
}
/* Set TX length to DM9000 */
iow(dm, DM9000_TXPLL, pkt_len);
iow(dm, DM9000_TXPLH, pkt_len >> 8);
/* Issue TX polling command */
iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
}
/*
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
board_info_t *db = netdev_priv(dev);
dm9000_dbg(db, 3, "%s:\n", __func__);
if (db->tx_pkt_cnt > 1)
return NETDEV_TX_BUSY;
spin_lock_irqsave(&db->lock, flags);
/* Move data to DM9000 TX RAM */
writeb(DM9000_MWCMD, db->io_addr);
(db->outblk)(db->io_data, skb->data, skb->len);
dev->stats.tx_bytes += skb->len;
db->tx_pkt_cnt++;
/* TX control: send the first packet immediately, queue the second */
if (db->tx_pkt_cnt == 1) {
dm9000_send_packet(dev, skb->ip_summed, skb->len);
} else {
/* Second packet */
db->queue_pkt_len = skb->len;
db->queue_ip_summed = skb->ip_summed;
netif_stop_queue(dev);
}
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
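/* Design note: the DM9000 has room for two pending TX frames, so the
 * driver keeps at most two in flight via tx_pkt_cnt; the queue is
 * stopped once the second frame is loaded and is restarted from
 * dm9000_tx_done() when the chip reports a completion.
 */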
/*
* DM9000 interrupt handler
* receive the packet to upper layer, free the transmitted packet
*/
static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
int tx_status = ior(db, DM9000_NSR); /* Got TX status */
if (tx_status & (NSR_TX2END | NSR_TX1END)) {
/* One packet transmit has completed */
db->tx_pkt_cnt--;
dev->stats.tx_packets++;
if (netif_msg_tx_done(db))
dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
/* Check for a queued packet and send it */
if (db->tx_pkt_cnt > 0)
dm9000_send_packet(dev, db->queue_ip_summed,
db->queue_pkt_len);
netif_wake_queue(dev);
}
}
struct dm9000_rxhdr {
u8 RxPktReady;
u8 RxStatus;
__le16 RxLen;
} __packed;
/*
* Receive a packet and pass it to the upper layer
*/
static void
dm9000_rx(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct dm9000_rxhdr rxhdr;
struct sk_buff *skb;
u8 rxbyte, *rdptr;
bool GoodPacket;
int RxLen;
/* Check packet ready or not */
do {
ior(db, DM9000_MRCMDX); /* Dummy read */
/* Get the most up-to-date data */
rxbyte = readb(db->io_data);
/* Status check: this byte must be 0 or 1 */
if (rxbyte & DM9000_PKT_ERR) {
dev_warn(db->dev, "status check fail: %d\n", rxbyte);
iow(db, DM9000_RCR, 0x00); /* Stop Device */
iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
return;
}
if (!(rxbyte & DM9000_PKT_RDY))
return;
/* A packet is ready now; get its status/length */
GoodPacket = true;
writeb(DM9000_MRCMD, db->io_addr);
(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
RxLen = le16_to_cpu(rxhdr.RxLen);
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RX: status %02x, length %04x\n",
rxhdr.RxStatus, RxLen);
/* Packet Status check */
if (RxLen < 0x40) {
GoodPacket = false;
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
}
if (RxLen > DM9000_PKT_MAX) {
dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
}
/* rxhdr.RxStatus is identical to RSR register. */
if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
RSR_PLE | RSR_RWTO |
RSR_LCS | RSR_RF)) {
GoodPacket = false;
if (rxhdr.RxStatus & RSR_FOE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "fifo error\n");
dev->stats.rx_fifo_errors++;
}
if (rxhdr.RxStatus & RSR_CE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "crc error\n");
dev->stats.rx_crc_errors++;
}
if (rxhdr.RxStatus & RSR_RF) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "length error\n");
dev->stats.rx_length_errors++;
}
}
/* Move data from DM9000 */
if (GoodPacket &&
((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
skb_reserve(skb, 2);
rdptr = (u8 *) skb_put(skb, RxLen - 4);
/* Read received packet from RX SRAM */
(db->inblk)(db->io_data, rdptr, RxLen);
dev->stats.rx_bytes += RxLen;
/* Pass to upper layer */
skb->protocol = eth_type_trans(skb, dev);
if (dev->features & NETIF_F_RXCSUM) {
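/* rxbyte carries the chip's checksum status; the test below
 * appears to pair per-protocol "checked" bits (2..4) with
 * their "failed" bits (5..7) and accepts the frame only when
 * no checked protocol reports a failure (an interpretation;
 * the bit layout is not documented in this driver).
 */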
if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
}
netif_rx(skb);
dev->stats.rx_packets++;
} else {
/* need to dump the packet's data */
(db->dumpblk)(db->io_data, RxLen);
}
} while (rxbyte & DM9000_PKT_RDY);
}
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
board_info_t *db = netdev_priv(dev);
int int_status;
unsigned long flags;
u8 reg_save;
dm9000_dbg(db, 3, "entering %s\n", __func__);
/* A real interrupt has arrived */
/* holders of db->lock must always block IRQs */
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Disable all interrupts */
iow(db, DM9000_IMR, IMR_PAR);
/* Got DM9000 interrupt status */
int_status = ior(db, DM9000_ISR); /* Got ISR */
iow(db, DM9000_ISR, int_status); /* Clear ISR status */
if (netif_msg_intr(db))
dev_dbg(db->dev, "interrupt status %02x\n", int_status);
/* Receive the incoming packet */
if (int_status & ISR_PRS)
dm9000_rx(dev);
/* Transmit interrupt check */
if (int_status & ISR_PTS)
dm9000_tx_done(dev, db);
if (db->type != TYPE_DM9000E) {
if (int_status & ISR_LNKCHNG) {
/* fire a link-change request */
schedule_delayed_work(&db->phy_poll, 1);
}
}
/* Re-enable interrupt mask */
iow(db, DM9000_IMR, db->imr_all);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned nsr, wcr;
spin_lock_irqsave(&db->lock, flags);
nsr = ior(db, DM9000_NSR);
wcr = ior(db, DM9000_WCR);
dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
if (nsr & NSR_WAKEST) {
/* clear the wake status so we do not see it again */
iow(db, DM9000_NSR, NSR_WAKEST);
if (wcr & WCR_LINKST)
dev_info(db->dev, "wake by link status change\n");
if (wcr & WCR_SAMPLEST)
dev_info(db->dev, "wake by sample packet\n");
if (wcr & WCR_MAGICST )
dev_info(db->dev, "wake by magic packet\n");
if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
dev_err(db->dev, "wake signalled with no reason? "
"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
}
spin_unlock_irqrestore(&db->lock, flags);
return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Used by netconsole
*/
static void dm9000_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
dm9000_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
/*
* Open the interface.
* The interface is opened whenever "ifconfig" activates it.
*/
static int
dm9000_open(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
/* If there is no IRQ type specified, default to something that
* may work, and tell the user that this is a problem */
if (irqflags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irqflags |= IRQF_SHARED;
/* Pre-activate the PHY via GPIO0; register 1Fh is not touched by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needed by DM9000B */
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
/* Init driver variable */
db->dbug_cnt = 0;
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
dm9000_schedule_poll(db);
return 0;
}
static void
dm9000_shutdown(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
/* RESET device */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
iow(db, DM9000_RCR, 0x00); /* Disable RX */
}
/*
* Stop the interface.
* The interface is stopped when it is brought down.
*/
static int
dm9000_stop(struct net_device *ndev)
{
board_info_t *db = netdev_priv(ndev);
if (netif_msg_ifdown(db))
dev_dbg(db->dev, "shutting down %s\n", ndev->name);
cancel_delayed_work_sync(&db->phy_poll);
netif_stop_queue(ndev);
netif_carrier_off(ndev);
/* free interrupt */
free_irq(ndev->irq, ndev);
dm9000_shutdown(ndev);
return 0;
}
static const struct net_device_ops dm9000_netdev_ops = {
.ndo_open = dm9000_open,
.ndo_stop = dm9000_stop,
.ndo_start_xmit = dm9000_start_xmit,
.ndo_tx_timeout = dm9000_timeout,
.ndo_set_rx_mode = dm9000_hash_table,
.ndo_do_ioctl = dm9000_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_features = dm9000_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = dm9000_poll_controller,
#endif
};
/*
* Search DM9000 board, allocate space and register it
*/
static int
dm9000_probe(struct platform_device *pdev)
{
struct dm9000_plat_data *pdata = pdev->dev.platform_data;
struct board_info *db; /* Pointer to the board information structure */
struct net_device *ndev;
const unsigned char *mac_src;
int ret = 0;
int iosize;
int i;
u32 id_val;
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
if (!ndev)
return -ENOMEM;
SET_NETDEV_DEV(ndev, &pdev->dev);
dev_dbg(&pdev->dev, "dm9000_probe()\n");
/* setup board info structure */
db = netdev_priv(ndev);
db->dev = &pdev->dev;
db->ndev = ndev;
spin_lock_init(&db->lock);
mutex_init(&db->addr_lock);
INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (db->addr_res == NULL || db->data_res == NULL ||
db->irq_res == NULL) {
dev_err(db->dev, "insufficient resources\n");
ret = -ENOENT;
goto out;
}
db->irq_wake = platform_get_irq(pdev, 1);
if (db->irq_wake >= 0) {
dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
IRQF_SHARED, dev_name(db->dev), ndev);
if (ret) {
dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
} else {
/* test to see if irq is really wakeup capable */
ret = irq_set_irq_wake(db->irq_wake, 1);
if (ret) {
dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
db->irq_wake, ret);
ret = 0;
} else {
irq_set_irq_wake(db->irq_wake, 0);
db->wake_supported = 1;
}
}
}
iosize = resource_size(db->addr_res);
db->addr_req = request_mem_region(db->addr_res->start, iosize,
pdev->name);
if (db->addr_req == NULL) {
dev_err(db->dev, "cannot claim address reg area\n");
ret = -EIO;
goto out;
}
db->io_addr = ioremap(db->addr_res->start, iosize);
if (db->io_addr == NULL) {
dev_err(db->dev, "failed to ioremap address reg\n");
ret = -EINVAL;
goto out;
}
iosize = resource_size(db->data_res);
db->data_req = request_mem_region(db->data_res->start, iosize,
pdev->name);
if (db->data_req == NULL) {
dev_err(db->dev, "cannot claim data reg area\n");
ret = -EIO;
goto out;
}
db->io_data = ioremap(db->data_res->start, iosize);
if (db->io_data == NULL) {
dev_err(db->dev, "failed to ioremap data reg\n");
ret = -EINVAL;
goto out;
}
/* fill in parameters for net-dev structure */
ndev->base_addr = (unsigned long)db->io_addr;
ndev->irq = db->irq_res->start;
/* ensure at least we have a default set of IO routines */
dm9000_set_io(db, iosize);
/* check to see if anything is being over-ridden */
if (pdata != NULL) {
/* check to see if the driver wants to over-ride the
* default IO width */
if (pdata->flags & DM9000_PLATF_8BITONLY)
dm9000_set_io(db, 1);
if (pdata->flags & DM9000_PLATF_16BITONLY)
dm9000_set_io(db, 2);
if (pdata->flags & DM9000_PLATF_32BITONLY)
dm9000_set_io(db, 4);
/* check to see if there are any IO routine
* over-rides */
if (pdata->inblk != NULL)
db->inblk = pdata->inblk;
if (pdata->outblk != NULL)
db->outblk = pdata->outblk;
if (pdata->dumpblk != NULL)
db->dumpblk = pdata->dumpblk;
db->flags = pdata->flags;
}
#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
/* Probe-time fix: rather than a plain dm9000_reset(db), assert
 * NCR_MAC_LBK together with NCR_RST so the DM9000 FIFO stays
 * stable during the probe stage.
 */
iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
id_val = ior(db, DM9000_VIDL);
id_val |= (u32)ior(db, DM9000_VIDH) << 8;
id_val |= (u32)ior(db, DM9000_PIDL) << 16;
id_val |= (u32)ior(db, DM9000_PIDH) << 24;
if (id_val == DM9000_ID)
break;
dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
}
if (id_val != DM9000_ID) {
dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
ret = -ENODEV;
goto out;
}
/* Identify what type of DM9000 we are working on */
id_val = ior(db, DM9000_CHIPR);
dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
switch (id_val) {
case CHIPR_DM9000A:
db->type = TYPE_DM9000A;
break;
case CHIPR_DM9000B:
db->type = TYPE_DM9000B;
break;
default:
dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
db->type = TYPE_DM9000E;
}
/* dm9000a/b are capable of hardware checksum offload */
if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
ndev->features |= ndev->hw_features;
}
/* from this point we assume that we have found a DM9000 */
/* driver system function */
ether_setup(ndev);
ndev->netdev_ops = &dm9000_netdev_ops;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
ndev->ethtool_ops = &dm9000_ethtool_ops;
db->msg_enable = NETIF_MSG_LINK;
db->mii.phy_id_mask = 0x1f;
db->mii.reg_num_mask = 0x1f;
db->mii.force_media = 0;
db->mii.full_duplex = 0;
db->mii.dev = ndev;
db->mii.mdio_read = dm9000_phy_read;
db->mii.mdio_write = dm9000_phy_write;
mac_src = "eeprom";
/* try reading the node address from the attached EEPROM */
for (i = 0; i < 6; i += 2)
dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
memcpy(ndev->dev_addr, pdata->dev_addr, 6);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
mac_src = "chip";
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
eth_hw_addr_random(ndev);
mac_src = "random";
}
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
if (ret == 0)
printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
ndev->name, dm9000_type_to_char(db->type),
db->io_addr, db->io_data, ndev->irq,
ndev->dev_addr, mac_src);
return 0;
out:
dev_err(db->dev, "not found (%d).\n", ret);
dm9000_release_board(pdev, db);
free_netdev(ndev);
return ret;
}
static int
dm9000_drv_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
board_info_t *db;
if (ndev) {
db = netdev_priv(ndev);
db->in_suspend = 1;
if (!netif_running(ndev))
return 0;
netif_device_detach(ndev);
/* only shutdown if not using WoL */
if (!db->wake_state)
dm9000_shutdown(ndev);
}
return 0;
}
static int
dm9000_drv_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
board_info_t *db = netdev_priv(ndev);
if (ndev) {
if (netif_running(ndev)) {
/* reset if we were not in wake mode, to ensure the device is in
 * a known state in case it was powered off */
if (!db->wake_state) {
dm9000_reset(db);
dm9000_init_dm9000(ndev);
}
netif_device_attach(ndev);
}
db->in_suspend = 0;
}
return 0;
}
static const struct dev_pm_ops dm9000_drv_pm_ops = {
.suspend = dm9000_drv_suspend,
.resume = dm9000_drv_resume,
};
static int
dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
dm9000_release_board(pdev, netdev_priv(ndev));
free_netdev(ndev); /* free device structure */
dev_dbg(&pdev->dev, "released and freed device\n");
return 0;
}
static struct platform_driver dm9000_driver = {
.driver = {
.name = "dm9000",
.owner = THIS_MODULE,
.pm = &dm9000_drv_pm_ops,
},
.probe = dm9000_probe,
.remove = dm9000_drv_remove,
};
module_platform_driver(dm9000_driver);
MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");
| gpl-2.0 |
Octane70/SGH-T989D_JB_4.1.2_Kernel | arch/arm/mach-omap2/board-am3517evm.c | 2284 | 13199 | /*
* linux/arch/arm/mach-omap2/board-am3517evm.c
*
* Copyright (C) 2009 Texas Instruments Incorporated
* Author: Ranjith Lohithakshan <ranjithl@ti.com>
*
* Based on mach-omap2/board-omap3evm.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
* whether express or implied; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c/pca953x.h>
#include <linux/can/platform/ti_hecc.h>
#include <linux/davinci_emac.h>
#include <mach/hardware.h>
#include <mach/am35xx.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/board.h>
#include <plat/common.h>
#include <plat/usb.h>
#include <video/omapdss.h>
#include <video/omap-panel-generic-dpi.h>
#include "mux.h"
#include "control.h"
#define AM35XX_EVM_MDIO_FREQUENCY (1000000)
static struct mdio_platform_data am3517_evm_mdio_pdata = {
.bus_freq = AM35XX_EVM_MDIO_FREQUENCY,
};
static struct resource am3517_mdio_resources[] = {
{
.start = AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET,
.end = AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET +
SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device am3517_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(am3517_mdio_resources),
.resource = am3517_mdio_resources,
.dev.platform_data = &am3517_evm_mdio_pdata,
};
static struct emac_platform_data am3517_evm_emac_pdata = {
.rmii_en = 1,
};
static struct resource am3517_emac_resources[] = {
{
.start = AM35XX_IPSS_EMAC_BASE,
.end = AM35XX_IPSS_EMAC_BASE + 0x2FFFF,
.flags = IORESOURCE_MEM,
},
{
.start = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
.end = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
.end = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
.end = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
.flags = IORESOURCE_IRQ,
},
{
.start = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
.end = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device am3517_emac_device = {
.name = "davinci_emac",
.id = -1,
.num_resources = ARRAY_SIZE(am3517_emac_resources),
.resource = am3517_emac_resources,
};
static void am3517_enable_ethernet_int(void)
{
u32 regval;
regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
AM35XX_CPGMAC_C0_TX_PULSE_CLR |
AM35XX_CPGMAC_C0_MISC_PULSE_CLR |
AM35XX_CPGMAC_C0_RX_THRESH_CLR);
omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
}
static void am3517_disable_ethernet_int(void)
{
u32 regval;
regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
AM35XX_CPGMAC_C0_TX_PULSE_CLR);
omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
}
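/* Note: the trailing read-back of AM35XX_CONTROL_LVL_INTR_CLEAR in both
 * helpers appears to act as a write flush, ensuring the level-interrupt
 * clear has posted before execution continues (an inference; the TRM is
 * not quoted here).
 */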
static void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
{
unsigned int regval;
pdata->ctrl_reg_offset = AM35XX_EMAC_CNTRL_OFFSET;
pdata->ctrl_mod_reg_offset = AM35XX_EMAC_CNTRL_MOD_OFFSET;
pdata->ctrl_ram_offset = AM35XX_EMAC_CNTRL_RAM_OFFSET;
pdata->ctrl_ram_size = AM35XX_EMAC_CNTRL_RAM_SIZE;
pdata->version = EMAC_VERSION_2;
pdata->hw_ram_addr = AM35XX_EMAC_HW_RAM_ADDR;
pdata->interrupt_enable = am3517_enable_ethernet_int;
pdata->interrupt_disable = am3517_disable_ethernet_int;
am3517_emac_device.dev.platform_data = pdata;
platform_device_register(&am3517_emac_device);
platform_device_register(&am3517_mdio_device);
clk_add_alias(NULL, dev_name(&am3517_mdio_device.dev),
NULL, &am3517_emac_device.dev);
regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
regval = regval & (~(AM35XX_CPGMACSS_SW_RST));
omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
return ;
}
#define LCD_PANEL_PWR 176
#define LCD_PANEL_BKLIGHT_PWR 182
#define LCD_PANEL_PWM 181
static struct i2c_board_info __initdata am3517evm_i2c1_boardinfo[] = {
{
I2C_BOARD_INFO("s35390a", 0x30),
},
};
/*
* RTC - S35390A
*/
#define GPIO_RTCS35390A_IRQ 55
static void __init am3517_evm_rtc_init(void)
{
int r;
omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP);
r = gpio_request_one(GPIO_RTCS35390A_IRQ, GPIOF_IN, "rtcs35390a-irq");
if (r < 0) {
printk(KERN_WARNING "failed to request GPIO#%d\n",
GPIO_RTCS35390A_IRQ);
return;
}
am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
}
/*
* I2C GPIO Expander - TCA6416
*/
/* Mounted on Base-Board */
static struct pca953x_platform_data am3517evm_gpio_expander_info_0 = {
.gpio_base = OMAP_MAX_GPIO_LINES,
};
static struct i2c_board_info __initdata am3517evm_i2c2_boardinfo[] = {
{
I2C_BOARD_INFO("tlv320aic23", 0x1A),
},
{
I2C_BOARD_INFO("tca6416", 0x21),
.platform_data = &am3517evm_gpio_expander_info_0,
},
};
/* Mounted on UI Card */
static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_1 = {
.gpio_base = OMAP_MAX_GPIO_LINES + 16,
};
static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_2 = {
.gpio_base = OMAP_MAX_GPIO_LINES + 32,
};
static struct i2c_board_info __initdata am3517evm_i2c3_boardinfo[] = {
{
I2C_BOARD_INFO("tca6416", 0x20),
.platform_data = &am3517evm_ui_gpio_expander_info_1,
},
{
I2C_BOARD_INFO("tca6416", 0x21),
.platform_data = &am3517evm_ui_gpio_expander_info_2,
},
};
static int __init am3517_evm_i2c_init(void)
{
omap_register_i2c_bus(1, 400, NULL, 0);
omap_register_i2c_bus(2, 400, am3517evm_i2c2_boardinfo,
ARRAY_SIZE(am3517evm_i2c2_boardinfo));
omap_register_i2c_bus(3, 400, am3517evm_i2c3_boardinfo,
ARRAY_SIZE(am3517evm_i2c3_boardinfo));
return 0;
}
static int lcd_enabled;
static int dvi_enabled;
#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
static struct gpio am3517_evm_dss_gpios[] __initdata = {
/* GPIO 182 = LCD Backlight Power */
{ LCD_PANEL_BKLIGHT_PWR, GPIOF_OUT_INIT_HIGH, "lcd_backlight_pwr" },
/* GPIO 181 = LCD Panel PWM */
{ LCD_PANEL_PWM, GPIOF_OUT_INIT_HIGH, "lcd bl enable" },
/* GPIO 176 = LCD Panel Power enable pin */
{ LCD_PANEL_PWR, GPIOF_OUT_INIT_HIGH, "dvi enable" },
};
static void __init am3517_evm_display_init(void)
{
int r;
omap_mux_init_gpio(LCD_PANEL_PWR, OMAP_PIN_INPUT_PULLUP);
omap_mux_init_gpio(LCD_PANEL_BKLIGHT_PWR, OMAP_PIN_INPUT_PULLDOWN);
omap_mux_init_gpio(LCD_PANEL_PWM, OMAP_PIN_INPUT_PULLDOWN);
r = gpio_request_array(am3517_evm_dss_gpios,
ARRAY_SIZE(am3517_evm_dss_gpios));
if (r) {
printk(KERN_ERR "failed to get DSS panel control GPIOs\n");
return;
}
printk(KERN_INFO "Display initialized successfully\n");
}
#else
static void __init am3517_evm_display_init(void) {}
#endif
static int am3517_evm_panel_enable_lcd(struct omap_dss_device *dssdev)
{
if (dvi_enabled) {
printk(KERN_ERR "cannot enable LCD, DVI is enabled\n");
return -EINVAL;
}
gpio_set_value(LCD_PANEL_PWR, 1);
lcd_enabled = 1;
return 0;
}
static void am3517_evm_panel_disable_lcd(struct omap_dss_device *dssdev)
{
gpio_set_value(LCD_PANEL_PWR, 0);
lcd_enabled = 0;
}
static struct panel_generic_dpi_data lcd_panel = {
.name = "sharp_lq",
.platform_enable = am3517_evm_panel_enable_lcd,
.platform_disable = am3517_evm_panel_disable_lcd,
};
static struct omap_dss_device am3517_evm_lcd_device = {
.type = OMAP_DISPLAY_TYPE_DPI,
.name = "lcd",
.driver_name = "generic_dpi_panel",
.data = &lcd_panel,
.phy.dpi.data_lines = 16,
};
static int am3517_evm_panel_enable_tv(struct omap_dss_device *dssdev)
{
return 0;
}
static void am3517_evm_panel_disable_tv(struct omap_dss_device *dssdev)
{
}
static struct omap_dss_device am3517_evm_tv_device = {
.type = OMAP_DISPLAY_TYPE_VENC,
.name = "tv",
.driver_name = "venc",
.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
.platform_enable = am3517_evm_panel_enable_tv,
.platform_disable = am3517_evm_panel_disable_tv,
};
static int am3517_evm_panel_enable_dvi(struct omap_dss_device *dssdev)
{
if (lcd_enabled) {
printk(KERN_ERR "cannot enable DVI, LCD is enabled\n");
return -EINVAL;
}
dvi_enabled = 1;
return 0;
}
static void am3517_evm_panel_disable_dvi(struct omap_dss_device *dssdev)
{
dvi_enabled = 0;
}
static struct panel_generic_dpi_data dvi_panel = {
.name = "generic",
.platform_enable = am3517_evm_panel_enable_dvi,
.platform_disable = am3517_evm_panel_disable_dvi,
};
static struct omap_dss_device am3517_evm_dvi_device = {
.type = OMAP_DISPLAY_TYPE_DPI,
.name = "dvi",
.driver_name = "generic_dpi_panel",
.data = &dvi_panel,
.phy.dpi.data_lines = 24,
};
static struct omap_dss_device *am3517_evm_dss_devices[] = {
&am3517_evm_lcd_device,
&am3517_evm_tv_device,
&am3517_evm_dvi_device,
};
static struct omap_dss_board_info am3517_evm_dss_data = {
.num_devices = ARRAY_SIZE(am3517_evm_dss_devices),
.devices = am3517_evm_dss_devices,
.default_device = &am3517_evm_lcd_device,
};
/*
* Board initialization
*/
static void __init am3517_evm_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
}
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
.mode = MUSB_OTG,
.power = 500,
.set_phy_power = am35x_musb_phy_power,
.clear_irq = am35x_musb_clear_irq,
.set_mode = am35x_set_mode,
.reset = am35x_musb_reset,
};
static __init void am3517_evm_musb_init(void)
{
u32 devconf2;
/*
* Set up USB clock/mode in the DEVCONF2 register.
*/
devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
/* USB2.0 PHY reference clock is 13 MHz */
devconf2 &= ~(CONF2_REFFREQ | CONF2_OTGMODE | CONF2_PHY_GPIOMODE);
devconf2 |= CONF2_REFFREQ_13MHZ | CONF2_SESENDEN | CONF2_VBDTCTEN
| CONF2_DATPOL;
omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
usb_musb_init(&musb_board_data);
}
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
#else
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
#endif
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
.phy_reset = true,
.reset_gpio_port[0] = 57,
.reset_gpio_port[1] = -EINVAL,
.reset_gpio_port[2] = -EINVAL
};
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
/* USB OTG DRVVBUS offset = 0x212 */
OMAP3_MUX(SAD2D_MCAD23, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif
static struct resource am3517_hecc_resources[] = {
{
.start = AM35XX_IPSS_HECC_BASE,
.end = AM35XX_IPSS_HECC_BASE + 0x3FFF,
.flags = IORESOURCE_MEM,
},
{
.start = INT_35XX_HECC0_IRQ,
.end = INT_35XX_HECC0_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device am3517_hecc_device = {
.name = "ti_hecc",
.id = -1,
.num_resources = ARRAY_SIZE(am3517_hecc_resources),
.resource = am3517_hecc_resources,
};
static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
.scc_hecc_offset = AM35XX_HECC_SCC_HECC_OFFSET,
.scc_ram_offset = AM35XX_HECC_SCC_RAM_OFFSET,
.hecc_ram_offset = AM35XX_HECC_RAM_OFFSET,
.mbx_offset = AM35XX_HECC_MBOX_OFFSET,
.int_line = AM35XX_HECC_INT_LINE,
.version = AM35XX_HECC_VERSION,
};
static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
{
am3517_hecc_device.dev.platform_data = pdata;
platform_device_register(&am3517_hecc_device);
}
static struct omap_board_config_kernel am3517_evm_config[] __initdata = {
};
static void __init am3517_evm_init(void)
{
omap_board_config = am3517_evm_config;
omap_board_config_size = ARRAY_SIZE(am3517_evm_config);
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
am3517_evm_i2c_init();
omap_display_init(&am3517_evm_dss_data);
omap_serial_init();
/* Configure GPIO for EHCI port */
omap_mux_init_gpio(57, OMAP_PIN_OUTPUT);
usbhs_init(&usbhs_bdata);
am3517_evm_hecc_init(&am3517_evm_hecc_pdata);
/* DSS */
am3517_evm_display_init();
/* RTC - S35390A */
am3517_evm_rtc_init();
i2c_register_board_info(1, am3517evm_i2c1_boardinfo,
ARRAY_SIZE(am3517evm_i2c1_boardinfo));
/* Ethernet */
am3517_evm_ethernet_init(&am3517_evm_emac_pdata);
/* MUSB */
am3517_evm_musb_init();
}
MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
.boot_params = 0x80000100,
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = am3517_evm_init_early,
.init_irq = omap_init_irq,
.init_machine = am3517_evm_init,
.timer = &omap_timer,
MACHINE_END
| gpl-2.0 |
Nothing-Dev/MaxiKernel_condor | drivers/of/of_slimbus.c | 3052 | 2378 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* OF helpers for SLIMbus */
#include <linux/slimbus/slimbus.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_slimbus.h>
int of_register_slim_devices(struct slim_controller *ctrl)
{
struct device_node *node;
struct slim_boardinfo *binfo = NULL;
struct slim_boardinfo *temp;
int n = 0;
int ret = 0;
if (!ctrl->dev.of_node)
return -EINVAL;
for_each_child_of_node(ctrl->dev.of_node, node) {
struct property *prop;
struct slim_device *slim;
char *name;
prop = of_find_property(node, "elemental-addr", NULL);
if (!prop || prop->length != 6) {
dev_err(&ctrl->dev, "of_slim: invalid E-addr");
continue;
}
name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
if (!name) {
dev_err(&ctrl->dev, "of_slim: out of memory");
ret = -ENOMEM;
goto of_slim_err;
}
if (of_modalias_node(node, name, SLIMBUS_NAME_SIZE) < 0) {
dev_err(&ctrl->dev, "of_slim: modalias failure on %s\n",
node->full_name);
kfree(name);
continue;
}
slim = kzalloc(sizeof(struct slim_device), GFP_KERNEL);
if (!slim) {
dev_err(&ctrl->dev, "of_slim: out of memory");
ret = -ENOMEM;
kfree(name);
goto of_slim_err;
}
memcpy(slim->e_addr, prop->value, 6);
temp = krealloc(binfo, (n + 1) * sizeof(struct slim_boardinfo),
GFP_KERNEL);
if (!temp) {
dev_err(&ctrl->dev, "out of memory");
kfree(name);
kfree(slim);
ret = -ENOMEM;
goto of_slim_err;
}
binfo = temp;
slim->dev.of_node = of_node_get(node);
slim->name = (const char *)name;
binfo[n].bus_num = ctrl->nr;
binfo[n].slim_slave = slim;
n++;
}
ret = slim_register_board_info(binfo, n);
if (!ret)
goto of_slim_ret;
of_slim_err:
while (n-- > 0) {
kfree(binfo[n].slim_slave->name);
kfree(binfo[n].slim_slave);
}
of_slim_ret:
kfree(binfo);
return ret;
}
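/*
 * Ownership note (inferred from the paths above, not stated in the
 * original): binfo is only a transfer array and is freed on both
 * success and failure, while each slim_device and its name are handed
 * over to the slimbus core on success and rolled back only on error.
 */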
| gpl-2.0 |
RenderBroken/msm8974_caf_G2_render_kernel | arch/arm/mach-msm/sirc.c | 3308 | 5311 | /* linux/arch/arm/mach-msm/irq.c
*
* Copyright (c) 2009-2011 The Linux Foundation. All rights reserved.
* Copyright (C) 2009 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <mach/fiq.h>
#include <mach/msm_iomap.h>
#include "sirc.h"
static unsigned int int_enable;
static unsigned int wake_enable;
static struct sirc_regs_t sirc_regs = {
.int_enable = SPSS_SIRC_INT_ENABLE,
.int_enable_clear = SPSS_SIRC_INT_ENABLE_CLEAR,
.int_enable_set = SPSS_SIRC_INT_ENABLE_SET,
.int_type = SPSS_SIRC_INT_TYPE,
.int_polarity = SPSS_SIRC_INT_POLARITY,
.int_clear = SPSS_SIRC_INT_CLEAR,
};
static struct sirc_cascade_regs sirc_reg_table[] = {
{
.int_status = SPSS_SIRC_IRQ_STATUS,
.cascade_irq = INT_SIRC_0,
.cascade_fiq = INT_SIRC_1,
}
};
static unsigned int save_type;
static unsigned int save_polarity;
/* Mask off the given interrupt. Keep the int_enable mask in sync with
the enable reg, so it can be restored after power collapse. */
static void sirc_irq_mask(struct irq_data *d)
{
unsigned int mask;
mask = 1 << (d->irq - FIRST_SIRC_IRQ);
writel(mask, sirc_regs.int_enable_clear);
int_enable &= ~mask;
mb();
return;
}
/* Unmask the given interrupt. Keep the int_enable mask in sync with
the enable reg, so it can be restored after power collapse. */
static void sirc_irq_unmask(struct irq_data *d)
{
unsigned int mask;
mask = 1 << (d->irq - FIRST_SIRC_IRQ);
writel(mask, sirc_regs.int_enable_set);
mb();
int_enable |= mask;
return;
}
static void sirc_irq_ack(struct irq_data *d)
{
unsigned int mask;
mask = 1 << (d->irq - FIRST_SIRC_IRQ);
writel(mask, sirc_regs.int_clear);
mb();
return;
}
static int sirc_irq_set_wake(struct irq_data *d, unsigned int on)
{
unsigned int mask;
/* Used to set the interrupt enable mask during power collapse. */
mask = 1 << (d->irq - FIRST_SIRC_IRQ);
if (on)
wake_enable |= mask;
else
wake_enable &= ~mask;
return 0;
}
static int sirc_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int mask;
unsigned int val;
mask = 1 << (d->irq - FIRST_SIRC_IRQ);
val = readl(sirc_regs.int_polarity);
if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
val |= mask;
else
val &= ~mask;
writel(val, sirc_regs.int_polarity);
val = readl(sirc_regs.int_type);
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
val |= mask;
} else {
val &= ~mask;
}
writel(val, sirc_regs.int_type);
mb();
return 0;
}
#if defined(CONFIG_MSM_FIQ_SUPPORT)
void sirc_fiq_select(int irq, bool enable)
{
uint32_t mask = 1 << (irq - FIRST_SIRC_IRQ);
uint32_t val;
unsigned long flags;
local_irq_save(flags);
val = readl(SPSS_SIRC_INT_SELECT);
if (enable)
val |= mask;
else
val &= ~mask;
writel(val, SPSS_SIRC_INT_SELECT);
mb();
local_irq_restore(flags);
}
#endif
/* Finds the pending interrupt on the passed cascade irq and redrives it */
static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned int reg = 0;
unsigned int sirq;
unsigned int status;
while ((reg < ARRAY_SIZE(sirc_reg_table)) &&
(sirc_reg_table[reg].cascade_irq != irq))
reg++;
if (reg == ARRAY_SIZE(sirc_reg_table)) {
printk(KERN_ERR "%s: incorrect irq %d called\n",
__func__, irq);
return;
}
status = readl(sirc_reg_table[reg].int_status);
status &= SIRC_MASK;
if (status == 0)
return;
for (sirq = 0;
(sirq < NR_SIRC_IRQS) && ((status & (1U << sirq)) == 0);
sirq++)
;
generic_handle_irq(sirq+FIRST_SIRC_IRQ);
irq_desc_get_chip(desc)->irq_ack(irq_get_irq_data(irq));
}
void msm_sirc_enter_sleep(void)
{
save_type = readl(sirc_regs.int_type);
save_polarity = readl(sirc_regs.int_polarity);
writel(wake_enable, sirc_regs.int_enable);
mb();
return;
}
void msm_sirc_exit_sleep(void)
{
writel(save_type, sirc_regs.int_type);
writel(save_polarity, sirc_regs.int_polarity);
writel(int_enable, sirc_regs.int_enable);
mb();
return;
}
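/*
 * Hedged usage sketch (illustration only, not part of this file): a
 * platform power-collapse path is assumed to bracket the low-power
 * state with the two helpers above, so that only wake-enabled sources
 * stay armed while asleep and the saved int_enable shadow is restored
 * on wake.
 */
#if 0
static void example_power_collapse(void)
{
	msm_sirc_enter_sleep();		/* arm wake_enable, save type/polarity */
	/* ...enter the low-power state... */
	msm_sirc_exit_sleep();		/* restore type, polarity, int_enable */
}
#endif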
static struct irq_chip sirc_irq_chip = {
.name = "sirc",
.irq_ack = sirc_irq_ack,
.irq_mask = sirc_irq_mask,
.irq_unmask = sirc_irq_unmask,
.irq_set_wake = sirc_irq_set_wake,
.irq_set_type = sirc_irq_set_type,
};
void __init msm_init_sirc(void)
{
int i;
int_enable = 0;
wake_enable = 0;
for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) {
irq_set_chip_and_handler(i, &sirc_irq_chip, handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
}
for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) {
irq_set_chained_handler(sirc_reg_table[i].cascade_irq,
sirc_irq_handler);
irq_set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
#if defined(CONFIG_MSM_FIQ_SUPPORT)
msm_fiq_select(sirc_reg_table[i].cascade_fiq);
msm_fiq_enable(sirc_reg_table[i].cascade_fiq);
#endif
}
return;
}
| gpl-2.0 |
Albinoman887/Linux-3.4.x | arch/powerpc/sysdev/fsl_soc.c | 4588 | 6018 | /*
* FSL SoC setup code
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <sysdev/fsl_soc.h>
#include <mm/mmu_decl.h>
#include <asm/cpm2.h>
#include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
extern void init_fcc_ioports(struct fs_platform_info*);
extern void init_fec_ioports(struct fs_platform_info*);
extern void init_smc_ioports(struct fs_uart_platform_info*);
static phys_addr_t immrbase = -1;
phys_addr_t get_immrbase(void)
{
struct device_node *soc;
if (immrbase != -1)
return immrbase;
soc = of_find_node_by_type(NULL, "soc");
if (soc) {
int size;
u32 naddr;
const u32 *prop = of_get_property(soc, "#address-cells", &size);
if (prop && size == 4)
naddr = *prop;
else
naddr = 2;
prop = of_get_property(soc, "ranges", &size);
if (prop)
immrbase = of_translate_address(soc, prop + naddr);
of_node_put(soc);
}
return immrbase;
}
EXPORT_SYMBOL(get_immrbase);
static u32 sysfreq = -1;
u32 fsl_get_sys_freq(void)
{
struct device_node *soc;
const u32 *prop;
int size;
if (sysfreq != -1)
return sysfreq;
soc = of_find_node_by_type(NULL, "soc");
if (!soc)
return -1;
prop = of_get_property(soc, "clock-frequency", &size);
if (!prop || size != sizeof(*prop) || *prop == 0)
prop = of_get_property(soc, "bus-frequency", &size);
if (prop && size == sizeof(*prop))
sysfreq = *prop;
of_node_put(soc);
return sysfreq;
}
EXPORT_SYMBOL(fsl_get_sys_freq);
#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
static u32 brgfreq = -1;
u32 get_brgfreq(void)
{
struct device_node *node;
const unsigned int *prop;
int size;
if (brgfreq != -1)
return brgfreq;
node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
if (node) {
prop = of_get_property(node, "clock-frequency", &size);
if (prop && size == 4)
brgfreq = *prop;
of_node_put(node);
return brgfreq;
}
/* Legacy device binding -- will go away when no users are left. */
node = of_find_node_by_type(NULL, "cpm");
if (!node)
node = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!node)
node = of_find_node_by_type(NULL, "qe");
if (node) {
prop = of_get_property(node, "brg-frequency", &size);
if (prop && size == 4)
brgfreq = *prop;
if (brgfreq == -1 || brgfreq == 0) {
prop = of_get_property(node, "bus-frequency", &size);
if (prop && size == 4)
brgfreq = *prop / 2;
}
of_node_put(node);
}
return brgfreq;
}
EXPORT_SYMBOL(get_brgfreq);
static u32 fs_baudrate = -1;
u32 get_baudrate(void)
{
struct device_node *node;
if (fs_baudrate != -1)
return fs_baudrate;
node = of_find_node_by_type(NULL, "serial");
if (node) {
int size;
const unsigned int *prop = of_get_property(node,
"current-speed", &size);
if (prop)
fs_baudrate = *prop;
of_node_put(node);
}
return fs_baudrate;
}
EXPORT_SYMBOL(get_baudrate);
#endif /* CONFIG_CPM2 */
#ifdef CONFIG_FIXED_PHY
static int __init of_add_fixed_phys(void)
{
int ret;
struct device_node *np;
u32 *fixed_link;
struct fixed_phy_status status = {};
for_each_node_by_name(np, "ethernet") {
fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
if (!fixed_link)
continue;
status.link = 1;
status.duplex = fixed_link[1];
status.speed = fixed_link[2];
status.pause = fixed_link[3];
status.asym_pause = fixed_link[4];
ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
if (ret) {
of_node_put(np);
return ret;
}
}
return 0;
}
arch_initcall(of_add_fixed_phys);
#endif /* CONFIG_FIXED_PHY */
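/*
 * For reference, a hypothetical example of the legacy five-cell
 * "fixed-link" property parsed by of_add_fixed_phys() above, laid out
 * as <id duplex speed pause asym-pause>:
 *
 *	ethernet@24000 {
 *		fixed-link = <1 1 1000 0 0>;
 *	};
 *
 * i.e. PHY id 1, full duplex, 1000 Mbit, no pause frames.
 */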
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static __be32 __iomem *rstcr;
static int __init setup_rstcr(void)
{
struct device_node *np;
for_each_node_by_name(np, "global-utilities") {
if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
rstcr = of_iomap(np, 0);
if (!rstcr)
	printk(KERN_ERR "Error: reset control "
	       "register not mapped!\n");
else
	rstcr += 0xb0;
break;
}
}
if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
if (np)
of_node_put(np);
return 0;
}
arch_initcall(setup_rstcr);
void fsl_rstcr_restart(char *cmd)
{
local_irq_disable();
if (rstcr)
/* set reset control register */
out_be32(rstcr, 0x2); /* HRESET_REQ */
while (1) ;
}
#endif
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
struct platform_diu_data_ops diu_ops;
EXPORT_SYMBOL(diu_ops);
#endif
/*
* Restart the current partition
*
* This function should be assigned to the ppc_md.restart function pointer,
* to initiate a partition restart when we're running under the Freescale
* hypervisor.
*/
void fsl_hv_restart(char *cmd)
{
pr_info("hv restart\n");
fh_partition_restart(-1);
}
/*
* Halt the current partition
*
* This function should be assigned to the ppc_md.power_off and ppc_md.halt
* function pointers, to shut down the partition when we're running under
* the Freescale hypervisor.
*/
void fsl_hv_halt(void)
{
pr_info("hv exit\n");
fh_partition_stop(-1);
}
| gpl-2.0 |
go2ev-devteam/Gplus_2159_0801 | openplatform/sdk/os/kernel-2.6.32/sound/pci/ctxfi/ctsrc.c | 4588 | 19820 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File ctsrc.c
*
* @Brief
* This file contains the implementation of the Sample Rate Convertor
* resource management object.
*
* @Author Liu Chun
* @Date May 13 2008
*
*/
#include "ctsrc.h"
#include "cthardware.h"
#include <linux/slab.h>
#define SRC_RESOURCE_NUM 64
#define SRCIMP_RESOURCE_NUM 256
static unsigned int conj_mask;
static int src_default_config_memrd(struct src *src);
static int src_default_config_memwr(struct src *src);
static int src_default_config_arcrw(struct src *src);
static int (*src_default_config[3])(struct src *) = {
[MEMRD] = src_default_config_memrd,
[MEMWR] = src_default_config_memwr,
[ARCRW] = src_default_config_arcrw
};
static int src_set_state(struct src *src, unsigned int state)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_state(src->rsc.ctrl_blk, state);
return 0;
}
static int src_set_bm(struct src *src, unsigned int bm)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_bm(src->rsc.ctrl_blk, bm);
return 0;
}
static int src_set_sf(struct src *src, unsigned int sf)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_sf(src->rsc.ctrl_blk, sf);
return 0;
}
static int src_set_pm(struct src *src, unsigned int pm)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_pm(src->rsc.ctrl_blk, pm);
return 0;
}
static int src_set_rom(struct src *src, unsigned int rom)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_rom(src->rsc.ctrl_blk, rom);
return 0;
}
static int src_set_vo(struct src *src, unsigned int vo)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_vo(src->rsc.ctrl_blk, vo);
return 0;
}
static int src_set_st(struct src *src, unsigned int st)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_st(src->rsc.ctrl_blk, st);
return 0;
}
static int src_set_bp(struct src *src, unsigned int bp)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_bp(src->rsc.ctrl_blk, bp);
return 0;
}
static int src_set_cisz(struct src *src, unsigned int cisz)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_cisz(src->rsc.ctrl_blk, cisz);
return 0;
}
static int src_set_ca(struct src *src, unsigned int ca)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_ca(src->rsc.ctrl_blk, ca);
return 0;
}
static int src_set_sa(struct src *src, unsigned int sa)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_sa(src->rsc.ctrl_blk, sa);
return 0;
}
static int src_set_la(struct src *src, unsigned int la)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_la(src->rsc.ctrl_blk, la);
return 0;
}
static int src_set_pitch(struct src *src, unsigned int pitch)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_pitch(src->rsc.ctrl_blk, pitch);
return 0;
}
static int src_set_clear_zbufs(struct src *src)
{
struct hw *hw;
hw = src->rsc.hw;
hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);
return 0;
}
static int src_commit_write(struct src *src)
{
struct hw *hw;
int i;
unsigned int dirty = 0;
hw = src->rsc.hw;
src->rsc.ops->master(&src->rsc);
if (src->rsc.msr > 1) {
/* Save dirty flags for conjugate resource programming */
dirty = hw->src_get_dirty(src->rsc.ctrl_blk) & conj_mask;
}
hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
/* Program conjugate parameter mixer resources */
if (MEMWR == src->mode)
return 0;
for (i = 1; i < src->rsc.msr; i++) {
src->rsc.ops->next_conj(&src->rsc);
hw->src_set_dirty(src->rsc.ctrl_blk, dirty);
hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
}
src->rsc.ops->master(&src->rsc);
return 0;
}
static int src_get_ca(struct src *src)
{
struct hw *hw;
hw = src->rsc.hw;
return hw->src_get_ca(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
}
static int src_init(struct src *src)
{
src_default_config[src->mode](src);
return 0;
}
static struct src *src_next_interleave(struct src *src)
{
return src->intlv;
}
static int src_default_config_memrd(struct src *src)
{
struct hw *hw = src->rsc.hw;
unsigned int rsr, msr;
hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
hw->src_set_bm(src->rsc.ctrl_blk, 1);
for (rsr = 0, msr = src->rsc.msr; msr > 1; msr >>= 1)
rsr++;
hw->src_set_rsr(src->rsc.ctrl_blk, rsr);
hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_S16);
hw->src_set_wr(src->rsc.ctrl_blk, 0);
hw->src_set_pm(src->rsc.ctrl_blk, 0);
hw->src_set_rom(src->rsc.ctrl_blk, 0);
hw->src_set_vo(src->rsc.ctrl_blk, 0);
hw->src_set_st(src->rsc.ctrl_blk, 0);
hw->src_set_ilsz(src->rsc.ctrl_blk, src->multi - 1);
hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);
src->rsc.ops->master(&src->rsc);
hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
for (msr = 1; msr < src->rsc.msr; msr++) {
src->rsc.ops->next_conj(&src->rsc);
hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
}
src->rsc.ops->master(&src->rsc);
return 0;
}
static int src_default_config_memwr(struct src *src)
{
struct hw *hw = src->rsc.hw;
hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
hw->src_set_bm(src->rsc.ctrl_blk, 1);
hw->src_set_rsr(src->rsc.ctrl_blk, 0);
hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_S16);
hw->src_set_wr(src->rsc.ctrl_blk, 1);
hw->src_set_pm(src->rsc.ctrl_blk, 0);
hw->src_set_rom(src->rsc.ctrl_blk, 0);
hw->src_set_vo(src->rsc.ctrl_blk, 0);
hw->src_set_st(src->rsc.ctrl_blk, 0);
hw->src_set_ilsz(src->rsc.ctrl_blk, 0);
hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);
src->rsc.ops->master(&src->rsc);
hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
return 0;
}
static int src_default_config_arcrw(struct src *src)
{
struct hw *hw = src->rsc.hw;
unsigned int rsr, msr;
unsigned int dirty;
hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
hw->src_set_bm(src->rsc.ctrl_blk, 0);
for (rsr = 0, msr = src->rsc.msr; msr > 1; msr >>= 1)
rsr++;
hw->src_set_rsr(src->rsc.ctrl_blk, rsr);
hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_F32);
hw->src_set_wr(src->rsc.ctrl_blk, 0);
hw->src_set_pm(src->rsc.ctrl_blk, 0);
hw->src_set_rom(src->rsc.ctrl_blk, 0);
hw->src_set_vo(src->rsc.ctrl_blk, 0);
hw->src_set_st(src->rsc.ctrl_blk, 0);
hw->src_set_ilsz(src->rsc.ctrl_blk, 0);
hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
/*hw->src_set_sa(src->rsc.ctrl_blk, 0x100);*/
hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
/*hw->src_set_la(src->rsc.ctrl_blk, 0x03ffffe0);*/
hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);
dirty = hw->src_get_dirty(src->rsc.ctrl_blk);
src->rsc.ops->master(&src->rsc);
for (msr = 0; msr < src->rsc.msr; msr++) {
hw->src_set_dirty(src->rsc.ctrl_blk, dirty);
hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
src->rsc.ctrl_blk);
src->rsc.ops->next_conj(&src->rsc);
}
src->rsc.ops->master(&src->rsc);
return 0;
}
static struct src_rsc_ops src_rsc_ops = {
.set_state = src_set_state,
.set_bm = src_set_bm,
.set_sf = src_set_sf,
.set_pm = src_set_pm,
.set_rom = src_set_rom,
.set_vo = src_set_vo,
.set_st = src_set_st,
.set_bp = src_set_bp,
.set_cisz = src_set_cisz,
.set_ca = src_set_ca,
.set_sa = src_set_sa,
.set_la = src_set_la,
.set_pitch = src_set_pitch,
.set_clr_zbufs = src_set_clear_zbufs,
.commit_write = src_commit_write,
.get_ca = src_get_ca,
.init = src_init,
.next_interleave = src_next_interleave,
};
static int
src_rsc_init(struct src *src, u32 idx,
const struct src_desc *desc, struct src_mgr *mgr)
{
int err;
int i, n;
struct src *p;
n = (MEMRD == desc->mode) ? desc->multi : 1;
for (i = 0, p = src; i < n; i++, p++) {
err = rsc_init(&p->rsc, idx + i, SRC, desc->msr, mgr->mgr.hw);
if (err)
goto error1;
/* Initialize src specific rsc operations */
p->ops = &src_rsc_ops;
p->multi = (0 == i) ? desc->multi : 1;
p->mode = desc->mode;
src_default_config[desc->mode](p);
mgr->src_enable(mgr, p);
p->intlv = p + 1;
}
(--p)->intlv = NULL; /* Set @intlv of the last SRC to NULL */
mgr->commit_write(mgr);
return 0;
error1:
for (i--, p--; i >= 0; i--, p--) {
mgr->src_disable(mgr, p);
rsc_uninit(&p->rsc);
}
mgr->commit_write(mgr);
return err;
}
static int src_rsc_uninit(struct src *src, struct src_mgr *mgr)
{
int i, n;
struct src *p;
n = (MEMRD == src->mode) ? src->multi : 1;
for (i = 0, p = src; i < n; i++, p++) {
mgr->src_disable(mgr, p);
rsc_uninit(&p->rsc);
p->multi = 0;
p->ops = NULL;
p->mode = NUM_SRCMODES;
p->intlv = NULL;
}
mgr->commit_write(mgr);
return 0;
}
static int
get_src_rsc(struct src_mgr *mgr, const struct src_desc *desc, struct src **rsrc)
{
unsigned int idx = SRC_RESOURCE_NUM;
int err;
struct src *src;
unsigned long flags;
*rsrc = NULL;
/* Check whether there are sufficient src resources to meet request. */
spin_lock_irqsave(&mgr->mgr_lock, flags);
if (MEMRD == desc->mode)
err = mgr_get_resource(&mgr->mgr, desc->multi, &idx);
else
err = mgr_get_resource(&mgr->mgr, 1, &idx);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
printk(KERN_ERR "ctxfi: Can't meet SRC resource request!\n");
return err;
}
/* Allocate mem for master src resource */
if (MEMRD == desc->mode)
src = kzalloc(sizeof(*src)*desc->multi, GFP_KERNEL);
else
src = kzalloc(sizeof(*src), GFP_KERNEL);
if (!src) {
err = -ENOMEM;
goto error1;
}
err = src_rsc_init(src, idx, desc, mgr);
if (err)
goto error2;
*rsrc = src;
return 0;
error2:
kfree(src);
error1:
spin_lock_irqsave(&mgr->mgr_lock, flags);
if (MEMRD == desc->mode)
mgr_put_resource(&mgr->mgr, desc->multi, idx);
else
mgr_put_resource(&mgr->mgr, 1, idx);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
return err;
}
static int put_src_rsc(struct src_mgr *mgr, struct src *src)
{
unsigned long flags;
spin_lock_irqsave(&mgr->mgr_lock, flags);
src->rsc.ops->master(&src->rsc);
if (MEMRD == src->mode)
mgr_put_resource(&mgr->mgr, src->multi,
src->rsc.ops->index(&src->rsc));
else
mgr_put_resource(&mgr->mgr, 1, src->rsc.ops->index(&src->rsc));
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
src_rsc_uninit(src, mgr);
kfree(src);
return 0;
}
static int src_enable_s(struct src_mgr *mgr, struct src *src)
{
struct hw *hw = mgr->mgr.hw;
int i;
src->rsc.ops->master(&src->rsc);
for (i = 0; i < src->rsc.msr; i++) {
hw->src_mgr_enbs_src(mgr->mgr.ctrl_blk,
src->rsc.ops->index(&src->rsc));
src->rsc.ops->next_conj(&src->rsc);
}
src->rsc.ops->master(&src->rsc);
return 0;
}
static int src_enable(struct src_mgr *mgr, struct src *src)
{
struct hw *hw = mgr->mgr.hw;
int i;
src->rsc.ops->master(&src->rsc);
for (i = 0; i < src->rsc.msr; i++) {
hw->src_mgr_enb_src(mgr->mgr.ctrl_blk,
src->rsc.ops->index(&src->rsc));
src->rsc.ops->next_conj(&src->rsc);
}
src->rsc.ops->master(&src->rsc);
return 0;
}
static int src_disable(struct src_mgr *mgr, struct src *src)
{
struct hw *hw = mgr->mgr.hw;
int i;
src->rsc.ops->master(&src->rsc);
for (i = 0; i < src->rsc.msr; i++) {
hw->src_mgr_dsb_src(mgr->mgr.ctrl_blk,
src->rsc.ops->index(&src->rsc));
src->rsc.ops->next_conj(&src->rsc);
}
src->rsc.ops->master(&src->rsc);
return 0;
}
static int src_mgr_commit_write(struct src_mgr *mgr)
{
struct hw *hw = mgr->mgr.hw;
hw->src_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
return 0;
}
int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
{
int err, i;
struct src_mgr *src_mgr;
*rsrc_mgr = NULL;
src_mgr = kzalloc(sizeof(*src_mgr), GFP_KERNEL);
if (!src_mgr)
return -ENOMEM;
err = rsc_mgr_init(&src_mgr->mgr, SRC, SRC_RESOURCE_NUM, hw);
if (err)
goto error1;
spin_lock_init(&src_mgr->mgr_lock);
conj_mask = ((struct hw *)hw)->src_dirty_conj_mask();
src_mgr->get_src = get_src_rsc;
src_mgr->put_src = put_src_rsc;
src_mgr->src_enable_s = src_enable_s;
src_mgr->src_enable = src_enable;
src_mgr->src_disable = src_disable;
src_mgr->commit_write = src_mgr_commit_write;
/* Disable all SRC resources. */
for (i = 0; i < 256; i++)
((struct hw *)hw)->src_mgr_dsb_src(src_mgr->mgr.ctrl_blk, i);
((struct hw *)hw)->src_mgr_commit_write(hw, src_mgr->mgr.ctrl_blk);
*rsrc_mgr = src_mgr;
return 0;
error1:
kfree(src_mgr);
return err;
}
int src_mgr_destroy(struct src_mgr *src_mgr)
{
rsc_mgr_uninit(&src_mgr->mgr);
kfree(src_mgr);
return 0;
}
/* SRCIMP resource manager operations */
static int srcimp_master(struct rsc *rsc)
{
rsc->conj = 0;
return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
}
static int srcimp_next_conj(struct rsc *rsc)
{
rsc->conj++;
return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}
static int srcimp_index(const struct rsc *rsc)
{
return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}
static struct rsc_ops srcimp_basic_rsc_ops = {
.master = srcimp_master,
.next_conj = srcimp_next_conj,
.index = srcimp_index,
.output_slot = NULL,
};
static int srcimp_map(struct srcimp *srcimp, struct src *src, struct rsc *input)
{
struct imapper *entry;
int i;
srcimp->rsc.ops->master(&srcimp->rsc);
src->rsc.ops->master(&src->rsc);
input->ops->master(input);
/* Program master and conjugate resources */
for (i = 0; i < srcimp->rsc.msr; i++) {
entry = &srcimp->imappers[i];
entry->slot = input->ops->output_slot(input);
entry->user = src->rsc.ops->index(&src->rsc);
entry->addr = srcimp->rsc.ops->index(&srcimp->rsc);
srcimp->mgr->imap_add(srcimp->mgr, entry);
srcimp->mapped |= (0x1 << i);
srcimp->rsc.ops->next_conj(&srcimp->rsc);
input->ops->next_conj(input);
}
srcimp->rsc.ops->master(&srcimp->rsc);
input->ops->master(input);
return 0;
}
static int srcimp_unmap(struct srcimp *srcimp)
{
int i;
/* Program master and conjugate resources */
for (i = 0; i < srcimp->rsc.msr; i++) {
if (srcimp->mapped & (0x1 << i)) {
srcimp->mgr->imap_delete(srcimp->mgr,
&srcimp->imappers[i]);
srcimp->mapped &= ~(0x1 << i);
}
}
return 0;
}
static struct srcimp_rsc_ops srcimp_ops = {
.map = srcimp_map,
.unmap = srcimp_unmap
};
static int srcimp_rsc_init(struct srcimp *srcimp,
const struct srcimp_desc *desc,
struct srcimp_mgr *mgr)
{
int err;
err = rsc_init(&srcimp->rsc, srcimp->idx[0],
SRCIMP, desc->msr, mgr->mgr.hw);
if (err)
return err;
/* Reserve memory for imapper nodes */
srcimp->imappers = kzalloc(sizeof(struct imapper)*desc->msr,
GFP_KERNEL);
if (!srcimp->imappers) {
err = -ENOMEM;
goto error1;
}
/* Set srcimp specific operations */
srcimp->rsc.ops = &srcimp_basic_rsc_ops;
srcimp->ops = &srcimp_ops;
srcimp->mgr = mgr;
srcimp->rsc.ops->master(&srcimp->rsc);
return 0;
error1:
rsc_uninit(&srcimp->rsc);
return err;
}
static int srcimp_rsc_uninit(struct srcimp *srcimp)
{
if (NULL != srcimp->imappers) {
kfree(srcimp->imappers);
srcimp->imappers = NULL;
}
srcimp->ops = NULL;
srcimp->mgr = NULL;
rsc_uninit(&srcimp->rsc);
return 0;
}
static int get_srcimp_rsc(struct srcimp_mgr *mgr,
const struct srcimp_desc *desc,
struct srcimp **rsrcimp)
{
int err, i;
unsigned int idx;
struct srcimp *srcimp;
unsigned long flags;
*rsrcimp = NULL;
/* Allocate mem for SRCIMP resource */
srcimp = kzalloc(sizeof(*srcimp), GFP_KERNEL);
if (!srcimp)
return -ENOMEM;
/* Check whether there are sufficient SRCIMP resources. */
err = 0;
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i = 0; i < desc->msr; i++) {
err = mgr_get_resource(&mgr->mgr, 1, &idx);
if (err)
break;
srcimp->idx[i] = idx;
}
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
printk(KERN_ERR "ctxfi: Can't meet SRCIMP resource request!\n");
goto error1;
}
err = srcimp_rsc_init(srcimp, desc, mgr);
if (err)
goto error1;
*rsrcimp = srcimp;
return 0;
error1:
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i--; i >= 0; i--)
mgr_put_resource(&mgr->mgr, 1, srcimp->idx[i]);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
kfree(srcimp);
return err;
}
static int put_srcimp_rsc(struct srcimp_mgr *mgr, struct srcimp *srcimp)
{
unsigned long flags;
int i;
spin_lock_irqsave(&mgr->mgr_lock, flags);
for (i = 0; i < srcimp->rsc.msr; i++)
mgr_put_resource(&mgr->mgr, 1, srcimp->idx[i]);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
srcimp_rsc_uninit(srcimp);
kfree(srcimp);
return 0;
}
static int srcimp_map_op(void *data, struct imapper *entry)
{
struct rsc_mgr *mgr = &((struct srcimp_mgr *)data)->mgr;
struct hw *hw = mgr->hw;
hw->srcimp_mgr_set_imaparc(mgr->ctrl_blk, entry->slot);
hw->srcimp_mgr_set_imapuser(mgr->ctrl_blk, entry->user);
hw->srcimp_mgr_set_imapnxt(mgr->ctrl_blk, entry->next);
hw->srcimp_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr);
hw->srcimp_mgr_commit_write(mgr->hw, mgr->ctrl_blk);
return 0;
}
static int srcimp_imap_add(struct srcimp_mgr *mgr, struct imapper *entry)
{
unsigned long flags;
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
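	/*
	 * The place-holder entry installed by srcimp_mgr_create() also maps
	 * address 0; drop it before a real entry is added there (intent
	 * inferred from the surrounding code, not stated in the original).
	 */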
if ((0 == entry->addr) && (mgr->init_imap_added)) {
input_mapper_delete(&mgr->imappers,
mgr->init_imap, srcimp_map_op, mgr);
mgr->init_imap_added = 0;
}
err = input_mapper_add(&mgr->imappers, entry, srcimp_map_op, mgr);
spin_unlock_irqrestore(&mgr->imap_lock, flags);
return err;
}
static int srcimp_imap_delete(struct srcimp_mgr *mgr, struct imapper *entry)
{
unsigned long flags;
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
err = input_mapper_delete(&mgr->imappers, entry, srcimp_map_op, mgr);
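	/*
	 * Keep at least one mapper installed: when the last real entry goes
	 * away, restore the place-holder so the imap chain is never left
	 * empty (inferred intent).
	 */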
if (list_empty(&mgr->imappers)) {
input_mapper_add(&mgr->imappers, mgr->init_imap,
srcimp_map_op, mgr);
mgr->init_imap_added = 1;
}
spin_unlock_irqrestore(&mgr->imap_lock, flags);
return err;
}
int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
{
int err;
struct srcimp_mgr *srcimp_mgr;
struct imapper *entry;
*rsrcimp_mgr = NULL;
srcimp_mgr = kzalloc(sizeof(*srcimp_mgr), GFP_KERNEL);
if (!srcimp_mgr)
return -ENOMEM;
err = rsc_mgr_init(&srcimp_mgr->mgr, SRCIMP, SRCIMP_RESOURCE_NUM, hw);
if (err)
goto error1;
spin_lock_init(&srcimp_mgr->mgr_lock);
spin_lock_init(&srcimp_mgr->imap_lock);
INIT_LIST_HEAD(&srcimp_mgr->imappers);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto error2;
}
entry->slot = entry->addr = entry->next = entry->user = 0;
list_add(&entry->list, &srcimp_mgr->imappers);
srcimp_mgr->init_imap = entry;
srcimp_mgr->init_imap_added = 1;
srcimp_mgr->get_srcimp = get_srcimp_rsc;
srcimp_mgr->put_srcimp = put_srcimp_rsc;
srcimp_mgr->imap_add = srcimp_imap_add;
srcimp_mgr->imap_delete = srcimp_imap_delete;
*rsrcimp_mgr = srcimp_mgr;
return 0;
error2:
rsc_mgr_uninit(&srcimp_mgr->mgr);
error1:
kfree(srcimp_mgr);
return err;
}
int srcimp_mgr_destroy(struct srcimp_mgr *srcimp_mgr)
{
unsigned long flags;
/* free src input mapper list */
spin_lock_irqsave(&srcimp_mgr->imap_lock, flags);
free_input_mapper_list(&srcimp_mgr->imappers);
spin_unlock_irqrestore(&srcimp_mgr->imap_lock, flags);
rsc_mgr_uninit(&srcimp_mgr->mgr);
kfree(srcimp_mgr);
return 0;
}
| gpl-2.0 |
GustavoRD78/78Kernel-ZL-569 | drivers/net/wireless/ath/ath9k/ar9002_hw.c | 4844 | 15661 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
#include "hw.h"
#include "ar5008_initvals.h"
#include "ar9001_initvals.h"
#include "ar9002_initvals.h"
#include "ar9002_phy.h"
int modparam_force_new_ani;
module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
ARRAY_SIZE(ar9271Modes_9271), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
ARRAY_SIZE(ar9271Common_9271), 2);
INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 5);
return;
}
if (ah->config.pcie_clock_req)
INIT_INI_ARRAY(&ah->iniPcieSerdes,
ar9280PciePhy_clkreq_off_L1_9280,
ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
else
INIT_INI_ARRAY(&ah->iniPcieSerdes,
ar9280PciePhy_clkreq_always_on_L1_9280,
ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
ARRAY_SIZE(ar9287Modes_9287_1_1), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
ARRAY_SIZE(ar9287Common_9287_1_1), 2);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
ARRAY_SIZE(ar9285Modes_9285_1_2), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
ARRAY_SIZE(ar9285Common_9285_1_2), 2);
} else if (AR_SREV_9280_20_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
ARRAY_SIZE(ar9280Modes_9280_2), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
ARRAY_SIZE(ar9280Common_9280_2), 2);
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9280Modes_fast_clock_9280_2,
ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
ARRAY_SIZE(ar5416Modes_9160), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
ARRAY_SIZE(ar5416Common_9160), 2);
if (AR_SREV_9160_11(ah)) {
INIT_INI_ARRAY(&ah->iniAddac,
ar5416Addac_9160_1_1,
ARRAY_SIZE(ar5416Addac_9160_1_1), 2);
} else {
INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
ARRAY_SIZE(ar5416Addac_9160), 2);
}
} else if (AR_SREV_9100_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
ARRAY_SIZE(ar5416Modes_9100), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
ARRAY_SIZE(ar5416Common_9100), 2);
INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
ARRAY_SIZE(ar5416Bank6_9100), 3);
INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
ARRAY_SIZE(ar5416Addac_9100), 2);
} else {
INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
ARRAY_SIZE(ar5416Modes), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
ARRAY_SIZE(ar5416Common), 2);
INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
ARRAY_SIZE(ar5416Bank6TPC), 3);
INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
ARRAY_SIZE(ar5416Addac), 2);
}
if (!AR_SREV_9280_20_OR_LATER(ah)) {
/* Common for AR5416, AR913x, AR9160 */
INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
ARRAY_SIZE(ar5416BB_RfGain), 3);
INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
ARRAY_SIZE(ar5416Bank0), 2);
INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
ARRAY_SIZE(ar5416Bank1), 2);
INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
ARRAY_SIZE(ar5416Bank2), 2);
INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
ARRAY_SIZE(ar5416Bank3), 3);
INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
ARRAY_SIZE(ar5416Bank7), 2);
/* Common for AR5416, AR9160 */
if (!AR_SREV_9100(ah))
INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
ARRAY_SIZE(ar5416Bank6), 3);
/* Common for AR913x, AR9160 */
if (!AR_SREV_5416(ah))
INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
}
/* iniAddac needs to be modified for these chips */
if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) {
struct ar5416IniArray *addac = &ah->iniAddac;
u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
u32 *data;
data = kmalloc(size, GFP_KERNEL);
if (!data)
return;
memcpy(data, addac->ia_array, size);
addac->ia_array = data;
if (!AR_SREV_5416_22_OR_LATER(ah)) {
/* override CLKDRV value */
INI_RA(addac, 31, 1) = 0;
}
}
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniCckfirNormal,
ar9287Common_normal_cck_fir_coeff_9287_1_1,
ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_9287_1_1),
2);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9287Common_japan_2484_cck_fir_coeff_9287_1_1,
ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_9287_1_1),
2);
}
}
static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
{
u32 rxgain_type;
if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
AR5416_EEP_MINOR_VER_17) {
rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_backoff_13db_rxgain_9280_2,
ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 5);
else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_backoff_23db_rxgain_9280_2,
ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 5);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_original_rxgain_9280_2,
ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9280Modes_original_rxgain_9280_2,
ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
}
}
static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
{
if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
AR5416_EEP_MINOR_VER_19) {
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_high_power_tx_gain_9280_2,
ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 5);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_original_tx_gain_9280_2,
ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_original_tx_gain_9280_2,
ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
}
}
static void ar9271_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
{
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9271Modes_high_power_tx_gain_9271,
ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 5);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9271Modes_normal_power_tx_gain_9271,
ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 5);
}
static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
{
u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9287Modes_rx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 5);
else if (AR_SREV_9280_20(ah))
ar9280_20_hw_init_rxgain_ini(ah);
if (AR_SREV_9271(ah)) {
ar9271_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9287Modes_tx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 5);
} else if (AR_SREV_9280_20(ah)) {
ar9280_20_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
/* txgain table */
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_XE2_0_high_power,
ARRAY_SIZE(
ar9285Modes_XE2_0_high_power), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_high_power_tx_gain_9285_1_2,
ARRAY_SIZE(
ar9285Modes_high_power_tx_gain_9285_1_2), 5);
}
} else {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_XE2_0_normal_power,
ARRAY_SIZE(
ar9285Modes_XE2_0_normal_power), 5);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_original_tx_gain_9285_1_2,
ARRAY_SIZE(
ar9285Modes_original_tx_gain_9285_1_2), 5);
}
}
}
}
/*
* Helper for ASPM support.
*
* Disable PLL when in L0s as well as receiver clock when in L1.
* This power saving option must be enabled through the SerDes.
*
 * Programming the SerDes must go through the same 288 bit serial shift
 * register as the other analog registers. Hence the nine 32-bit writes
 * (9 x 32 = 288 bits).
*/
static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
u8 i;
u32 val;
/* Nothing to do on restore for 11N */
if (!power_off /* !restore */) {
if (AR_SREV_9280_20_OR_LATER(ah)) {
/*
* AR9280 2.0 or later chips use SerDes values from the
* initvals.h initialized depending on chipset during
* __ath9k_hw_init()
*/
for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
INI_RA(&ah->iniPcieSerdes, i, 1));
}
} else {
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
/* RX shut off when elecidle is asserted */
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
/*
* Ignore ah->ah_config.pcie_clock_req setting for
* pre-AR9280 11n
*/
REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
/* Load the new settings */
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
REGWRITE_BUFFER_FLUSH(ah);
}
udelay(1000);
}
if (power_off) {
/* clear bit 19 to disable L1 */
REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
val = REG_READ(ah, AR_WA);
/*
* Set PCIe workaround bits
* In AR9280 and AR9285, bit 14 in WA register (disable L1)
* should only be set when device enters D3 and be
* cleared when device comes back to D0.
*/
if (ah->config.pcie_waen) {
if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
val |= AR_WA_D3_L1_DISABLE;
} else {
if (((AR_SREV_9285(ah) ||
AR_SREV_9271(ah) ||
AR_SREV_9287(ah)) &&
(AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
(AR_SREV_9280(ah) &&
(AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
val |= AR_WA_D3_L1_DISABLE;
}
}
if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
/*
* Disable bit 6 and 7 before entering D3 to
* prevent system hang.
*/
val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
}
if (AR_SREV_9280(ah))
val |= AR_WA_BIT22;
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
REG_WRITE(ah, AR_WA, val);
} else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
} else {
if (AR_SREV_9285(ah) ||
AR_SREV_9271(ah) ||
AR_SREV_9287(ah)) {
val = AR9285_WA_DEFAULT;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
}
else if (AR_SREV_9280(ah)) {
/*
* For AR9280 chips, bit 22 of 0x4004
* needs to be set.
*/
val = AR9280_WA_DEFAULT;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
} else {
val = AR_WA_DEFAULT;
}
}
/* WAR for ASPM system hang */
if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
val |= (AR_WA_BIT6 | AR_WA_BIT7);
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
REG_WRITE(ah, AR_WA, val);
/* set bit 19 to allow forcing of pcie core into L1 state */
REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
}
}
static int ar9002_hw_get_radiorev(struct ath_hw *ah)
{
u32 val;
int i;
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
for (i = 0; i < 8; i++)
REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
REGWRITE_BUFFER_FLUSH(ah);
val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
return ath9k_hw_reverse_bits(val, 8);
}
int ar9002_hw_rf_claim(struct ath_hw *ah)
{
u32 val;
REG_WRITE(ah, AR_PHY(0), 0x00000007);
val = ar9002_hw_get_radiorev(ah);
switch (val & AR_RADIO_SREV_MAJOR) {
case 0:
val = AR_RAD5133_SREV_MAJOR;
break;
case AR_RAD5133_SREV_MAJOR:
case AR_RAD5122_SREV_MAJOR:
case AR_RAD2133_SREV_MAJOR:
case AR_RAD2122_SREV_MAJOR:
break;
default:
ath_err(ath9k_hw_common(ah),
"Radio Chip Rev 0x%02X not supported\n",
val & AR_RADIO_SREV_MAJOR);
return -EOPNOTSUPP;
}
ah->hw_version.analog5GhzRev = val;
return 0;
}
void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
{
if (AR_SREV_9287_13_OR_LATER(ah)) {
REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
}
}
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
void ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
ops->config_pci_powersave = ar9002_hw_configpcipowersave;
ar5008_hw_attach_phy_ops(ah);
if (AR_SREV_9280_20_OR_LATER(ah))
ar9002_hw_attach_phy_ops(ah);
ar9002_hw_attach_calib_ops(ah);
ar9002_hw_attach_mac_ops(ah);
}
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
{
u32 modesIndex;
int i;
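	/*
	 * Map the channel mode onto the value column of the five-column
	 * 9271 ANI table; column 0 of each row holds the register address.
	 */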
switch (chan->chanmode) {
case CHANNEL_A:
case CHANNEL_A_HT20:
modesIndex = 1;
break;
case CHANNEL_A_HT40PLUS:
case CHANNEL_A_HT40MINUS:
modesIndex = 2;
break;
case CHANNEL_G:
case CHANNEL_G_HT20:
case CHANNEL_B:
modesIndex = 4;
break;
case CHANNEL_G_HT40PLUS:
case CHANNEL_G_HT40MINUS:
modesIndex = 3;
break;
default:
return;
}
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < ah->iniModes_9271_ANI_reg.ia_rows; i++) {
u32 reg = INI_RA(&ah->iniModes_9271_ANI_reg, i, 0);
u32 val = INI_RA(&ah->iniModes_9271_ANI_reg, i, modesIndex);
u32 val_orig;
if (reg == AR_PHY_CCK_DETECT) {
val_orig = REG_READ(ah, reg);
val &= AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
val_orig &= ~AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
REG_WRITE(ah, reg, val|val_orig);
} else
REG_WRITE(ah, reg, val);
}
REGWRITE_BUFFER_FLUSH(ah);
}
| gpl-2.0 |
hiikezoe/android_kernel_kyocera_202k | arch/arm/mach-ks8695/board-dsm320.c | 5100 | 2892 | /*
* arch/arm/mach-ks8695/board-dsm320.c
*
* DSM-320 D-Link Wireless Media Player, board support.
*
* Copyright 2008 Simtec Electronics
* Daniel Silverstone <dsilvers@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/devices.h>
#include <mach/gpio-ks8695.h>
#include "generic.h"
#ifdef CONFIG_PCI
static int dsm320_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
switch (slot) {
case 0:
/* PCI-AHB bridge? */
return KS8695_IRQ_EXTERN0;
case 18:
/* Mini PCI slot */
return KS8695_IRQ_EXTERN2;
case 20:
/* RealMAGIC chip */
return KS8695_IRQ_EXTERN0;
}
	BUG();
	return -1;	/* unreachable when CONFIG_BUG is set; quiets the non-void return warning */
}
static struct ks8695_pci_cfg __initdata dsm320_pci = {
.mode = KS8695_MODE_MINIPCI,
.map_irq = dsm320_pci_map_irq,
};
static void __init dsm320_register_pci(void)
{
/* Initialise the GPIO lines for interrupt mode */
/* RealMAGIC */
ks8695_gpio_interrupt(KS8695_GPIO_0, IRQ_TYPE_LEVEL_LOW);
/* MiniPCI Slot */
ks8695_gpio_interrupt(KS8695_GPIO_2, IRQ_TYPE_LEVEL_LOW);
ks8695_init_pci(&dsm320_pci);
}
#else
static inline void __init dsm320_register_pci(void) { }
#endif
static struct physmap_flash_data dsm320_nor_pdata = {
.width = 4,
.nr_parts = 0,
};
static struct resource dsm320_nor_resource[] = {
[0] = {
.start = SZ_32M, /* We expect the bootloader to map
* the flash here.
*/
.end = SZ_32M + SZ_4M - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device dsm320_device_nor = {
.name = "physmap-flash",
.id = -1,
.num_resources = ARRAY_SIZE(dsm320_nor_resource),
.resource = dsm320_nor_resource,
.dev = {
.platform_data = &dsm320_nor_pdata,
},
};
void __init dsm320_register_nor(void)
{
int ret;
ret = platform_device_register(&dsm320_device_nor);
if (ret < 0)
printk(KERN_ERR "failed to register physmap-flash device\n");
}
static void __init dsm320_init(void)
{
/* GPIO registration */
ks8695_register_gpios();
/* PCI registration */
dsm320_register_pci();
/* Network device */
ks8695_add_device_lan(); /* eth0 = LAN */
/* NOR devices */
dsm320_register_nor();
}
MACHINE_START(DSM320, "D-Link DSM-320 Wireless Media Player")
/* Maintainer: Simtec Electronics. */
.atag_offset = 0x100,
.map_io = ks8695_map_io,
.init_irq = ks8695_init_irq,
.init_machine = dsm320_init,
.timer = &ks8695_timer,
.restart = ks8695_restart,
MACHINE_END
| gpl-2.0 |
pedestre/Kernel-Apolo-ICS-4.0.4 | arch/mips/kernel/kgdb.c | 5356 | 11177 | /*
* Originally written by Glenn Engel, Lake Stevens Instrument Division
*
* Contributed by HP Systems
*
* Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
* Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
*
* Copyright (C) 1995 Andreas Busse
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2004-2005 MontaVista Software Inc.
* Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
*
* Copyright (C) 2007-2008 Wind River Systems, Inc.
* Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/ptrace.h> /* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
unsigned char signo; /* Signal that we map this trap into */
} hard_trap_info[] = {
{ 6, SIGBUS }, /* instruction bus error */
{ 7, SIGBUS }, /* data bus error */
{ 9, SIGTRAP }, /* break */
/* { 11, SIGILL }, */ /* CPU unusable */
{ 12, SIGFPE }, /* overflow */
{ 13, SIGTRAP }, /* trap */
{ 14, SIGSEGV }, /* virtual instruction cache coherency */
{ 15, SIGFPE }, /* floating point exception */
{ 23, SIGSEGV }, /* watch */
{ 31, SIGSEGV }, /* virtual data cache coherency */
{ 0, 0} /* Must be last */
};
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
{ "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
{ "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
{ "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
{ "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
{ "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
{ "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
{ "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
{ "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
{ "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
{ "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
{ "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
{ "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
{ "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
{ "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
{ "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
{ "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
{ "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
{ "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
{ "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
{ "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
{ "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
{ "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
{ "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
{ "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
{ "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
{ "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
{ "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
{ "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
{ "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
{ "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
{ "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
{ "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
{ "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
{ "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
{ "f0", GDB_SIZEOF_REG, 0 },
{ "f1", GDB_SIZEOF_REG, 1 },
{ "f2", GDB_SIZEOF_REG, 2 },
{ "f3", GDB_SIZEOF_REG, 3 },
{ "f4", GDB_SIZEOF_REG, 4 },
{ "f5", GDB_SIZEOF_REG, 5 },
{ "f6", GDB_SIZEOF_REG, 6 },
{ "f7", GDB_SIZEOF_REG, 7 },
{ "f8", GDB_SIZEOF_REG, 8 },
{ "f9", GDB_SIZEOF_REG, 9 },
{ "f10", GDB_SIZEOF_REG, 10 },
{ "f11", GDB_SIZEOF_REG, 11 },
{ "f12", GDB_SIZEOF_REG, 12 },
{ "f13", GDB_SIZEOF_REG, 13 },
{ "f14", GDB_SIZEOF_REG, 14 },
{ "f15", GDB_SIZEOF_REG, 15 },
{ "f16", GDB_SIZEOF_REG, 16 },
{ "f17", GDB_SIZEOF_REG, 17 },
{ "f18", GDB_SIZEOF_REG, 18 },
{ "f19", GDB_SIZEOF_REG, 19 },
{ "f20", GDB_SIZEOF_REG, 20 },
{ "f21", GDB_SIZEOF_REG, 21 },
{ "f22", GDB_SIZEOF_REG, 22 },
{ "f23", GDB_SIZEOF_REG, 23 },
{ "f24", GDB_SIZEOF_REG, 24 },
{ "f25", GDB_SIZEOF_REG, 25 },
{ "f26", GDB_SIZEOF_REG, 26 },
{ "f27", GDB_SIZEOF_REG, 27 },
{ "f28", GDB_SIZEOF_REG, 28 },
{ "f29", GDB_SIZEOF_REG, 29 },
{ "f30", GDB_SIZEOF_REG, 30 },
{ "f31", GDB_SIZEOF_REG, 31 },
{ "fsr", GDB_SIZEOF_REG, 0 },
{ "fir", GDB_SIZEOF_REG, 0 },
};
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
int fp_reg;
if (regno < 0 || regno >= DBG_MAX_REG_NUM)
return -EINVAL;
if (dbg_reg_def[regno].offset != -1 && regno < 38) {
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
/* FP registers 38 -> 69 */
if (!(regs->cp0_status & ST0_CU1))
return 0;
if (regno == 70) {
/* Process the fcr31/fsr (register 70) */
memcpy((void *)&current->thread.fpu.fcr31, mem,
dbg_reg_def[regno].size);
goto out_save;
} else if (regno == 71) {
/* Ignore the fir (register 71) */
goto out_save;
}
fp_reg = dbg_reg_def[regno].offset;
memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
dbg_reg_def[regno].size);
out_save:
restore_fp(current);
}
return 0;
}
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
int fp_reg;
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1 && regno < 38) {
/* First 38 registers */
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
/* FP registers 38 -> 69 */
if (!(regs->cp0_status & ST0_CU1))
goto out;
save_fp(current);
if (regno == 70) {
/* Process the fcr31/fsr (register 70) */
memcpy(mem, (void *)&current->thread.fpu.fcr31,
dbg_reg_def[regno].size);
goto out;
} else if (regno == 71) {
/* Ignore the fir (register 71) */
memset(mem, 0, dbg_reg_def[regno].size);
goto out;
}
fp_reg = dbg_reg_def[regno].offset;
memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
dbg_reg_def[regno].size);
}
out:
return dbg_reg_def[regno].name;
}
void arch_kgdb_breakpoint(void)
{
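	/*
	 * Emit the "break" at a known label: kgdb_mips_notify() compares
	 * cp0_epc against &breakinst so it can step the PC past a
	 * compiled-in breakpoint when the debugger resumes.
	 */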
__asm__ __volatile__(
".globl breakinst\n\t"
".set\tnoreorder\n\t"
"nop\n"
"breakinst:\tbreak\n\t"
"nop\n\t"
".set\treorder");
}
static void kgdb_call_nmi_hook(void *ignored)
{
kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
smp_call_function(kgdb_call_nmi_hook, NULL, 0);
local_irq_disable();
}
static int compute_signal(int tt)
{
struct hard_trap_info *ht;
for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
if (ht->tt == tt)
return ht->signo;
return SIGHUP; /* default for things we don't know about */
}
/*
 * Similar to regs_to_gdb_regs() except that the process is sleeping and so
* we may not be able to get all the info.
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
struct thread_info *ti = task_thread_info(p);
unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
u64 *ptr = (u64 *)gdb_regs;
#endif
for (reg = 0; reg < 16; reg++)
*(ptr++) = regs->regs[reg];
/* S0 - S7 */
for (reg = 16; reg < 24; reg++)
*(ptr++) = regs->regs[reg];
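	/* t8-t9 and k0-k1 (regs 24-27) are not saved for a sleeping task */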
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
for (reg = 28; reg < 32; reg++)
*(ptr++) = regs->regs[reg];
*(ptr++) = regs->cp0_status;
*(ptr++) = regs->lo;
*(ptr++) = regs->hi;
*(ptr++) = regs->cp0_badvaddr;
*(ptr++) = regs->cp0_cause;
*(ptr++) = regs->cp0_epc;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs->cp0_epc = pc;
}
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
*/
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
void *ptr)
{
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
/* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
regs->cp0_epc += 4;
/* In SMP mode, __flush_cache_all does IPI */
local_irq_enable();
__flush_cache_all();
return NOTIFY_STOP;
}
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig,
};
if (!kgdb_io_module_registered)
return NOTIFY_DONE;
return kgdb_mips_notify(NULL, cmd, &args);
}
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_mips_notify,
};
/*
* Handle the 'c' command
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
char *ptr;
unsigned long address;
switch (remcom_in_buffer[0]) {
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->cp0_epc = address;
return 0;
}
return -1;
}
struct kgdb_arch arch_kgdb_ops;
/*
* We use kgdb_early_setup so that functions we need to call now don't
* cause trouble when called again later.
*/
int kgdb_arch_init(void)
{
union mips_instruction insn = {
.r_format = {
.opcode = spec_op,
.func = break_op,
}
};
memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
register_die_notifier(&kgdb_notifier);
return 0;
}
/*
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
| gpl-2.0 |
TeamExodus/kernel_huawei_angler | fs/nfs/fscache-index.c | 8172 | 9558 | /* NFS FS-Cache index structure definition
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include "internal.h"
#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_FSCACHE
/*
* Define the NFS filesystem for FS-Cache. Upon registration FS-Cache sticks
* the cookie for the top-level index object for NFS into here. The top-level
index can then have other cache objects inserted into it.
*/
struct fscache_netfs nfs_fscache_netfs = {
.name = "nfs",
.version = 0,
};
/*
* Register NFS for caching
*/
int nfs_fscache_register(void)
{
return fscache_register_netfs(&nfs_fscache_netfs);
}
/*
* Unregister NFS for caching
*/
void nfs_fscache_unregister(void)
{
fscache_unregister_netfs(&nfs_fscache_netfs);
}
/*
* Layout of the key for an NFS server cache object.
*/
struct nfs_server_key {
uint16_t nfsversion; /* NFS protocol version */
uint16_t family; /* address family */
uint16_t port; /* IP port */
union {
struct in_addr ipv4_addr; /* IPv4 address */
struct in6_addr ipv6_addr; /* IPv6 address */
} addr[0];
};
/*
* Generate a key to describe a server in the main NFS index
* - We return the length of the key, or 0 if we can't generate one
*/
static uint16_t nfs_server_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
const struct nfs_client *clp = cookie_netfs_data;
const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
struct nfs_server_key *key = buffer;
uint16_t len = sizeof(struct nfs_server_key);
/* clear the key, including any padding, before filling it in; a
 * memset after the assignments would wipe the fields just set */
memset(key, 0, len);
key->nfsversion = clp->rpc_ops->version;
key->family = clp->cl_addr.ss_family;
switch (clp->cl_addr.ss_family) {
case AF_INET:
key->port = sin->sin_port;
key->addr[0].ipv4_addr = sin->sin_addr;
len += sizeof(key->addr[0].ipv4_addr);
break;
case AF_INET6:
key->port = sin6->sin6_port;
key->addr[0].ipv6_addr = sin6->sin6_addr;
len += sizeof(key->addr[0].ipv6_addr);
break;
default:
printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
clp->cl_addr.ss_family);
len = 0;
break;
}
return len;
}
/*
* Define the server object for FS-Cache. This is used to describe a server
* object to fscache_acquire_cookie(). It is keyed by the NFS protocol and
* server address parameters.
*/
const struct fscache_cookie_def nfs_fscache_server_index_def = {
.name = "NFS.server",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = nfs_server_get_key,
};
/*
* Generate a key to describe a superblock key in the main NFS index
*/
static uint16_t nfs_super_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
const struct nfs_fscache_key *key;
const struct nfs_server *nfss = cookie_netfs_data;
uint16_t len;
key = nfss->fscache_key;
len = sizeof(key->key) + key->key.uniq_len;
if (len > bufmax) {
len = 0;
} else {
memcpy(buffer, &key->key, sizeof(key->key));
memcpy(buffer + sizeof(key->key),
key->key.uniquifier, key->key.uniq_len);
}
return len;
}
/*
* Define the superblock object for FS-Cache. This is used to describe a
* superblock object to fscache_acquire_cookie(). It is keyed by all the NFS
* parameters that might cause a separate superblock.
*/
const struct fscache_cookie_def nfs_fscache_super_index_def = {
.name = "NFS.super",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = nfs_super_get_key,
};
/*
* Definition of the auxiliary data attached to NFS inode storage objects
* within the cache.
*
* The contents of this struct are recorded in the on-disk local cache in the
* auxiliary data attached to the data storage object backing an inode. This
* permits coherency to be managed when a new inode binds to an already extant
* cache object.
*/
struct nfs_fscache_inode_auxdata {
struct timespec mtime;
struct timespec ctime;
loff_t size;
u64 change_attr;
};
/*
* Generate a key to describe an NFS inode in an NFS server's index
*/
static uint16_t nfs_fscache_inode_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
const struct nfs_inode *nfsi = cookie_netfs_data;
uint16_t nsize;
/* use the inode's NFS filehandle as the key */
nsize = nfsi->fh.size;
memcpy(buffer, nfsi->fh.data, nsize);
return nsize;
}
/*
* Get certain file attributes from the netfs data
* - This function can be absent for an index
* - Not permitted to return an error
* - The netfs data from the cookie being used as the source is presented
*/
static void nfs_fscache_inode_get_attr(const void *cookie_netfs_data,
uint64_t *size)
{
const struct nfs_inode *nfsi = cookie_netfs_data;
*size = nfsi->vfs_inode.i_size;
}
/*
* Get the auxiliary data from netfs data
* - This function can be absent if the index carries no state data
* - Should store the auxiliary data in the buffer
* - Should return the amount of data stored
* - Not permitted to return an error
* - The netfs data from the cookie being used as the source is presented
*/
static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
struct nfs_fscache_inode_auxdata auxdata;
const struct nfs_inode *nfsi = cookie_netfs_data;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.size = nfsi->vfs_inode.i_size;
auxdata.mtime = nfsi->vfs_inode.i_mtime;
auxdata.ctime = nfsi->vfs_inode.i_ctime;
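/* the change attribute is only maintained for NFSv4; it stays zero
 * for v2/v3 */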
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = nfsi->vfs_inode.i_version;
if (bufmax > sizeof(auxdata))
bufmax = sizeof(auxdata);
memcpy(buffer, &auxdata, bufmax);
return bufmax;
}
/*
* Consult the netfs about the state of an object
* - This function can be absent if the index carries no state data
* - The netfs data from the cookie being used as the target is
* presented, as is the auxiliary data
*/
static
enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
const void *data,
uint16_t datalen)
{
struct nfs_fscache_inode_auxdata auxdata;
struct nfs_inode *nfsi = cookie_netfs_data;
if (datalen != sizeof(auxdata))
return FSCACHE_CHECKAUX_OBSOLETE;
memset(&auxdata, 0, sizeof(auxdata));
auxdata.size = nfsi->vfs_inode.i_size;
auxdata.mtime = nfsi->vfs_inode.i_mtime;
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = nfsi->vfs_inode.i_version;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
return FSCACHE_CHECKAUX_OKAY;
}
/*
* Indication from FS-Cache that the cookie is no longer cached
* - This function is called when the backing store currently caching a cookie
* is removed
* - The netfs should use this to clean up any markers indicating cached pages
* - This is mandatory for any object that may have data
*/
static void nfs_fscache_inode_now_uncached(void *cookie_netfs_data)
{
struct nfs_inode *nfsi = cookie_netfs_data;
struct pagevec pvec;
pgoff_t first;
int loop, nr_pages;
pagevec_init(&pvec, 0);
first = 0;
dprintk("NFS: nfs_inode_now_uncached: nfs_inode 0x%p\n", nfsi);
for (;;) {
/* grab a bunch of pages to unmark */
nr_pages = pagevec_lookup(&pvec,
nfsi->vfs_inode.i_mapping,
first,
PAGEVEC_SIZE - pagevec_count(&pvec));
if (!nr_pages)
break;
for (loop = 0; loop < nr_pages; loop++)
ClearPageFsCache(pvec.pages[loop]);
first = pvec.pages[nr_pages - 1]->index + 1;
pvec.nr = nr_pages;
pagevec_release(&pvec);
cond_resched();
}
}
/*
* Get an extra reference on a read context.
* - This function can be absent if the completion function doesn't require a
* context.
* - The read context is passed back to NFS in the event that a data read on the
* cache fails with EIO - in which case the server must be contacted to
* retrieve the data, which requires the read context for security.
*/
static void nfs_fh_get_context(void *cookie_netfs_data, void *context)
{
get_nfs_open_context(context);
}
/*
* Release an extra reference on a read context.
* - This function can be absent if the completion function doesn't require a
* context.
*/
static void nfs_fh_put_context(void *cookie_netfs_data, void *context)
{
if (context)
put_nfs_open_context(context);
}
/*
* Define the inode object for FS-Cache. This is used to describe an inode
* object to fscache_acquire_cookie(). It is keyed by the NFS file handle for
* an inode.
*
* Coherency is managed by comparing the copies of i_size, i_mtime and i_ctime
* held in the cache auxiliary data for the data storage object with those in
* the inode struct in memory.
*/
const struct fscache_cookie_def nfs_fscache_inode_object_def = {
.name = "NFS.fh",
.type = FSCACHE_COOKIE_TYPE_DATAFILE,
.get_key = nfs_fscache_inode_get_key,
.get_attr = nfs_fscache_inode_get_attr,
.get_aux = nfs_fscache_inode_get_aux,
.check_aux = nfs_fscache_inode_check_aux,
.now_uncached = nfs_fscache_inode_now_uncached,
.get_context = nfs_fh_get_context,
.put_context = nfs_fh_put_context,
};
| gpl-2.0 |
dummie999/android_kernel_htc_z4u | drivers/pnp/manager.c | 8172 | 9563 | /*
* manager.c - Resource Management, Conflict Resolution, Activation and Disabling of Devices
*
* based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz>
* Copyright 2003 Adam Belay <ambx1@neo.rr.com>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pnp.h>
#include <linux/bitmap.h>
#include <linux/mutex.h>
#include "base.h"
DEFINE_MUTEX(pnp_res_mutex);
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
struct resource *res, local_res;
res = pnp_get_resource(dev, IORESOURCE_IO, idx);
if (res) {
pnp_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
(unsigned long long) res->end, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = 0;
res->end = 0;
if (!rule->size) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " io %d disabled\n", idx);
goto __add;
}
res->start = rule->min;
res->end = res->start + rule->size - 1;
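/* step through candidate windows at the rule's alignment until one
 * checks out free, or we run past rule->max */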
while (!pnp_check_port(dev, res)) {
res->start += rule->align;
res->end = res->start + rule->size - 1;
if (res->start > rule->max || !rule->align) {
pnp_dbg(&dev->dev, " couldn't assign io %d "
"(min %#llx max %#llx)\n", idx,
(unsigned long long) rule->min,
(unsigned long long) rule->max);
return -EBUSY;
}
}
__add:
pnp_add_io_resource(dev, res->start, res->end, res->flags);
return 0;
}
static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
{
struct resource *res, local_res;
res = pnp_get_resource(dev, IORESOURCE_MEM, idx);
if (res) {
pnp_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
(unsigned long long) res->end, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = 0;
res->end = 0;
if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
res->flags |= IORESOURCE_READONLY;
if (rule->flags & IORESOURCE_MEM_CACHEABLE)
res->flags |= IORESOURCE_CACHEABLE;
if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
res->flags |= IORESOURCE_RANGELENGTH;
if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
res->flags |= IORESOURCE_SHADOWABLE;
if (!rule->size) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " mem %d disabled\n", idx);
goto __add;
}
res->start = rule->min;
res->end = res->start + rule->size - 1;
while (!pnp_check_mem(dev, res)) {
res->start += rule->align;
res->end = res->start + rule->size - 1;
if (res->start > rule->max || !rule->align) {
pnp_dbg(&dev->dev, " couldn't assign mem %d "
"(min %#llx max %#llx)\n", idx,
(unsigned long long) rule->min,
(unsigned long long) rule->max);
return -EBUSY;
}
}
__add:
pnp_add_mem_resource(dev, res->start, res->end, res->flags);
return 0;
}
static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
{
struct resource *res, local_res;
int i;
/* IRQ priority: this table is good for i386 */
static unsigned short xtab[16] = {
5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
};
res = pnp_get_resource(dev, IORESOURCE_IRQ, idx);
if (res) {
pnp_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = -1;
res->end = -1;
if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " irq %d disabled\n", idx);
goto __add;
}
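/* try IRQs 16 and above first, before falling back to the i386
 * priority table below */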
/* TBD: need check for >16 IRQ */
res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16);
if (res->start < PNP_IRQ_NR) {
res->end = res->start;
goto __add;
}
for (i = 0; i < 16; i++) {
if (test_bit(xtab[i], rule->map.bits)) {
res->start = res->end = xtab[i];
if (pnp_check_irq(dev, res))
goto __add;
}
}
if (rule->flags & IORESOURCE_IRQ_OPTIONAL) {
res->start = -1;
res->end = -1;
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " irq %d disabled (optional)\n", idx);
goto __add;
}
pnp_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
return -EBUSY;
__add:
pnp_add_irq_resource(dev, res->start, res->flags);
return 0;
}
#ifdef CONFIG_ISA_DMA_API
static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
{
struct resource *res, local_res;
int i;
/* DMA priority: this table is good for i386 */
static unsigned short xtab[8] = {
1, 3, 5, 6, 7, 0, 2, 4
};
res = pnp_get_resource(dev, IORESOURCE_DMA, idx);
if (res) {
pnp_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = -1;
res->end = -1;
for (i = 0; i < 8; i++) {
if (rule->map & (1 << xtab[i])) {
res->start = res->end = xtab[i];
if (pnp_check_dma(dev, res))
goto __add;
}
}
#ifdef MAX_DMA_CHANNELS
res->start = res->end = MAX_DMA_CHANNELS;
#endif
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " disable dma %d\n", idx);
__add:
pnp_add_dma_resource(dev, res->start, res->flags);
return 0;
}
#endif /* CONFIG_ISA_DMA_API */
void pnp_init_resources(struct pnp_dev *dev)
{
pnp_free_resources(dev);
}
static void pnp_clean_resource_table(struct pnp_dev *dev)
{
struct pnp_resource *pnp_res, *tmp;
list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) {
if (pnp_res->res.flags & IORESOURCE_AUTO)
pnp_free_resource(pnp_res);
}
}
/**
* pnp_assign_resources - assigns resources to the device based on the specified dependent number
* @dev: pointer to the desired device
* @set: the dependent function number
*/
static int pnp_assign_resources(struct pnp_dev *dev, int set)
{
struct pnp_option *option;
int nport = 0, nmem = 0, nirq = 0;
int ndma __maybe_unused = 0;
int ret = 0;
pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(dev);
list_for_each_entry(option, &dev->options, list) {
if (pnp_option_is_dependent(option) &&
pnp_option_set(option) != set)
continue;
switch (option->type) {
case IORESOURCE_IO:
ret = pnp_assign_port(dev, &option->u.port, nport++);
break;
case IORESOURCE_MEM:
ret = pnp_assign_mem(dev, &option->u.mem, nmem++);
break;
case IORESOURCE_IRQ:
ret = pnp_assign_irq(dev, &option->u.irq, nirq++);
break;
#ifdef CONFIG_ISA_DMA_API
case IORESOURCE_DMA:
ret = pnp_assign_dma(dev, &option->u.dma, ndma++);
break;
#endif
default:
ret = -EINVAL;
break;
}
if (ret < 0)
break;
}
mutex_unlock(&pnp_res_mutex);
if (ret < 0) {
pnp_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret);
pnp_clean_resource_table(dev);
} else
dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded");
return ret;
}
/**
* pnp_auto_config_dev - automatically assigns resources to a device
* @dev: pointer to the desired device
*/
int pnp_auto_config_dev(struct pnp_dev *dev)
{
int i, ret;
if (!pnp_can_configure(dev)) {
pnp_dbg(&dev->dev, "configuration not supported\n");
return -ENODEV;
}
ret = pnp_assign_resources(dev, 0);
if (ret == 0)
return 0;
for (i = 1; i < dev->num_dependent_sets; i++) {
ret = pnp_assign_resources(dev, i);
if (ret == 0)
return 0;
}
dev_err(&dev->dev, "unable to assign resources\n");
return ret;
}
/**
* pnp_start_dev - low-level start of the PnP device
* @dev: pointer to the desired device
*
* assumes that resources have already been allocated
*/
int pnp_start_dev(struct pnp_dev *dev)
{
if (!pnp_can_write(dev)) {
pnp_dbg(&dev->dev, "activation not supported\n");
return -EINVAL;
}
dbg_pnp_show_resources(dev, "pnp_start_dev");
if (dev->protocol->set(dev) < 0) {
dev_err(&dev->dev, "activation failed\n");
return -EIO;
}
dev_info(&dev->dev, "activated\n");
return 0;
}
/**
* pnp_stop_dev - low-level disable of the PnP device
* @dev: pointer to the desired device
*
* does not free resources
*/
int pnp_stop_dev(struct pnp_dev *dev)
{
if (!pnp_can_disable(dev)) {
pnp_dbg(&dev->dev, "disabling not supported\n");
return -EINVAL;
}
if (dev->protocol->disable(dev) < 0) {
dev_err(&dev->dev, "disable failed\n");
return -EIO;
}
dev_info(&dev->dev, "disabled\n");
return 0;
}
/**
* pnp_activate_dev - activates a PnP device for use
* @dev: pointer to the desired device
*
* does not validate or set resources so be careful.
*/
int pnp_activate_dev(struct pnp_dev *dev)
{
int error;
if (dev->active)
return 0;
/* ensure resources are allocated */
if (pnp_auto_config_dev(dev))
return -EBUSY;
error = pnp_start_dev(dev);
if (error)
return error;
dev->active = 1;
return 0;
}
/**
* pnp_disable_dev - disables device
* @dev: pointer to the desired device
*
* inform the correct pnp protocol so that resources can be used by other devices
*/
int pnp_disable_dev(struct pnp_dev *dev)
{
int error;
if (!dev->active)
return 0;
error = pnp_stop_dev(dev);
if (error)
return error;
dev->active = 0;
/* release the resources so that other devices can use them */
mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(dev);
mutex_unlock(&pnp_res_mutex);
return 0;
}
EXPORT_SYMBOL(pnp_start_dev);
EXPORT_SYMBOL(pnp_stop_dev);
EXPORT_SYMBOL(pnp_activate_dev);
EXPORT_SYMBOL(pnp_disable_dev);
| gpl-2.0 |
TEAM-RAZOR-DEVICES/android_kernel_lge_awifi | arch/h8300/kernel/signal.c | 8940 | 14395 | /*
* linux/arch/h8300/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp>
* and David McCullough <davidm@snapgear.com>
*
* Based on
* Linux/m68k by Hamish Macdonald
*/
/*
* ++roman (07/09/96): implemented signal stacks (specially for tosemu on
* Atari :-) Current limitation: Only one sigstack can be active at one time.
* If a second signal with SA_ONSTACK set arrives while working on a sigstack,
* SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
* signal handlers!
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
#include <linux/tracehook.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int do_sigsuspend(struct pt_regs *regs)
{
old_sigset_t mask = regs->er3;
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->er0 = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(regs, &saveset))
return -EINTR;
}
}
asmlinkage int
do_rt_sigsuspend(struct pt_regs *regs)
{
sigset_t *unewset = (sigset_t *)regs->er1;
size_t sigsetsize = (size_t)regs->er2;
sigset_t saveset, newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->er0 = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(regs, &saveset))
return -EINTR;
}
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction *act,
struct old_sigaction *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
}
asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
{
return do_sigaltstack(uss, uoss, rdusp());
}
/*
* Do a signal return; undo the signal stack.
*
* Keep the return code on the stack quadword aligned!
* That makes the cache flush below easier.
*/
struct sigframe
{
long dummy_er0;
long dummy_vector;
#if defined(CONFIG_CPU_H8S)
short dummy_exr;
#endif
long dummy_pc;
char *pretcode;
unsigned char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
struct sigcontext sc;
int sig;
} __attribute__((aligned(2),packed));
struct rt_sigframe
{
long dummy_er0;
long dummy_vector;
#if defined(CONFIG_CPU_H8S)
short dummy_exr;
#endif
long dummy_pc;
char *pretcode;
struct siginfo *pinfo;
void *puc;
unsigned char retcode[8];
struct siginfo info;
struct ucontext uc;
int sig;
} __attribute__((aligned(2),packed));
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc,
int *pd0)
{
int err = 0;
unsigned int ccr;
unsigned int usp;
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */
COPY(er1);
COPY(er2);
COPY(er3);
COPY(er5);
COPY(pc);
ccr = regs->ccr & 0x10;
COPY(ccr);
#undef COPY
regs->ccr &= 0xef;
regs->ccr |= ccr;
regs->orig_er0 = -1; /* disable syscall checks */
err |= __get_user(usp, &usc->sc_usp);
wrusp(usp);
err |= __get_user(er0, &usc->sc_er0);
*pd0 = er0;
return err;
}
asmlinkage int do_sigreturn(unsigned long __unused,...)
{
struct pt_regs *regs = (struct pt_regs *) (&__unused - 1);
unsigned long usp = rdusp();
struct sigframe *frame = (struct sigframe *)(usp - 4);
sigset_t set;
int er0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
(_NSIG_WORDS > 1 &&
__copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->sc, &er0))
goto badframe;
return er0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused,...)
{
struct pt_regs *regs = (struct pt_regs *) &__unused;
unsigned long usp = rdusp();
struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
sigset_t set;
int er0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0))
goto badframe;
if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
goto badframe;
return er0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
err |= __put_user(regs->er0, &sc->sc_er0);
err |= __put_user(regs->er1, &sc->sc_er1);
err |= __put_user(regs->er2, &sc->sc_er2);
err |= __put_user(regs->er3, &sc->sc_er3);
err |= __put_user(regs->er4, &sc->sc_er4);
err |= __put_user(regs->er5, &sc->sc_er5);
err |= __put_user(regs->er6, &sc->sc_er6);
err |= __put_user(rdusp(), &sc->sc_usp);
err |= __put_user(regs->pc, &sc->sc_pc);
err |= __put_user(regs->ccr, &sc->sc_ccr);
err |= __put_user(mask, &sc->sc_mask);
return err;
}
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
unsigned long usp;
/* Default to using normal stack. */
usp = rdusp();
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
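/* round down so the frame stays 8-byte aligned */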
return (void *)((usp - frame_size) & -8UL);
}
static void setup_frame (int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe *frame;
int err = 0;
int usig;
unsigned char *ret;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
usig = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= __put_user(usig, &frame->sig);
if (err)
goto give_sigsegv;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
if (err)
goto give_sigsegv;
if (_NSIG_WORDS > 1) {
err |= copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
if (err)
goto give_sigsegv;
}
ret = frame->retcode;
if (ka->sa.sa_flags & SA_RESTORER)
ret = (unsigned char *)(ka->sa.sa_restorer);
else {
/* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
(unsigned long *)(frame->retcode + 0));
err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
}
/* Set up to return from userspace. */
err |= __put_user(ret, &frame->pretcode);
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->er0 = (current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig);
regs->er1 = (unsigned long)&(frame->sc);
regs->er5 = current->mm->start_data; /* GOT base */
return;
give_sigsegv:
force_sigsegv(sig, current);
}
static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
int usig;
unsigned char *ret;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
usig = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= __put_user(usig, &frame->sig);
if (err)
goto give_sigsegv;
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
goto give_sigsegv;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user((void *)current->sas_ss_sp,
&frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(rdusp()),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/* Set up to return from userspace. */
ret = frame->retcode;
if (ka->sa.sa_flags & SA_RESTORER)
ret = (unsigned char *)(ka->sa.sa_restorer);
else {
/* sub.l er0,er0; mov.b #__NR_rt_sigreturn,r0l; trapa #0 */
err |= __put_user(0x1a80f800 + (__NR_rt_sigreturn & 0xff),
(unsigned long *)(frame->retcode + 0));
err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
}
err |= __put_user(ret, &frame->pretcode);
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->er0 = (current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig);
regs->er1 = (unsigned long)&(frame->info);
regs->er2 = (unsigned long)&frame->uc;
regs->er5 = current->mm->start_data; /* GOT base */
return;
give_sigsegv:
force_sigsegv(sig, current);
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs * regs)
{
/* are we from a system call? */
if (regs->orig_er0 >= 0) {
switch (regs->er0) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->er0 = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->er0 = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
regs->er0 = regs->orig_er0;
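/* back up over the 2-byte trapa instruction so the
 * interrupted syscall is re-executed */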
regs->pc -= 2;
}
}
/* set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame(sig, ka, info, oldset, regs);
else
setup_frame(sig, ka, oldset, regs);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if ((regs->ccr & 0x10))
return 1;
if (try_to_freeze())
goto no_signal;
current->thread.esp0 = (unsigned long) regs;
if (!oldset)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
handle_signal(signr, &info, &ka, oldset, regs);
return 1;
}
no_signal:
/* Did we come from a system call? */
if (regs->orig_er0 >= 0) {
/* Restart the system call - no handlers present */
if (regs->er0 == -ERESTARTNOHAND ||
regs->er0 == -ERESTARTSYS ||
regs->er0 == -ERESTARTNOINTR) {
regs->er0 = regs->orig_er0;
regs->pc -= 2;
}
if (regs->er0 == -ERESTART_RESTARTBLOCK){
regs->er0 = __NR_restart_syscall;
regs->pc -= 2;
}
}
return 0;
}
asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
{
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs, NULL);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
}
| gpl-2.0 |
IonKiwi/android_kernel_samsung_kccat6 | drivers/char/snsc.c | 10732 | 11216 | /*
* SN Platform system controller communication support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
/*
* System controller communication driver
*
* This driver allows a user process to communicate with the system
* controller (a.k.a. "IRouter") network in an SGI SN system.
*/
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/nodepda.h>
#include "snsc.h"
#define SYSCTL_BASENAME "snsc"
#define SCDRV_BUFSZ 2048
#define SCDRV_TIMEOUT 1000
static DEFINE_MUTEX(scdrv_mutex);
static irqreturn_t
scdrv_interrupt(int irq, void *subch_data)
{
struct subch_data_s *sd = subch_data;
unsigned long flags;
int status;
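/* both buffer locks are taken because a single SAL status query
 * reports receive and transmit events together */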
spin_lock_irqsave(&sd->sd_rlock, flags);
spin_lock(&sd->sd_wlock);
status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
if (status > 0) {
if (status & SAL_IROUTER_INTR_RECV) {
wake_up(&sd->sd_rq);
}
if (status & SAL_IROUTER_INTR_XMIT) {
ia64_sn_irtr_intr_disable
(sd->sd_nasid, sd->sd_subch,
SAL_IROUTER_INTR_XMIT);
wake_up(&sd->sd_wq);
}
}
spin_unlock(&sd->sd_wlock);
spin_unlock_irqrestore(&sd->sd_rlock, flags);
return IRQ_HANDLED;
}
/*
* scdrv_open
*
* Reserve a subchannel for system controller communication.
*/
static int
scdrv_open(struct inode *inode, struct file *file)
{
struct sysctl_data_s *scd;
struct subch_data_s *sd;
int rv;
/* look up device info for this device file */
scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev);
/* allocate memory for subchannel data */
sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
if (sd == NULL) {
printk("%s: couldn't allocate subchannel data\n",
__func__);
return -ENOMEM;
}
/* initialize subch_data_s fields */
sd->sd_nasid = scd->scd_nasid;
sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid);
if (sd->sd_subch < 0) {
kfree(sd);
printk("%s: couldn't allocate subchannel\n", __func__);
return -EBUSY;
}
spin_lock_init(&sd->sd_rlock);
spin_lock_init(&sd->sd_wlock);
init_waitqueue_head(&sd->sd_rq);
init_waitqueue_head(&sd->sd_wq);
sema_init(&sd->sd_rbs, 1);
sema_init(&sd->sd_wbs, 1);
file->private_data = sd;
/* hook this subchannel up to the system controller interrupt */
mutex_lock(&scdrv_mutex);
rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
IRQF_SHARED | IRQF_DISABLED,
SYSCTL_BASENAME, sd);
if (rv) {
ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
kfree(sd);
printk("%s: irq request failed (%d)\n", __func__, rv);
mutex_unlock(&scdrv_mutex);
return -EBUSY;
}
mutex_unlock(&scdrv_mutex);
return 0;
}
/*
* scdrv_release
*
* Release a previously-reserved subchannel.
*/
static int
scdrv_release(struct inode *inode, struct file *file)
{
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
int rv;
/* free the interrupt */
free_irq(SGI_UART_VECTOR, sd);
/* ask SAL to close the subchannel */
rv = ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
kfree(sd);
return rv;
}
/*
* scdrv_read
*
* Called to read bytes from the open IRouter pipe.
*
*/
static inline int
read_status_check(struct subch_data_s *sd, int *len)
{
return ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, sd->sd_rb, len);
}
static ssize_t
scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
{
int status;
int len;
unsigned long flags;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
/* try to get control of the read buffer */
if (down_trylock(&sd->sd_rbs)) {
/* somebody else has it now;
* if we're non-blocking, then exit...
*/
if (file->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
/* ...or if we want to block, then do so here */
if (down_interruptible(&sd->sd_rbs)) {
/* something went wrong with wait */
return -ERESTARTSYS;
}
}
/* anything to read? */
len = CHUNKSIZE;
spin_lock_irqsave(&sd->sd_rlock, flags);
status = read_status_check(sd, &len);
/* if not, and we're blocking I/O, loop */
while (status < 0) {
DECLARE_WAITQUEUE(wait, current);
if (file->f_flags & O_NONBLOCK) {
spin_unlock_irqrestore(&sd->sd_rlock, flags);
up(&sd->sd_rbs);
return -EAGAIN;
}
len = CHUNKSIZE;
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&sd->sd_rq, &wait);
spin_unlock_irqrestore(&sd->sd_rlock, flags);
schedule_timeout(SCDRV_TIMEOUT);
remove_wait_queue(&sd->sd_rq, &wait);
if (signal_pending(current)) {
/* wait was interrupted */
up(&sd->sd_rbs);
return -ERESTARTSYS;
}
spin_lock_irqsave(&sd->sd_rlock, flags);
status = read_status_check(sd, &len);
}
spin_unlock_irqrestore(&sd->sd_rlock, flags);
if (len > 0) {
/* we read something in the last read_status_check(); copy
* it out to user space
*/
if (count < len) {
pr_debug("%s: only accepting %d of %d bytes\n",
__func__, (int) count, len);
}
len = min((int) count, len);
if (copy_to_user(buf, sd->sd_rb, len))
len = -EFAULT;
}
/* release the read buffer and wake anyone who might be
* waiting for it
*/
up(&sd->sd_rbs);
/* return the number of characters read in */
return len;
}
/*
* scdrv_write
*
* Writes a chunk of an IRouter packet (or other system controller data)
* to the system controller.
*
*/
static inline int
write_status_check(struct subch_data_s *sd, int count)
{
return ia64_sn_irtr_send(sd->sd_nasid, sd->sd_subch, sd->sd_wb, count);
}
static ssize_t
scdrv_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
unsigned long flags;
int status;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
/* try to get control of the write buffer */
if (down_trylock(&sd->sd_wbs)) {
/* somebody else has it now;
* if we're non-blocking, then exit...
*/
if (file->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
/* ...or if we want to block, then do so here */
if (down_interruptible(&sd->sd_wbs)) {
/* something went wrong with wait */
return -ERESTARTSYS;
}
}
count = min((int) count, CHUNKSIZE);
if (copy_from_user(sd->sd_wb, buf, count)) {
up(&sd->sd_wbs);
return -EFAULT;
}
/* try to send the buffer */
spin_lock_irqsave(&sd->sd_wlock, flags);
status = write_status_check(sd, count);
/* if we failed, and we want to block, then loop */
while (status <= 0) {
DECLARE_WAITQUEUE(wait, current);
if (file->f_flags & O_NONBLOCK) {
spin_unlock_irqrestore(&sd->sd_wlock, flags);
up(&sd->sd_wbs);
return -EAGAIN;
}
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&sd->sd_wq, &wait);
spin_unlock_irqrestore(&sd->sd_wlock, flags);
schedule_timeout(SCDRV_TIMEOUT);
remove_wait_queue(&sd->sd_wq, &wait);
if (signal_pending(current)) {
/* wait was interrupted */
up(&sd->sd_wbs);
return -ERESTARTSYS;
}
spin_lock_irqsave(&sd->sd_wlock, flags);
status = write_status_check(sd, count);
}
spin_unlock_irqrestore(&sd->sd_wlock, flags);
/* release the write buffer and wake anyone who's waiting for it */
up(&sd->sd_wbs);
/* return the number of characters accepted (should be the complete
* "chunk" as requested)
*/
if ((status >= 0) && (status < count)) {
pr_debug("Didn't accept the full chunk; %d of %d\n",
status, (int) count);
}
return status;
}
static unsigned int
scdrv_poll(struct file *file, struct poll_table_struct *wait)
{
unsigned int mask = 0;
int status = 0;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
unsigned long flags;
poll_wait(file, &sd->sd_rq, wait);
poll_wait(file, &sd->sd_wq, wait);
spin_lock_irqsave(&sd->sd_rlock, flags);
spin_lock(&sd->sd_wlock);
status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
spin_unlock(&sd->sd_wlock);
spin_unlock_irqrestore(&sd->sd_rlock, flags);
if (status > 0) {
if (status & SAL_IROUTER_INTR_RECV) {
mask |= POLLIN | POLLRDNORM;
}
if (status & SAL_IROUTER_INTR_XMIT) {
mask |= POLLOUT | POLLWRNORM;
}
}
return mask;
}
static const struct file_operations scdrv_fops = {
.owner = THIS_MODULE,
.read = scdrv_read,
.write = scdrv_write,
.poll = scdrv_poll,
.open = scdrv_open,
.release = scdrv_release,
.llseek = noop_llseek,
};
static struct class *snsc_class;
/*
* scdrv_init
*
* Called at boot time to initialize the system controller communication
* facility.
*/
int __init
scdrv_init(void)
{
geoid_t geoid;
cnodeid_t cnode;
char devname[32];
char *devnamep;
struct sysctl_data_s *scd;
void *salbuf;
dev_t first_dev, dev;
nasid_t event_nasid;
if (!ia64_platform_is("sn2"))
return -ENODEV;
event_nasid = ia64_sn_get_console_nasid();
if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
SYSCTL_BASENAME) < 0) {
printk("%s: failed to register SN system controller device\n",
__func__);
return -ENODEV;
}
snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
for (cnode = 0; cnode < num_cnodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
devnamep = devname;
format_module_id(devnamep, geo_module(geoid),
MODULE_FORMAT_BRIEF);
devnamep = devname + strlen(devname);
sprintf(devnamep, "^%d#%d", geo_slot(geoid),
geo_slab(geoid));
/* allocate sysctl device data */
scd = kzalloc(sizeof (struct sysctl_data_s),
GFP_KERNEL);
if (!scd) {
printk("%s: failed to allocate device info"
"for %s/%s\n", __func__,
SYSCTL_BASENAME, devname);
continue;
}
/* initialize sysctl device data fields */
scd->scd_nasid = cnodeid_to_nasid(cnode);
if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
printk("%s: failed to allocate driver buffer"
"(%s%s)\n", __func__,
SYSCTL_BASENAME, devname);
kfree(scd);
continue;
}
if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
SCDRV_BUFSZ) < 0) {
printk
("%s: failed to initialize SAL for"
" system controller communication"
" (%s/%s): outdated PROM?\n",
__func__, SYSCTL_BASENAME, devname);
kfree(scd);
kfree(salbuf);
continue;
}
dev = first_dev + cnode;
cdev_init(&scd->scd_cdev, &scdrv_fops);
if (cdev_add(&scd->scd_cdev, dev, 1)) {
printk("%s: failed to register system"
" controller device (%s%s)\n",
__func__, SYSCTL_BASENAME, devname);
kfree(scd);
kfree(salbuf);
continue;
}
device_create(snsc_class, NULL, dev, NULL,
"%s", devname);
ia64_sn_irtr_intr_enable(scd->scd_nasid,
0 /*ignored */ ,
SAL_IROUTER_INTR_RECV);
/* on the console nasid, prepare to receive
* system controller environmental events
*/
if(scd->scd_nasid == event_nasid) {
scdrv_event_init(scd);
}
}
return 0;
}
module_init(scdrv_init);
| gpl-2.0 |
SandPox/kernel_collection | net/ipv6/ip6_gre.c | 493 | 40936 | /*
* GRE over IPv6 protocol decoder.
*
* Authors: Dmitry Kozlov (xeb@mail.ru)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
#define HASH_SIZE_SHIFT 5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
static int ip6gre_net_id __read_mostly;
struct ip6gre_net {
struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];
struct net_device *fb_tunnel_dev;
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */
/*
4 hash tables:
3: (remote,local)
2: (remote,*)
1: (*,local)
0: (*,*)
We require an exact key match: if a key is present in the packet,
it will match only a tunnel with the same key; if no key is present,
it will match only a keyless tunnel.
All keyless packets that do not match a configured keyless tunnel
will match the fallback tunnel.
*/
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
u32 hash = ipv6_addr_hash(addr);
return hash_32(hash, HASH_SIZE_SHIFT);
}
#define tunnels_r_l tunnels[3]
#define tunnels_r tunnels[2]
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
/* Given src, dst and key, find appropriate for input tunnel. */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
const struct in6_addr *remote, const struct in6_addr *local,
__be32 key, __be16 gre_proto)
{
struct net *net = dev_net(dev);
int link = dev->ifindex;
unsigned int h0 = HASH_ADDR(remote);
unsigned int h1 = HASH_KEY(key);
struct ip6_tnl *t, *cand = NULL;
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
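/* score: bit 0 set on link mismatch, bit 1 on device-type mismatch;
 * 0 is an exact match and returns immediately, otherwise the lowest
 * score seen so far is kept in cand as the best inexact match
 * (cand_score starts at 4, worse than any real score).
 */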
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
!ipv6_addr_equal(remote, &t->parms.raddr) ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IP6GRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IP6GRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
(!ipv6_addr_equal(local, &t->parms.raddr) ||
!ipv6_addr_is_multicast(local))) ||
key != t->parms.i_key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IP6GRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
if (t->parms.i_key != key ||
!(t->dev->flags & IFF_UP))
continue;
if (t->dev->type != ARPHRD_IP6GRE &&
t->dev->type != dev_type)
continue;
score = 0;
if (t->parms.link != link)
score |= 1;
if (t->dev->type != dev_type)
score |= 2;
if (score == 0)
return t;
if (score < cand_score) {
cand = t;
cand_score = score;
}
}
if (cand != NULL)
return cand;
dev = ign->fb_tunnel_dev;
if (dev->flags & IFF_UP)
return netdev_priv(dev);
return NULL;
}
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
const struct __ip6_tnl_parm *p)
{
const struct in6_addr *remote = &p->raddr;
const struct in6_addr *local = &p->laddr;
unsigned int h = HASH_KEY(p->i_key);
int prio = 0;
if (!ipv6_addr_any(local))
prio |= 1;
if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
prio |= 2;
h ^= HASH_ADDR(remote);
}
return &ign->tunnels[prio][h];
}
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
const struct ip6_tnl *t)
{
return __ip6gre_bucket(ign, &t->parms);
}
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
}
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp;
struct ip6_tnl *iter;
for (tp = ip6gre_bucket(ign, t);
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
rcu_assign_pointer(*tp, t->next);
break;
}
}
}
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
const struct __ip6_tnl_parm *parms,
int type)
{
const struct in6_addr *remote = &parms->raddr;
const struct in6_addr *local = &parms->laddr;
__be32 key = parms->i_key;
int link = parms->link;
struct ip6_tnl *t;
struct ip6_tnl __rcu **tp;
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
for (tp = __ip6gre_bucket(ign, parms);
(t = rtnl_dereference(*tp)) != NULL;
tp = &t->next)
if (ipv6_addr_equal(local, &t->parms.laddr) &&
ipv6_addr_equal(remote, &t->parms.raddr) &&
key == t->parms.i_key &&
link == t->parms.link &&
type == t->dev->type)
break;
return t;
}
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
const struct __ip6_tnl_parm *parms, int create)
{
struct ip6_tnl *t, *nt;
struct net_device *dev;
char name[IFNAMSIZ];
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
if (t || !create)
return t;
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
strcpy(name, "ip6gre%d");
dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
if (!dev)
return NULL;
dev_net_set(dev, net);
nt = netdev_priv(dev);
nt->parms = *parms;
dev->rtnl_link_ops = &ip6gre_link_ops;
nt->dev = dev;
ip6gre_tnl_link_config(nt, 1);
if (register_netdevice(dev) < 0)
goto failed_free;
/* Can use a lockless transmit, unless we generate output sequences */
if (!(nt->parms.o_flags & GRE_SEQ))
dev->features |= NETIF_F_LLTX;
dev_hold(dev);
ip6gre_tunnel_link(ign, nt);
return nt;
failed_free:
free_netdev(dev);
return NULL;
}
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
ip6gre_tunnel_unlink(ign, netdev_priv(dev));
dev_put(dev);
}
static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
__be16 *p = (__be16 *)(skb->data + offset);
int grehlen = offset + 4;
struct ip6_tnl *t;
__be16 flags;
flags = p[0];
if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
if (flags&(GRE_VERSION|GRE_ROUTING))
return;
if (flags&GRE_KEY) {
grehlen += 4;
if (flags&GRE_CSUM)
grehlen += 4;
}
}
/* If only 8 bytes returned, keyed message will be dropped here */
if (!pskb_may_pull(skb, grehlen))
return;
ipv6h = (const struct ipv6hdr *)skb->data;
p = (__be16 *)(skb->data + offset);
t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
flags & GRE_KEY ?
*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
p[1]);
if (t == NULL)
return;
switch (type) {
__u32 teli;
struct ipv6_tlv_tnl_enc_lim *tel;
__u32 mtu;
case ICMPV6_DEST_UNREACH:
net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
break;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
t->parms.name);
}
break;
case ICMPV6_PARAMPROB:
teli = 0;
if (code == ICMPV6_HDR_FIELD)
teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
if (teli && teli == info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
t->parms.name);
}
} else {
net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
t->parms.name);
}
break;
case ICMPV6_PKT_TOOBIG:
mtu = info - offset;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
break;
}
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
}
static int ip6gre_rcv(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
u8 *h;
__be16 flags;
__sum16 csum = 0;
__be32 key = 0;
u32 seqno = 0;
struct ip6_tnl *tunnel;
int offset = 4;
__be16 gre_proto;
int err;
if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
goto drop;
ipv6h = ipv6_hdr(skb);
h = skb->data;
flags = *(__be16 *)h;
if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
/* - Version must be 0.
- We do not support routing headers.
*/
if (flags&(GRE_VERSION|GRE_ROUTING))
goto drop;
if (flags&GRE_CSUM) {
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
csum = csum_fold(skb->csum);
if (!csum)
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
csum = __skb_checksum_complete(skb);
skb->ip_summed = CHECKSUM_COMPLETE;
}
offset += 4;
}
if (flags&GRE_KEY) {
key = *(__be32 *)(h + offset);
offset += 4;
}
if (flags&GRE_SEQ) {
seqno = ntohl(*(__be32 *)(h + offset));
offset += 4;
}
}
gre_proto = *(__be16 *)(h + 2);
tunnel = ip6gre_tunnel_lookup(skb->dev,
&ipv6h->saddr, &ipv6h->daddr, key,
gre_proto);
if (tunnel) {
struct pcpu_tstats *tstats;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
tunnel->dev->stats.rx_dropped++;
goto drop;
}
secpath_reset(skb);
skb->protocol = gre_proto;
/* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IP
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
*/
if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
skb->protocol = htons(ETH_P_IP);
if ((*(h + offset) & 0xF0) != 0x40)
offset += 4;
}
skb->mac_header = skb->network_header;
__pskb_pull(skb, offset);
skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
skb->pkt_type = PACKET_HOST;
if (((flags&GRE_CSUM) && csum) ||
(!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
tunnel->dev->stats.rx_crc_errors++;
tunnel->dev->stats.rx_errors++;
goto drop;
}
if (tunnel->parms.i_flags&GRE_SEQ) {
if (!(flags&GRE_SEQ) ||
(tunnel->i_seqno &&
(s32)(seqno - tunnel->i_seqno) < 0)) {
tunnel->dev->stats.rx_fifo_errors++;
tunnel->dev->stats.rx_errors++;
goto drop;
}
tunnel->i_seqno = seqno + 1;
}
/* Warning: All skb pointers will be invalidated! */
if (tunnel->dev->type == ARPHRD_ETHER) {
if (!pskb_may_pull(skb, ETH_HLEN)) {
tunnel->dev->stats.rx_length_errors++;
tunnel->dev->stats.rx_errors++;
goto drop;
}
ipv6h = ipv6_hdr(skb);
skb->protocol = eth_type_trans(skb, tunnel->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
__skb_tunnel_rx(skb, tunnel->dev);
skb_reset_network_header(skb);
err = IP6_ECN_decapsulate(ipv6h, skb);
if (unlikely(err)) {
if (log_ecn_error)
net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
&ipv6h->saddr,
ipv6_get_dsfield(ipv6h));
if (err > 1) {
++tunnel->dev->stats.rx_frame_errors;
++tunnel->dev->stats.rx_errors;
goto drop;
}
}
tstats = this_cpu_ptr(tunnel->dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
return 0;
}
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
return 0;
}
struct ipv6_tel_txoption {
struct ipv6_txoptions ops;
__u8 dst_opt[8];
};
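/* Build a destination options header carrying the tunnel encapsulation
 * limit. Assuming the usual kernel TLV values
 * (IPV6_TLV_TNL_ENCAP_LIMIT = 4, IPV6_TLV_PADN = 1), the 8 bytes end up
 * as [nexthdr][len=0][4][1][encap_limit][1][1][0]: the limit TLV
 * followed by a 3-byte PadN option. The next-header byte is filled in
 * later by ipv6_push_nfrag_opts().
 */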
static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
memset(opt, 0, sizeof(struct ipv6_tel_txoption));
opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
opt->dst_opt[3] = 1;
opt->dst_opt[4] = encap_limit;
opt->dst_opt[5] = IPV6_TLV_PADN;
opt->dst_opt[6] = 1;
opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
opt->ops.opt_nflen = 8;
}
static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
struct net_device *dev,
__u8 dsfield,
struct flowi6 *fl6,
int encap_limit,
__u32 *pmtu)
{
struct net *net = dev_net(dev);
struct ip6_tnl *tunnel = netdev_priv(dev);
struct net_device *tdev; /* Device to other host */
struct ipv6hdr *ipv6h; /* Our new IP header */
unsigned int max_headroom = 0; /* The extra header space needed */
int gre_hlen;
struct ipv6_tel_txoption opt;
int mtu;
struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device_stats *stats = &tunnel->dev->stats;
int err = -1;
u8 proto;
struct sk_buff *new_skb;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
gre_hlen = 0;
ipv6h = (struct ipv6hdr *)skb->data;
fl6->daddr = ipv6h->daddr;
} else {
gre_hlen = tunnel->hlen;
fl6->daddr = tunnel->parms.raddr;
}
if (!fl6->flowi6_mark)
dst = ip6_tnl_dst_check(tunnel);
if (!dst) {
ndst = ip6_route_output(net, NULL, fl6);
if (ndst->error)
goto tx_err_link_failure;
ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
ndst = NULL;
goto tx_err_link_failure;
}
dst = ndst;
}
tdev = dst->dev;
if (tdev == dev) {
stats->collisions++;
net_warn_ratelimited("%s: Local routing loop detected!\n",
tunnel->parms.name);
goto tx_err_dst_release;
}
mtu = dst_mtu(dst) - sizeof(*ipv6h);
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
}
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
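/* e.g. a 1500-byte route MTU leaves 1500 - 40 = 1460 for the inner
 * packet, or 1452 when an encapsulation limit option will be inserted;
 * IPV6_MIN_MTU (1280) is the floor either way.
 */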
if (skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu) {
*pmtu = mtu;
err = -EMSGSIZE;
goto tx_err_dst_release;
}
if (tunnel->err_count > 0) {
if (time_before(jiffies,
tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
dst_link_failure(skb);
} else
tunnel->err_count = 0;
}
max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
new_skb = skb_realloc_headroom(skb, max_headroom);
if (max_headroom > dev->needed_headroom)
dev->needed_headroom = max_headroom;
if (!new_skb)
goto tx_err_dst_release;
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
skb_dst_drop(skb);
if (fl6->flowi6_mark) {
skb_dst_set(skb, dst);
ndst = NULL;
} else {
skb_dst_set_noref(skb, dst);
}
proto = NEXTHDR_GRE;
if (encap_limit >= 0) {
init_tel_txopt(&opt, encap_limit);
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
}
skb_push(skb, gre_hlen);
skb_reset_network_header(skb);
skb_set_transport_header(skb, sizeof(*ipv6h));
/*
* Push down and install the IP header.
*/
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
ipv6h->hop_limit = tunnel->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
htons(ETH_P_TEB) : skb->protocol;
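/* The optional fields sit between the base header and the payload in
 * wire order checksum, key, sequence. ptr starts at the last 32-bit
 * slot and walks backwards, so the sequence number is written first
 * and the checksum slot ends up right after the flags/protocol words.
 */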
if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
__be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
if (tunnel->parms.o_flags&GRE_SEQ) {
++tunnel->o_seqno;
*ptr = htonl(tunnel->o_seqno);
ptr--;
}
if (tunnel->parms.o_flags&GRE_KEY) {
*ptr = tunnel->parms.o_key;
ptr--;
}
if (tunnel->parms.o_flags&GRE_CSUM) {
*ptr = 0;
*(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
skb->len - sizeof(struct ipv6hdr));
}
}
ip6tunnel_xmit(skb, dev);
if (ndst)
ip6_tnl_dst_store(tunnel, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
dst_link_failure(skb);
tx_err_dst_release:
dst_release(ndst);
return err;
}
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
const struct iphdr *iph = ip_hdr(skb);
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
__u32 mtu;
int err;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPIP;
dsfield = ipv4_get_dsfield(iph);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
& IPV6_TCLASS_MASK;
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
/* XXX: send ICMP error even if DF is not set. */
if (err == -EMSGSIZE)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
return -1;
}
return 0;
}
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
__u8 dsfield;
__u32 mtu;
int err;
if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
return -1;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
if (tel->encap_limit == 0) {
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_HDR_FIELD, offset + 2);
return -1;
}
encap_limit = tel->encap_limit - 1;
} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPV6;
dsfield = ipv6_get_dsfield(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
if (err == -EMSGSIZE)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
return -1;
}
return 0;
}
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 * Avoid trivial tunneling loop by checking that tunnel exit-point
 * doesn't match source of incoming packet.
 *
 * Return:
 * true if conflict,
 * false otherwise
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
const struct ipv6hdr *hdr)
{
return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
int encap_limit = -1;
struct flowi6 fl6;
__u32 mtu;
int err;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = skb->protocol;
err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
return err;
}
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net_device_stats *stats = &t->dev->stats;
int ret;
if (!ip6_tnl_xmit_ctl(t))
goto tx_err;
switch (skb->protocol) {
case htons(ETH_P_IP):
ret = ip6gre_xmit_ipv4(skb, dev);
break;
case htons(ETH_P_IPV6):
ret = ip6gre_xmit_ipv6(skb, dev);
break;
default:
ret = ip6gre_xmit_other(skb, dev);
break;
}
if (ret < 0)
goto tx_err;
return NETDEV_TX_OK;
tx_err:
stats->tx_errors++;
stats->tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
struct net_device *dev = t->dev;
struct __ip6_tnl_parm *p = &t->parms;
struct flowi6 *fl6 = &t->fl.u.ip6;
int addend = sizeof(struct ipv6hdr) + 4;
if (dev->type != ARPHRD_ETHER) {
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
}
/* Set up flowi template */
fl6->saddr = p->laddr;
fl6->daddr = p->raddr;
fl6->flowi6_oif = p->link;
fl6->flowlabel = 0;
if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
if (p->flags&IP6_TNL_F_CAP_XMIT &&
p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
dev->flags |= IFF_POINTOPOINT;
else
dev->flags &= ~IFF_POINTOPOINT;
dev->iflink = p->link;
/* Precalculate GRE options length */
if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
if (t->parms.o_flags&GRE_CSUM)
addend += 4;
if (t->parms.o_flags&GRE_KEY)
addend += 4;
if (t->parms.o_flags&GRE_SEQ)
addend += 4;
}
if (p->flags & IP6_TNL_F_CAP_XMIT) {
int strict = (ipv6_addr_type(&p->raddr) &
(IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
struct rt6_info *rt = rt6_lookup(dev_net(dev),
&p->raddr, &p->laddr,
p->link, strict);
if (rt == NULL)
return;
if (rt->dst.dev) {
dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
if (set_mtu) {
dev->mtu = rt->dst.dev->mtu - addend;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
}
ip6_rt_put(rt);
}
t->hlen = addend;
}
static int ip6gre_tnl_change(struct ip6_tnl *t,
const struct __ip6_tnl_parm *p, int set_mtu)
{
t->parms.laddr = p->laddr;
t->parms.raddr = p->raddr;
t->parms.flags = p->flags;
t->parms.hop_limit = p->hop_limit;
t->parms.encap_limit = p->encap_limit;
t->parms.flowinfo = p->flowinfo;
t->parms.link = p->link;
t->parms.proto = p->proto;
t->parms.i_key = p->i_key;
t->parms.o_key = p->o_key;
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
ip6_tnl_dst_reset(t);
ip6gre_tnl_link_config(t, set_mtu);
return 0;
}
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
const struct ip6_tnl_parm2 *u)
{
p->laddr = u->laddr;
p->raddr = u->raddr;
p->flags = u->flags;
p->hop_limit = u->hop_limit;
p->encap_limit = u->encap_limit;
p->flowinfo = u->flowinfo;
p->link = u->link;
p->i_key = u->i_key;
p->o_key = u->o_key;
p->i_flags = u->i_flags;
p->o_flags = u->o_flags;
memcpy(p->name, u->name, sizeof(u->name));
}
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
const struct __ip6_tnl_parm *p)
{
u->proto = IPPROTO_GRE;
u->laddr = p->laddr;
u->raddr = p->raddr;
u->flags = p->flags;
u->hop_limit = p->hop_limit;
u->encap_limit = p->encap_limit;
u->flowinfo = p->flowinfo;
u->link = p->link;
u->i_key = p->i_key;
u->o_key = p->o_key;
u->i_flags = p->i_flags;
u->o_flags = p->o_flags;
memcpy(u->name, p->name, sizeof(u->name));
}
static int ip6gre_tunnel_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip6_tnl_parm2 p;
struct __ip6_tnl_parm p1;
struct ip6_tnl *t;
struct net *net = dev_net(dev);
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
switch (cmd) {
case SIOCGETTUNNEL:
t = NULL;
if (dev == ign->fb_tunnel_dev) {
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
err = -EFAULT;
break;
}
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, 0);
}
if (t == NULL)
t = netdev_priv(dev);
memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
break;
case SIOCADDTUNNEL:
case SIOCCHGTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -EINVAL;
if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
goto done;
if (!(p.i_flags&GRE_KEY))
p.i_key = 0;
if (!(p.o_flags&GRE_KEY))
p.o_key = 0;
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t != NULL) {
if (t->dev != dev) {
err = -EEXIST;
break;
}
} else {
t = netdev_priv(dev);
ip6gre_tunnel_unlink(ign, t);
synchronize_net();
ip6gre_tnl_change(t, &p1, 1);
ip6gre_tunnel_link(ign, t);
netdev_state_change(dev);
}
}
if (t) {
err = 0;
memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
} else
err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
break;
case SIOCDELTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
if (dev == ign->fb_tunnel_dev) {
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -ENOENT;
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, 0);
if (t == NULL)
goto done;
err = -EPERM;
if (t == netdev_priv(ign->fb_tunnel_dev))
goto done;
dev = t->dev;
}
unregister_netdevice(dev);
err = 0;
break;
default:
err = -EINVAL;
}
done:
return err;
}
static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
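/* 68 is the historical IPv4 minimum MTU; 0xFFF8 is the largest IPv6
 * payload length rounded down to a multiple of 8, from which our own
 * link and GRE headers must still be subtracted.
 */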
if (new_mtu < 68 ||
new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
__be16 *p = (__be16 *)(ipv6h+1);
ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
ipv6h->daddr = t->parms.raddr;
p[0] = t->parms.o_flags;
p[1] = htons(type);
/*
* Set the source hardware address.
*/
if (saddr)
memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
if (daddr)
memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
if (!ipv6_addr_any(&ipv6h->daddr))
return t->hlen;
return -t->hlen;
}
static const struct header_ops ip6gre_header_ops = {
.create = ip6gre_header,
};
static const struct net_device_ops ip6gre_netdev_ops = {
.ndo_init = ip6gre_tunnel_init,
.ndo_uninit = ip6gre_tunnel_uninit,
.ndo_start_xmit = ip6gre_tunnel_xmit,
.ndo_do_ioctl = ip6gre_tunnel_ioctl,
.ndo_change_mtu = ip6gre_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
};
static void ip6gre_dev_free(struct net_device *dev)
{
free_percpu(dev->tstats);
free_netdev(dev);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
{
struct ip6_tnl *t;
dev->netdev_ops = &ip6gre_netdev_ops;
dev->destructor = ip6gre_dev_free;
dev->type = ARPHRD_IP6GRE;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
t = netdev_priv(dev);
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
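/* Resulting default MTU: 1500 (ETH_DATA_LEN) - 40 (outer IPv6 header)
 * - 4 (GRE base header) = 1456, less another 8 bytes when an
 * encapsulation limit option is in use = 1448.
 */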
dev->flags |= IFF_NOARP;
dev->iflink = 0;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static int ip6gre_tunnel_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
tunnel = netdev_priv(dev);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
if (ipv6_addr_any(&tunnel->parms.raddr))
dev->header_ops = &ip6gre_header_ops;
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
return 0;
}
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
dev_hold(dev);
}
static struct inet6_protocol ip6gre_protocol __read_mostly = {
.handler = ip6gre_rcv,
.err_handler = ip6gre_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
struct list_head *head)
{
int prio;
for (prio = 0; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
struct ip6_tnl *t;
t = rtnl_dereference(ign->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
t = rtnl_dereference(t->next);
}
}
}
}
static int __net_init ip6gre_init_net(struct net *net)
{
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
int err;
ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
ip6gre_tunnel_setup);
if (!ign->fb_tunnel_dev) {
err = -ENOMEM;
goto err_alloc_dev;
}
dev_net_set(ign->fb_tunnel_dev, net);
ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
err = register_netdev(ign->fb_tunnel_dev);
if (err)
goto err_reg_dev;
rcu_assign_pointer(ign->tunnels_wc[0],
netdev_priv(ign->fb_tunnel_dev));
return 0;
err_reg_dev:
ip6gre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
return err;
}
static void __net_exit ip6gre_exit_net(struct net *net)
{
struct ip6gre_net *ign;
LIST_HEAD(list);
ign = net_generic(net, ip6gre_net_id);
rtnl_lock();
ip6gre_destroy_tunnels(ign, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations ip6gre_net_ops = {
.init = ip6gre_init_net,
.exit = ip6gre_exit_net,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
__be16 flags;
if (!data)
return 0;
flags = 0;
if (data[IFLA_GRE_IFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
if (data[IFLA_GRE_OFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
if (flags & (GRE_VERSION|GRE_ROUTING))
return -EINVAL;
return 0;
}
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
struct in6_addr daddr;
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (!data)
goto out;
if (data[IFLA_GRE_REMOTE]) {
nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
if (ipv6_addr_any(&daddr))
return -EINVAL;
}
out:
return ip6gre_tunnel_validate(tb, data);
}
static void ip6gre_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
{
memset(parms, 0, sizeof(*parms));
if (!data)
return;
if (data[IFLA_GRE_LINK])
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
if (data[IFLA_GRE_IFLAGS])
parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
if (data[IFLA_GRE_OFLAGS])
parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
if (data[IFLA_GRE_IKEY])
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
if (data[IFLA_GRE_OKEY])
parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
if (data[IFLA_GRE_LOCAL])
nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
if (data[IFLA_GRE_REMOTE])
nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
if (data[IFLA_GRE_TTL])
parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
if (data[IFLA_GRE_ENCAP_LIMIT])
parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
if (data[IFLA_GRE_FLOWINFO])
parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
if (data[IFLA_GRE_FLAGS])
parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
}
static int ip6gre_tap_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
tunnel = netdev_priv(dev);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
ip6gre_tnl_link_config(tunnel, 1);
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
return 0;
}
static const struct net_device_ops ip6gre_tap_netdev_ops = {
.ndo_init = ip6gre_tap_init,
.ndo_uninit = ip6gre_tunnel_uninit,
.ndo_start_xmit = ip6gre_tunnel_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip6gre_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
};
static void ip6gre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &ip6gre_tap_netdev_ops;
dev->destructor = ip6gre_dev_free;
dev->iflink = 0;
dev->features |= NETIF_F_NETNS_LOCAL;
}
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct ip6_tnl *nt;
struct net *net = dev_net(dev);
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
int err;
nt = netdev_priv(dev);
ip6gre_netlink_parms(data, &nt->parms);
if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
return -EEXIST;
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
nt->dev = dev;
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
/* Can use a lockless transmit, unless we generate output sequences */
if (!(nt->parms.o_flags & GRE_SEQ))
dev->features |= NETIF_F_LLTX;
err = register_netdevice(dev);
if (err)
goto out;
dev_hold(dev);
ip6gre_tunnel_link(ign, nt);
out:
return err;
}
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
struct ip6_tnl *t, *nt;
struct net *net = dev_net(dev);
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
struct __ip6_tnl_parm p;
if (dev == ign->fb_tunnel_dev)
return -EINVAL;
nt = netdev_priv(dev);
ip6gre_netlink_parms(data, &p);
t = ip6gre_tunnel_locate(net, &p, 0);
if (t) {
if (t->dev != dev)
return -EEXIST;
} else {
t = nt;
ip6gre_tunnel_unlink(ign, t);
ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
ip6gre_tunnel_link(ign, t);
netdev_state_change(dev);
}
return 0;
}
static size_t ip6gre_get_size(const struct net_device *dev)
{
return
/* IFLA_GRE_LINK */
nla_total_size(4) +
/* IFLA_GRE_IFLAGS */
nla_total_size(2) +
/* IFLA_GRE_OFLAGS */
nla_total_size(2) +
/* IFLA_GRE_IKEY */
nla_total_size(4) +
/* IFLA_GRE_OKEY */
nla_total_size(4) +
/* IFLA_GRE_LOCAL */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_GRE_REMOTE */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_GRE_TTL */
nla_total_size(1) +
/* IFLA_GRE_TOS */
nla_total_size(1) +
/* IFLA_GRE_ENCAP_LIMIT */
nla_total_size(1) +
/* IFLA_GRE_FLOWINFO */
nla_total_size(4) +
/* IFLA_GRE_FLAGS */
nla_total_size(4) +
0;
}
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm *p = &t->parms;
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
/*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_LINK] = { .type = NLA_U32 },
[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
[IFLA_GRE_IKEY] = { .type = NLA_U32 },
[IFLA_GRE_OKEY] = { .type = NLA_U32 },
[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
[IFLA_GRE_TTL] = { .type = NLA_U8 },
[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
[IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
[IFLA_GRE_FLAGS] = { .type = NLA_U32 },
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
.kind = "ip6gre",
.maxtype = IFLA_GRE_MAX,
.policy = ip6gre_policy,
.priv_size = sizeof(struct ip6_tnl),
.setup = ip6gre_tunnel_setup,
.validate = ip6gre_tunnel_validate,
.newlink = ip6gre_newlink,
.changelink = ip6gre_changelink,
.get_size = ip6gre_get_size,
.fill_info = ip6gre_fill_info,
};
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
.kind = "ip6gretap",
.maxtype = IFLA_GRE_MAX,
.policy = ip6gre_policy,
.priv_size = sizeof(struct ip6_tnl),
.setup = ip6gre_tap_setup,
.validate = ip6gre_tap_validate,
.newlink = ip6gre_newlink,
.changelink = ip6gre_changelink,
.get_size = ip6gre_get_size,
.fill_info = ip6gre_fill_info,
};
/*
* And now the module's code and kernel interface.
*/
static int __init ip6gre_init(void)
{
int err;
pr_info("GRE over IPv6 tunneling driver\n");
err = register_pernet_device(&ip6gre_net_ops);
if (err < 0)
return err;
err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
}
err = rtnl_link_register(&ip6gre_link_ops);
if (err < 0)
goto rtnl_link_failed;
err = rtnl_link_register(&ip6gre_tap_ops);
if (err < 0)
goto tap_ops_failed;
out:
return err;
tap_ops_failed:
rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
unregister_pernet_device(&ip6gre_net_ops);
goto out;
}
static void __exit ip6gre_fini(void)
{
rtnl_link_unregister(&ip6gre_tap_ops);
rtnl_link_unregister(&ip6gre_link_ops);
inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
unregister_pernet_device(&ip6gre_net_ops);
}
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_NETDEV("ip6gre0");
| gpl-2.0 |
pritanshchandra/purex_kernel_xolo_black | drivers/usb/serial/pl2303.c | 749 | 24527 | /*
* Prolific PL2303 USB to serial adaptor driver
*
* Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2003 IBM Corp.
*
* Original driver for 2.2.x by anonymous
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "pl2303.h"
/*
* Version Information
*/
#define DRIVER_DESC "Prolific PL2303 USB to serial adaptor driver"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
{ USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID) },
{ USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID_2080) },
{ USB_DEVICE(MA620_VENDOR_ID, MA620_PRODUCT_ID) },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID) },
{ USB_DEVICE(TRIPP_VENDOR_ID, TRIPP_PRODUCT_ID) },
{ USB_DEVICE(RADIOSHACK_VENDOR_ID, RADIOSHACK_PRODUCT_ID) },
{ USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_ID_S81) }, /* Benq/Siemens S81 */
{ USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) },
{ USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) },
{ USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID) },
{ USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) },
{ USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) },
{ USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
{ USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
{ USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
{ USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
#define SET_LINE_REQUEST_TYPE 0x21
#define SET_LINE_REQUEST 0x20
#define SET_CONTROL_REQUEST_TYPE 0x21
#define SET_CONTROL_REQUEST 0x22
#define CONTROL_DTR 0x01
#define CONTROL_RTS 0x02
#define BREAK_REQUEST_TYPE 0x21
#define BREAK_REQUEST 0x23
#define BREAK_ON 0xffff
#define BREAK_OFF 0x0000
#define GET_LINE_REQUEST_TYPE 0xa1
#define GET_LINE_REQUEST 0x21
#define VENDOR_WRITE_REQUEST_TYPE 0x40
#define VENDOR_WRITE_REQUEST 0x01
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
#define UART_STATE 0x08
#define UART_STATE_TRANSIENT_MASK 0x74
#define UART_DCD 0x01
#define UART_DSR 0x02
#define UART_BREAK_ERROR 0x04
#define UART_RING 0x08
#define UART_FRAME_ERROR 0x10
#define UART_PARITY_ERROR 0x20
#define UART_OVERRUN_ERROR 0x40
#define UART_CTS 0x80
enum pl2303_type {
type_0, /* we don't know the difference between type 0 and */
type_1, /* type 1, until someone from Prolific tells us... */
HX, /* HX version of the pl2303 chip */
};
struct pl2303_serial_private {
enum pl2303_type type;
};
struct pl2303_private {
spinlock_t lock;
u8 line_control;
u8 line_status;
u8 line_settings[7];
};
static int pl2303_vendor_read(__u16 value, __u16 index,
struct usb_serial *serial, unsigned char *buf)
{
int res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
value, index, buf, 1, 100);
dev_dbg(&serial->interface->dev, "0x%x:0x%x:0x%x:0x%x %d - %x\n",
VENDOR_READ_REQUEST_TYPE, VENDOR_READ_REQUEST, value, index,
res, buf[0]);
return res;
}
static int pl2303_vendor_write(__u16 value, __u16 index,
struct usb_serial *serial)
{
int res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
VENDOR_WRITE_REQUEST, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
dev_dbg(&serial->interface->dev, "0x%x:0x%x:0x%x:0x%x %d\n",
VENDOR_WRITE_REQUEST_TYPE, VENDOR_WRITE_REQUEST, value, index,
res);
return res;
}
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
enum pl2303_type type = type_0;
unsigned char *buf;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
return -ENOMEM;
buf = kmalloc(10, GFP_KERNEL);
if (!buf) {
kfree(spriv);
return -ENOMEM;
}
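/* Heuristic chip-type detection: only the HX variant uses a 64-byte
 * ep0 max packet; the older type 0/1 parts are distinguished, as far
 * as we can, by bDeviceClass.
 */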
if (serial->dev->descriptor.bDeviceClass == 0x02)
type = type_0;
else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
type = HX;
else if (serial->dev->descriptor.bDeviceClass == 0x00)
type = type_1;
else if (serial->dev->descriptor.bDeviceClass == 0xFF)
type = type_1;
dev_dbg(&serial->interface->dev, "device type: %d\n", type);
spriv->type = type;
usb_set_serial_data(serial, spriv);
pl2303_vendor_read(0x8484, 0, serial, buf);
pl2303_vendor_write(0x0404, 0, serial);
pl2303_vendor_read(0x8484, 0, serial, buf);
pl2303_vendor_read(0x8383, 0, serial, buf);
pl2303_vendor_read(0x8484, 0, serial, buf);
pl2303_vendor_write(0x0404, 1, serial);
pl2303_vendor_read(0x8484, 0, serial, buf);
pl2303_vendor_read(0x8383, 0, serial, buf);
pl2303_vendor_write(0, 1, serial);
pl2303_vendor_write(1, 0, serial);
if (type == HX)
pl2303_vendor_write(2, 0x44, serial);
else
pl2303_vendor_write(2, 0x24, serial);
kfree(buf);
return 0;
}
static void pl2303_release(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
spriv = usb_get_serial_data(serial);
kfree(spriv);
}
static int pl2303_port_probe(struct usb_serial_port *port)
{
struct pl2303_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
static int pl2303_port_remove(struct usb_serial_port *port)
{
struct pl2303_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
return 0;
}
static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
{
struct usb_device *dev = port->serial->dev;
int retval;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
SET_CONTROL_REQUEST, SET_CONTROL_REQUEST_TYPE,
value, 0, NULL, 0, 100);
dev_dbg(&port->dev, "%s - value = %d, retval = %d\n", __func__,
value, retval);
return retval;
}
static void pl2303_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag;
unsigned char *buf;
int baud;
int i;
u8 control;
const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
4800, 7200, 9600, 14400, 19200, 28800, 38400,
57600, 115200, 230400, 460800, 614400,
921600, 1228800, 2457600, 3000000, 6000000 };
int baud_floor, baud_ceil;
int k;
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
cflag = tty->termios.c_cflag;
buf = kzalloc(7, GFP_KERNEL);
if (!buf) {
dev_err(&port->dev, "%s - out of memory.\n", __func__);
/* Report back no change occurred */
if (old_termios)
tty->termios = *old_termios;
return;
}
i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
0, 0, buf, 7, 100);
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %x %x %x %x %x %x %x\n", i,
buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
switch (cflag & CSIZE) {
case CS5:
buf[6] = 5;
break;
case CS6:
buf[6] = 6;
break;
case CS7:
buf[6] = 7;
break;
default:
case CS8:
buf[6] = 8;
break;
}
dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
/* For reference: buf[0]..buf[3] hold the baud rate value */
/* NOTE: Only the values defined in baud_sup are supported!
 * => if unsupported values are set, the PL2303 seems to use
 * 9600 baud (at least my PL2303X always does)
 */
baud = tty_get_baud_rate(tty);
dev_dbg(&port->dev, "baud requested = %d\n", baud);
if (baud) {
/* Set baudrate to nearest supported value */
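/* e.g. a request for 10000 baud: the first table entry >= 10000
 * is 14400 and the floor is 9600; 14400 % 10000 = 4400 exceeds
 * 10000 % 9600 = 400, so 9600 is chosen.
 */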
for (k=0; k<ARRAY_SIZE(baud_sup); k++) {
if (baud_sup[k] / baud) {
baud_ceil = baud_sup[k];
if (k==0) {
baud = baud_ceil;
} else {
baud_floor = baud_sup[k-1];
if ((baud_ceil % baud)
> (baud % baud_floor))
baud = baud_floor;
else
baud = baud_ceil;
}
break;
}
}
if (baud > 1228800) {
/* type_0, type_1 only support up to 1228800 baud */
if (spriv->type != HX)
baud = 1228800;
else if (baud > 6000000)
baud = 6000000;
}
dev_dbg(&port->dev, "baud set = %d\n", baud);
if (baud <= 115200) {
buf[0] = baud & 0xff;
buf[1] = (baud >> 8) & 0xff;
buf[2] = (baud >> 16) & 0xff;
buf[3] = (baud >> 24) & 0xff;
} else {
/* apparently the formula for higher speeds is:
* baudrate = 12M * 32 / (2^buf[1]) / buf[0]
*/
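/* e.g. for 230400 baud: tmp = 384000000 / 230400 = 1666; two rounds
 * of the loop below leave tmp = 104 and buf[1] = 4, encoding
 * 384000000 / 2^4 / 104 = 230769 baud, within 0.2% of the request.
 */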
unsigned tmp = 12*1000*1000*32 / baud;
buf[3] = 0x80;
buf[2] = 0;
buf[1] = (tmp >= 256);
while (tmp >= 256) {
tmp >>= 2;
buf[1] <<= 1;
}
buf[0] = tmp;
}
}
/* For reference buf[4]=0 is 1 stop bit */
/* For reference buf[4]=1 is 1.5 stop bits */
/* For reference buf[4]=2 is 2 stop bits */
if (cflag & CSTOPB) {
/* NOTE: Comply with "real" UARTs / RS232:
* use 1.5 instead of 2 stop bits with 5 data bits
*/
if ((cflag & CSIZE) == CS5) {
buf[4] = 1;
dev_dbg(&port->dev, "stop bits = 1.5\n");
} else {
buf[4] = 2;
dev_dbg(&port->dev, "stop bits = 2\n");
}
} else {
buf[4] = 0;
dev_dbg(&port->dev, "stop bits = 1\n");
}
if (cflag & PARENB) {
/* For reference buf[5]=0 is no parity */
/* For reference buf[5]=1 is odd parity */
/* For reference buf[5]=2 is even parity */
/* For reference buf[5]=3 is mark parity */
/* For reference buf[5]=4 is space parity */
if (cflag & PARODD) {
if (cflag & CMSPAR) {
buf[5] = 3;
dev_dbg(&port->dev, "parity = mark\n");
} else {
buf[5] = 1;
dev_dbg(&port->dev, "parity = odd\n");
}
} else {
if (cflag & CMSPAR) {
buf[5] = 4;
dev_dbg(&port->dev, "parity = space\n");
} else {
buf[5] = 2;
dev_dbg(&port->dev, "parity = even\n");
}
}
} else {
buf[5] = 0;
dev_dbg(&port->dev, "parity = none\n");
}
/*
* Some PL2303 are known to lose bytes if you change serial settings
* even to the same values as before. Thus we actually need to filter
* in this specific case.
*
* Note that the tty_termios_hw_change check above is not sufficient
* as a previously requested baud rate may differ from the one
* actually used (and stored in old_termios).
*
* NOTE: No additional locking needed for line_settings as it is
* only used in set_termios, which is serialised against itself.
*/
if (!old_termios || memcmp(buf, priv->line_settings, 7)) {
i = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
0, 0, buf, 7, 100);
dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i);
if (i == 7)
memcpy(priv->line_settings, buf, 7);
}
/* change control lines if we are switching to or from B0 */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
pl2303_set_control_lines(port, control);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
}
buf[0] = buf[1] = buf[2] = buf[3] = buf[4] = buf[5] = buf[6] = 0;
i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
0, 0, buf, 7, 100);
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %x %x %x %x %x %x %x\n", i,
buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
if (cflag & CRTSCTS) {
if (spriv->type == HX)
pl2303_vendor_write(0x0, 0x61, serial);
else
pl2303_vendor_write(0x0, 0x41, serial);
} else {
pl2303_vendor_write(0x0, 0x0, serial);
}
/* Save resulting baud rate */
if (baud)
tty_encode_baud_rate(tty, baud, baud);
kfree(buf);
}
static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
spin_lock_irqsave(&priv->lock, flags);
/* Change DTR and RTS */
if (on)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
else
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
pl2303_set_control_lines(port, control);
}
static void pl2303_close(struct usb_serial_port *port)
{
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
}
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
if (spriv->type != HX) {
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
pl2303_vendor_write(8, 0, serial);
pl2303_vendor_write(9, 0, serial);
}
/* Setup termios */
if (tty)
pl2303_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
return result;
}
result = usb_serial_generic_open(tty, port);
if (result) {
usb_kill_urb(port->interrupt_in_urb);
return result;
}
port->port.drain_delay = 256;
return 0;
}
static int pl2303_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
int ret;
spin_lock_irqsave(&priv->lock, flags);
if (set & TIOCM_RTS)
priv->line_control |= CONTROL_RTS;
if (set & TIOCM_DTR)
priv->line_control |= CONTROL_DTR;
if (clear & TIOCM_RTS)
priv->line_control &= ~CONTROL_RTS;
if (clear & TIOCM_DTR)
priv->line_control &= ~CONTROL_DTR;
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
ret = pl2303_set_control_lines(port, control);
if (ret)
return usb_translate_errors(ret);
return 0;
}
static int pl2303_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int mcr;
unsigned int status;
unsigned int result;
spin_lock_irqsave(&priv->lock, flags);
mcr = priv->line_control;
status = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
result = ((mcr & CONTROL_DTR) ? TIOCM_DTR : 0)
| ((mcr & CONTROL_RTS) ? TIOCM_RTS : 0)
| ((status & UART_CTS) ? TIOCM_CTS : 0)
| ((status & UART_DSR) ? TIOCM_DSR : 0)
| ((status & UART_RING) ? TIOCM_RI : 0)
| ((status & UART_DCD) ? TIOCM_CD : 0);
dev_dbg(&port->dev, "%s - result = %x\n", __func__, result);
return result;
}
static int pl2303_carrier_raised(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
if (priv->line_status & UART_DCD)
return 1;
return 0;
}
static int pl2303_tiocmiwait(struct tty_struct *tty, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int prevstatus;
unsigned int status;
unsigned int changed;
spin_lock_irqsave(&priv->lock, flags);
prevstatus = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
while (1) {
interruptible_sleep_on(&port->port.delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
if (port->serial->disconnected)
return -EIO;
spin_lock_irqsave(&priv->lock, flags);
status = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
changed = prevstatus ^ status;
if (((arg & TIOCM_RNG) && (changed & UART_RING)) ||
((arg & TIOCM_DSR) && (changed & UART_DSR)) ||
((arg & TIOCM_CD) && (changed & UART_DCD)) ||
((arg & TIOCM_CTS) && (changed & UART_CTS))) {
return 0;
}
prevstatus = status;
}
/* NOTREACHED */
return 0;
}
static int pl2303_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s cmd = 0x%04x\n", __func__, cmd);
switch (cmd) {
case TIOCGSERIAL:
memset(&ser, 0, sizeof ser);
ser.type = PORT_16654;
ser.line = port->serial->minor;
ser.port = port->number;
ser.baud_base = 460800;
if (copy_to_user((void __user *)arg, &ser, sizeof ser))
return -EFAULT;
return 0;
default:
dev_dbg(&port->dev, "%s not supported = 0x%04x\n", __func__, cmd);
break;
}
return -ENOIOCTLCMD;
}
static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
u16 state;
int result;
if (break_state == 0)
state = BREAK_OFF;
else
state = BREAK_ON;
dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
state == BREAK_OFF ? "off" : "on");
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
0, NULL, 0, 100);
if (result)
dev_err(&port->dev, "error sending break = %d\n", result);
}
static void pl2303_update_line_status(struct usb_serial_port *port,
unsigned char *data,
unsigned int actual_length)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned long flags;
u8 status_idx = UART_STATE;
u8 length = UART_STATE + 1;
u8 prev_line_status;
u16 idv, idp;
idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
idp = le16_to_cpu(port->serial->dev->descriptor.idProduct);
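/* Some Siemens phones report only a single status byte at offset 0
 * instead of the usual packet with the UART state at offset 8.
 */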
if (idv == SIEMENS_VENDOR_ID) {
if (idp == SIEMENS_PRODUCT_ID_X65 ||
idp == SIEMENS_PRODUCT_ID_SX1 ||
idp == SIEMENS_PRODUCT_ID_X75) {
length = 1;
status_idx = 0;
}
}
if (actual_length < length)
return;
/* Save off the uart status for others to look at */
spin_lock_irqsave(&priv->lock, flags);
prev_line_status = priv->line_status;
priv->line_status = data[status_idx];
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->line_status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
wake_up_interruptible(&port->port.delta_msr_wait);
tty = tty_port_tty_get(&port->port);
if (!tty)
return;
if ((priv->line_status ^ prev_line_status) & UART_DCD)
usb_serial_handle_dcd_change(port, tty,
priv->line_status & UART_DCD);
tty_kref_put(tty);
}
static void pl2303_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned int actual_length = urb->actual_length;
int status = urb->status;
int retval;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__,
urb->actual_length, urb->transfer_buffer);
pl2303_update_line_status(port, data, actual_length);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
static void pl2303_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
u8 line_status;
int i;
/* update line status */
spin_lock_irqsave(&priv->lock, flags);
line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
wake_up_interruptible(&port->port.delta_msr_wait);
if (!urb->actual_length)
return;
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
tty_flag = TTY_BREAK;
else if (line_status & UART_PARITY_ERROR)
tty_flag = TTY_PARITY;
else if (line_status & UART_FRAME_ERROR)
tty_flag = TTY_FRAME;
dev_dbg(&port->dev, "%s - tty_flag = %d\n", __func__, tty_flag);
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(port, data[i]))
tty_insert_flip_char(&port->port, data[i],
tty_flag);
} else {
tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
}
tty_flip_buffer_push(&port->port);
}
/* All of the device info needed for the PL2303 SIO serial converter */
static struct usb_serial_driver pl2303_device = {
.driver = {
.owner = THIS_MODULE,
.name = "pl2303",
},
.id_table = id_table,
.num_ports = 1,
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = pl2303_open,
.close = pl2303_close,
.dtr_rts = pl2303_dtr_rts,
.carrier_raised = pl2303_carrier_raised,
.ioctl = pl2303_ioctl,
.break_ctl = pl2303_break_ctl,
.set_termios = pl2303_set_termios,
.tiocmget = pl2303_tiocmget,
.tiocmset = pl2303_tiocmset,
.tiocmiwait = pl2303_tiocmiwait,
.process_read_urb = pl2303_process_read_urb,
.read_int_callback = pl2303_read_int_callback,
.attach = pl2303_startup,
.release = pl2303_release,
.port_probe = pl2303_port_probe,
.port_remove = pl2303_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&pl2303_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
arkusuma/mediapad-kernel-ics | kernel/power/process.c | 749 | 4480 | /*
* kernel/power/process.c - Functions for starting/stopping processes on
* suspend transitions.
*
* Originally from swsusp.
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/wakelock.h>
#include "power.h"
/*
* Timeout for stopping processes
*/
#define TIMEOUT (20 * HZ)
static inline int freezable(struct task_struct * p)
{
if ((p == current) ||
(p->flags & PF_NOFREEZE) ||
(p->exit_state != 0))
return 0;
return 1;
}
static int try_to_freeze_tasks(bool sig_only)
{
struct task_struct *g, *p;
unsigned long end_time;
unsigned int todo;
bool wq_busy = false;
struct timeval start, end;
u64 elapsed_csecs64;
unsigned int elapsed_csecs;
bool wakeup = false;
do_gettimeofday(&start);
end_time = jiffies + TIMEOUT;
if (!sig_only)
freeze_workqueues_begin();
while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (frozen(p) || !freezable(p))
continue;
if (!freeze_task(p, sig_only))
continue;
/*
* Now that we've done set_freeze_flag, don't
* perturb a task in TASK_STOPPED or TASK_TRACED.
* It is "frozen enough". If the task does wake
* up, it will immediately call try_to_freeze.
*
* Because freeze_task() goes through p's
* scheduler lock after setting TIF_FREEZE, it's
* guaranteed that either we see TASK_RUNNING or
* try_to_stop() after schedule() in ptrace/signal
* stop sees TIF_FREEZE.
*/
if (!task_is_stopped_or_traced(p) &&
!freezer_should_skip(p))
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
if (!sig_only) {
wq_busy = freeze_workqueues_busy();
todo += wq_busy;
}
if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
wakeup = 1;
break;
}
if (!todo || time_after(jiffies, end_time))
break;
if (pm_wakeup_pending()) {
wakeup = true;
break;
}
/*
* We need to retry, but first give the freezing tasks some
* time to enter the refrigerator.
*/
msleep(10);
}
do_gettimeofday(&end);
elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
elapsed_csecs = elapsed_csecs64;
if (todo) {
/* This does not unfreeze processes that are already frozen
* (we have a slightly ugly calling convention in that respect,
* and the caller must call thaw_processes() if something fails),
* but it cleans up leftover PF_FREEZE requests.
*/
if(wakeup) {
printk("\n");
printk(KERN_ERR "Freezing of %s aborted\n",
sig_only ? "user space " : "tasks ");
}
else {
printk("\n");
printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
elapsed_csecs / 100, elapsed_csecs % 100,
todo - wq_busy, wq_busy);
}
thaw_workqueues();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
if (freezing(p) && !freezer_should_skip(p) &&
elapsed_csecs > 100)
sched_show_task(p);
cancel_freezing(p);
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
} else {
printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
elapsed_csecs % 100);
}
return todo ? -EBUSY : 0;
}
/**
* freeze_processes - tell processes to enter the refrigerator
*/
int freeze_processes(void)
{
int error;
printk("Freezing user space processes ... ");
error = try_to_freeze_tasks(true);
if (error)
goto Exit;
printk("done.\n");
error = suspend_sys_sync_wait();
if (error)
goto Exit;
printk("Freezing remaining freezable tasks ... ");
error = try_to_freeze_tasks(false);
if (error)
goto Exit;
printk("done.");
oom_killer_disable();
Exit:
BUG_ON(in_atomic());
printk("\n");
return error;
}
static void thaw_tasks(bool nosig_only)
{
struct task_struct *g, *p;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (!freezable(p))
continue;
if (nosig_only && should_send_signal(p))
continue;
if (cgroup_freezing_or_frozen(p))
continue;
thaw_process(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
void thaw_processes(void)
{
oom_killer_enable();
printk("Restarting tasks ... ");
thaw_workqueues();
thaw_tasks(true);
thaw_tasks(false);
schedule();
printk("done.\n");
}
| gpl-2.0 |
h0tw1r3/kernel_samsung_sghi717 | drivers/net/bonding/bond_alb.c | 749 | 43742 | /*
* Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/ipx.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
#include "bonding.h"
#include "bond_alb.h"
#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif
static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
#pragma pack(1)
struct learning_pkt {
u8 mac_dst[ETH_ALEN];
u8 mac_src[ETH_ALEN];
__be16 type;
u8 padding[ETH_ZLEN - ETH_HLEN];
};
struct arp_pkt {
__be16 hw_addr_space;
__be16 prot_addr_space;
u8 hw_addr_len;
u8 prot_addr_len;
__be16 op_code;
u8 mac_src[ETH_ALEN]; /* sender hardware address */
__be32 ip_src; /* sender IP address */
u8 mac_dst[ETH_ALEN]; /* target hardware address */
__be32 ip_dst; /* target IP address */
};
#pragma pack()
static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
{
return (struct arp_pkt *)skb_network_header(skb);
}
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
int i;
u8 hash = 0;
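/* XOR-fold the bytes; e.g. for the IPv4 address 192.168.1.10 this is
 * 0xc0 ^ 0xa8 ^ 0x01 ^ 0x0a = 0x63.
 */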
for (i = 0; i < hash_size; i++) {
hash ^= hash_start[i];
}
return hash;
}
/*********************** tlb specific functions ***************************/
static inline void _lock_tx_hashtbl(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
static inline void _unlock_tx_hashtbl(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
/* Caller must hold tx_hashtbl lock */
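/* load_history becomes roughly the flow's byte count per rebalance
 * interval (the +1 keeps the weight non-zero), so a flow that is
 * later re-assigned contributes a realistic starting load.
 */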
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
if (save_load) {
entry->load_history = 1 + entry->tx_bytes /
BOND_TLB_REBALANCE_INTERVAL;
entry->tx_bytes = 0;
}
entry->tx_slave = NULL;
entry->next = TLB_NULL_INDEX;
entry->prev = TLB_NULL_INDEX;
}
static inline void tlb_init_slave(struct slave *slave)
{
SLAVE_TLB_INFO(slave).load = 0;
SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
/* Caller must hold bond lock for read */
static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
{
struct tlb_client_info *tx_hash_table;
u32 index;
_lock_tx_hashtbl(bond);
/* clear slave from tx_hashtbl */
tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
/* skip this if we've already freed the tx hash table */
if (tx_hash_table) {
index = SLAVE_TLB_INFO(slave).head;
while (index != TLB_NULL_INDEX) {
u32 next_index = tx_hash_table[index].next;
tlb_init_table_entry(&tx_hash_table[index], save_load);
index = next_index;
}
}
tlb_init_slave(slave);
_unlock_tx_hashtbl(bond);
}
/* Must be called before starting the monitor timer */
static int tlb_initialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
struct tlb_client_info *new_hashtbl;
int i;
new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
pr_err("%s: Error: Failed to allocate TLB hash table\n",
bond->dev->name);
return -1;
}
_lock_tx_hashtbl(bond);
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
_unlock_tx_hashtbl(bond);
return 0;
}
/* Must be called only after all slaves have been released */
static void tlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
_lock_tx_hashtbl(bond);
kfree(bond_info->tx_hashtbl);
bond_info->tx_hashtbl = NULL;
_unlock_tx_hashtbl(bond);
}
static long long compute_gap(struct slave *slave)
{
return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
(s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
}
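/* Worked example (added): slave->speed is in Mbit/s, so speed << 20
 * approximates the link capacity in bits/s, while load << 3 converts the
 * assigned byte load to bits. For a 100 Mbit/s slave carrying 1 MByte of
 * hashed load:
 *
 *	gap = (100 << 20) - (1048576 << 3)
 *	    = 104857600 - 8388608 bits
 *
 * The slave with the largest gap is the least loaded.
 */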
/* Caller must hold bond lock for read */
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
long long max_gap;
int i;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
bond_for_each_slave(bond, slave, i) {
if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave);
if (max_gap < gap) {
least_loaded = slave;
max_gap = gap;
}
}
}
return least_loaded;
}
/* Caller must hold bond lock for read */
static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct tlb_client_info *hash_table;
struct slave *assigned_slave;
_lock_tx_hashtbl(bond);
hash_table = bond_info->tx_hashtbl;
assigned_slave = hash_table[hash_index].tx_slave;
if (!assigned_slave) {
assigned_slave = tlb_get_least_loaded_slave(bond);
if (assigned_slave) {
struct tlb_slave_info *slave_info =
&(SLAVE_TLB_INFO(assigned_slave));
u32 next_index = slave_info->head;
hash_table[hash_index].tx_slave = assigned_slave;
hash_table[hash_index].next = next_index;
hash_table[hash_index].prev = TLB_NULL_INDEX;
if (next_index != TLB_NULL_INDEX) {
hash_table[next_index].prev = hash_index;
}
slave_info->head = hash_index;
slave_info->load +=
hash_table[hash_index].load_history;
}
}
if (assigned_slave) {
hash_table[hash_index].tx_bytes += skb_len;
}
_unlock_tx_hashtbl(bond);
return assigned_slave;
}
/*********************** rlb specific functions ***************************/
static inline void _lock_rx_hashtbl(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
static inline void _unlock_rx_hashtbl(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
/* when an ARP REPLY is received from a client, update the client's info
 * in the rx_hashtbl
 */
static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
_lock_rx_hashtbl(bond);
hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->assigned) &&
(client_info->ip_src == arp->ip_dst) &&
(client_info->ip_dst == arp->ip_src) &&
(compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the client's MAC address */
memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
_unlock_rx_hashtbl(bond);
}
static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arp_pkt *arp;
if (skb->protocol != cpu_to_be16(ETH_P_ARP))
return;
if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
return;
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
return;
}
/* only dereference skb->data after pskb_may_pull(), which may
 * relocate the header
 */
arp = (struct arp_pkt *) skb->data;
if (!arp) {
pr_debug("Packet has no ARP data\n");
return;
}
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
rlb_update_entry_from_arp(bond, arp);
pr_debug("Server received an ARP Reply from client\n");
}
}
/* Caller must hold bond lock for read */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *rx_slave, *slave, *start_at;
int i = 0;
if (bond_info->next_rx_slave) {
start_at = bond_info->next_rx_slave;
} else {
start_at = bond->first_slave;
}
rx_slave = NULL;
bond_for_each_slave_from(bond, slave, i, start_at) {
if (SLAVE_IS_OK(slave)) {
if (!rx_slave) {
rx_slave = slave;
} else if (slave->speed > rx_slave->speed) {
rx_slave = slave;
}
}
}
if (rx_slave) {
bond_info->next_rx_slave = rx_slave->next;
}
return rx_slave;
}
/* teach the switch the mac of a disabled slave
* on the primary for fault tolerance
*
* Caller must hold bond->curr_slave_lock for write or bond lock for write
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
if (!bond->curr_active_slave) {
return;
}
if (!bond->alb_info.primary_is_promisc) {
if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
bond->alb_info.primary_is_promisc = 1;
else
bond->alb_info.primary_is_promisc = 0;
}
bond->alb_info.rlb_promisc_timeout_counter = 0;
alb_send_learning_packets(bond->curr_active_slave, addr);
}
/* slave being removed should not be active at this point
*
* Caller must hold bond lock for read
*/
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *rx_hash_table;
u32 index, next_index;
/* clear slave from rx_hashtbl */
_lock_rx_hashtbl(bond);
rx_hash_table = bond_info->rx_hashtbl;
index = bond_info->rx_hashtbl_head;
for (; index != RLB_NULL_INDEX; index = next_index) {
next_index = rx_hash_table[index].next;
if (rx_hash_table[index].slave == slave) {
struct slave *assigned_slave = rlb_next_rx_slave(bond);
if (assigned_slave) {
rx_hash_table[index].slave = assigned_slave;
if (compare_ether_addr_64bits(rx_hash_table[index].mac_dst,
mac_bcast)) {
bond_info->rx_hashtbl[index].ntt = 1;
bond_info->rx_ntt = 1;
/* A slave has been removed from the
 * table because it is either disabled
 * or being released. We must retry the
 * update so that clients do not miss
 * the update and get disconnected
 * under stress
 */
bond_info->rlb_update_retry_counter =
RLB_UPDATE_RETRY;
}
} else { /* there is no active slave */
rx_hash_table[index].slave = NULL;
}
}
}
_unlock_rx_hashtbl(bond);
write_lock_bh(&bond->curr_slave_lock);
if (slave != bond->curr_active_slave) {
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
}
write_unlock_bh(&bond->curr_slave_lock);
}
static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
if (!client_info->slave) {
return;
}
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
struct sk_buff *skb;
skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
client_info->ip_dst,
client_info->slave->dev,
client_info->ip_src,
client_info->mac_dst,
client_info->slave->dev->dev_addr,
client_info->mac_dst);
if (!skb) {
pr_err("%s: Error: failed to create an ARP packet\n",
client_info->slave->dev->master->name);
continue;
}
skb->dev = client_info->slave->dev;
if (client_info->tag) {
skb = vlan_put_tag(skb, client_info->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
client_info->slave->dev->master->name);
continue;
}
}
arp_xmit(skb);
}
}
/* sends ARP REPLIES that update the clients that need updating */
static void rlb_update_rx_clients(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
_lock_rx_hashtbl(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
if (bond_info->rlb_update_retry_counter == 0) {
client_info->ntt = 0;
}
}
}
/* do not update the entries again until this counter is zero, so as
 * not to confuse the clients.
 */
bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
_unlock_rx_hashtbl(bond);
}
/* The slave was assigned a new mac address - update the clients */
static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
int ntt = 0;
u32 hash_index;
_lock_rx_hashtbl(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->slave == slave) &&
compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
client_info->ntt = 1;
ntt = 1;
}
}
/* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
/* fasten the change */
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
_unlock_rx_hashtbl(bond);
}
/* mark all clients using src_ip to be updated */
static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
_lock_rx_hashtbl(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (!client_info->slave) {
pr_err("%s: Error: found a client with no channel in the client's hash table\n",
bond->dev->name);
continue;
}
/* update all clients using this src_ip that are not assigned
* to the team's address (curr_active_slave) and have a known
* unicast mac address.
*/
if ((client_info->ip_src == src_ip) &&
compare_ether_addr_64bits(client_info->slave->dev->dev_addr,
bond->dev->dev_addr) &&
compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
}
_unlock_rx_hashtbl(bond);
}
/* Caller must hold both bond and ptr locks for read */
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct arp_pkt *arp = arp_pkt(skb);
struct slave *assigned_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
_lock_rx_hashtbl(bond);
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->assigned) {
if ((client_info->ip_src == arp->ip_src) &&
(client_info->ip_dst == arp->ip_dst)) {
/* the entry is already assigned to this client */
if (compare_ether_addr_64bits(arp->mac_dst, mac_bcast)) {
/* update mac address from arp */
memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
}
assigned_slave = client_info->slave;
if (assigned_slave) {
_unlock_rx_hashtbl(bond);
return assigned_slave;
}
} else {
/* the entry is already assigned to some other client,
* move the old client to primary (curr_active_slave) so
* that the new client can be assigned to this entry.
*/
if (bond->curr_active_slave &&
client_info->slave != bond->curr_active_slave) {
client_info->slave = bond->curr_active_slave;
rlb_update_client(client_info);
}
}
}
/* assign a new slave */
assigned_slave = rlb_next_rx_slave(bond);
if (assigned_slave) {
client_info->ip_src = arp->ip_src;
client_info->ip_dst = arp->ip_dst;
/* arp->mac_dst is broadcast for arp requests.
 * It will be updated with the client's actual unicast mac address
 * upon receiving an arp reply.
 */
memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
client_info->slave = assigned_slave;
if (compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
client_info->ntt = 1;
bond->alb_info.rx_ntt = 1;
} else {
client_info->ntt = 0;
}
if (bond->vlgrp) {
if (!vlan_get_tag(skb, &client_info->vlan_id))
client_info->tag = 1;
}
if (!client_info->assigned) {
u32 prev_tbl_head = bond_info->rx_hashtbl_head;
bond_info->rx_hashtbl_head = hash_index;
client_info->next = prev_tbl_head;
if (prev_tbl_head != RLB_NULL_INDEX) {
bond_info->rx_hashtbl[prev_tbl_head].prev =
hash_index;
}
client_info->assigned = 1;
}
}
_unlock_rx_hashtbl(bond);
return assigned_slave;
}
/* chooses (and returns) the transmit channel for an arp reply;
 * does not choose a channel for other arp types, since those are
 * sent on the curr_active_slave
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct arp_pkt *arp = arp_pkt(skb);
struct slave *tx_slave = NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected
* rx channel
*/
tx_slave = rlb_choose_channel(skb, bond);
if (tx_slave) {
memcpy(arp->mac_src, tx_slave->dev->dev_addr, ETH_ALEN);
}
pr_debug("Server sent ARP Reply packet\n");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
 * placeholder.
 * When the arp reply is received the entry will be updated
 * with the correct unicast address of the client.
 */
rlb_choose_channel(skb, bond);
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
/* arp requests are broadcast and are sent on the primary.
 * The arp request will collapse all clients on the subnet to
 * the primary slave. We must register these clients to be
 * updated with their assigned mac.
 */
rlb_req_update_subnet_clients(bond, arp->ip_src);
pr_debug("Server sent ARP Request packet\n");
}
return tx_slave;
}
/* Caller must hold bond lock for read */
static void rlb_rebalance(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *assigned_slave;
struct rlb_client_info *client_info;
int ntt;
u32 hash_index;
_lock_rx_hashtbl(bond);
ntt = 0;
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
assigned_slave = rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
client_info->ntt = 1;
ntt = 1;
}
}
/* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
}
_unlock_rx_hashtbl(bond);
}
/* Caller must hold rx_hashtbl lock */
static void rlb_init_table_entry(struct rlb_client_info *entry)
{
memset(entry, 0, sizeof(struct rlb_client_info));
entry->next = RLB_NULL_INDEX;
entry->prev = RLB_NULL_INDEX;
}
static int rlb_initialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *new_hashtbl;
int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
int i;
new_hashtbl = kmalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
pr_err("%s: Error: Failed to allocate RLB hash table\n",
bond->dev->name);
return -1;
}
_lock_rx_hashtbl(bond);
bond_info->rx_hashtbl = new_hashtbl;
bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
rlb_init_table_entry(bond_info->rx_hashtbl + i);
}
_unlock_rx_hashtbl(bond);
/* register to receive ARPs */
bond->recv_probe = rlb_arp_recv;
return 0;
}
static void rlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
_lock_rx_hashtbl(bond);
kfree(bond_info->rx_hashtbl);
bond_info->rx_hashtbl = NULL;
bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
_unlock_rx_hashtbl(bond);
}
static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 curr_index;
_lock_rx_hashtbl(bond);
curr_index = bond_info->rx_hashtbl_head;
while (curr_index != RLB_NULL_INDEX) {
struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
u32 next_index = bond_info->rx_hashtbl[curr_index].next;
u32 prev_index = bond_info->rx_hashtbl[curr_index].prev;
if (curr->tag && (curr->vlan_id == vlan_id)) {
if (curr_index == bond_info->rx_hashtbl_head) {
bond_info->rx_hashtbl_head = next_index;
}
if (prev_index != RLB_NULL_INDEX) {
bond_info->rx_hashtbl[prev_index].next = next_index;
}
if (next_index != RLB_NULL_INDEX) {
bond_info->rx_hashtbl[next_index].prev = prev_index;
}
rlb_init_table_entry(curr);
}
curr_index = next_index;
}
_unlock_rx_hashtbl(bond);
}
/*********************** tlb/rlb shared functions *********************/
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct learning_pkt pkt;
int size = sizeof(struct learning_pkt);
int i;
memset(&pkt, 0, size);
memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
pkt.type = cpu_to_be16(ETH_P_LOOP);
for (i = 0; i < MAX_LP_BURST; i++) {
struct sk_buff *skb;
char *data;
skb = dev_alloc_skb(size);
if (!skb) {
return;
}
data = skb_put(skb, size);
memcpy(data, &pkt, size);
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = pkt.type;
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
if (bond->vlgrp) {
struct vlan_entry *vlan;
vlan = bond_next_vlan(bond,
bond->alb_info.current_alb_vlan);
bond->alb_info.current_alb_vlan = vlan;
if (!vlan) {
kfree_skb(skb);
continue;
}
skb = vlan_put_tag(skb, vlan->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
bond->dev->name);
continue;
}
}
dev_queue_xmit(skb);
}
}
/* hw is a boolean parameter: when it is zero only the net_device's
 * dev_addr field is updated; when it is non-zero the hardware address
 * of the device is programmed as well, via dev_set_mac_address()
 */
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
{
struct net_device *dev = slave->dev;
struct sockaddr s_addr;
if (!hw) {
memcpy(dev->dev_addr, addr, dev->addr_len);
return 0;
}
/* for rlb each slave must have a unique hw mac address so that */
/* each slave will receive packets destined to a different mac */
memcpy(s_addr.sa_data, addr, dev->addr_len);
s_addr.sa_family = dev->type;
if (dev_set_mac_address(dev, &s_addr)) {
pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
"ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
dev->master->name, dev->name);
return -EOPNOTSUPP;
}
return 0;
}
/*
* Swap MAC addresses between two slaves.
*
* Called with RTNL held, and no other locks.
*
*/
static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
{
u8 tmp_mac_addr[ETH_ALEN];
memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
}
/*
* Send learning packets after MAC address swap.
*
* Called with RTNL and no other locks
*/
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
struct slave *slave2)
{
int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
struct slave *disabled_slave = NULL;
ASSERT_RTNL();
/* fasten the change in the switch */
if (SLAVE_IS_OK(slave1)) {
alb_send_learning_packets(slave1, slave1->dev->dev_addr);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
*/
rlb_req_update_slave_clients(bond, slave1);
}
} else {
disabled_slave = slave1;
}
if (SLAVE_IS_OK(slave2)) {
alb_send_learning_packets(slave2, slave2->dev->dev_addr);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
*/
rlb_req_update_slave_clients(bond, slave2);
}
} else {
disabled_slave = slave2;
}
if (bond->alb_info.rlb_enabled && slaves_state_differ) {
/* A disabled slave was assigned an active mac addr */
rlb_teach_disabled_mac_on_primary(bond,
disabled_slave->dev->dev_addr);
}
}
/**
* alb_change_hw_addr_on_detach
* @bond: bonding we're working on
* @slave: the slave that was just detached
*
* We assume that @slave was already detached from the slave list.
*
* If @slave's permanent hw address is different both from its current
* address and from @bond's address, then somewhere in the bond there's
* a slave that has @slave's permanent address as its current address.
* We'll make sure that that slave no longer uses @slave's permanent address.
*
* Caller must hold RTNL and no other locks
*/
static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
{
int perm_curr_diff;
int perm_bond_diff;
perm_curr_diff = compare_ether_addr_64bits(slave->perm_hwaddr,
slave->dev->dev_addr);
perm_bond_diff = compare_ether_addr_64bits(slave->perm_hwaddr,
bond->dev->dev_addr);
if (perm_curr_diff && perm_bond_diff) {
struct slave *tmp_slave;
int i, found = 0;
bond_for_each_slave(bond, tmp_slave, i) {
if (!compare_ether_addr_64bits(slave->perm_hwaddr,
tmp_slave->dev->dev_addr)) {
found = 1;
break;
}
}
if (found) {
/* locking: needs RTNL and nothing else */
alb_swap_mac_addr(bond, slave, tmp_slave);
alb_fasten_mac_swap(bond, slave, tmp_slave);
}
}
}
/**
* alb_handle_addr_collision_on_attach
* @bond: bonding we're working on
* @slave: the slave that was just attached
*
* checks the uniqueness of the slave's mac address and handles the case
* where the new slave uses the bond's mac address.
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
* slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its
* permanent hw address with the current address of all the other slaves.
* If no match was found, then we've found a slave with a permanent address
* that isn't used by any other slave in the bond, so we can assign it to
* @slave.
*
* assumption: this function is called before @slave is attached to the
* bond slave list.
*
* caller must hold the bond lock for write since the mac addresses are compared
* and may be swapped.
*/
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
struct slave *tmp_slave1, *tmp_slave2, *free_mac_slave;
struct slave *has_bond_addr = bond->curr_active_slave;
int i, j, found = 0;
if (bond->slave_cnt == 0) {
/* this is the first slave */
return 0;
}
/* if slave's mac address differs from bond's mac address
* check uniqueness of slave's mac address against the other
* slaves in the bond.
*/
if (compare_ether_addr_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
bond_for_each_slave(bond, tmp_slave1, i) {
if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr,
slave->dev->dev_addr)) {
found = 1;
break;
}
}
if (!found)
return 0;
/* Try setting slave mac to bond address and fall through
 * to the code handling that situation below...
 */
alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
bond->alb_info.rlb_enabled);
}
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
free_mac_slave = NULL;
bond_for_each_slave(bond, tmp_slave1, i) {
found = 0;
bond_for_each_slave(bond, tmp_slave2, j) {
if (!compare_ether_addr_64bits(tmp_slave1->perm_hwaddr,
tmp_slave2->dev->dev_addr)) {
found = 1;
break;
}
}
if (!found) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
*/
free_mac_slave = tmp_slave1;
break;
}
if (!has_bond_addr) {
if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr,
bond->dev->dev_addr)) {
has_bond_addr = tmp_slave1;
}
}
}
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
bond->alb_info.rlb_enabled);
pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
bond->dev->name, slave->dev->name,
free_mac_slave->dev->name);
} else if (has_bond_addr) {
pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
bond->dev->name, slave->dev->name);
return -EFAULT;
}
return 0;
}
/**
* alb_set_mac_address
* @bond: bonding we're working on
* @addr: the new MAC address to set, passed as a struct sockaddr pointer
*
* In TLB mode all slaves are configured to the bond's hw address, but set
* their dev_addr field to different addresses (based on their permanent hw
* addresses).
*
* For each slave, this function sets the interface to the new address and then
* changes its dev_addr field to its previous value.
*
* Unwinding assumes bond's mac address has not yet changed.
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
struct sockaddr sa;
struct slave *slave, *stop_at;
char tmp_addr[ETH_ALEN];
int res;
int i;
if (bond->alb_info.rlb_enabled) {
return 0;
}
bond_for_each_slave(bond, slave, i) {
/* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
res = dev_set_mac_address(slave->dev, addr);
/* restore net_device's hw address */
memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
if (res)
goto unwind;
}
return 0;
unwind:
memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */
stop_at = slave;
bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
dev_set_mac_address(slave->dev, &sa);
memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
}
return res;
}
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
{
int res;
res = tlb_initialize(bond);
if (res) {
return res;
}
if (rlb_enabled) {
bond->alb_info.rlb_enabled = 1;
/* initialize rlb */
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
} else {
bond->alb_info.rlb_enabled = 0;
}
return 0;
}
void bond_alb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
tlb_deinitialize(bond);
if (bond_info->rlb_enabled) {
rlb_deinitialize(bond);
}
}
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethhdr *eth_data;
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
int hash_size = 0;
int do_tx_balance = 1;
u32 hash_index = 0;
const u8 *hash_start = NULL;
int res = 1;
struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
/* make sure that the curr_active_slave does not change during tx
*/
read_lock(&bond->curr_slave_lock);
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast) ||
(iph->daddr == ip_bcast) ||
(iph->protocol == IPPROTO_IGMP)) {
do_tx_balance = 0;
break;
}
hash_start = (char *)&(iph->daddr);
hash_size = sizeof(iph->daddr);
}
break;
case ETH_P_IPV6:
/* IPv6 doesn't really use a broadcast mac address, but leave
* that here just in case.
*/
if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast)) {
do_tx_balance = 0;
break;
}
/* IPv6 uses all-nodes multicast as an equivalent to
* broadcasts in IPv4.
*/
if (!compare_ether_addr_64bits(eth_data->h_dest, mac_v6_allmcast)) {
do_tx_balance = 0;
break;
}
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
*/
ip6hdr = ipv6_hdr(skb);
if (ipv6_addr_any(&ip6hdr->saddr)) {
do_tx_balance = 0;
break;
}
hash_start = (char *)&(ipv6_hdr(skb)->daddr);
hash_size = sizeof(ipv6_hdr(skb)->daddr);
break;
case ETH_P_IPX:
if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
do_tx_balance = 0;
break;
}
if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
/* The only protocol worth balancing in
* this family since it has an "ARP" like
* mechanism
*/
do_tx_balance = 0;
break;
}
hash_start = (char*)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
do_tx_balance = 0;
if (bond_info->rlb_enabled) {
tx_slave = rlb_arp_xmit(skb, bond);
}
break;
default:
do_tx_balance = 0;
break;
}
if (do_tx_balance) {
hash_index = _simple_hash(hash_start, hash_size);
tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
}
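/* Illustrative note (added): for a balanced IPv4 frame the channel is
 * picked purely from the destination address, e.g.
 *
 *	hash_index = _simple_hash((u8 *)&iph->daddr, sizeof(iph->daddr));
 *	tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
 *
 * so all traffic to one peer stays on one slave between rebalances.
 */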
if (!tx_slave) {
/* unbalanced or unassigned, send through primary */
tx_slave = bond->curr_active_slave;
bond_info->unbalanced_load += skb->len;
}
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
if (tx_slave != bond->curr_active_slave) {
memcpy(eth_data->h_source,
tx_slave->dev->dev_addr,
ETH_ALEN);
}
res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
} else {
if (tx_slave) {
tlb_clear_slave(bond, tx_slave, 0);
}
}
if (res) {
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
read_unlock(&bond->curr_slave_lock);
return NETDEV_TX_OK;
}
void bond_alb_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *slave;
int i;
read_lock(&bond->lock);
if (bond->kill_timers) {
goto out;
}
if (bond->slave_cnt == 0) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
}
bond_info->tx_rebalance_counter++;
bond_info->lp_counter++;
/* send learning packets */
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
/* a change of curr_active_slave involves swapping mac addresses.
 * To prevent this swap from happening while the learning packets
 * are being sent, the curr_slave_lock must be held for read.
 */
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, i) {
alb_send_learning_packets(slave, slave->dev->dev_addr);
}
read_unlock(&bond->curr_slave_lock);
bond_info->lp_counter = 0;
}
/* rebalance tx traffic */
if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, i) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
bond_info->unbalanced_load /
BOND_TLB_REBALANCE_INTERVAL;
bond_info->unbalanced_load = 0;
}
}
read_unlock(&bond->curr_slave_lock);
bond_info->tx_rebalance_counter = 0;
}
/* handle rlb stuff */
if (bond_info->rlb_enabled) {
if (bond_info->primary_is_promisc &&
(++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
/*
* dev_set_promiscuity requires rtnl and
* nothing else.
*/
read_unlock(&bond->lock);
rtnl_lock();
bond_info->rlb_promisc_timeout_counter = 0;
/* If the primary was set to promiscuous mode
* because a slave was disabled then
* it can now leave promiscuous mode.
*/
dev_set_promiscuity(bond->curr_active_slave->dev, -1);
bond_info->primary_is_promisc = 0;
rtnl_unlock();
read_lock(&bond->lock);
}
if (bond_info->rlb_rebalance) {
bond_info->rlb_rebalance = 0;
rlb_rebalance(bond);
}
/* check if clients need updating */
if (bond_info->rx_ntt) {
if (bond_info->rlb_update_delay_counter) {
--bond_info->rlb_update_delay_counter;
} else {
rlb_update_rx_clients(bond);
if (bond_info->rlb_update_retry_counter) {
--bond_info->rlb_update_retry_counter;
} else {
bond_info->rx_ntt = 0;
}
}
}
}
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
/* assumption: called before the slave is attached to the bond
* and not locked by the bond lock
*/
int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
bond->alb_info.rlb_enabled);
if (res) {
return res;
}
/* caller must hold the bond lock for write since the mac addresses
* are compared and may be swapped.
*/
read_lock(&bond->lock);
res = alb_handle_addr_collision_on_attach(bond, slave);
read_unlock(&bond->lock);
if (res) {
return res;
}
tlb_init_slave(slave);
/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
}
return 0;
}
/*
* Remove slave from tlb and rlb hash tables, and fix up MAC addresses
* if necessary.
*
* Caller must hold RTNL and no other locks
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
if (bond->slave_cnt > 1) {
alb_change_hw_addr_on_detach(bond, slave);
}
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.next_rx_slave = NULL;
rlb_clear_slave(bond, slave);
}
}
/* Caller must hold bond lock for read */
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
if (link == BOND_LINK_DOWN) {
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled) {
rlb_clear_slave(bond, slave);
}
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the
 * forwarding delay of the switch, the rebalance will
 * not work because the rebalance arp replies will
 * not be forwarded to the clients.
 */
}
}
}
/**
* bond_alb_handle_active_change - assign new curr_active_slave
* @bond: our bonding struct
* @new_slave: new slave to assign
*
* Set the bond->curr_active_slave to @new_slave and handle
* mac address swapping and promiscuity changes as needed.
*
* If new_slave is NULL, caller must hold curr_slave_lock or
* bond->lock for write.
*
* If new_slave is not NULL, caller must hold RTNL, bond->lock for
* read and curr_slave_lock for write. Processing here may sleep, so
* no other locks may be held.
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
__releases(&bond->curr_slave_lock)
__releases(&bond->lock)
__acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
struct slave *swap_slave;
int i;
if (bond->curr_active_slave == new_slave) {
return;
}
if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
dev_set_promiscuity(bond->curr_active_slave->dev, -1);
bond->alb_info.primary_is_promisc = 0;
bond->alb_info.rlb_promisc_timeout_counter = 0;
}
swap_slave = bond->curr_active_slave;
bond->curr_active_slave = new_slave;
if (!new_slave || (bond->slave_cnt == 0)) {
return;
}
/* set the new curr_active_slave to the bond's mac address,
 * i.e. swap mac addresses of the old and the new curr_active_slave
 */
if (!swap_slave) {
struct slave *tmp_slave;
/* find slave that is holding the bond's mac address */
bond_for_each_slave(bond, tmp_slave, i) {
if (!compare_ether_addr_64bits(tmp_slave->dev->dev_addr,
bond->dev->dev_addr)) {
swap_slave = tmp_slave;
break;
}
}
}
/*
* Arrange for swap_slave and new_slave to temporarily be
* ignored so we can mess with their MAC addresses without
* fear of interference from transmit activity.
*/
if (swap_slave) {
tlb_clear_slave(bond, swap_slave, 1);
}
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
ASSERT_RTNL();
/* curr_active_slave must be set before calling alb_swap_mac_addr */
if (swap_slave) {
/* swap mac address */
alb_swap_mac_addr(bond, swap_slave, new_slave);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
bond->alb_info.rlb_enabled);
}
if (swap_slave) {
alb_fasten_mac_swap(bond, swap_slave, new_slave);
read_lock(&bond->lock);
} else {
read_lock(&bond->lock);
alb_send_learning_packets(new_slave, bond->dev->dev_addr);
}
write_lock_bh(&bond->curr_slave_lock);
}
/*
* Called with RTNL
*/
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
__acquires(&bond->lock)
__releases(&bond->lock)
{
struct bonding *bond = netdev_priv(bond_dev);
struct sockaddr *sa = addr;
struct slave *slave, *swap_slave;
int res;
int i;
if (!is_valid_ether_addr(sa->sa_data)) {
return -EADDRNOTAVAIL;
}
res = alb_set_mac_address(bond, addr);
if (res) {
return res;
}
memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
/* If there is no curr_active_slave there is nothing else to do.
* Otherwise we'll need to pass the new address to it and handle
* duplications.
*/
if (!bond->curr_active_slave) {
return 0;
}
swap_slave = NULL;
bond_for_each_slave(bond, slave, i) {
if (!compare_ether_addr_64bits(slave->dev->dev_addr,
bond_dev->dev_addr)) {
swap_slave = slave;
break;
}
}
if (swap_slave) {
alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
} else {
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
bond->alb_info.rlb_enabled);
read_lock(&bond->lock);
alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
if (bond->alb_info.rlb_enabled) {
/* inform clients mac address has changed */
rlb_req_update_slave_clients(bond, bond->curr_active_slave);
}
read_unlock(&bond->lock);
}
return 0;
}
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
if (bond->alb_info.current_alb_vlan &&
(bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
bond->alb_info.current_alb_vlan = NULL;
}
if (bond->alb_info.rlb_enabled) {
rlb_clear_vlan(bond, vlan_id);
}
}
| gpl-2.0 |
futuretekinc/cortina-kernel-2.6.36 | drivers/watchdog/wm831x_wdt.c | 1261 | 9274 | /*
* Watchdog driver for the wm831x PMICs
*
* Copyright (C) 2009 Wolfson Microelectronics
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <linux/gpio.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/watchdog.h>
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static unsigned long wm831x_wdt_users;
static struct miscdevice wm831x_wdt_miscdev;
static int wm831x_wdt_expect_close;
static DEFINE_MUTEX(wdt_mutex);
static struct wm831x *wm831x;
static unsigned int update_gpio;
static unsigned int update_state;
/* We can't use the sub-second values here but they're included
* for completeness. */
static struct {
int time; /* Seconds */
u16 val; /* WDOG_TO value */
} wm831x_wdt_cfgs[] = {
{ 1, 2 },
{ 2, 3 },
{ 4, 4 },
{ 8, 5 },
{ 16, 6 },
{ 32, 7 },
{ 33, 7 }, /* Actually 32.768s so include both, others round down */
};
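/* Illustrative note (added): WDIOC_SETTIMEOUT only accepts the discrete
 * values listed above. E.g. a userspace request for an 8 second timeout
 * resolves to { 8, 5 } and programs WDOG_TO = 5, while a request for,
 * say, 10 seconds matches no entry and fails with -EINVAL (see
 * wm831x_wdt_ioctl below).
 */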
static int wm831x_wdt_set_timeout(struct wm831x *wm831x, u16 value)
{
int ret;
mutex_lock(&wdt_mutex);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
WM831X_WDOG_TO_MASK, value);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
mutex_unlock(&wdt_mutex);
return ret;
}
static int wm831x_wdt_start(struct wm831x *wm831x)
{
int ret;
mutex_lock(&wdt_mutex);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
WM831X_WDOG_ENA, WM831X_WDOG_ENA);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
mutex_unlock(&wdt_mutex);
return ret;
}
static int wm831x_wdt_stop(struct wm831x *wm831x)
{
int ret;
mutex_lock(&wdt_mutex);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
WM831X_WDOG_ENA, 0);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
mutex_unlock(&wdt_mutex);
return ret;
}
static int wm831x_wdt_kick(struct wm831x *wm831x)
{
int ret;
u16 reg;
mutex_lock(&wdt_mutex);
if (update_gpio) {
gpio_set_value_cansleep(update_gpio, update_state);
update_state = !update_state;
ret = 0;
goto out;
}
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (!(reg & WM831X_WDOG_RST_SRC)) {
dev_err(wm831x->dev, "Hardware watchdog update unsupported\n");
ret = -EINVAL;
goto out;
}
reg |= WM831X_WDOG_RESET;
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
out:
mutex_unlock(&wdt_mutex);
return ret;
}
static int wm831x_wdt_open(struct inode *inode, struct file *file)
{
int ret;
if (!wm831x)
return -ENODEV;
if (test_and_set_bit(0, &wm831x_wdt_users))
return -EBUSY;
ret = wm831x_wdt_start(wm831x);
if (ret != 0)
return ret;
return nonseekable_open(inode, file);
}
static int wm831x_wdt_release(struct inode *inode, struct file *file)
{
if (wm831x_wdt_expect_close)
wm831x_wdt_stop(wm831x);
else {
dev_warn(wm831x->dev, "Watchdog device closed uncleanly\n");
wm831x_wdt_kick(wm831x);
}
clear_bit(0, &wm831x_wdt_users);
return 0;
}
static ssize_t wm831x_wdt_write(struct file *file,
const char __user *data, size_t count,
loff_t *ppos)
{
size_t i;
if (count) {
wm831x_wdt_kick(wm831x);
if (!nowayout) {
/* In case it was set long ago */
wm831x_wdt_expect_close = 0;
/* scan to see whether or not we got the magic
character */
for (i = 0; i != count; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
wm831x_wdt_expect_close = 42;
}
}
}
return count;
}
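/* Usage sketch (added for illustration): any write kicks the watchdog;
 * writing the magic character 'V' arms the "expect close" flag so that a
 * subsequent close() stops the timer instead of warning and kicking.
 * From userspace, assuming the standard /dev/watchdog node:
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);	// starts the watchdog
 *	write(fd, "V", 1);	// allow a clean stop on close
 *	close(fd);		// watchdog is stopped, not kicked
 */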
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "WM831x Watchdog",
};
static long wm831x_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -ENOTTY, time, i;
void __user *argp = (void __user *)arg;
int __user *p = argp;
u16 reg;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, p);
break;
case WDIOC_SETOPTIONS:
{
int options;
if (get_user(options, p))
return -EFAULT;
ret = -EINVAL;
/* Setting both simultaneously means at least one must fail */
if (options == WDIOS_DISABLECARD)
ret = wm831x_wdt_stop(wm831x);
if (options == WDIOS_ENABLECARD)
ret = wm831x_wdt_start(wm831x);
break;
}
case WDIOC_KEEPALIVE:
ret = wm831x_wdt_kick(wm831x);
break;
case WDIOC_SETTIMEOUT:
ret = get_user(time, p);
if (ret)
break;
if (time == 0) {
if (nowayout)
ret = -EINVAL;
else
wm831x_wdt_stop(wm831x);
break;
}
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
if (wm831x_wdt_cfgs[i].time == time)
break;
if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
ret = -EINVAL;
else
ret = wm831x_wdt_set_timeout(wm831x,
wm831x_wdt_cfgs[i].val);
break;
case WDIOC_GETTIMEOUT:
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
reg &= WM831X_WDOG_TO_MASK;
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
if (wm831x_wdt_cfgs[i].val == reg)
break;
if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) {
dev_warn(wm831x->dev,
"Unknown watchdog configuration: %x\n", reg);
ret = -EINVAL;
} else
ret = put_user(wm831x_wdt_cfgs[i].time, p);
}
return ret;
}
static const struct file_operations wm831x_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = wm831x_wdt_write,
.unlocked_ioctl = wm831x_wdt_ioctl,
.open = wm831x_wdt_open,
.release = wm831x_wdt_release,
};
static struct miscdevice wm831x_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wm831x_wdt_fops,
};
static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
{
struct wm831x_pdata *chip_pdata;
struct wm831x_watchdog_pdata *pdata;
int reg, ret;
wm831x = dev_get_drvdata(pdev->dev.parent);
ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read watchdog status: %d\n",
ret);
goto err;
}
reg = ret;
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
/* Apply any configuration */
if (pdev->dev.parent->platform_data) {
chip_pdata = pdev->dev.parent->platform_data;
pdata = chip_pdata->watchdog;
} else {
pdata = NULL;
}
if (pdata) {
reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK |
WM831X_WDOG_RST_SRC);
reg |= pdata->primary << WM831X_WDOG_PRIMACT_SHIFT;
reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT;
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
if (pdata->update_gpio) {
ret = gpio_request(pdata->update_gpio,
"Watchdog update");
if (ret < 0) {
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
ret);
goto err;
}
ret = gpio_direction_output(pdata->update_gpio, 0);
if (ret != 0) {
dev_err(wm831x->dev,
"gpio_direction_output returned: %d\n",
ret);
goto err_gpio;
}
update_gpio = pdata->update_gpio;
/* Make sure the watchdog takes hardware updates */
reg |= WM831X_WDOG_RST_SRC;
}
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev,
"Failed to unlock security key: %d\n", ret);
goto err_gpio;
}
}
wm831x_wdt_miscdev.parent = &pdev->dev;
ret = misc_register(&wm831x_wdt_miscdev);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to register miscdev: %d\n", ret);
goto err_gpio;
}
return 0;
err_gpio:
if (update_gpio) {
gpio_free(update_gpio);
update_gpio = 0;
}
err:
return ret;
}
static int __devexit wm831x_wdt_remove(struct platform_device *pdev)
{
if (update_gpio) {
gpio_free(update_gpio);
update_gpio = 0;
}
misc_deregister(&wm831x_wdt_miscdev);
return 0;
}
static struct platform_driver wm831x_wdt_driver = {
.probe = wm831x_wdt_probe,
.remove = __devexit_p(wm831x_wdt_remove),
.driver = {
.name = "wm831x-watchdog",
},
};
static int __init wm831x_wdt_init(void)
{
return platform_driver_register(&wm831x_wdt_driver);
}
module_init(wm831x_wdt_init);
static void __exit wm831x_wdt_exit(void)
{
platform_driver_unregister(&wm831x_wdt_driver);
}
module_exit(wm831x_wdt_exit);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-watchdog");
| gpl-2.0 |
rachitrawat/Vengeance-Kernel-U8500 | fs/ext4/fsync.c | 2285 | 6977 | /*
* linux/fs/ext4/fsync.c
*
* Copyright (C) 1993 Stephen Tweedie (sct@redhat.com)
* from
* Copyright (C) 1992 Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
* from
* linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
*
* ext4fs fsync primitive
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
* Removed unnecessary code duplication for little endian machines
* and excessive __inline__s.
* Andi Kleen, 1997
*
* Major simplifications and cleanup - we only need to do the metadata, because
* we can depend on generic_block_fdatasync() to sync the data blocks.
*/
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include <trace/events/ext4.h>
static void dump_completed_IO(struct inode * inode)
{
#ifdef EXT4FS_DEBUG
struct list_head *cur, *before, *after;
ext4_io_end_t *io, *io0, *io1;
unsigned long flags;
if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
return;
}
ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
cur = &io->list;
before = cur->prev;
io0 = container_of(before, ext4_io_end_t, list);
after = cur->next;
io1 = container_of(after, ext4_io_end_t, list);
ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
io, inode->i_ino, io0, io1);
}
spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}
/*
* This function is called from ext4_sync_file().
*
* When IO is completed, the work to convert unwritten extents to
* written is queued on a workqueue but may not get immediately
* scheduled. When fsync is called, we need to ensure the
* conversion is complete before fsync returns.
* The inode keeps track of a list of pending/completed IO that
* might need the conversion. This function walks through
* the list and converts the related unwritten extents for completed IO
* to written.
* The function returns 0 on success, or a negative error code if a
* conversion failed.
*/
extern int ext4_flush_completed_IO(struct inode *inode)
{
ext4_io_end_t *io;
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned long flags;
int ret = 0;
int ret2 = 0;
if (list_empty(&ei->i_completed_io_list))
return ret;
dump_completed_IO(inode);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
while (!list_empty(&ei->i_completed_io_list)){
io = list_entry(ei->i_completed_io_list.next,
ext4_io_end_t, list);
/*
* Calling ext4_end_io_nolock() to convert completed
* IO to written.
*
* When ext4_sync_file() is called, run_queue() may already
* be about to flush the work corresponding to this io structure.
* It will be upset if it finds that the io structure related
* to the work it is about to schedule has been freed.
*
* Thus we need to keep the io structure valid here after the
* conversion has finished. The io structure has a flag to
* avoid double conversion by both fsync and the background
* workqueue.
*/
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
ret = ext4_end_io_nolock(io);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (ret < 0)
ret2 = ret;
else
list_del_init(&io->list);
}
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
return (ret2 < 0) ? ret2 : 0;
}
/*
* If we're not journaling and this is a just-created file, we have to
* sync our parent directory (if it was freshly created) since
* otherwise it will only be written by writeback, leaving a huge
* window during which a crash may lose the file. This may apply for
* the parent directory's parent as well, and so on recursively, if
* they are also freshly created.
*/
static int ext4_sync_parent(struct inode *inode)
{
struct writeback_control wbc;
struct dentry *dentry = NULL;
int ret = 0;
while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
dentry = list_entry(inode->i_dentry.next,
struct dentry, d_alias);
if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
break;
inode = dentry->d_parent->d_inode;
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
break;
memset(&wbc, 0, sizeof(wbc));
wbc.sync_mode = WB_SYNC_ALL;
wbc.nr_to_write = 0; /* only write out the inode */
ret = sync_inode(inode, &wbc);
if (ret)
break;
}
return ret;
}
/*
* akpm: A new design for ext4_sync_file().
*
* This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
* There cannot be a transaction open by this task.
* Another task could have dirtied this inode. Its data can be in any
* state in the journalling system.
*
* What we do is just kick off a commit and wait on it. This will snapshot the
* inode to disk.
*
* i_mutex lock is held when entering and exiting this function
*/
int ext4_sync_file(struct file *file, int datasync)
{
struct inode *inode = file->f_mapping->host;
struct ext4_inode_info *ei = EXT4_I(inode);
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
int ret;
tid_t commit_tid;
bool needs_barrier = false;
J_ASSERT(ext4_journal_current_handle() == NULL);
trace_ext4_sync_file_enter(file, datasync);
if (inode->i_sb->s_flags & MS_RDONLY)
return 0;
ret = ext4_flush_completed_IO(inode);
if (ret < 0)
goto out;
if (!journal) {
ret = generic_file_fsync(file, datasync);
if (!ret && !list_empty(&inode->i_dentry))
ret = ext4_sync_parent(inode);
goto out;
}
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
* Metadata is in the journal, we wait for proper transaction to
* commit here.
*
* data=journal:
* filemap_fdatawrite won't do anything (the buffers are clean).
* ext4_force_commit will write the file data into the journal and
* will wait on that.
* filemap_fdatawait() will encounter a ton of newly-dirtied pages
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
goto out;
}
commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
if (journal->j_flags & JBD2_BARRIER &&
!jbd2_trans_will_send_data_barrier(journal, commit_tid))
needs_barrier = true;
jbd2_log_start_commit(journal, commit_tid);
ret = jbd2_log_wait_commit(journal, commit_tid);
if (needs_barrier)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
out:
trace_ext4_sync_file_exit(inode, ret);
return ret;
}
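/* Illustrative note (added): for a datasync-only fsync on a journalled
 * filesystem the fast path above reduces to
 *
 *	commit_tid = ei->i_datasync_tid;
 *	jbd2_log_start_commit(journal, commit_tid);
 *	jbd2_log_wait_commit(journal, commit_tid);
 *
 * i.e. we wait only for the transaction that last touched the inode's
 * data-relevant metadata, plus an explicit blkdev_issue_flush() when
 * JBD2 will not send the barrier itself.
 */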
| gpl-2.0 |
CyanogenMod/android_kernel_motorola_omap4-kexec-common | arch/arm/mach-davinci/board-dm365-evm.c | 2285 | 15016 | /*
* TI DaVinci DM365 EVM board support
*
* Copyright (C) 2009 Texas Instruments Incorporated
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/i2c/at24.h>
#include <linux/leds.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/mtd/nand.h>
#include <linux/input.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/mux.h>
#include <mach/dm365.h>
#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mmc.h>
#include <mach/nand.h>
#include <mach/keyscan.h>
#include <media/tvp514x.h>
static inline int have_imager(void)
{
/* REVISIT when it's supported, trigger via Kconfig */
return 0;
}
static inline int have_tvp7002(void)
{
/* REVISIT when it's supported, trigger via Kconfig */
return 0;
}
#define DM365_EVM_PHY_ID "0:01"
/*
* A MAX-II CPLD is used for various board control functions.
*/
#define CPLD_OFFSET(a13a8,a2a1) (((a13a8) << 10) + ((a2a1) << 3))
#define CPLD_VERSION CPLD_OFFSET(0,0) /* r/o */
#define CPLD_TEST CPLD_OFFSET(0,1)
#define CPLD_LEDS CPLD_OFFSET(0,2)
#define CPLD_MUX CPLD_OFFSET(0,3)
#define CPLD_SWITCH CPLD_OFFSET(1,0) /* r/o */
#define CPLD_POWER CPLD_OFFSET(1,1)
#define CPLD_VIDEO CPLD_OFFSET(1,2)
#define CPLD_CARDSTAT CPLD_OFFSET(1,3) /* r/o */
#define CPLD_DILC_OUT CPLD_OFFSET(2,0)
#define CPLD_DILC_IN CPLD_OFFSET(2,1) /* r/o */
#define CPLD_IMG_DIR0 CPLD_OFFSET(2,2)
#define CPLD_IMG_MUX0 CPLD_OFFSET(2,3)
#define CPLD_IMG_MUX1 CPLD_OFFSET(3,0)
#define CPLD_IMG_DIR1 CPLD_OFFSET(3,1)
#define CPLD_IMG_MUX2 CPLD_OFFSET(3,2)
#define CPLD_IMG_MUX3 CPLD_OFFSET(3,3)
#define CPLD_IMG_DIR2 CPLD_OFFSET(4,0)
#define CPLD_IMG_MUX4 CPLD_OFFSET(4,1)
#define CPLD_IMG_MUX5 CPLD_OFFSET(4,2)
#define CPLD_RESETS CPLD_OFFSET(4,3)
#define CPLD_CCD_DIR1 CPLD_OFFSET(0x3e,0)
#define CPLD_CCD_IO1 CPLD_OFFSET(0x3e,1)
#define CPLD_CCD_DIR2 CPLD_OFFSET(0x3e,2)
#define CPLD_CCD_IO2 CPLD_OFFSET(0x3e,3)
#define CPLD_CCD_DIR3 CPLD_OFFSET(0x3f,0)
#define CPLD_CCD_IO3 CPLD_OFFSET(0x3f,1)
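/* Worked example (added): CPLD_OFFSET() builds a register's byte offset
 * from two bit-fields whose parameter names refer to CPLD address lines
 * A13..A8 and A2..A1, e.g.
 *
 *	CPLD_LEDS == CPLD_OFFSET(0, 2) == (0 << 10) + (2 << 3) == 0x10
 *
 * is the offset of the LED register from the CPLD base mapping.
 */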
static void __iomem *cpld;
/* NOTE: this is geared for the standard config, with a socketed
* 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
* swap chips with a different block size, partitioning will
* need to be changed. This NAND chip MT29F16G08FAA is the default
* NAND shipped with the Spectrum Digital DM365 EVM
*/
#define NAND_BLOCK_SIZE SZ_128K
static struct mtd_partition davinci_nand_partitions[] = {
{
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
.offset = 0,
.size = 28 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
/* U-Boot environment */
.name = "params",
.offset = MTDPART_OFS_APPEND,
.size = 2 * NAND_BLOCK_SIZE,
.mask_flags = 0,
}, {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_4M,
.mask_flags = 0,
}, {
.name = "filesystem1",
.offset = MTDPART_OFS_APPEND,
.size = SZ_512M,
.mask_flags = 0,
}, {
.name = "filesystem2",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
}
/* two blocks with bad block table (and mirror) at the end */
};
static struct davinci_nand_pdata davinci_nand_data = {
.mask_chipsel = BIT(14),
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
.options = NAND_USE_FLASH_BBT,
.ecc_bits = 4,
};
static struct resource davinci_nand_resources[] = {
{
.start = DM365_ASYNC_EMIF_DATA_CE0_BASE,
.end = DM365_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
.start = DM365_ASYNC_EMIF_CONTROL_BASE,
.end = DM365_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device davinci_nand_device = {
.name = "davinci_nand",
.id = 0,
.num_resources = ARRAY_SIZE(davinci_nand_resources),
.resource = davinci_nand_resources,
.dev = {
.platform_data = &davinci_nand_data,
},
};
static struct at24_platform_data eeprom_info = {
.byte_len = (256*1024) / 8,
.page_size = 64,
.flags = AT24_FLAG_ADDR16,
.setup = davinci_get_mac_addr,
.context = (void *)0x7f00,
};
static struct snd_platform_data dm365_evm_snd_data = {
.asp_chan_q = EVENTQ_3,
};
static struct i2c_board_info i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
.platform_data = &eeprom_info,
},
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
},
};
static struct davinci_i2c_platform_data i2c_pdata = {
.bus_freq = 400 /* kHz */,
.bus_delay = 0 /* usec */,
};
static int dm365evm_keyscan_enable(struct device *dev)
{
return davinci_cfg_reg(DM365_KEYSCAN);
}
static unsigned short dm365evm_keymap[] = {
KEY_KP2,
KEY_LEFT,
KEY_EXIT,
KEY_DOWN,
KEY_ENTER,
KEY_UP,
KEY_KP1,
KEY_RIGHT,
KEY_MENU,
KEY_RECORD,
KEY_REWIND,
KEY_KPMINUS,
KEY_STOP,
KEY_FASTFORWARD,
KEY_KPPLUS,
KEY_PLAYPAUSE,
0
};
static struct davinci_ks_platform_data dm365evm_ks_data = {
.device_enable = dm365evm_keyscan_enable,
.keymap = dm365evm_keymap,
.keymapsize = ARRAY_SIZE(dm365evm_keymap),
.rep = 1,
/* Scan period = strobe + interval */
.strobe = 0x5,
.interval = 0x2,
.matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4,
};
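/* CPLD_CARDSTAT packs both MMC slots into one register: per the BIT()
 * masks in the helpers below, bit 0 is card-detect and bit 1
 * write-protect for mmc0, with bits 4 and 5 playing the same roles
 * for mmc1.
 */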
static int cpld_mmc_get_cd(int module)
{
if (!cpld)
return -ENXIO;
/* low == card present */
return !(__raw_readb(cpld + CPLD_CARDSTAT) & BIT(module ? 4 : 0));
}
static int cpld_mmc_get_ro(int module)
{
if (!cpld)
return -ENXIO;
/* high == card's write protect switch active */
return !!(__raw_readb(cpld + CPLD_CARDSTAT) & BIT(module ? 5 : 1));
}
static struct davinci_mmc_config dm365evm_mmc_config = {
.get_cd = cpld_mmc_get_cd,
.get_ro = cpld_mmc_get_ro,
.wires = 4,
.max_freq = 50000000,
.caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.version = MMC_CTLR_VERSION_2,
};
static void dm365evm_emac_configure(void)
{
/*
* EMAC pins are multiplexed with GPIO and UART
* Further details are available in the DM365 ARM
* Subsystem Users Guide (sprufg5.pdf), pages 125 - 127
*/
davinci_cfg_reg(DM365_EMAC_TX_EN);
davinci_cfg_reg(DM365_EMAC_TX_CLK);
davinci_cfg_reg(DM365_EMAC_COL);
davinci_cfg_reg(DM365_EMAC_TXD3);
davinci_cfg_reg(DM365_EMAC_TXD2);
davinci_cfg_reg(DM365_EMAC_TXD1);
davinci_cfg_reg(DM365_EMAC_TXD0);
davinci_cfg_reg(DM365_EMAC_RXD3);
davinci_cfg_reg(DM365_EMAC_RXD2);
davinci_cfg_reg(DM365_EMAC_RXD1);
davinci_cfg_reg(DM365_EMAC_RXD0);
davinci_cfg_reg(DM365_EMAC_RX_CLK);
davinci_cfg_reg(DM365_EMAC_RX_DV);
davinci_cfg_reg(DM365_EMAC_RX_ER);
davinci_cfg_reg(DM365_EMAC_CRS);
davinci_cfg_reg(DM365_EMAC_MDIO);
davinci_cfg_reg(DM365_EMAC_MDCLK);
/*
* EMAC interrupts are multiplexed with GPIO interrupts
* Details are available in the DM365 ARM
* Subsystem Users Guide (sprufg5.pdf), pages 133 - 134
*/
davinci_cfg_reg(DM365_INT_EMAC_RXTHRESH);
davinci_cfg_reg(DM365_INT_EMAC_RXPULSE);
davinci_cfg_reg(DM365_INT_EMAC_TXPULSE);
davinci_cfg_reg(DM365_INT_EMAC_MISCPULSE);
}
static void dm365evm_mmc_configure(void)
{
/*
* MMC/SD pins are multiplexed with GPIO and EMIF
* Further details are available in the DM365 ARM
* Subsystem Users Guide (sprufg5.pdf), pages 118, 128 - 131
*/
davinci_cfg_reg(DM365_SD1_CLK);
davinci_cfg_reg(DM365_SD1_CMD);
davinci_cfg_reg(DM365_SD1_DATA3);
davinci_cfg_reg(DM365_SD1_DATA2);
davinci_cfg_reg(DM365_SD1_DATA1);
davinci_cfg_reg(DM365_SD1_DATA0);
}
static struct tvp514x_platform_data tvp5146_pdata = {
.clk_polarity = 0,
.hs_polarity = 1,
.vs_polarity = 1
};
#define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
/* Inputs available at the TVP5146 */
static struct v4l2_input tvp5146_inputs[] = {
{
.index = 0,
.name = "Composite",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = TVP514X_STD_ALL,
},
{
.index = 1,
.name = "S-Video",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = TVP514X_STD_ALL,
},
};
/*
* This is the route info for connecting each input to the decoder
* output that goes to the VPFE. There is a one-to-one correspondence
* with tvp5146_inputs.
*/
static struct vpfe_route tvp5146_routes[] = {
{
.input = INPUT_CVBS_VI2B,
.output = OUTPUT_10BIT_422_EMBEDDED_SYNC,
},
{
.input = INPUT_SVIDEO_VI2C_VI1C,
.output = OUTPUT_10BIT_422_EMBEDDED_SYNC,
},
};
static struct vpfe_subdev_info vpfe_sub_devs[] = {
{
.name = "tvp5146",
.grp_id = 0,
.num_inputs = ARRAY_SIZE(tvp5146_inputs),
.inputs = tvp5146_inputs,
.routes = tvp5146_routes,
.can_route = 1,
.ccdc_if_params = {
.if_type = VPFE_BT656,
.hdpol = VPFE_PINPOL_POSITIVE,
.vdpol = VPFE_PINPOL_POSITIVE,
},
.board_info = {
I2C_BOARD_INFO("tvp5146", 0x5d),
.platform_data = &tvp5146_pdata,
},
},
};
static struct vpfe_config vpfe_cfg = {
.num_subdevs = ARRAY_SIZE(vpfe_sub_devs),
.sub_devs = vpfe_sub_devs,
.i2c_adapter_id = 1,
.card_name = "DM365 EVM",
.ccdc = "ISIF",
};
static void __init evm_init_i2c(void)
{
davinci_init_i2c(&i2c_pdata);
i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
}
static struct platform_device *dm365_evm_nand_devices[] __initdata = {
&davinci_nand_device,
};
static inline int have_leds(void)
{
#ifdef CONFIG_LEDS_CLASS
return 1;
#else
return 0;
#endif
}
struct cpld_led {
struct led_classdev cdev;
u8 mask;
};
static const struct {
const char *name;
const char *trigger;
} cpld_leds[] = {
{ "dm365evm::ds2", },
{ "dm365evm::ds3", },
{ "dm365evm::ds4", },
{ "dm365evm::ds5", },
{ "dm365evm::ds6", "nand-disk", },
{ "dm365evm::ds7", "mmc1", },
{ "dm365evm::ds8", "mmc0", },
{ "dm365evm::ds9", "heartbeat", },
};
static void cpld_led_set(struct led_classdev *cdev, enum led_brightness b)
{
struct cpld_led *led = container_of(cdev, struct cpld_led, cdev);
u8 reg = __raw_readb(cpld + CPLD_LEDS);
if (b != LED_OFF)
reg &= ~led->mask;
else
reg |= led->mask;
__raw_writeb(reg, cpld + CPLD_LEDS);
}
static enum led_brightness cpld_led_get(struct led_classdev *cdev)
{
struct cpld_led *led = container_of(cdev, struct cpld_led, cdev);
u8 reg = __raw_readb(cpld + CPLD_LEDS);
return (reg & led->mask) ? LED_OFF : LED_FULL;
}
static int __init cpld_leds_init(void)
{
int i;
if (!have_leds() || !cpld)
return 0;
/* setup LEDs */
__raw_writeb(0xff, cpld + CPLD_LEDS);
for (i = 0; i < ARRAY_SIZE(cpld_leds); i++) {
struct cpld_led *led;
led = kzalloc(sizeof(*led), GFP_KERNEL);
if (!led)
break;
led->cdev.name = cpld_leds[i].name;
led->cdev.brightness_set = cpld_led_set;
led->cdev.brightness_get = cpld_led_get;
led->cdev.default_trigger = cpld_leds[i].trigger;
led->mask = BIT(i);
if (led_classdev_register(NULL, &led->cdev) < 0) {
kfree(led);
break;
}
}
return 0;
}
/* run after subsys_initcall() for LEDs */
fs_initcall(cpld_leds_init);
static void __init evm_init_cpld(void)
{
u8 mux, resets;
const char *label;
struct clk *aemif_clk;
/* Make sure we can configure the CPLD through CS1. Then
* leave it on for later access to MMC and LED registers.
*/
aemif_clk = clk_get(NULL, "aemif");
if (IS_ERR(aemif_clk))
return;
clk_enable(aemif_clk);
if (request_mem_region(DM365_ASYNC_EMIF_DATA_CE1_BASE, SECTION_SIZE,
"cpld") == NULL)
goto fail;
cpld = ioremap(DM365_ASYNC_EMIF_DATA_CE1_BASE, SECTION_SIZE);
if (!cpld) {
release_mem_region(DM365_ASYNC_EMIF_DATA_CE1_BASE,
SECTION_SIZE);
fail:
pr_err("ERROR: can't map CPLD\n");
clk_disable(aemif_clk);
return;
}
/* External muxing for some signals */
mux = 0;
/* Read SW5 to set up NAND + keypad _or_ OneNAND (sync read).
* NOTE: SW4 bus width setting must match!
*/
if ((__raw_readb(cpld + CPLD_SWITCH) & BIT(5)) == 0) {
/* external keypad mux */
mux |= BIT(7);
platform_add_devices(dm365_evm_nand_devices,
ARRAY_SIZE(dm365_evm_nand_devices));
} else {
/* no OneNAND support yet */
}
/* Leave external chips in reset when unused. */
resets = BIT(3) | BIT(2) | BIT(1) | BIT(0);
/* Static video input config with SN74CBT16214 1-of-3 mux:
* - port b1 == tvp7002 (mux lowbits == 1 or 6)
* - port b2 == imager (mux lowbits == 2 or 7)
* - port b3 == tvp5146 (mux lowbits == 5)
*
* Runtime switching could work too, with limitations.
*/
if (have_imager()) {
label = "HD imager";
mux |= 2;
/* externally mux MMC1/ENET/AIC33 to imager */
mux |= BIT(6) | BIT(5) | BIT(3);
} else {
struct davinci_soc_info *soc_info = &davinci_soc_info;
/* we can use MMC1 ... */
dm365evm_mmc_configure();
davinci_setup_mmc(1, &dm365evm_mmc_config);
/* ... and ENET ... */
dm365evm_emac_configure();
soc_info->emac_pdata->phy_id = DM365_EVM_PHY_ID;
resets &= ~BIT(3);
/* ... and AIC33 */
resets &= ~BIT(1);
if (have_tvp7002()) {
mux |= 1;
resets &= ~BIT(2);
label = "tvp7002 HD";
} else {
/* default to tvp5146 */
mux |= 5;
resets &= ~BIT(0);
label = "tvp5146 SD";
}
}
__raw_writeb(mux, cpld + CPLD_MUX);
__raw_writeb(resets, cpld + CPLD_RESETS);
pr_info("EVM: %s video input\n", label);
/* REVISIT export switches: NTSC/PAL (SW5.6), EXTRA1 (SW5.2), etc */
}
static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
static void __init dm365_evm_map_io(void)
{
/* setup input configuration for VPFE input devices */
dm365_set_vpfe_config(&vpfe_cfg);
dm365_init();
}
static struct spi_eeprom at25640 = {
.byte_len = SZ_64K / 8,
.name = "at25640",
.page_size = 32,
.flags = EE_ADDR2,
};
static struct spi_board_info dm365_evm_spi_info[] __initconst = {
{
.modalias = "at25",
.platform_data = &at25640,
.max_speed_hz = 10 * 1000 * 1000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
},
};
static __init void dm365_evm_init(void)
{
evm_init_i2c();
davinci_serial_init(&uart_config);
dm365evm_emac_configure();
dm365evm_mmc_configure();
davinci_setup_mmc(0, &dm365evm_mmc_config);
/* maybe setup mmc1/etc ... _after_ mmc0 */
evm_init_cpld();
#ifdef CONFIG_SND_DM365_AIC3X_CODEC
dm365_init_asp(&dm365_evm_snd_data);
#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
dm365_init_vc(&dm365_evm_snd_data);
#endif
dm365_init_rtc();
dm365_init_ks(&dm365evm_ks_data);
dm365_init_spi0(BIT(0), dm365_evm_spi_info,
ARRAY_SIZE(dm365_evm_spi_info));
}
MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.boot_params = (0x80000100),
.map_io = dm365_evm_map_io,
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm365_evm_init,
MACHINE_END
| gpl-2.0 |
nikez/android_kernel_htc_msm8660 | arch/x86/xen/setup.c | 3053 | 11373 | /*
* Machine specific setup for xen
*
* Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
* of base:extra, all the base memory can be filled with page
* structures for the extra memory, leaving no space for anything
* else.
*
* 10x seems like a reasonable balance between scaling flexibility and
* leaving a practically usable system.
*/
#define EXTRA_MEM_RATIO (10)
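/* Illustrative arithmetic: with a 512 MiB base allocation the clamp in
 * xen_memory_setup() permits at most 10 * 512 MiB = 5 GiB of extra
 * memory, keeping the struct page overhead for the extra pages well
 * below the base allocation.
 */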
static void __init xen_add_extra_mem(u64 start, u64 size)
{
unsigned long pfn;
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
/* Add new region. */
if (xen_extra_mem[i].size == 0) {
xen_extra_mem[i].start = start;
xen_extra_mem[i].size = size;
break;
}
/* Append to existing region. */
if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
xen_extra_mem[i].size += size;
break;
}
}
if (i == XEN_EXTRA_MEM_MAX_REGIONS)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
memblock_reserve(start, size);
xen_max_p2m_pfn = PFN_DOWN(start + size);
for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
static unsigned long __init xen_release_chunk(unsigned long start,
unsigned long end)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
unsigned long len = 0;
unsigned long pfn;
int ret;
for (pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
/* Make sure pfn exists to start with */
if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
continue;
set_xen_guest_handle(reservation.extent_start, &mfn);
reservation.nr_extents = 1;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
len++;
}
}
printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
start, end, len);
return len;
}
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
phys_addr_t start = 0;
unsigned long released = 0;
unsigned long identity = 0;
const struct e820entry *entry;
int i;
/*
* Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then set the 1:1 map and
* release the pages (if available) in those non-RAM regions.
*
* The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1
* mapping. This is needed for some BIOSes that put (for
* example) the DMI tables in a reserved region that begins on
* a non-page boundary.
*/
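/* Hypothetical example: a reserved e820 region at 0x9d800-0xa0000
 * sitting between RAM regions is rounded out to whole pages
 * (pfns 0x9d-0xa0), released back to Xen where backed, and then
 * mapped 1:1 so firmware tables inside it remain readable.
 */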
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
if (entry->type == E820_RAM)
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) {
if (start_pfn < nr_pages)
released += xen_release_chunk(
start_pfn, min(end_pfn, nr_pages));
identity += set_phys_range_identity(
start_pfn, end_pfn);
}
start = end;
}
}
printk(KERN_INFO "Released %lu pages of unused memory\n", released);
printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages = MAX_DOMAIN_PAGES;
domid_t domid = DOMID_SELF;
int ret;
/*
* For the initial domain we use the maximum reservation as
* the maximum page.
*
* For guest domains the current maximum reservation reflects
* the current maximum rather than the static maximum. In this
* case the e820 map provided to us will cover the static
* maximum region.
*/
if (xen_initial_domain()) {
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
}
return min(max_pages, MAX_DOMAIN_PAGES);
}
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
u64 end = start + size;
/* Align RAM regions to page boundaries. */
if (type == E820_RAM) {
start = PAGE_ALIGN(start);
end &= ~((u64)PAGE_SIZE - 1);
}
e820_add_region(start, end - start, type);
}
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end;
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
int i;
int op;
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
mem_end = PFN_PHYS(max_pfn);
memmap.nr_entries = E820MAX;
set_xen_guest_handle(memmap.buffer, map);
op = xen_initial_domain() ?
XENMEM_machine_memory_map :
XENMEM_memory_map;
rc = HYPERVISOR_memory_op(op, &memmap);
if (rc == -ENOSYS) {
BUG_ON(xen_initial_domain());
memmap.nr_entries = 1;
map[0].addr = 0ULL;
map[0].size = mem_end;
/* 8MB slack (to balance backend allocations). */
map[0].size += 8ULL << 20;
map[0].type = E820_RAM;
rc = 0;
}
BUG_ON(rc);
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
max_pages = xen_get_max_pages();
if (max_pages > max_pfn)
extra_pages += max_pages - max_pfn;
/*
* Set P2M for all non-RAM pages and E820 gaps to be identity
* type PFNs. Any RAM pages that would be made inaccessible by
* this are first released.
*/
xen_released_pages = xen_set_identity_and_release(
map, memmap.nr_entries, max_pfn);
extra_pages += xen_released_pages;
/*
* Clamp the amount of extra memory to an EXTRA_MEM_RATIO
* factor of the base size. On non-highmem systems, the base
* size is the full initial memory allocation; on highmem it
* is limited to the max size of lowmem, so that it doesn't
* get completely filled.
*
* In principle there could be a problem in lowmem systems if
* the initial memory is also very large with respect to
* lowmem, but we won't try to deal with that here.
*/
extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages);
i = 0;
while (i < memmap.nr_entries) {
u64 addr = map[i].addr;
u64 size = map[i].size;
u32 type = map[i].type;
if (type == E820_RAM) {
if (addr < mem_end) {
size = min(size, mem_end - addr);
} else if (extra_pages) {
size = min(size, (u64)extra_pages * PAGE_SIZE);
extra_pages -= size / PAGE_SIZE;
xen_add_extra_mem(addr, size);
} else
type = E820_UNUSABLE;
}
xen_align_and_add_e820_region(addr, size, type);
map[i].addr += size;
map[i].size -= size;
if (map[i].size == 0)
i++;
}
/*
* In domU, the ISA region is normal, usable memory, but we
* reserve ISA memory anyway because too many things poke
* about in there.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
/*
* Reserve Xen bits:
* - mfn_list
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
*/
memblock_reserve(__pa(xen_start_info->mfn_list),
xen_start_info->pt_base - xen_start_info->mfn_list);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
return "Xen";
}
/*
* Set the bit indicating "nosegneg" library variants should be used.
* We only need to bother in pure 32-bit mode; compat 32-bit processes
* can have un-truncated segments, so wrapping around is allowed.
*/
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
u32 *mask;
mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
static int __cpuinit register_callback(unsigned type, const void *func)
{
struct callback_register callback = {
.type = type,
.address = XEN_CALLBACK(__KERNEL_CS, func),
.flags = CALLBACKF_mask_events,
};
return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void __cpuinit xen_enable_sysenter(void)
{
int ret;
unsigned sysenter_feature;
#ifdef CONFIG_X86_32
sysenter_feature = X86_FEATURE_SEP;
#else
sysenter_feature = X86_FEATURE_SYSENTER32;
#endif
if (!boot_cpu_has(sysenter_feature))
return;
ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
if (ret != 0)
setup_clear_cpu_cap(sysenter_feature);
}
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
int ret;
ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
if (ret != 0) {
printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
/* Pretty fatal; 64-bit userspace has no other
mechanism for syscalls. */
}
if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
ret = register_callback(CALLBACKTYPE_syscall32,
xen_syscall32_target);
if (ret != 0)
setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}
#endif /* CONFIG_X86_64 */
}
void __init xen_arch_setup(void)
{
xen_panic_handler_init();
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
if (!xen_feature(XENFEAT_auto_translated_physmap))
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_pae_extended_cr3);
if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
BUG();
xen_enable_sysenter();
xen_enable_syscall();
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
disable_acpi();
}
#endif
memcpy(boot_command_line, xen_start_info->cmd_line,
MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
boot_cpu_data.hlt_works_ok = 1;
#endif
disable_cpuidle();
disable_cpufreq();
WARN_ON(set_pm_idle_to_default());
fiddle_vdso();
}
| gpl-2.0 |
The-Sickness/G920T-MM | drivers/media/pci/ivtv/ivtvfb.c | 3565 | 37085 | /*
On Screen Display cx23415 Framebuffer driver
This module presents the cx23415 OSD (onscreen display) framebuffer memory
as a standard Linux /dev/fb style framebuffer device. The framebuffer has
support for 8, 16 & 32 bpp packed pixel formats with alpha channel. In 16bpp
mode, there is a choice of a three color depths (12, 15 or 16 bits), but no
local alpha. The colorspace is selectable between rgb & yuv.
Depending on the TV standard configured in the ivtv module at load time,
the initial resolution is either 640x400 (NTSC) or 640x480 (PAL) at 8bpp.
Video timings are locked to ensure a vertical refresh rate of 50Hz (PAL)
or 59.94Hz (NTSC)
Copyright (c) 2003 Matt T. Yourst <yourst@yourst.com>
Derived from drivers/video/vesafb.c
Portions (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
2.6 kernel port:
Copyright (C) 2004 Matthias Badaire
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
Copyright (C) 2006 Ian Armstrong <ian@iarmst.demon.co.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/ivtvfb.h>
#include <linux/slab.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-i2c.h"
#include "ivtv-udma.h"
#include "ivtv-mailbox.h"
#include "ivtv-firmware.h"
/* card parameters */
static int ivtvfb_card_id = -1;
static int ivtvfb_debug = 0;
static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
static int osd_yres;
static int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug, ivtvfb_debug, int, 0644);
module_param(osd_laced, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
module_param(osd_left, int, 0444);
module_param(osd_yres, int, 0444);
module_param(osd_xres, int, 0444);
MODULE_PARM_DESC(ivtvfb_card_id,
"Only use framebuffer of the specified ivtv card (0-31)\n"
"\t\t\tdefault -1: initialize all available framebuffers");
MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: errors only\n"
"\t\t\t(debug = 3 gives full debugging)");
/* Why upper, left, xres, yres, depth, laced? To match the terminology used
by fbset.
Why start at 1 for the left & upper coordinate? Because X doesn't allow 0 */
MODULE_PARM_DESC(osd_laced,
"Interlaced mode\n"
"\t\t\t0=off\n"
"\t\t\t1=on\n"
"\t\t\tdefault off");
MODULE_PARM_DESC(osd_depth,
"Bits per pixel - 8, 16, 32\n"
"\t\t\tdefault 8");
MODULE_PARM_DESC(osd_upper,
"Vertical start position\n"
"\t\t\tdefault 0 (Centered)");
MODULE_PARM_DESC(osd_left,
"Horizontal start position\n"
"\t\t\tdefault 0 (Centered)");
MODULE_PARM_DESC(osd_yres,
"Display height\n"
"\t\t\tdefault 480 (PAL)\n"
"\t\t\t 400 (NTSC)");
MODULE_PARM_DESC(osd_xres,
"Display width\n"
"\t\t\tdefault 640");
MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil, John Harvey, Ian Armstrong");
MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
#define IVTVFB_DBGFLG_WARN (1 << 0)
#define IVTVFB_DBGFLG_INFO (1 << 1)
#define IVTVFB_DEBUG(x, type, fmt, args...) \
do { \
if ((x) & ivtvfb_debug) \
printk(KERN_INFO "ivtvfb%d " type ": " fmt, itv->instance , ## args); \
} while (0)
#define IVTVFB_DEBUG_WARN(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_WARN, "warning", fmt , ## args)
#define IVTVFB_DEBUG_INFO(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_INFO, "info", fmt , ## args)
/* Standard kernel messages */
#define IVTVFB_ERR(fmt, args...) printk(KERN_ERR "ivtvfb%d: " fmt, itv->instance , ## args)
#define IVTVFB_WARN(fmt, args...) printk(KERN_WARNING "ivtvfb%d: " fmt, itv->instance , ## args)
#define IVTVFB_INFO(fmt, args...) printk(KERN_INFO "ivtvfb%d: " fmt, itv->instance , ## args)
/* --------------------------------------------------------------------- */
#define IVTV_OSD_MAX_WIDTH 720
#define IVTV_OSD_MAX_HEIGHT 576
#define IVTV_OSD_BPP_8 0x00
#define IVTV_OSD_BPP_16_444 0x03
#define IVTV_OSD_BPP_16_555 0x02
#define IVTV_OSD_BPP_16_565 0x01
#define IVTV_OSD_BPP_32 0x04
struct osd_info {
/* Physical base address */
unsigned long video_pbase;
/* Relative base address (relative to start of decoder memory) */
u32 video_rbase;
/* Mapped base address */
volatile char __iomem *video_vbase;
/* Buffer size */
u32 video_buffer_size;
#ifdef CONFIG_MTRR
/* video_base rounded down as required by hardware MTRRs */
unsigned long fb_start_aligned_physaddr;
/* video_base rounded up as required by hardware MTRRs */
unsigned long fb_end_aligned_physaddr;
#endif
/* Store the buffer offset */
int set_osd_coords_x;
int set_osd_coords_y;
/* Current dimensions (NOT VISIBLE SIZE!) */
int display_width;
int display_height;
int display_byte_stride;
/* Current bits per pixel */
int bits_per_pixel;
int bytes_per_pixel;
/* Frame buffer stuff */
struct fb_info ivtvfb_info;
struct fb_var_screeninfo ivtvfb_defined;
struct fb_fix_screeninfo ivtvfb_fix;
/* Used for a warm start */
struct fb_var_screeninfo fbvar_cur;
int blank_cur;
u32 palette_cur[256];
u32 pan_cur;
};
struct ivtv_osd_coords {
unsigned long offset;
unsigned long max_offset;
int pixel_stride;
int lines;
int x;
int y;
};
/* --------------------------------------------------------------------- */
/* ivtv API calls for framebuffer related support */
static int ivtvfb_get_framebuffer(struct ivtv *itv, u32 *fbbase,
u32 *fblength)
{
u32 data[CX2341X_MBOX_MAX_DATA];
int rc;
ivtv_firmware_check(itv, "ivtvfb_get_framebuffer");
rc = ivtv_vapi_result(itv, data, CX2341X_OSD_GET_FRAMEBUFFER, 0);
*fbbase = data[0];
*fblength = data[1];
return rc;
}
static int ivtvfb_get_osd_coords(struct ivtv *itv,
struct ivtv_osd_coords *osd)
{
struct osd_info *oi = itv->osd_info;
u32 data[CX2341X_MBOX_MAX_DATA];
ivtv_vapi_result(itv, data, CX2341X_OSD_GET_OSD_COORDS, 0);
osd->offset = data[0] - oi->video_rbase;
osd->max_offset = oi->display_width * oi->display_height * 4;
osd->pixel_stride = data[1];
osd->lines = data[2];
osd->x = data[3];
osd->y = data[4];
return 0;
}
static int ivtvfb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords *osd)
{
struct osd_info *oi = itv->osd_info;
oi->display_width = osd->pixel_stride;
oi->display_byte_stride = osd->pixel_stride * oi->bytes_per_pixel;
oi->set_osd_coords_x += osd->x;
oi->set_osd_coords_y = osd->y;
return ivtv_vapi(itv, CX2341X_OSD_SET_OSD_COORDS, 5,
osd->offset + oi->video_rbase,
osd->pixel_stride,
osd->lines, osd->x, osd->y);
}
static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window)
{
int osd_height_limit = itv->is_out_50hz ? 576 : 480;
/* Only fail if resolution too high, otherwise fudge the start coords. */
if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH))
return -EINVAL;
/* Ensure we don't exceed display limits */
if (ivtv_window->top + ivtv_window->height > osd_height_limit) {
IVTVFB_DEBUG_WARN("ivtv_ioctl_fb_set_display_window - Invalid height setting (%d, %d)\n",
ivtv_window->top, ivtv_window->height);
ivtv_window->top = osd_height_limit - ivtv_window->height;
}
if (ivtv_window->left + ivtv_window->width > IVTV_OSD_MAX_WIDTH) {
IVTVFB_DEBUG_WARN("ivtv_ioctl_fb_set_display_window - Invalid width setting (%d, %d)\n",
ivtv_window->left, ivtv_window->width);
ivtv_window->left = IVTV_OSD_MAX_WIDTH - ivtv_window->width;
}
/* Set the OSD origin */
write_reg((ivtv_window->top << 16) | ivtv_window->left, 0x02a04);
/* How much to display */
write_reg(((ivtv_window->top+ivtv_window->height) << 16) | (ivtv_window->left+ivtv_window->width), 0x02a08);
/* Pass this info back to the yuv handler */
itv->yuv_info.osd_vis_w = ivtv_window->width;
itv->yuv_info.osd_vis_h = ivtv_window->height;
itv->yuv_info.osd_x_offset = ivtv_window->left;
itv->yuv_info.osd_y_offset = ivtv_window->top;
return 0;
}
static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
unsigned long ivtv_dest_addr, void __user *userbuf,
int size_in_bytes)
{
DEFINE_WAIT(wait);
int got_sig = 0;
mutex_lock(&itv->udma.lock);
/* Map User DMA */
if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
mutex_unlock(&itv->udma.lock);
IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, "
"Error with get_user_pages: %d bytes, %d pages returned\n",
size_in_bytes, itv->udma.page_count);
/* get_user_pages must have failed completely */
return -EIO;
}
IVTVFB_DEBUG_INFO("ivtvfb_prep_dec_dma_to_device, %d bytes, %d pages\n",
size_in_bytes, itv->udma.page_count);
ivtv_udma_prepare(itv);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
/* if no UDMA is pending and no UDMA is in progress, then the DMA
is finished */
while (test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags) ||
test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
/* don't interrupt if the DMA is in progress but break off
a still pending DMA. */
got_sig = signal_pending(current);
if (got_sig && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
break;
got_sig = 0;
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
/* Unmap Last DMA Xfer */
ivtv_udma_unmap(itv);
mutex_unlock(&itv->udma.lock);
if (got_sig) {
IVTV_DEBUG_INFO("User stopped OSD\n");
return -EINTR;
}
return 0;
}
static int ivtvfb_prep_frame(struct ivtv *itv, int cmd, void __user *source,
unsigned long dest_offset, int count)
{
DEFINE_WAIT(wait);
struct osd_info *oi = itv->osd_info;
/* Nothing to do */
if (count == 0) {
IVTVFB_DEBUG_WARN("ivtvfb_prep_frame: Nothing to do. count = 0\n");
return -EINVAL;
}
/* Check Total FB Size */
if ((dest_offset + count) > oi->video_buffer_size) {
IVTVFB_WARN("ivtvfb_prep_frame: Overflowing the framebuffer %ld, only %d available\n",
dest_offset + count, oi->video_buffer_size);
return -E2BIG;
}
/* Not fatal, but will have undesirable results */
if ((unsigned long)source & 3)
IVTVFB_WARN("ivtvfb_prep_frame: Source address not 32 bit aligned (0x%08lx)\n",
(unsigned long)source);
if (dest_offset & 3)
IVTVFB_WARN("ivtvfb_prep_frame: Dest offset not 32 bit aligned (%ld)\n", dest_offset);
if (count & 3)
IVTVFB_WARN("ivtvfb_prep_frame: Count not a multiple of 4 (%d)\n", count);
/* Check Source */
if (!access_ok(VERIFY_READ, source + dest_offset, count)) {
IVTVFB_WARN("Invalid userspace pointer 0x%08lx\n",
(unsigned long)source);
IVTVFB_DEBUG_WARN("access_ok() failed for offset 0x%08lx source 0x%08lx count %d\n",
dest_offset, (unsigned long)source,
count);
return -EINVAL;
}
/* OSD Address to send DMA to */
dest_offset += IVTV_DECODER_OFFSET + oi->video_rbase;
/* Fill Buffers */
return ivtvfb_prep_dec_dma_to_device(itv, dest_offset, source, count);
}
static ssize_t ivtvfb_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
void *dst;
int err = 0;
int dma_err;
unsigned long total_size;
struct ivtv *itv = (struct ivtv *) info->par;
unsigned long dma_offset =
IVTV_DECODER_OFFSET + itv->osd_info->video_rbase;
unsigned long dma_size;
u16 lead = 0, tail = 0;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
if (p > total_size)
return -EFBIG;
if (count > total_size) {
err = -EFBIG;
count = total_size;
}
if (count + p > total_size) {
if (!err)
err = -ENOSPC;
count = total_size - p;
}
dst = (void __force *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
/* If transfer size > threshold and both src/dst
addresses are aligned, use DMA */
if (count >= 4096 &&
((unsigned long)buf & 3) == ((unsigned long)dst & 3)) {
/* Odd address = can't DMA. Align */
if ((unsigned long)dst & 3) {
lead = 4 - ((unsigned long)dst & 3);
if (copy_from_user(dst, buf, lead))
return -EFAULT;
buf += lead;
dst += lead;
}
/* DMA resolution is 32 bits */
if ((count - lead) & 3)
tail = (count - lead) & 3;
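/* Example: a 4101-byte write to a destination address ending in ...01
 * copies a 3-byte lead, DMAs the middle 4096 bytes, and copies a
 * 2-byte tail below. */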
/* DMA the data */
dma_size = count - lead - tail;
dma_err = ivtvfb_prep_dec_dma_to_device(itv,
p + lead + dma_offset, (void __user *)buf, dma_size);
if (dma_err)
return dma_err;
dst += dma_size;
buf += dma_size;
/* Copy any leftover data */
if (tail && copy_from_user(dst, buf, tail))
return -EFAULT;
} else if (copy_from_user(dst, buf, count)) {
return -EFAULT;
}
if (!err)
*ppos += count;
return (err) ? err : count;
}
static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
DEFINE_WAIT(wait);
struct ivtv *itv = (struct ivtv *)info->par;
int rc = 0;
switch (cmd) {
case FBIOGET_VBLANK: {
struct fb_vblank vblank;
u32 trace;
memset(&vblank, 0, sizeof(struct fb_vblank));
vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
if (itv->is_out_50hz && trace > 312)
trace -= 312;
else if (itv->is_out_60hz && trace > 262)
trace -= 262;
if (trace == 1)
vblank.flags |= FB_VBLANK_VSYNCING;
vblank.count = itv->last_vsync_field;
vblank.vcount = trace;
vblank.hcount = 0;
if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank)))
return -EFAULT;
return 0;
}
case FBIO_WAITFORVSYNC:
prepare_to_wait(&itv->vsync_waitq, &wait, TASK_INTERRUPTIBLE);
if (!schedule_timeout(msecs_to_jiffies(50)))
rc = -ETIMEDOUT;
finish_wait(&itv->vsync_waitq, &wait);
return rc;
case IVTVFB_IOC_DMA_FRAME: {
struct ivtvfb_dma_frame args;
IVTVFB_DEBUG_INFO("IVTVFB_IOC_DMA_FRAME\n");
if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
return -EFAULT;
return ivtvfb_prep_frame(itv, cmd, args.source, args.dest_offset, args.count);
}
default:
IVTVFB_DEBUG_INFO("Unknown ioctl %08x\n", cmd);
return -EINVAL;
}
return 0;
}
/* Framebuffer device handling */
static int ivtvfb_set_var(struct ivtv *itv, struct fb_var_screeninfo *var)
{
struct osd_info *oi = itv->osd_info;
struct ivtv_osd_coords ivtv_osd;
struct v4l2_rect ivtv_window;
int osd_mode = -1;
IVTVFB_DEBUG_INFO("ivtvfb_set_var\n");
/* Select color space */
if (var->nonstd) /* YUV */
write_reg(read_reg(0x02a00) | 0x0002000, 0x02a00);
else /* RGB */
write_reg(read_reg(0x02a00) & ~0x0002000, 0x02a00);
/* Set the color mode */
switch (var->bits_per_pixel) {
case 8:
osd_mode = IVTV_OSD_BPP_8;
break;
case 32:
osd_mode = IVTV_OSD_BPP_32;
break;
case 16:
switch (var->green.length) {
case 4:
osd_mode = IVTV_OSD_BPP_16_444;
break;
case 5:
osd_mode = IVTV_OSD_BPP_16_555;
break;
case 6:
osd_mode = IVTV_OSD_BPP_16_565;
break;
default:
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n");
}
break;
default:
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n");
}
/* Set video mode. Although rare, the display can become scrambled even
if we don't change mode. Always 'bounce' to osd_mode via mode 0 */
if (osd_mode != -1) {
ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, 0);
ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, osd_mode);
}
oi->bits_per_pixel = var->bits_per_pixel;
oi->bytes_per_pixel = var->bits_per_pixel / 8;
/* Set the flicker filter */
switch (var->vmode & FB_VMODE_MASK) {
case FB_VMODE_NONINTERLACED: /* Filter on */
ivtv_vapi(itv, CX2341X_OSD_SET_FLICKER_STATE, 1, 1);
break;
case FB_VMODE_INTERLACED: /* Filter off */
ivtv_vapi(itv, CX2341X_OSD_SET_FLICKER_STATE, 1, 0);
break;
default:
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid video mode\n");
}
/* Read the current osd info */
ivtvfb_get_osd_coords(itv, &ivtv_osd);
/* Now set the OSD to the size we want */
ivtv_osd.pixel_stride = var->xres_virtual;
ivtv_osd.lines = var->yres_virtual;
ivtv_osd.x = 0;
ivtv_osd.y = 0;
ivtvfb_set_osd_coords(itv, &ivtv_osd);
/* Can't seem to find the right API combo for this.
Use another function which does what we need through direct register access. */
ivtv_window.width = var->xres;
ivtv_window.height = var->yres;
/* Minimum margin cannot be 0, as X won't allow such a mode */
if (!var->upper_margin)
var->upper_margin++;
if (!var->left_margin)
var->left_margin++;
ivtv_window.top = var->upper_margin - 1;
ivtv_window.left = var->left_margin - 1;
ivtvfb_set_display_window(itv, &ivtv_window);
/* Pass screen size back to yuv handler */
itv->yuv_info.osd_full_w = ivtv_osd.pixel_stride;
itv->yuv_info.osd_full_h = ivtv_osd.lines;
/* Force update of yuv registers */
itv->yuv_info.yuv_forced_update = 1;
/* Keep a copy of these settings */
memcpy(&oi->fbvar_cur, var, sizeof(oi->fbvar_cur));
IVTVFB_DEBUG_INFO("Display size: %dx%d (virtual %dx%d) @ %dbpp\n",
var->xres, var->yres,
var->xres_virtual, var->yres_virtual,
var->bits_per_pixel);
IVTVFB_DEBUG_INFO("Display position: %d, %d\n",
var->left_margin, var->upper_margin);
IVTVFB_DEBUG_INFO("Display filter: %s\n",
(var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED ? "on" : "off");
IVTVFB_DEBUG_INFO("Color space: %s\n", var->nonstd ? "YUV" : "RGB");
return 0;
}
static int ivtvfb_get_fix(struct ivtv *itv, struct fb_fix_screeninfo *fix)
{
struct osd_info *oi = itv->osd_info;
IVTVFB_DEBUG_INFO("ivtvfb_get_fix\n");
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strlcpy(fix->id, "cx23415 TV out", sizeof(fix->id));
fix->smem_start = oi->video_pbase;
fix->smem_len = oi->video_buffer_size;
fix->type = FB_TYPE_PACKED_PIXELS;
fix->visual = (oi->bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
fix->xpanstep = 1;
fix->ypanstep = 1;
fix->ywrapstep = 0;
fix->line_length = oi->display_byte_stride;
fix->accel = FB_ACCEL_NONE;
return 0;
}
/* Check the requested display mode, returning -EINVAL if we can't
handle it. */
static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
int osd_height_limit;
u32 pixclock, hlimit, vlimit;
IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
/* Set base references for mode calcs. */
if (itv->is_out_50hz) {
pixclock = 84316;
hlimit = 776;
vlimit = 591;
osd_height_limit = 576;
}
else {
pixclock = 83926;
hlimit = 776;
vlimit = 495;
osd_height_limit = 480;
}
if (var->bits_per_pixel == 8 || var->bits_per_pixel == 32) {
var->transp.offset = 24;
var->transp.length = 8;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
}
else if (var->bits_per_pixel == 16) {
/* To find out the true mode, check green length */
switch (var->green.length) {
case 4:
var->red.offset = 8;
var->red.length = 4;
var->green.offset = 4;
var->green.length = 4;
var->blue.offset = 0;
var->blue.length = 4;
var->transp.offset = 12;
var->transp.length = 1;
break;
case 5:
var->red.offset = 10;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 5;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 15;
var->transp.length = 1;
break;
default:
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
}
}
else {
IVTVFB_DEBUG_WARN("Invalid colour mode: %d\n", var->bits_per_pixel);
return -EINVAL;
}
/* Check the resolution */
if (var->xres > IVTV_OSD_MAX_WIDTH || var->yres > osd_height_limit) {
IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d\n",
var->xres, var->yres);
return -EINVAL;
}
/* Max horizontal size is 1023 @ 32bpp, 2046 @ 16bpp, 4092 @ 8bpp */
if (var->xres_virtual > 4095 / (var->bits_per_pixel / 8) ||
var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8) > oi->video_buffer_size ||
var->xres_virtual < var->xres ||
var->yres_virtual < var->yres) {
IVTVFB_DEBUG_WARN("Invalid virtual resolution: %dx%d\n",
var->xres_virtual, var->yres_virtual);
return -EINVAL;
}
/* Some extra checks if in 8 bit mode */
if (var->bits_per_pixel == 8) {
/* Width must be a multiple of 4 */
if (var->xres & 3) {
IVTVFB_DEBUG_WARN("Invalid resolution for 8bpp: %d\n", var->xres);
return -EINVAL;
}
if (var->xres_virtual & 3) {
IVTVFB_DEBUG_WARN("Invalid virtual resolution for 8bpp: %d)\n", var->xres_virtual);
return -EINVAL;
}
}
else if (var->bits_per_pixel == 16) {
/* Width must be a multiple of 2 */
if (var->xres & 1) {
IVTVFB_DEBUG_WARN("Invalid resolution for 16bpp: %d\n", var->xres);
return -EINVAL;
}
if (var->xres_virtual & 1) {
IVTVFB_DEBUG_WARN("Invalid virtual resolution for 16bpp: %d)\n", var->xres_virtual);
return -EINVAL;
}
}
/* Now check the offsets */
if (var->xoffset >= var->xres_virtual || var->yoffset >= var->yres_virtual) {
IVTVFB_DEBUG_WARN("Invalid offset: %d (%d) %d (%d)\n",
var->xoffset, var->xres_virtual, var->yoffset, var->yres_virtual);
return -EINVAL;
}
/* Check pixel format */
if (var->nonstd > 1) {
IVTVFB_DEBUG_WARN("Invalid nonstd % d\n", var->nonstd);
return -EINVAL;
}
/* Check video mode */
if (((var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) &&
((var->vmode & FB_VMODE_MASK) != FB_VMODE_INTERLACED)) {
IVTVFB_DEBUG_WARN("Invalid video mode: %d\n", var->vmode & FB_VMODE_MASK);
return -EINVAL;
}
/* Check the left & upper margins
If the margins are too large, just center the screen
(enforcing margins causes too many problems) */
if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1)
var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2);
if (var->upper_margin + var->yres > (itv->is_out_50hz ? 577 : 481))
var->upper_margin = 1 + (((itv->is_out_50hz ? 576 : 480) -
var->yres) / 2);
/* Maintain overall 'size' for a constant refresh rate */
var->right_margin = hlimit - var->left_margin - var->xres;
var->lower_margin = vlimit - var->upper_margin - var->yres;
/* Fixed sync times */
var->hsync_len = 24;
var->vsync_len = 2;
/* Non-interlaced / interlaced mode is used to switch the OSD filter
on or off. Adjust the clock timings to maintain a constant
vertical refresh rate. */
if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED)
var->pixclock = pixclock / 2;
else
var->pixclock = pixclock;
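/* Halving the pixel clock above doubles the scan rate, so a
 * progressive frame takes the same time as one interlaced field and
 * the ~50/60 Hz vertical refresh is preserved. */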
itv->osd_rect.width = var->xres;
itv->osd_rect.height = var->yres;
IVTVFB_DEBUG_INFO("Display size: %dx%d (virtual %dx%d) @ %dbpp\n",
var->xres, var->yres,
var->xres_virtual, var->yres_virtual,
var->bits_per_pixel);
IVTVFB_DEBUG_INFO("Display position: %d, %d\n",
var->left_margin, var->upper_margin);
IVTVFB_DEBUG_INFO("Display filter: %s\n",
(var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED ? "on" : "off");
IVTVFB_DEBUG_INFO("Color space: %s\n", var->nonstd ? "YUV" : "RGB");
return 0;
}
static int ivtvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct ivtv *itv = (struct ivtv *) info->par;
IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
return _ivtvfb_check_var(var, itv);
}
static int ivtvfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
u32 osd_pan_index;
struct ivtv *itv = (struct ivtv *) info->par;
if (var->yoffset + info->var.yres > info->var.yres_virtual ||
var->xoffset + info->var.xres > info->var.xres_virtual)
return -EINVAL;
osd_pan_index = var->yoffset * info->fix.line_length
+ var->xoffset * info->var.bits_per_pixel / 8;
write_reg(osd_pan_index, 0x02A0C);
/* Pass this info back to the yuv handler */
itv->yuv_info.osd_x_pan = var->xoffset;
itv->yuv_info.osd_y_pan = var->yoffset;
/* Force update of yuv registers */
itv->yuv_info.yuv_forced_update = 1;
/* Remember this value */
itv->osd_info->pan_cur = osd_pan_index;
return 0;
}
static int ivtvfb_set_par(struct fb_info *info)
{
int rc = 0;
struct ivtv *itv = (struct ivtv *) info->par;
IVTVFB_DEBUG_INFO("ivtvfb_set_par\n");
rc = ivtvfb_set_var(itv, &info->var);
ivtvfb_pan_display(&info->var, info);
ivtvfb_get_fix(itv, &info->fix);
ivtv_firmware_check(itv, "ivtvfb_set_par");
return rc;
}
static int ivtvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
u32 color, *palette;
struct ivtv *itv = (struct ivtv *)info->par;
if (regno >= info->cmap.len)
return -EINVAL;
color = ((transp & 0xFF00) << 16) | ((red & 0xFF00) << 8) | (green & 0xFF00) | ((blue & 0xFF00) >> 8);
if (info->var.bits_per_pixel <= 8) {
write_reg(regno, 0x02a30);
write_reg(color, 0x02a34);
itv->osd_info->palette_cur[regno] = color;
return 0;
}
if (regno >= 16)
return -EINVAL;
palette = info->pseudo_palette;
if (info->var.bits_per_pixel == 16) {
switch (info->var.green.length) {
case 4:
color = ((red & 0xf000) >> 4) |
((green & 0xf000) >> 8) |
((blue & 0xf000) >> 12);
break;
case 5:
color = ((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
break;
case 6:
color = (red & 0xf800 ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
}
}
palette[regno] = color;
return 0;
}
/* We don't really support blanking. All this does is enable or
disable the OSD. */
static int ivtvfb_blank(int blank_mode, struct fb_info *info)
{
struct ivtv *itv = (struct ivtv *)info->par;
IVTVFB_DEBUG_INFO("Set blanking mode : %d\n", blank_mode);
switch (blank_mode) {
case FB_BLANK_UNBLANK:
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
break;
case FB_BLANK_NORMAL:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
break;
case FB_BLANK_POWERDOWN:
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
break;
}
itv->osd_info->blank_cur = blank_mode;
return 0;
}
static struct fb_ops ivtvfb_ops = {
.owner = THIS_MODULE,
.fb_write = ivtvfb_write,
.fb_check_var = ivtvfb_check_var,
.fb_set_par = ivtvfb_set_par,
.fb_setcolreg = ivtvfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_cursor = NULL,
.fb_ioctl = ivtvfb_ioctl,
.fb_pan_display = ivtvfb_pan_display,
.fb_blank = ivtvfb_blank,
};
/* Restore hardware after firmware restart */
static void ivtvfb_restore(struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
int i;
ivtvfb_set_var(itv, &oi->fbvar_cur);
ivtvfb_blank(oi->blank_cur, &oi->ivtvfb_info);
for (i = 0; i < 256; i++) {
write_reg(i, 0x02a30);
write_reg(oi->palette_cur[i], 0x02a34);
}
write_reg(oi->pan_cur, 0x02a0c);
}
/* Initialization */
/* Setup our initial video mode */
static int ivtvfb_init_vidmode(struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
struct v4l2_rect start_window;
int max_height;
/* Color mode */
if (osd_depth != 8 && osd_depth != 16 && osd_depth != 32)
osd_depth = 8;
oi->bits_per_pixel = osd_depth;
oi->bytes_per_pixel = oi->bits_per_pixel / 8;
/* Horizontal size & position */
if (osd_xres > 720)
osd_xres = 720;
/* Must be a multiple of 4 for 8bpp & 2 for 16bpp */
if (osd_depth == 8)
osd_xres &= ~3;
else if (osd_depth == 16)
osd_xres &= ~1;
start_window.width = osd_xres ? osd_xres : 640;
/* Check horizontal start (osd_left). */
if (osd_left && osd_left + start_window.width > 721) {
IVTVFB_ERR("Invalid osd_left - assuming default\n");
osd_left = 0;
}
/* Hardware coords start at 0, user coords start at 1. */
osd_left--;
start_window.left = osd_left >= 0 ?
osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
oi->display_byte_stride =
start_window.width * oi->bytes_per_pixel;
/* Vertical size & position */
max_height = itv->is_out_50hz ? 576 : 480;
if (osd_yres > max_height)
osd_yres = max_height;
start_window.height = osd_yres ?
osd_yres : itv->is_out_50hz ? 480 : 400;
/* Check vertical start (osd_upper). */
if (osd_upper + start_window.height > max_height + 1) {
IVTVFB_ERR("Invalid osd_upper - assuming default\n");
osd_upper = 0;
}
/* Hardware coords start at 0, user coords start at 1. */
osd_upper--;
start_window.top = osd_upper >= 0 ? osd_upper : ((max_height - start_window.height) / 2);
oi->display_width = start_window.width;
oi->display_height = start_window.height;
/* Generate a valid fb_var_screeninfo */
oi->ivtvfb_defined.xres = oi->display_width;
oi->ivtvfb_defined.yres = oi->display_height;
oi->ivtvfb_defined.xres_virtual = oi->display_width;
oi->ivtvfb_defined.yres_virtual = oi->display_height;
oi->ivtvfb_defined.bits_per_pixel = oi->bits_per_pixel;
oi->ivtvfb_defined.vmode = (osd_laced ? FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED);
oi->ivtvfb_defined.left_margin = start_window.left + 1;
oi->ivtvfb_defined.upper_margin = start_window.top + 1;
oi->ivtvfb_defined.accel_flags = FB_ACCEL_NONE;
oi->ivtvfb_defined.nonstd = 0;
/* We've filled in the most data, let the usual mode check
routine fill in the rest. */
_ivtvfb_check_var(&oi->ivtvfb_defined, itv);
/* Generate valid fb_fix_screeninfo */
ivtvfb_get_fix(itv, &oi->ivtvfb_fix);
/* Generate valid fb_info */
oi->ivtvfb_info.node = -1;
oi->ivtvfb_info.flags = FBINFO_FLAG_DEFAULT;
oi->ivtvfb_info.fbops = &ivtvfb_ops;
oi->ivtvfb_info.par = itv;
oi->ivtvfb_info.var = oi->ivtvfb_defined;
oi->ivtvfb_info.fix = oi->ivtvfb_fix;
oi->ivtvfb_info.screen_base = (u8 __iomem *)oi->video_vbase;
oi->ivtvfb_info.fbops = &ivtvfb_ops;
/* Supply some monitor specs. Bogus values will do for now */
oi->ivtvfb_info.monspecs.hfmin = 8000;
oi->ivtvfb_info.monspecs.hfmax = 70000;
oi->ivtvfb_info.monspecs.vfmin = 10;
oi->ivtvfb_info.monspecs.vfmax = 100;
/* Allocate color map */
if (fb_alloc_cmap(&oi->ivtvfb_info.cmap, 256, 1)) {
IVTVFB_ERR("abort, unable to alloc cmap\n");
return -ENOMEM;
}
/* Allocate the pseudo palette */
oi->ivtvfb_info.pseudo_palette =
kmalloc(sizeof(u32) * 16, GFP_KERNEL|__GFP_NOWARN);
if (!oi->ivtvfb_info.pseudo_palette) {
IVTVFB_ERR("abort, unable to alloc pseudo palette\n");
return -ENOMEM;
}
return 0;
}
/* Find OSD buffer base & size. Add to mtrr. Zero osd buffer. */
static int ivtvfb_init_io(struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
mutex_lock(&itv->serialize_lock);
if (ivtv_init_on_first_open(itv)) {
mutex_unlock(&itv->serialize_lock);
IVTVFB_ERR("Failed to initialize ivtv\n");
return -ENXIO;
}
mutex_unlock(&itv->serialize_lock);
if (ivtvfb_get_framebuffer(itv, &oi->video_rbase,
&oi->video_buffer_size) < 0) {
IVTVFB_ERR("Firmware failed to respond\n");
return -EIO;
}
/* The osd buffer size depends on the number of video buffers allocated
on the PVR350 itself. For now we'll hardcode the smallest osd buffer
size to prevent any overlap. */
oi->video_buffer_size = 1704960;
oi->video_pbase = itv->base_addr + IVTV_DECODER_OFFSET + oi->video_rbase;
oi->video_vbase = itv->dec_mem + oi->video_rbase;
if (!oi->video_vbase) {
IVTVFB_ERR("abort, video memory 0x%x @ 0x%lx isn't mapped!\n",
oi->video_buffer_size, oi->video_pbase);
return -EIO;
}
IVTVFB_INFO("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
oi->video_pbase, oi->video_vbase,
oi->video_buffer_size / 1024);
#ifdef CONFIG_MTRR
{
/* Find the largest power of two that maps the whole buffer */
int size_shift = 31;
while (!(oi->video_buffer_size & (1 << size_shift))) {
size_shift--;
}
size_shift++;
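/* e.g. the 1704960-byte buffer set up earlier has its top bit at
 * 2^20, so size_shift becomes 21 and the MTRR range is aligned to
 * 2 MiB. */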
oi->fb_start_aligned_physaddr = oi->video_pbase & ~((1 << size_shift) - 1);
oi->fb_end_aligned_physaddr = oi->video_pbase + oi->video_buffer_size;
oi->fb_end_aligned_physaddr += (1 << size_shift) - 1;
oi->fb_end_aligned_physaddr &= ~((1 << size_shift) - 1);
if (mtrr_add(oi->fb_start_aligned_physaddr,
oi->fb_end_aligned_physaddr - oi->fb_start_aligned_physaddr,
MTRR_TYPE_WRCOMB, 1) < 0) {
IVTVFB_INFO("disabled mttr\n");
oi->fb_start_aligned_physaddr = 0;
oi->fb_end_aligned_physaddr = 0;
}
}
#endif
/* Blank the entire osd. */
memset_io(oi->video_vbase, 0, oi->video_buffer_size);
return 0;
}
/* Release any memory we've grabbed & remove mtrr entry */
static void ivtvfb_release_buffers (struct ivtv *itv)
{
struct osd_info *oi = itv->osd_info;
/* Release cmap */
if (oi->ivtvfb_info.cmap.len)
fb_dealloc_cmap(&oi->ivtvfb_info.cmap);
/* Release pseudo palette */
kfree(oi->ivtvfb_info.pseudo_palette);
#ifdef CONFIG_MTRR
if (oi->fb_end_aligned_physaddr) {
mtrr_del(-1, oi->fb_start_aligned_physaddr,
oi->fb_end_aligned_physaddr - oi->fb_start_aligned_physaddr);
}
#endif
kfree(oi);
itv->osd_info = NULL;
}
/* Initialize the specified card */
static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
if (itv->osd_info) {
IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
return -EBUSY;
}
itv->osd_info = kzalloc(sizeof(struct osd_info),
GFP_ATOMIC|__GFP_NOWARN);
if (itv->osd_info == NULL) {
IVTVFB_ERR("Failed to allocate memory for osd_info\n");
return -ENOMEM;
}
/* Find & setup the OSD buffer */
rc = ivtvfb_init_io(itv);
if (rc) {
ivtvfb_release_buffers(itv);
return rc;
}
/* Set the startup video mode information */
if ((rc = ivtvfb_init_vidmode(itv))) {
ivtvfb_release_buffers(itv);
return rc;
}
/* Register the framebuffer */
if (register_framebuffer(&itv->osd_info->ivtvfb_info) < 0) {
ivtvfb_release_buffers(itv);
return -EINVAL;
}
itv->osd_video_pbase = itv->osd_info->video_pbase;
/* Set the card to the requested mode */
ivtvfb_set_par(&itv->osd_info->ivtvfb_info);
/* Set color 0 to black */
write_reg(0, 0x02a30);
write_reg(0, 0x02a34);
/* Enable the osd */
ivtvfb_blank(FB_BLANK_UNBLANK, &itv->osd_info->ivtvfb_info);
/* Enable restart */
itv->ivtvfb_restore = ivtvfb_restore;
/* Allocate DMA */
ivtv_udma_alloc(itv);
return 0;
}
static int __init ivtvfb_callback_init(struct device *dev, void *p)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct ivtv *itv = container_of(v4l2_dev, struct ivtv, v4l2_dev);
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
if (ivtvfb_init_card(itv) == 0) {
IVTVFB_INFO("Framebuffer registered on %s\n",
itv->v4l2_dev.name);
(*(int *)p)++;
}
}
return 0;
}
static int ivtvfb_callback_cleanup(struct device *dev, void *p)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct ivtv *itv = container_of(v4l2_dev, struct ivtv, v4l2_dev);
struct osd_info *oi = itv->osd_info;
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
if (unregister_framebuffer(&itv->osd_info->ivtvfb_info)) {
IVTVFB_WARN("Framebuffer %d is in use, cannot unload\n",
itv->instance);
return 0;
}
IVTVFB_INFO("Unregister framebuffer %d\n", itv->instance);
itv->ivtvfb_restore = NULL;
ivtvfb_blank(FB_BLANK_VSYNC_SUSPEND, &oi->ivtvfb_info);
ivtvfb_release_buffers(itv);
itv->osd_video_pbase = 0;
}
return 0;
}
static int __init ivtvfb_init(void)
{
struct device_driver *drv;
int registered = 0;
int err;
if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
IVTV_MAX_CARDS - 1);
return -EINVAL;
}
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, ®istered, ivtvfb_callback_init);
(void)err; /* suppress compiler warning */
if (!registered) {
printk(KERN_ERR "ivtvfb: no cards found\n");
return -ENODEV;
}
return 0;
}
static void ivtvfb_cleanup(void)
{
struct device_driver *drv;
int err;
printk(KERN_INFO "ivtvfb: Unloading framebuffer module\n");
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, NULL, ivtvfb_callback_cleanup);
(void)err; /* suppress compiler warning */
}
module_init(ivtvfb_init);
module_exit(ivtvfb_cleanup);
| gpl-2.0 |
StelixROM/android_kernel_lge_mako | arch/arm/mm/highmem.c | 3821 | 3192 | /*
* arch/arm/mm/highmem.c -- ARM highmem support
*
* Author: Nicolas Pitre
* Created: September 8, 2008
* Copyright: Marvell Semiconductors Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"
void *kmap(struct page *page)
{
might_sleep();
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
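/* Typical usage (sketch): map, touch the page, unmap:
 *
 *	void *va = kmap(page);
 *	memcpy(va, buf, len);
 *	kunmap(page);
 */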
void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
unsigned int idx;
unsigned long vaddr;
void *kmap;
int type;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* There is no cache coherency issue on non-VIVT caches, so force
* dedicated kmap usage there for better debugging.
*/
if (!cache_is_vivt())
kmap = NULL;
else
#endif
kmap = kmap_high_get(page);
if (kmap)
return kmap;
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
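/*
 * Note (added for clarity): each CPU owns a private window of
 * KM_TYPE_NR fixmap slots, so idx = type + KM_TYPE_NR * cpu selects
 * a slot no other CPU can touch, and vaddr is the fixed virtual
 * address backing that slot.
 */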
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* With debugging enabled, kunmap_atomic forces that entry to 0.
* Make sure it was indeed properly unmapped.
*/
BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
/*
* When debugging is off, kunmap_atomic leaves the previous mapping
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
set_top_pte(vaddr, mk_pte(page, kmap_prot));
return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int idx, type;
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_top_pte(vaddr, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
void *kmap_atomic_pfn(unsigned long pfn)
{
unsigned long vaddr;
int idx, type;
pagefault_disable();
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
return (void *)vaddr;
}
struct page *kmap_atomic_to_page(const void *ptr)
{
unsigned long vaddr = (unsigned long)ptr;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
return pte_page(get_top_pte(vaddr));
}
| gpl-2.0 |
Davletvm/linux | sound/usb/proc.c | 3821 | 6138 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "proc.h"
/* convert our full speed USB rate into sampling rate in Hz */
static inline unsigned get_full_speed_hz(unsigned int usb_rate)
{
return (usb_rate * 125 + (1 << 12)) >> 13;
}
/* convert our high speed USB rate into sampling rate in Hz */
static inline unsigned get_high_speed_hz(unsigned int usb_rate)
{
return (usb_rate * 125 + (1 << 9)) >> 10;
}
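/*
 * Worked example (illustrative values, not from the original source):
 * a full-speed device streaming at 44.1 kHz reports
 * usb_rate ~= 44100 * 65536 / 1000 = 2890137, and
 * (2890137 * 125 + (1 << 12)) >> 13 recovers 44100 Hz; the added
 * constant rounds to the nearest Hz instead of truncating.
 */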
/*
* common proc files to show the usb device info
*/
static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct snd_usb_audio *chip = entry->private_data;
if (!chip->shutdown)
snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
}
static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct snd_usb_audio *chip = entry->private_data;
if (!chip->shutdown)
snd_iprintf(buffer, "%04x:%04x\n",
USB_ID_VENDOR(chip->usb_id),
USB_ID_PRODUCT(chip->usb_id));
}
void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
{
struct snd_info_entry *entry;
if (!snd_card_proc_new(chip->card, "usbbus", &entry))
snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read);
if (!snd_card_proc_new(chip->card, "usbid", &entry))
snd_info_set_text_ops(entry, chip, proc_audio_usbid_read);
}
/*
* proc interface for list the supported pcm formats
*/
static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
{
struct audioformat *fp;
static char *sync_types[4] = {
"NONE", "ASYNC", "ADAPTIVE", "SYNC"
};
list_for_each_entry(fp, &subs->fmt_list, list) {
snd_pcm_format_t fmt;
snd_iprintf(buffer, " Interface %d\n", fp->iface);
snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
snd_iprintf(buffer, " Format:");
for (fmt = 0; fmt <= SNDRV_PCM_FORMAT_LAST; ++fmt)
if (fp->formats & pcm_format_to_bits(fmt))
snd_iprintf(buffer, " %s",
snd_pcm_format_name(fmt));
snd_iprintf(buffer, "\n");
snd_iprintf(buffer, " Channels: %d\n", fp->channels);
snd_iprintf(buffer, " Endpoint: %d %s (%s)\n",
fp->endpoint & USB_ENDPOINT_NUMBER_MASK,
fp->endpoint & USB_DIR_IN ? "IN" : "OUT",
sync_types[(fp->ep_attr & USB_ENDPOINT_SYNCTYPE) >> 2]);
if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) {
snd_iprintf(buffer, " Rates: %d - %d (continuous)\n",
fp->rate_min, fp->rate_max);
} else {
unsigned int i;
snd_iprintf(buffer, " Rates: ");
for (i = 0; i < fp->nr_rates; i++) {
if (i > 0)
snd_iprintf(buffer, ", ");
snd_iprintf(buffer, "%d", fp->rate_table[i]);
}
snd_iprintf(buffer, "\n");
}
if (subs->speed != USB_SPEED_FULL)
snd_iprintf(buffer, " Data packet interval: %d us\n",
125 * (1 << fp->datainterval));
// snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
// snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes);
}
}
static void proc_dump_ep_status(struct snd_usb_substream *subs,
struct snd_usb_endpoint *data_ep,
struct snd_usb_endpoint *sync_ep,
struct snd_info_buffer *buffer)
{
if (!data_ep)
return;
snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize);
snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
subs->speed == USB_SPEED_FULL
? get_full_speed_hz(data_ep->freqm)
: get_high_speed_hz(data_ep->freqm),
data_ep->freqm >> 16, data_ep->freqm & 0xffff);
if (sync_ep && data_ep->freqshift != INT_MIN) {
int res = 16 - data_ep->freqshift;
snd_iprintf(buffer, " Feedback Format = %d.%d\n",
(sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res);
}
}
static void proc_dump_substream_status(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
{
if (subs->running) {
snd_iprintf(buffer, " Status: Running\n");
snd_iprintf(buffer, " Interface = %d\n", subs->interface);
snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx);
proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer);
} else {
snd_iprintf(buffer, " Status: Stop\n");
}
}
static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct snd_usb_stream *stream = entry->private_data;
snd_iprintf(buffer, "%s : %s\n", stream->chip->card->longname, stream->pcm->name);
if (stream->substream[SNDRV_PCM_STREAM_PLAYBACK].num_formats) {
snd_iprintf(buffer, "\nPlayback:\n");
proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer);
proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer);
}
if (stream->substream[SNDRV_PCM_STREAM_CAPTURE].num_formats) {
snd_iprintf(buffer, "\nCapture:\n");
proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer);
proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer);
}
}
void snd_usb_proc_pcm_format_add(struct snd_usb_stream *stream)
{
struct snd_info_entry *entry;
char name[32];
struct snd_card *card = stream->chip->card;
sprintf(name, "stream%d", stream->pcm_index);
if (!snd_card_proc_new(card, name, &entry))
snd_info_set_text_ops(entry, stream, proc_pcm_format_read);
}
| gpl-2.0 |
championswimmer/android_kernel_sony_huashan | arch/ia64/sn/kernel/sn2/sn_hwperf.c | 4589 | 23323 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
*
* SGI Altix topology and hardware performance monitoring API.
* Mark Goodwin <markgw@sgi.com>.
*
* Creates /proc/sgi_sn/sn_topology (read-only) to export
* info about Altix nodes, routers, CPUs and NumaLink
* interconnection/topology.
*
* Also creates a dynamic misc device named "sn_hwperf"
* that supports an ioctl interface to call down into SAL
* to discover hw objects, topology and to read/write
* memory mapped registers, e.g. for performance monitoring.
* The "sn_hwperf" device is registered only after the procfs
* file is first opened, i.e. only if/when it's needed.
*
* This API is used by SGI Performance Co-Pilot and other
* tools, see http://oss.sgi.com/projects/pcp
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/utsname.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/smp.h>
#include <linux/mutex.h>
#include <asm/processor.h>
#include <asm/topology.h>
#include <asm/uaccess.h>
#include <asm/sal.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include <asm/sn/addrs.h>
static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
static DEFINE_MUTEX(sn_hwperf_init_mutex);
#define cnode_possible(n) ((n) < num_cnodes)
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
int e;
u64 sz;
struct sn_hwperf_object_info *objbuf = NULL;
if ((e = sn_hwperf_init()) < 0) {
printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e);
goto out;
}
sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
objbuf = vmalloc(sz);
if (objbuf == NULL) {
printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
e = -ENOMEM;
goto out;
}
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
0, sz, (u64) objbuf, 0, 0, NULL);
if (e != SN_HWPERF_OP_OK) {
e = -EINVAL;
vfree(objbuf);
}
out:
*nobj = sn_hwperf_obj_cnt;
*ret = objbuf;
return e;
}
static int sn_hwperf_location_to_bpos(char *location,
int *rack, int *bay, int *slot, int *slab)
{
char type;
/* first scan for an old style geoid string */
if (sscanf(location, "%03d%c%02d#%d",
rack, &type, bay, slab) == 4)
*slot = 0;
else /* scan for a new bladed geoid string */
if (sscanf(location, "%03d%c%02d^%02d#%d",
rack, &type, bay, slot, slab) != 5)
return -1;
/* success */
return 0;
}
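/*
 * Example inputs (hypothetical values, matching the sscanf formats
 * above): an old style geoid "001c14#0" yields rack 1, bay 14 and
 * slab 0 with slot forced to 0, while a bladed geoid "001c14^02#0"
 * additionally yields slot 2.
 */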
static int sn_hwperf_geoid_to_cnode(char *location)
{
int cnode;
geoid_t geoid;
moduleid_t module_id;
int rack, bay, slot, slab;
int this_rack, this_bay, this_slot, this_slab;
if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
return -1;
/*
* FIXME: replace with cleaner for_each_XXX macro which addresses
* both compute and IO nodes once ACPI3.0 is available.
*/
for (cnode = 0; cnode < num_cnodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
this_bay = MODULE_GET_BPOS(module_id);
this_slot = geo_slot(geoid);
this_slab = geo_slab(geoid);
if (rack == this_rack && bay == this_bay &&
slot == this_slot && slab == this_slab) {
break;
}
}
return cnode_possible(cnode) ? cnode : -1;
}
static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
{
if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
BUG();
if (SN_HWPERF_FOREIGN(obj))
return -1;
return sn_hwperf_geoid_to_cnode(obj->location);
}
static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
struct sn_hwperf_object_info *objs)
{
int ordinal;
struct sn_hwperf_object_info *p;
for (ordinal=0, p=objs; p != obj; p++) {
if (SN_HWPERF_FOREIGN(p))
continue;
if (SN_HWPERF_SAME_OBJTYPE(p, obj))
ordinal++;
}
return ordinal;
}
static const char *slabname_node = "node"; /* SHub asic */
static const char *slabname_ionode = "ionode"; /* TIO asic */
static const char *slabname_router = "router"; /* NL3R or NL4R */
static const char *slabname_other = "other"; /* unknown asic */
static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
struct sn_hwperf_object_info *objs, int *ordinal)
{
int isnode;
const char *slabname = slabname_other;
if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
slabname = isnode ? slabname_node : slabname_ionode;
*ordinal = sn_hwperf_obj_to_cnode(obj);
}
else {
*ordinal = sn_hwperf_generic_ordinal(obj, objs);
if (SN_HWPERF_IS_ROUTER(obj))
slabname = slabname_router;
}
return slabname;
}
static void print_pci_topology(struct seq_file *s)
{
char *p;
size_t sz;
int e;
for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
if (!(p = kmalloc(sz, GFP_KERNEL)))
break;
e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
if (e == SALRET_OK)
seq_puts(s, p);
kfree(p);
if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED)
break;
}
}
static inline int sn_hwperf_has_cpus(cnodeid_t node)
{
return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node);
}
static inline int sn_hwperf_has_mem(cnodeid_t node)
{
return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages;
}
static struct sn_hwperf_object_info *
sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf,
int nobj, int id)
{
int i;
struct sn_hwperf_object_info *p = objbuf;
for (i=0; i < nobj; i++, p++) {
if (p->id == id)
return p;
}
return NULL;
}
static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf,
int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node)
{
int e;
struct sn_hwperf_object_info *nodeobj = NULL;
struct sn_hwperf_object_info *op;
struct sn_hwperf_object_info *dest;
struct sn_hwperf_object_info *router;
struct sn_hwperf_port_info ptdata[16];
int sz, i, j;
cnodeid_t c;
int found_mem = 0;
int found_cpu = 0;
if (!cnode_possible(node))
return -EINVAL;
if (sn_hwperf_has_cpus(node)) {
if (near_cpu_node)
*near_cpu_node = node;
found_cpu++;
}
if (sn_hwperf_has_mem(node)) {
if (near_mem_node)
*near_mem_node = node;
found_mem++;
}
if (found_cpu && found_mem)
return 0; /* trivially successful */
/* find the argument node object */
for (i=0, op=objbuf; i < nobj; i++, op++) {
if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op))
continue;
if (node == sn_hwperf_obj_to_cnode(op)) {
nodeobj = op;
break;
}
}
if (!nodeobj) {
e = -ENOENT;
goto err;
}
/* get its interconnect topology */
sz = op->ports * sizeof(struct sn_hwperf_port_info);
BUG_ON(sz > sizeof(ptdata));
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
(u64)&ptdata, 0, 0, NULL);
if (e != SN_HWPERF_OP_OK) {
e = -EINVAL;
goto err;
}
/* find nearest node with cpus and nearest memory */
for (router=NULL, j=0; j < op->ports; j++) {
dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id);
if (dest && SN_HWPERF_IS_ROUTER(dest))
router = dest;
if (!dest || SN_HWPERF_FOREIGN(dest) ||
!SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) {
continue;
}
c = sn_hwperf_obj_to_cnode(dest);
if (!found_cpu && sn_hwperf_has_cpus(c)) {
if (near_cpu_node)
*near_cpu_node = c;
found_cpu++;
}
if (!found_mem && sn_hwperf_has_mem(c)) {
if (near_mem_node)
*near_mem_node = c;
found_mem++;
}
}
if (router && (!found_cpu || !found_mem)) {
/* search for a node connected to the same router */
sz = router->ports * sizeof(struct sn_hwperf_port_info);
BUG_ON(sz > sizeof(ptdata));
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, router->id, sz,
(u64)&ptdata, 0, 0, NULL);
if (e != SN_HWPERF_OP_OK) {
e = -EINVAL;
goto err;
}
for (j=0; j < router->ports; j++) {
dest = sn_hwperf_findobj_id(objbuf, nobj,
ptdata[j].conn_id);
if (!dest || dest->id == node ||
SN_HWPERF_FOREIGN(dest) ||
!SN_HWPERF_IS_NODE(dest) ||
SN_HWPERF_IS_IONODE(dest)) {
continue;
}
c = sn_hwperf_obj_to_cnode(dest);
if (!found_cpu && sn_hwperf_has_cpus(c)) {
if (near_cpu_node)
*near_cpu_node = c;
found_cpu++;
}
if (!found_mem && sn_hwperf_has_mem(c)) {
if (near_mem_node)
*near_mem_node = c;
found_mem++;
}
if (found_cpu && found_mem)
break;
}
}
if (!found_cpu || !found_mem) {
/* resort to _any_ node with CPUs and memory */
for (i=0, op=objbuf; i < nobj; i++, op++) {
if (SN_HWPERF_FOREIGN(op) ||
SN_HWPERF_IS_IONODE(op) ||
!SN_HWPERF_IS_NODE(op)) {
continue;
}
c = sn_hwperf_obj_to_cnode(op);
if (!found_cpu && sn_hwperf_has_cpus(c)) {
if (near_cpu_node)
*near_cpu_node = c;
found_cpu++;
}
if (!found_mem && sn_hwperf_has_mem(c)) {
if (near_mem_node)
*near_mem_node = c;
found_mem++;
}
if (found_cpu && found_mem)
break;
}
}
if (!found_cpu || !found_mem)
e = -ENODATA;
err:
return e;
}
static int sn_topology_show(struct seq_file *s, void *d)
{
int sz;
int pt;
int e = 0;
int i;
int j;
const char *slabname;
int ordinal;
char slice;
struct cpuinfo_ia64 *c;
struct sn_hwperf_port_info *ptdata;
struct sn_hwperf_object_info *p;
struct sn_hwperf_object_info *obj = d; /* this object */
struct sn_hwperf_object_info *objs = s->private; /* all objects */
u8 shubtype;
u8 system_size;
u8 sharing_size;
u8 partid;
u8 coher;
u8 nasid_shift;
u8 region_size;
u16 nasid_mask;
int nasid_msb;
if (obj == objs) {
seq_printf(s, "# sn_topology version 2\n");
seq_printf(s, "# objtype ordinal location partition"
" [attribute value [, ...]]\n");
if (ia64_sn_get_sn_info(0,
&shubtype, &nasid_mask, &nasid_shift, &system_size,
&sharing_size, &partid, &coher, ®ion_size))
BUG();
for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
break;
}
seq_printf(s, "partition %u %s local "
"shubtype %s, "
"nasid_mask 0x%016llx, "
"nasid_bits %d:%d, "
"system_size %d, "
"sharing_size %d, "
"coherency_domain %d, "
"region_size %d\n",
partid, utsname()->nodename,
shubtype ? "shub2" : "shub1",
(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
system_size, sharing_size, coher, region_size);
print_pci_topology(s);
}
if (SN_HWPERF_FOREIGN(obj)) {
/* private in another partition: not interesting */
return 0;
}
for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
if (obj->name[i] == ' ')
obj->name[i] = '_';
}
slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
obj->sn_hwp_this_part ? "local" : "shared", obj->name);
if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)))
seq_putc(s, '\n');
else {
cnodeid_t near_mem = -1;
cnodeid_t near_cpu = -1;
seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt,
ordinal, &near_mem, &near_cpu) == 0) {
seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d",
near_mem, near_cpu);
}
if (!SN_HWPERF_IS_IONODE(obj)) {
for_each_online_node(i) {
seq_printf(s, i ? ":%d" : ", dist %d",
node_distance(ordinal, i));
}
}
seq_putc(s, '\n');
/*
* CPUs on this node, if any
*/
if (!SN_HWPERF_IS_IONODE(obj)) {
for_each_cpu_and(i, cpu_online_mask,
cpumask_of_node(ordinal)) {
slice = 'a' + cpuid_to_slice(i);
c = cpu_data(i);
seq_printf(s, "cpu %d %s%c local"
" freq %luMHz, arch ia64",
i, obj->location, slice,
c->proc_freq / 1000000);
for_each_online_cpu(j) {
seq_printf(s, j ? ":%d" : ", dist %d",
node_distance(
cpu_to_node(i),
cpu_to_node(j)));
}
seq_putc(s, '\n');
}
}
}
if (obj->ports) {
/*
* numalink ports
*/
sz = obj->ports * sizeof(struct sn_hwperf_port_info);
if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL)
return -ENOMEM;
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, obj->id, sz,
(u64) ptdata, 0, 0, NULL);
if (e != SN_HWPERF_OP_OK)
return -EINVAL;
for (ordinal=0, p=objs; p != obj; p++) {
if (!SN_HWPERF_FOREIGN(p))
ordinal += p->ports;
}
for (pt = 0; pt < obj->ports; pt++) {
for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
if (ptdata[pt].conn_id == p->id) {
break;
}
}
seq_printf(s, "numalink %d %s-%d",
ordinal+pt, obj->location, ptdata[pt].port);
if (i >= sn_hwperf_obj_cnt) {
/* no connection */
seq_puts(s, " local endpoint disconnected"
", protocol unknown\n");
continue;
}
if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
/* both ends local to this partition */
seq_puts(s, " local");
else if (SN_HWPERF_FOREIGN(p))
/* both ends of the link in a foreign partition */
seq_puts(s, " foreign");
else
/* link straddles a partition */
seq_puts(s, " shared");
/*
* Unlikely, but strictly should query the LLP config
* registers because an NL4R can be configured to run
* NL3 protocol, even when not talking to an NL3 router.
* Ditto for node-node.
*/
seq_printf(s, " endpoint %s-%d, protocol %s\n",
p->location, ptdata[pt].conn_port,
(SN_HWPERF_IS_NL3ROUTER(obj) ||
SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
}
kfree(ptdata);
}
return 0;
}
static void *sn_topology_start(struct seq_file *s, loff_t * pos)
{
struct sn_hwperf_object_info *objs = s->private;
if (*pos < sn_hwperf_obj_cnt)
return (void *)(objs + *pos);
return NULL;
}
static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos)
{
++*pos;
return sn_topology_start(s, pos);
}
static void sn_topology_stop(struct seq_file *m, void *v)
{
return;
}
/*
* /proc/sgi_sn/sn_topology, read-only using seq_file
*/
static const struct seq_operations sn_topology_seq_ops = {
.start = sn_topology_start,
.next = sn_topology_next,
.stop = sn_topology_stop,
.show = sn_topology_show
};
struct sn_hwperf_op_info {
u64 op;
struct sn_hwperf_ioctl_args *a;
void *p;
int *v0;
int ret;
};
static void sn_hwperf_call_sal(void *info)
{
struct sn_hwperf_op_info *op_info = info;
int r;
r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
op_info->a->arg, op_info->a->sz,
(u64) op_info->p, 0, 0, op_info->v0);
op_info->ret = r;
}
static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
{
u32 cpu;
u32 use_ipi;
int r = 0;
cpumask_t save_allowed;
cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
if (cpu != SN_HWPERF_ARG_ANY_CPU) {
if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
r = -EINVAL;
goto out;
}
}
if (cpu == SN_HWPERF_ARG_ANY_CPU) {
/* don't care which cpu */
sn_hwperf_call_sal(op_info);
} else if (cpu == get_cpu()) {
/* already on correct cpu */
sn_hwperf_call_sal(op_info);
put_cpu();
} else {
put_cpu();
if (use_ipi) {
/* use an interprocessor interrupt to call SAL */
smp_call_function_single(cpu, sn_hwperf_call_sal,
op_info, 1);
}
else {
/* migrate the task before calling SAL */
save_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
sn_hwperf_call_sal(op_info);
set_cpus_allowed_ptr(current, &save_allowed);
}
}
r = op_info->ret;
out:
return r;
}
/* map SAL hwperf error code to system error code */
static int sn_hwperf_map_err(int hwperf_err)
{
int e;
switch(hwperf_err) {
case SN_HWPERF_OP_OK:
e = 0;
break;
case SN_HWPERF_OP_NOMEM:
e = -ENOMEM;
break;
case SN_HWPERF_OP_NO_PERM:
e = -EPERM;
break;
case SN_HWPERF_OP_IO_ERROR:
e = -EIO;
break;
case SN_HWPERF_OP_BUSY:
e = -EBUSY;
break;
case SN_HWPERF_OP_RECONFIGURE:
e = -EAGAIN;
break;
case SN_HWPERF_OP_INVAL:
default:
e = -EINVAL;
break;
}
return e;
}
/*
* ioctl for "sn_hwperf" misc device
*/
static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg)
{
struct sn_hwperf_ioctl_args a;
struct cpuinfo_ia64 *cdata;
struct sn_hwperf_object_info *objs;
struct sn_hwperf_object_info *cpuobj;
struct sn_hwperf_op_info op_info;
void *p = NULL;
int nobj;
char slice;
int node;
int r;
int v0;
int i;
int j;
/* only user requests are allowed here */
if ((op & SN_HWPERF_OP_MASK) < 10) {
r = -EINVAL;
goto error;
}
r = copy_from_user(&a, (const void __user *)arg,
sizeof(struct sn_hwperf_ioctl_args));
if (r != 0) {
r = -EFAULT;
goto error;
}
/*
* Allocate memory to hold a kernel copy of the user buffer. The
* buffer contents are either copied in or out (or both) of user
* space depending on the flags encoded in the requested operation.
*/
if (a.ptr) {
p = vmalloc(a.sz);
if (!p) {
r = -ENOMEM;
goto error;
}
}
if (op & SN_HWPERF_OP_MEM_COPYIN) {
r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
if (r != 0) {
r = -EFAULT;
goto error;
}
}
switch (op) {
case SN_HWPERF_GET_CPU_INFO:
if (a.sz == sizeof(u64)) {
/* special case to get size needed */
*(u64 *) p = (u64) num_online_cpus() *
sizeof(struct sn_hwperf_object_info);
} else
if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
r = -ENOMEM;
goto error;
} else
if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
int cpuobj_index = 0;
memset(p, 0, a.sz);
for (i = 0; i < nobj; i++) {
if (!SN_HWPERF_IS_NODE(objs + i))
continue;
node = sn_hwperf_obj_to_cnode(objs + i);
for_each_online_cpu(j) {
if (node != cpu_to_node(j))
continue;
cpuobj = (struct sn_hwperf_object_info *) p + cpuobj_index++;
slice = 'a' + cpuid_to_slice(j);
cdata = cpu_data(j);
cpuobj->id = j;
snprintf(cpuobj->name,
sizeof(cpuobj->name),
"CPU %luMHz %s",
cdata->proc_freq / 1000000,
cdata->vendor);
snprintf(cpuobj->location,
sizeof(cpuobj->location),
"%s%c", objs[i].location,
slice);
}
}
vfree(objs);
}
break;
case SN_HWPERF_GET_NODE_NASID:
if (a.sz != sizeof(u64) ||
(node = a.arg) < 0 || !cnode_possible(node)) {
r = -EINVAL;
goto error;
}
*(u64 *)p = (u64)cnodeid_to_nasid(node);
break;
case SN_HWPERF_GET_OBJ_NODE:
i = a.arg;
if (a.sz != sizeof(u64) || i < 0) {
r = -EINVAL;
goto error;
}
if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
if (i >= nobj) {
r = -EINVAL;
vfree(objs);
goto error;
}
if (objs[i].id != a.arg) {
for (i = 0; i < nobj; i++) {
if (objs[i].id == a.arg)
break;
}
}
if (i == nobj) {
r = -EINVAL;
vfree(objs);
goto error;
}
if (!SN_HWPERF_IS_NODE(objs + i) &&
!SN_HWPERF_IS_IONODE(objs + i)) {
r = -ENOENT;
vfree(objs);
goto error;
}
*(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
vfree(objs);
}
break;
case SN_HWPERF_GET_MMRS:
case SN_HWPERF_SET_MMRS:
case SN_HWPERF_OBJECT_DISTANCE:
op_info.p = p;
op_info.a = &a;
op_info.v0 = &v0;
op_info.op = op;
r = sn_hwperf_op_cpu(&op_info);
if (r) {
r = sn_hwperf_map_err(r);
a.v0 = v0;
goto error;
}
break;
default:
/* all other ops are a direct SAL call */
r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
a.arg, a.sz, (u64) p, 0, 0, &v0);
if (r) {
r = sn_hwperf_map_err(r);
goto error;
}
a.v0 = v0;
break;
}
if (op & SN_HWPERF_OP_MEM_COPYOUT) {
r = copy_to_user((void __user *)a.ptr, p, a.sz);
if (r != 0) {
r = -EFAULT;
goto error;
}
}
error:
vfree(p);
return r;
}
static const struct file_operations sn_hwperf_fops = {
.unlocked_ioctl = sn_hwperf_ioctl,
.llseek = noop_llseek,
};
static struct miscdevice sn_hwperf_dev = {
MISC_DYNAMIC_MINOR,
"sn_hwperf",
&sn_hwperf_fops
};
static int sn_hwperf_init(void)
{
u64 v;
int salr;
int e = 0;
/* single threaded, once-only initialization */
mutex_lock(&sn_hwperf_init_mutex);
if (sn_hwperf_salheap) {
mutex_unlock(&sn_hwperf_init_mutex);
return e;
}
/*
* The PROM code needs a fixed reference node. For convenience the
* same node as the console I/O is used.
*/
sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();
/*
* Request the needed size and install the PROM scratch area.
* The PROM keeps various tracking bits in this memory area.
*/
salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
(u64) SN_HWPERF_GET_HEAPSIZE, 0,
(u64) sizeof(u64), (u64) &v, 0, 0, NULL);
if (salr != SN_HWPERF_OP_OK) {
e = -EINVAL;
goto out;
}
if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
e = -ENOMEM;
goto out;
}
salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_INSTALL_HEAP, 0, v,
(u64) sn_hwperf_salheap, 0, 0, NULL);
if (salr != SN_HWPERF_OP_OK) {
e = -EINVAL;
goto out;
}
salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_OBJECT_COUNT, 0,
sizeof(u64), (u64) &v, 0, 0, NULL);
if (salr != SN_HWPERF_OP_OK) {
e = -EINVAL;
goto out;
}
sn_hwperf_obj_cnt = (int)v;
out:
if (e < 0 && sn_hwperf_salheap) {
vfree(sn_hwperf_salheap);
sn_hwperf_salheap = NULL;
sn_hwperf_obj_cnt = 0;
}
mutex_unlock(&sn_hwperf_init_mutex);
return e;
}
int sn_topology_open(struct inode *inode, struct file *file)
{
int e;
struct seq_file *seq;
struct sn_hwperf_object_info *objbuf;
int nobj;
if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
e = seq_open(file, &sn_topology_seq_ops);
seq = file->private_data;
seq->private = objbuf;
}
return e;
}
int sn_topology_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
vfree(seq->private);
return seq_release(inode, file);
}
int sn_hwperf_get_nearest_node(cnodeid_t node,
cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node)
{
int e;
int nobj;
struct sn_hwperf_object_info *objbuf;
if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj,
node, near_mem_node, near_cpu_node);
vfree(objbuf);
}
return e;
}
static int __devinit sn_hwperf_misc_register_init(void)
{
int e;
if (!ia64_platform_is("sn2"))
return 0;
sn_hwperf_init();
/*
* Register a dynamic misc device for hwperf ioctls. Platforms
* supporting hotplug will create /dev/sn_hwperf; otherwise the user
* can look up the minor number in /proc/misc.
*/
if ((e = misc_register(&sn_hwperf_dev)) != 0) {
printk(KERN_ERR "sn_hwperf_misc_register_init: failed to "
"register misc device for \"%s\"\n", sn_hwperf_dev.name);
}
return e;
}
device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */
EXPORT_SYMBOL(sn_hwperf_get_nearest_node);
| gpl-2.0 |
nquest/kernel_dns_s4502m | drivers/i2c/busses/i2c-s6000.c | 5613 | 11192 | /*
* drivers/i2c/busses/i2c-s6000.c
*
* Description: Driver for S6000 Family I2C Interface
* Copyright (c) 2008 emlix GmbH
* Author: Oskar Schirmer <os@emlix.com>
*
* Partially based on i2c-bfin-twi.c driver by <sonic.zhang@analog.com>
* Copyright (c) 2005-2007 Analog Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/i2c/s6000.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include "i2c-s6000.h"
#define DRV_NAME "i2c-s6000"
#define POLL_TIMEOUT (2 * HZ)
struct s6i2c_if {
u8 __iomem *reg; /* memory mapped registers */
int irq;
spinlock_t lock;
struct i2c_msg *msgs; /* messages currently handled */
int msgs_num; /* nb of msgs to do */
int msgs_push; /* nb of msgs read/written */
int msgs_done; /* nb of msgs finally handled */
unsigned push; /* nb of bytes read/written in msg */
unsigned done; /* nb of bytes finally handled */
int timeout_count; /* timeout retries left */
struct timer_list timeout_timer;
struct i2c_adapter adap;
struct completion complete;
struct clk *clk;
struct resource *res;
};
static inline u16 i2c_rd16(struct s6i2c_if *iface, unsigned n)
{
return readw(iface->reg + (n));
}
static inline void i2c_wr16(struct s6i2c_if *iface, unsigned n, u16 v)
{
writew(v, iface->reg + (n));
}
static inline u32 i2c_rd32(struct s6i2c_if *iface, unsigned n)
{
return readl(iface->reg + (n));
}
static inline void i2c_wr32(struct s6i2c_if *iface, unsigned n, u32 v)
{
writel(v, iface->reg + (n));
}
static struct s6i2c_if s6i2c_if;
static void s6i2c_handle_interrupt(struct s6i2c_if *iface)
{
if (i2c_rd16(iface, S6_I2C_INTRSTAT) & (1 << S6_I2C_INTR_TXABRT)) {
i2c_rd16(iface, S6_I2C_CLRTXABRT);
i2c_wr16(iface, S6_I2C_INTRMASK, 0);
complete(&iface->complete);
return;
}
if (iface->msgs_done >= iface->msgs_num) {
dev_err(&iface->adap.dev, "s6i2c: spurious I2C irq: %04x\n",
i2c_rd16(iface, S6_I2C_INTRSTAT));
i2c_wr16(iface, S6_I2C_INTRMASK, 0);
return;
}
while ((iface->msgs_push < iface->msgs_num)
&& (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_TFNF))) {
struct i2c_msg *m = &iface->msgs[iface->msgs_push];
if (!(m->flags & I2C_M_RD))
i2c_wr16(iface, S6_I2C_DATACMD, m->buf[iface->push]);
else
i2c_wr16(iface, S6_I2C_DATACMD,
1 << S6_I2C_DATACMD_READ);
if (++iface->push >= m->len) {
iface->push = 0;
iface->msgs_push += 1;
}
}
do {
struct i2c_msg *m = &iface->msgs[iface->msgs_done];
if (!(m->flags & I2C_M_RD)) {
if (iface->msgs_done < iface->msgs_push)
iface->msgs_done += 1;
else
break;
} else if (i2c_rd16(iface, S6_I2C_STATUS)
& (1 << S6_I2C_STATUS_RFNE)) {
m->buf[iface->done] = i2c_rd16(iface, S6_I2C_DATACMD);
if (++iface->done >= m->len) {
iface->done = 0;
iface->msgs_done += 1;
}
} else {
break;
}
} while (iface->msgs_done < iface->msgs_num);
if (iface->msgs_done >= iface->msgs_num) {
i2c_wr16(iface, S6_I2C_INTRMASK, 1 << S6_I2C_INTR_TXABRT);
complete(&iface->complete);
} else if (iface->msgs_push >= iface->msgs_num) {
i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) |
(1 << S6_I2C_INTR_RXFULL));
} else {
i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) |
(1 << S6_I2C_INTR_TXEMPTY) |
(1 << S6_I2C_INTR_RXFULL));
}
}
static irqreturn_t s6i2c_interrupt_entry(int irq, void *dev_id)
{
struct s6i2c_if *iface = dev_id;
if (!(i2c_rd16(iface, S6_I2C_STATUS) & ((1 << S6_I2C_INTR_RXUNDER)
| (1 << S6_I2C_INTR_RXOVER)
| (1 << S6_I2C_INTR_RXFULL)
| (1 << S6_I2C_INTR_TXOVER)
| (1 << S6_I2C_INTR_TXEMPTY)
| (1 << S6_I2C_INTR_RDREQ)
| (1 << S6_I2C_INTR_TXABRT)
| (1 << S6_I2C_INTR_RXDONE)
| (1 << S6_I2C_INTR_ACTIVITY)
| (1 << S6_I2C_INTR_STOPDET)
| (1 << S6_I2C_INTR_STARTDET)
| (1 << S6_I2C_INTR_GENCALL))))
return IRQ_NONE;
spin_lock(&iface->lock);
del_timer(&iface->timeout_timer);
s6i2c_handle_interrupt(iface);
spin_unlock(&iface->lock);
return IRQ_HANDLED;
}
static void s6i2c_timeout(unsigned long data)
{
struct s6i2c_if *iface = (struct s6i2c_if *)data;
unsigned long flags;
spin_lock_irqsave(&iface->lock, flags);
s6i2c_handle_interrupt(iface);
if (--iface->timeout_count > 0) {
iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
add_timer(&iface->timeout_timer);
} else {
complete(&iface->complete);
i2c_wr16(iface, S6_I2C_INTRMASK, 0);
}
spin_unlock_irqrestore(&iface->lock, flags);
}
static int s6i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct s6i2c_if *iface = adap->algo_data;
int i;
if (num == 0)
return 0;
if (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY))
yield();
i2c_wr16(iface, S6_I2C_INTRMASK, 0);
i2c_rd16(iface, S6_I2C_CLRINTR);
for (i = 0; i < num; i++) {
if (msgs[i].flags & I2C_M_TEN) {
dev_err(&adap->dev,
"s6i2c: 10 bits addr not supported\n");
return -EINVAL;
}
if (msgs[i].len == 0) {
dev_err(&adap->dev,
"s6i2c: zero length message not supported\n");
return -EINVAL;
}
if (msgs[i].addr != msgs[0].addr) {
dev_err(&adap->dev,
"s6i2c: multiple xfer cannot change target\n");
return -EINVAL;
}
}
iface->msgs = msgs;
iface->msgs_num = num;
iface->msgs_push = 0;
iface->msgs_done = 0;
iface->push = 0;
iface->done = 0;
iface->timeout_count = 10;
i2c_wr16(iface, S6_I2C_TAR, msgs[0].addr);
i2c_wr16(iface, S6_I2C_ENABLE, 1);
i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXEMPTY) |
(1 << S6_I2C_INTR_TXABRT));
iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
add_timer(&iface->timeout_timer);
wait_for_completion(&iface->complete);
del_timer_sync(&iface->timeout_timer);
while (i2c_rd32(iface, S6_I2C_TXFLR) > 0)
schedule();
while (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY))
schedule();
i2c_wr16(iface, S6_I2C_INTRMASK, 0);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
return iface->msgs_done;
}
static u32 s6i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm s6i2c_algorithm = {
.master_xfer = s6i2c_master_xfer,
.functionality = s6i2c_functionality,
};
static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
{
u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000;
if (dividend > 0xffff)
return 0xffff;
return dividend;
}
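/*
 * Worked example (hypothetical clock rate, added for illustration):
 * with a 100 MHz bus clock, clk_get_rate() / 1000 = 100000, so the
 * standard-mode SCL low time of 4700 ns maps to
 * (100000 * 4700) / 1000000 = 470 clock ticks, clamped to 0xffff to
 * fit the 16-bit count registers.
 */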
static int __devinit s6i2c_probe(struct platform_device *dev)
{
struct s6i2c_if *iface = &s6i2c_if;
struct i2c_adapter *p_adap;
const char *clock;
int bus_num, rc;
spin_lock_init(&iface->lock);
init_completion(&iface->complete);
iface->irq = platform_get_irq(dev, 0);
if (iface->irq < 0) {
rc = iface->irq;
goto err_out;
}
iface->res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iface->res) {
rc = -ENXIO;
goto err_out;
}
iface->res = request_mem_region(iface->res->start,
resource_size(iface->res),
dev->dev.bus_id);
if (!iface->res) {
rc = -EBUSY;
goto err_out;
}
iface->reg = ioremap_nocache(iface->res->start,
resource_size(iface->res));
if (!iface->reg) {
rc = -ENOMEM;
goto err_reg;
}
clock = NULL;
bus_num = -1;
if (dev->dev.platform_data) {
struct s6_i2c_platform_data *pdata = dev->dev.platform_data;
bus_num = pdata->bus_num;
clock = pdata->clock;
}
iface->clk = clk_get(&dev->dev, clock);
if (IS_ERR(iface->clk)) {
rc = PTR_ERR(iface->clk);
goto err_map;
}
rc = clk_enable(iface->clk);
if (rc < 0)
goto err_clk_put;
init_timer(&iface->timeout_timer);
iface->timeout_timer.function = s6i2c_timeout;
iface->timeout_timer.data = (unsigned long)iface;
p_adap = &iface->adap;
strlcpy(p_adap->name, dev->name, sizeof(p_adap->name));
p_adap->algo = &s6i2c_algorithm;
p_adap->algo_data = iface;
p_adap->nr = bus_num;
p_adap->class = 0;
p_adap->dev.parent = &dev->dev;
i2c_wr16(iface, S6_I2C_INTRMASK, 0);
rc = request_irq(iface->irq, s6i2c_interrupt_entry,
IRQF_SHARED, dev->name, iface);
if (rc) {
dev_err(&p_adap->dev, "s6i2c: can't get IRQ %d\n", iface->irq);
goto err_clk_dis;
}
i2c_wr16(iface, S6_I2C_ENABLE, 0);
udelay(1);
i2c_wr32(iface, S6_I2C_SRESET, 1 << S6_I2C_SRESET_IC_SRST);
i2c_wr16(iface, S6_I2C_CLRTXABRT, 1);
i2c_wr16(iface, S6_I2C_CON,
(1 << S6_I2C_CON_MASTER) |
(S6_I2C_CON_SPEED_NORMAL << S6_I2C_CON_SPEED) |
(0 << S6_I2C_CON_10BITSLAVE) |
(0 << S6_I2C_CON_10BITMASTER) |
(1 << S6_I2C_CON_RESTARTENA) |
(1 << S6_I2C_CON_SLAVEDISABLE));
i2c_wr16(iface, S6_I2C_SSHCNT, nanoseconds_on_clk(iface, 4000));
i2c_wr16(iface, S6_I2C_SSLCNT, nanoseconds_on_clk(iface, 4700));
i2c_wr16(iface, S6_I2C_FSHCNT, nanoseconds_on_clk(iface, 600));
i2c_wr16(iface, S6_I2C_FSLCNT, nanoseconds_on_clk(iface, 1300));
i2c_wr16(iface, S6_I2C_RXTL, 0);
i2c_wr16(iface, S6_I2C_TXTL, 0);
platform_set_drvdata(dev, iface);
rc = i2c_add_numbered_adapter(p_adap);
if (rc)
goto err_irq_free;
return 0;
err_irq_free:
free_irq(iface->irq, iface);
err_clk_dis:
clk_disable(iface->clk);
err_clk_put:
clk_put(iface->clk);
err_map:
iounmap(iface->reg);
err_reg:
release_mem_region(iface->res->start,
resource_size(iface->res));
err_out:
return rc;
}
static int __devexit s6i2c_remove(struct platform_device *pdev)
{
struct s6i2c_if *iface = platform_get_drvdata(pdev);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&iface->adap);
free_irq(iface->irq, iface);
clk_disable(iface->clk);
clk_put(iface->clk);
iounmap(iface->reg);
release_mem_region(iface->res->start,
resource_size(iface->res));
return 0;
}
static struct platform_driver s6i2c_driver = {
.probe = s6i2c_probe,
.remove = __devexit_p(s6i2c_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init s6i2c_init(void)
{
pr_info("I2C: S6000 I2C driver\n");
return platform_driver_register(&s6i2c_driver);
}
static void __exit s6i2c_exit(void)
{
platform_driver_unregister(&s6i2c_driver);
}
MODULE_DESCRIPTION("I2C-Bus adapter routines for S6000 I2C");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
subsys_initcall(s6i2c_init);
module_exit(s6i2c_exit);
| gpl-2.0 |
entrusc/linux-lcd | drivers/pcmcia/sa11xx_base.c | 7661 | 7573 | /*======================================================================
Device driver for the PCMCIA control functionality of StrongARM
SA-1100 microprocessors.
The contents of this file are subject to the Mozilla Public
License Version 1.1 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS
IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
implied. See the License for the specific language governing
rights and limitations under the License.
The initial developer of the original code is John G. Dorsey
<john+@cs.cmu.edu>. Portions created by John G. Dorsey are
Copyright (C) 1999 John G. Dorsey. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU Public License version 2 (the "GPL"), in which
case the provisions of the GPL are applicable instead of the
above. If you wish to allow the use of your version of this file
only under the terms of the GPL and not to allow others to use
your version of this file under the MPL, indicate your decision
by deleting the provisions above and replace them with the notice
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
======================================================================*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include "soc_common.h"
#include "sa11xx_base.h"
/*
* sa1100_pcmcia_default_mecr_timing
* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
* Calculate MECR clock wait states for given CPU clock
* speed and command wait state. This function can be over-
* written by a board specific version.
*
* The default is to simply calculate the BS values as specified in
* the INTEL SA1100 development manual
* "Expansion Memory (PCMCIA) Configuration Register (MECR)"
* that's section 10.2.5 in _my_ version of the manual ;)
*/
static unsigned int
sa1100_pcmcia_default_mecr_timing(struct soc_pcmcia_socket *skt,
unsigned int cpu_speed,
unsigned int cmd_time)
{
return sa1100_pcmcia_mecr_bs(cmd_time, cpu_speed);
}
/* sa1100_pcmcia_set_mecr()
* ^^^^^^^^^^^^^^^^^^^^^^^^
*
* set MECR value for socket <sock> based on this sockets
* io, mem and attribute space access speed.
* Call board specific BS value calculation to allow boards
* to tweak the BS values.
*/
static int
sa1100_pcmcia_set_mecr(struct soc_pcmcia_socket *skt, unsigned int cpu_clock)
{
struct soc_pcmcia_timing timing;
u32 mecr, old_mecr;
unsigned long flags;
unsigned int bs_io, bs_mem, bs_attr;
soc_common_pcmcia_get_timing(skt, &timing);
bs_io = skt->ops->get_timing(skt, cpu_clock, timing.io);
bs_mem = skt->ops->get_timing(skt, cpu_clock, timing.mem);
bs_attr = skt->ops->get_timing(skt, cpu_clock, timing.attr);
local_irq_save(flags);
old_mecr = mecr = MECR;
MECR_FAST_SET(mecr, skt->nr, 0);
MECR_BSIO_SET(mecr, skt->nr, bs_io);
MECR_BSA_SET(mecr, skt->nr, bs_attr);
MECR_BSM_SET(mecr, skt->nr, bs_mem);
if (old_mecr != mecr)
MECR = mecr;
local_irq_restore(flags);
debug(skt, 2, "FAST %X BSM %X BSA %X BSIO %X\n",
MECR_FAST_GET(mecr, skt->nr),
MECR_BSM_GET(mecr, skt->nr), MECR_BSA_GET(mecr, skt->nr),
MECR_BSIO_GET(mecr, skt->nr));
return 0;
}
#ifdef CONFIG_CPU_FREQ
static int
sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
unsigned long val,
struct cpufreq_freqs *freqs)
{
switch (val) {
case CPUFREQ_PRECHANGE:
if (freqs->new > freqs->old)
sa1100_pcmcia_set_mecr(skt, freqs->new);
break;
case CPUFREQ_POSTCHANGE:
if (freqs->new < freqs->old)
sa1100_pcmcia_set_mecr(skt, freqs->new);
break;
case CPUFREQ_RESUMECHANGE:
sa1100_pcmcia_set_mecr(skt, freqs->new);
break;
}
return 0;
}
#endif
static int
sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
{
return sa1100_pcmcia_set_mecr(skt, cpufreq_get(0));
}
static int
sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
{
struct soc_pcmcia_timing timing;
unsigned int clock = cpufreq_get(0);
unsigned long mecr = MECR;
char *p = buf;
soc_common_pcmcia_get_timing(skt, &timing);
p+=sprintf(p, "I/O : %u (%u)\n", timing.io,
sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
p+=sprintf(p, "common : %u (%u)\n", timing.mem,
sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
return p - buf;
}
static const char *skt_names[] = {
"PCMCIA socket 0",
"PCMCIA socket 1",
};
#define SKT_DEV_INFO_SIZE(n) \
(sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket))
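/*
 * Note (added for clarity): skt_dev_info ends in a trailing array of
 * sockets, so a single allocation of SKT_DEV_INFO_SIZE(n) holds the
 * header plus n soc_pcmcia_socket entries; see the kzalloc() in
 * sa11xx_drv_pcmcia_probe() below.
 */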
int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
{
skt->res_skt.start = _PCMCIA(skt->nr);
skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1;
skt->res_skt.name = skt_names[skt->nr];
skt->res_skt.flags = IORESOURCE_MEM;
skt->res_io.start = _PCMCIAIO(skt->nr);
skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
skt->res_io.name = "io";
skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
skt->res_mem.start = _PCMCIAMem(skt->nr);
skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1;
skt->res_mem.name = "memory";
skt->res_mem.flags = IORESOURCE_MEM;
skt->res_attr.start = _PCMCIAAttr(skt->nr);
skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
skt->res_attr.name = "attribute";
skt->res_attr.flags = IORESOURCE_MEM;
return soc_pcmcia_add_one(skt);
}
EXPORT_SYMBOL(sa11xx_drv_pcmcia_add_one);
void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
{
/*
* set default MECR calculation if the board specific
* code did not specify one...
*/
if (!ops->get_timing)
ops->get_timing = sa1100_pcmcia_default_mecr_timing;
/* Provide our SA11x0 specific timing routines. */
ops->set_timing = sa1100_pcmcia_set_timing;
ops->show_timing = sa1100_pcmcia_show_timing;
#ifdef CONFIG_CPU_FREQ
ops->frequency_change = sa1100_pcmcia_frequency_change;
#endif
}
EXPORT_SYMBOL(sa11xx_drv_pcmcia_ops);
int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
int first, int nr)
{
struct skt_dev_info *sinfo;
struct soc_pcmcia_socket *skt;
int i, ret = 0;
sa11xx_drv_pcmcia_ops(ops);
sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
sinfo->nskt = nr;
/* Initialize processor specific parameters */
for (i = 0; i < nr; i++) {
skt = &sinfo->skt[i];
skt->nr = first + i;
soc_pcmcia_init_one(skt, ops, dev);
ret = sa11xx_drv_pcmcia_add_one(skt);
if (ret)
break;
}
if (ret) {
while (--i >= 0)
soc_pcmcia_remove_one(&sinfo->skt[i]);
kfree(sinfo);
} else {
dev_set_drvdata(dev, sinfo);
}
return ret;
}
EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
static int __init sa11xx_pcmcia_init(void)
{
return 0;
}
fs_initcall(sa11xx_pcmcia_init);
static void __exit sa11xx_pcmcia_exit(void) {}
module_exit(sa11xx_pcmcia_exit);
MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11xx core socket driver");
MODULE_LICENSE("Dual MPL/GPL");
| gpl-2.0 |
sky34/android_kernel_sony_lbmsm8960t | drivers/scsi/aic7xxx/aic7xxx_pci.c | 10477 | 61834 | /*
* Product specific probe and attach routines for:
* 3940, 2940, aic7895, aic7890, aic7880,
* aic7870, aic7860 and aic7850 SCSI controllers
*
* Copyright (c) 1994-2001 Justin T. Gibbs.
* Copyright (c) 2000-2001 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
*/
#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aic7xxx_93cx6.h>
#endif
#include "aic7xxx_pci.h"
static inline uint64_t
ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
{
uint64_t id;
id = subvendor
| (subdevice << 16)
| ((uint64_t)vendor << 32)
| ((uint64_t)device << 48);
return (id);
}
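/*
 * Illustrative layout (example values, not taken from the table
 * below): for device 0x8178, vendor 0x9004, subdevice 0x7861 and
 * subvendor 0x9004, the composed id is 0x8178900478619004, i.e.
 * device:vendor:subdevice:subvendor ordered from most to least
 * significant 16-bit field.
 */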
#define AHC_PCI_IOADDR PCIR_MAPS /* I/O Address */
#define AHC_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */
#define DEVID_9005_TYPE(id) ((id) & 0xF)
#define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */
#define DEVID_9005_TYPE_AAA 0x3 /* RAID Card */
#define DEVID_9005_TYPE_SISL 0x5 /* Container ROMB */
#define DEVID_9005_TYPE_MB 0xF /* On Motherboard */
#define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
#define DEVID_9005_MAXRATE_U160 0x0
#define DEVID_9005_MAXRATE_ULTRA2 0x1
#define DEVID_9005_MAXRATE_ULTRA 0x2
#define DEVID_9005_MAXRATE_FAST 0x3
#define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6)
#define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8)
#define DEVID_9005_CLASS_SPI 0x0 /* Parallel SCSI */
#define SUBID_9005_TYPE(id) ((id) & 0xF)
#define SUBID_9005_TYPE_MB 0xF /* On Motherboard */
#define SUBID_9005_TYPE_CARD 0x0 /* Standard Card */
#define SUBID_9005_TYPE_LCCARD 0x1 /* Low Cost Card */
#define SUBID_9005_TYPE_RAID 0x3 /* Combined with Raid */
#define SUBID_9005_TYPE_KNOWN(id) \
((((id) & 0xF) == SUBID_9005_TYPE_MB) \
|| (((id) & 0xF) == SUBID_9005_TYPE_CARD) \
|| (((id) & 0xF) == SUBID_9005_TYPE_LCCARD) \
|| (((id) & 0xF) == SUBID_9005_TYPE_RAID))
#define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
#define SUBID_9005_MAXRATE_ULTRA2 0x0
#define SUBID_9005_MAXRATE_ULTRA 0x1
#define SUBID_9005_MAXRATE_U160 0x2
#define SUBID_9005_MAXRATE_RESERVED 0x3
#define SUBID_9005_SEEPTYPE(id) \
((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
? ((id) & 0xC0) >> 6 \
: ((id) & 0x300) >> 8)
#define SUBID_9005_SEEPTYPE_NONE 0x0
#define SUBID_9005_SEEPTYPE_1K 0x1
#define SUBID_9005_SEEPTYPE_2K_4K 0x2
#define SUBID_9005_SEEPTYPE_RESERVED 0x3
#define SUBID_9005_AUTOTERM(id) \
((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
? (((id) & 0x400) >> 10) == 0 \
: (((id) & 0x40) >> 6) == 0)
#define SUBID_9005_NUMCHAN(id) \
((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
? ((id) & 0x300) >> 8 \
: ((id) & 0xC00) >> 10)
#define SUBID_9005_LEGACYCONN(id) \
((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
? 0 \
: ((id) & 0x80) >> 7)
#define SUBID_9005_MFUNCENB(id) \
((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
? ((id) & 0x800) >> 11 \
: ((id) & 0x1000) >> 12)
/*
* Informational only. Should use chip register to be
* certain, but may be used in identification strings.
*/
#define SUBID_9005_CARD_SCSIWIDTH_MASK 0x2000
#define SUBID_9005_CARD_PCIWIDTH_MASK 0x4000
#define SUBID_9005_CARD_SEDIFF_MASK 0x8000
static ahc_device_setup_t ahc_aic785X_setup;
static ahc_device_setup_t ahc_aic7860_setup;
static ahc_device_setup_t ahc_apa1480_setup;
static ahc_device_setup_t ahc_aic7870_setup;
static ahc_device_setup_t ahc_aic7870h_setup;
static ahc_device_setup_t ahc_aha394X_setup;
static ahc_device_setup_t ahc_aha394Xh_setup;
static ahc_device_setup_t ahc_aha494X_setup;
static ahc_device_setup_t ahc_aha494Xh_setup;
static ahc_device_setup_t ahc_aha398X_setup;
static ahc_device_setup_t ahc_aic7880_setup;
static ahc_device_setup_t ahc_aic7880h_setup;
static ahc_device_setup_t ahc_aha2940Pro_setup;
static ahc_device_setup_t ahc_aha394XU_setup;
static ahc_device_setup_t ahc_aha394XUh_setup;
static ahc_device_setup_t ahc_aha398XU_setup;
static ahc_device_setup_t ahc_aic7890_setup;
static ahc_device_setup_t ahc_aic7892_setup;
static ahc_device_setup_t ahc_aic7895_setup;
static ahc_device_setup_t ahc_aic7895h_setup;
static ahc_device_setup_t ahc_aic7896_setup;
static ahc_device_setup_t ahc_aic7899_setup;
static ahc_device_setup_t ahc_aha29160C_setup;
static ahc_device_setup_t ahc_raid_setup;
static ahc_device_setup_t ahc_aha394XX_setup;
static ahc_device_setup_t ahc_aha494XX_setup;
static ahc_device_setup_t ahc_aha398XX_setup;
static const struct ahc_pci_identity ahc_pci_ident_table[] = {
/* aic7850 based controllers */
{
ID_AHA_2902_04_10_15_20C_30C,
ID_ALL_MASK,
"Adaptec 2902/04/10/15/20C/30C SCSI adapter",
ahc_aic785X_setup
},
/* aic7860 based controllers */
{
ID_AHA_2930CU,
ID_ALL_MASK,
"Adaptec 2930CU SCSI adapter",
ahc_aic7860_setup
},
{
ID_AHA_1480A & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 1480A Ultra SCSI adapter",
ahc_apa1480_setup
},
{
ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2940A Ultra SCSI adapter",
ahc_aic7860_setup
},
{
ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2940A/CN Ultra SCSI adapter",
ahc_aic7860_setup
},
{
ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2930C Ultra SCSI adapter (VAR)",
ahc_aic7860_setup
},
/* aic7870 based controllers */
{
ID_AHA_2940,
ID_ALL_MASK,
"Adaptec 2940 SCSI adapter",
ahc_aic7870_setup
},
{
ID_AHA_3940,
ID_ALL_MASK,
"Adaptec 3940 SCSI adapter",
ahc_aha394X_setup
},
{
ID_AHA_398X,
ID_ALL_MASK,
"Adaptec 398X SCSI RAID adapter",
ahc_aha398X_setup
},
{
ID_AHA_2944,
ID_ALL_MASK,
"Adaptec 2944 SCSI adapter",
ahc_aic7870h_setup
},
{
ID_AHA_3944,
ID_ALL_MASK,
"Adaptec 3944 SCSI adapter",
ahc_aha394Xh_setup
},
{
ID_AHA_4944,
ID_ALL_MASK,
"Adaptec 4944 SCSI adapter",
ahc_aha494Xh_setup
},
/* aic7880 based controllers */
{
ID_AHA_2940U & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2940 Ultra SCSI adapter",
ahc_aic7880_setup
},
{
ID_AHA_3940U & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 3940 Ultra SCSI adapter",
ahc_aha394XU_setup
},
{
ID_AHA_2944U & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2944 Ultra SCSI adapter",
ahc_aic7880h_setup
},
{
ID_AHA_3944U & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 3944 Ultra SCSI adapter",
ahc_aha394XUh_setup
},
{
ID_AHA_398XU & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 398X Ultra SCSI RAID adapter",
ahc_aha398XU_setup
},
{
/*
* XXX Don't know the slot numbers
* so we can't identify channels
*/
ID_AHA_4944U & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 4944 Ultra SCSI adapter",
ahc_aic7880h_setup
},
{
ID_AHA_2930U & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2930 Ultra SCSI adapter",
ahc_aic7880_setup
},
{
ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2940 Pro Ultra SCSI adapter",
ahc_aha2940Pro_setup
},
{
ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec 2940/CN Ultra SCSI adapter",
ahc_aic7880_setup
},
/* Ignore all SISL (AAC on MB) based controllers. */
{
ID_9005_SISL_ID,
ID_9005_SISL_MASK,
NULL,
NULL
},
/* aic7890 based controllers */
{
ID_AHA_2930U2,
ID_ALL_MASK,
"Adaptec 2930 Ultra2 SCSI adapter",
ahc_aic7890_setup
},
{
ID_AHA_2940U2B,
ID_ALL_MASK,
"Adaptec 2940B Ultra2 SCSI adapter",
ahc_aic7890_setup
},
{
ID_AHA_2940U2_OEM,
ID_ALL_MASK,
"Adaptec 2940 Ultra2 SCSI adapter (OEM)",
ahc_aic7890_setup
},
{
ID_AHA_2940U2,
ID_ALL_MASK,
"Adaptec 2940 Ultra2 SCSI adapter",
ahc_aic7890_setup
},
{
ID_AHA_2950U2B,
ID_ALL_MASK,
"Adaptec 2950 Ultra2 SCSI adapter",
ahc_aic7890_setup
},
{
ID_AIC7890_ARO,
ID_ALL_MASK,
"Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)",
ahc_aic7890_setup
},
{
ID_AAA_131U2,
ID_ALL_MASK,
"Adaptec AAA-131 Ultra2 RAID adapter",
ahc_aic7890_setup
},
/* aic7892 based controllers */
{
ID_AHA_29160,
ID_ALL_MASK,
"Adaptec 29160 Ultra160 SCSI adapter",
ahc_aic7892_setup
},
{
ID_AHA_29160_CPQ,
ID_ALL_MASK,
"Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter",
ahc_aic7892_setup
},
{
ID_AHA_29160N,
ID_ALL_MASK,
"Adaptec 29160N Ultra160 SCSI adapter",
ahc_aic7892_setup
},
{
ID_AHA_29160C,
ID_ALL_MASK,
"Adaptec 29160C Ultra160 SCSI adapter",
ahc_aha29160C_setup
},
{
ID_AHA_29160B,
ID_ALL_MASK,
"Adaptec 29160B Ultra160 SCSI adapter",
ahc_aic7892_setup
},
{
ID_AHA_19160B,
ID_ALL_MASK,
"Adaptec 19160B Ultra160 SCSI adapter",
ahc_aic7892_setup
},
{
ID_AIC7892_ARO,
ID_ALL_MASK,
"Adaptec aic7892 Ultra160 SCSI adapter (ARO)",
ahc_aic7892_setup
},
{
ID_AHA_2915_30LP,
ID_ALL_MASK,
"Adaptec 2915/30LP Ultra160 SCSI adapter",
ahc_aic7892_setup
},
/* aic7895 based controllers */
{
ID_AHA_2940U_DUAL,
ID_ALL_MASK,
"Adaptec 2940/DUAL Ultra SCSI adapter",
ahc_aic7895_setup
},
{
ID_AHA_3940AU,
ID_ALL_MASK,
"Adaptec 3940A Ultra SCSI adapter",
ahc_aic7895_setup
},
{
ID_AHA_3944AU,
ID_ALL_MASK,
"Adaptec 3944A Ultra SCSI adapter",
ahc_aic7895h_setup
},
{
ID_AIC7895_ARO,
ID_AIC7895_ARO_MASK,
"Adaptec aic7895 Ultra SCSI adapter (ARO)",
ahc_aic7895_setup
},
/* aic7896/97 based controllers */
{
ID_AHA_3950U2B_0,
ID_ALL_MASK,
"Adaptec 3950B Ultra2 SCSI adapter",
ahc_aic7896_setup
},
{
ID_AHA_3950U2B_1,
ID_ALL_MASK,
"Adaptec 3950B Ultra2 SCSI adapter",
ahc_aic7896_setup
},
{
ID_AHA_3950U2D_0,
ID_ALL_MASK,
"Adaptec 3950D Ultra2 SCSI adapter",
ahc_aic7896_setup
},
{
ID_AHA_3950U2D_1,
ID_ALL_MASK,
"Adaptec 3950D Ultra2 SCSI adapter",
ahc_aic7896_setup
},
{
ID_AIC7896_ARO,
ID_ALL_MASK,
"Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)",
ahc_aic7896_setup
},
/* aic7899 based controllers */
{
ID_AHA_3960D,
ID_ALL_MASK,
"Adaptec 3960D Ultra160 SCSI adapter",
ahc_aic7899_setup
},
{
ID_AHA_3960D_CPQ,
ID_ALL_MASK,
"Adaptec (Compaq OEM) 3960D Ultra160 SCSI adapter",
ahc_aic7899_setup
},
{
ID_AIC7899_ARO,
ID_ALL_MASK,
"Adaptec aic7899 Ultra160 SCSI adapter (ARO)",
ahc_aic7899_setup
},
/* Generic chip probes for devices we don't know 'exactly' */
{
ID_AIC7850 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7850 SCSI adapter",
ahc_aic785X_setup
},
{
ID_AIC7855 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7855 SCSI adapter",
ahc_aic785X_setup
},
{
ID_AIC7859 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7859 SCSI adapter",
ahc_aic7860_setup
},
{
ID_AIC7860 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7860 Ultra SCSI adapter",
ahc_aic7860_setup
},
{
ID_AIC7870 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7870 SCSI adapter",
ahc_aic7870_setup
},
{
ID_AIC7880 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7880 Ultra SCSI adapter",
ahc_aic7880_setup
},
{
ID_AIC7890 & ID_9005_GENERIC_MASK,
ID_9005_GENERIC_MASK,
"Adaptec aic7890/91 Ultra2 SCSI adapter",
ahc_aic7890_setup
},
{
ID_AIC7892 & ID_9005_GENERIC_MASK,
ID_9005_GENERIC_MASK,
"Adaptec aic7892 Ultra160 SCSI adapter",
ahc_aic7892_setup
},
{
ID_AIC7895 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7895 Ultra SCSI adapter",
ahc_aic7895_setup
},
{
ID_AIC7896 & ID_9005_GENERIC_MASK,
ID_9005_GENERIC_MASK,
"Adaptec aic7896/97 Ultra2 SCSI adapter",
ahc_aic7896_setup
},
{
ID_AIC7899 & ID_9005_GENERIC_MASK,
ID_9005_GENERIC_MASK,
"Adaptec aic7899 Ultra160 SCSI adapter",
ahc_aic7899_setup
},
{
ID_AIC7810 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7810 RAID memory controller",
ahc_raid_setup
},
{
ID_AIC7815 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7815 RAID memory controller",
ahc_raid_setup
}
};
static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table);
#define AHC_394X_SLOT_CHANNEL_A 4
#define AHC_394X_SLOT_CHANNEL_B 5
#define AHC_398X_SLOT_CHANNEL_A 4
#define AHC_398X_SLOT_CHANNEL_B 8
#define AHC_398X_SLOT_CHANNEL_C 12
#define AHC_494X_SLOT_CHANNEL_A 4
#define AHC_494X_SLOT_CHANNEL_B 5
#define AHC_494X_SLOT_CHANNEL_C 6
#define AHC_494X_SLOT_CHANNEL_D 7
#define DEVCONFIG 0x40
#define PCIERRGENDIS 0x80000000ul
#define SCBSIZE32 0x00010000ul /* aic789X only */
#define REXTVALID 0x00001000ul /* ultra cards only */
#define MPORTMODE 0x00000400ul /* aic7870+ only */
#define RAMPSM 0x00000200ul /* aic7870+ only */
#define VOLSENSE 0x00000100ul
#define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/
#define SCBRAMSEL 0x00000080ul
#define MRDCEN 0x00000040ul
#define EXTSCBTIME 0x00000020ul /* aic7870 only */
#define EXTSCBPEN 0x00000010ul /* aic7870 only */
#define BERREN 0x00000008ul
#define DACEN 0x00000004ul
#define STPWLEVEL 0x00000002ul
#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
#define CSIZE_LATTIME 0x0c
#define CACHESIZE 0x0000003ful /* only the low 6 bits */
#define LATTIME 0x0000ff00ul
/* PCI STATUS definitions: high byte of the PCI status register (read at PCIR_STATUS + 1) */
#define DPE 0x80
#define SSE 0x40
#define RMA 0x20
#define RTA 0x10
#define STA 0x08
#define DPR 0x01
static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
uint16_t subvendor, uint16_t subdevice);
static int ahc_ext_scbram_present(struct ahc_softc *ahc);
static void ahc_scbram_config(struct ahc_softc *ahc, int enable,
int pcheck, int fast, int large);
static void ahc_probe_ext_scbram(struct ahc_softc *ahc);
static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1);
static void ahc_parse_pci_eeprom(struct ahc_softc *ahc,
struct seeprom_config *sc);
static void configure_termination(struct ahc_softc *ahc,
struct seeprom_descriptor *sd,
u_int adapter_control,
u_int *sxfrctl1);
static void ahc_new_term_detect(struct ahc_softc *ahc,
int *enableSEC_low,
int *enableSEC_high,
int *enablePRI_low,
int *enablePRI_high,
int *eeprom_present);
static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *internal68_present,
int *externalcable_present,
int *eeprom_present);
static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *externalcable_present,
int *eeprom_present);
static void write_brdctl(struct ahc_softc *ahc, uint8_t value);
static uint8_t read_brdctl(struct ahc_softc *ahc);
static void ahc_pci_intr(struct ahc_softc *ahc);
static int ahc_pci_chip_init(struct ahc_softc *ahc);
static int
ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
uint16_t subvendor, uint16_t subdevice)
{
int result;
/* Default to invalid. */
result = 0;
if (vendor == 0x9005
&& subvendor == 0x9005
&& subdevice != device
&& SUBID_9005_TYPE_KNOWN(subdevice) != 0) {
switch (SUBID_9005_TYPE(subdevice)) {
case SUBID_9005_TYPE_MB:
break;
case SUBID_9005_TYPE_CARD:
case SUBID_9005_TYPE_LCCARD:
/*
* Currently only trust Adaptec cards to
* get the sub device info correct.
*/
if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA)
result = 1;
break;
case SUBID_9005_TYPE_RAID:
break;
default:
break;
}
}
return (result);
}
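/*
 * Match a controller against the identity table. An entry matches
 * when the composite ID, masked by the entry's id_mask, equals the
 * entry's full_id; entries with a NULL name are exclusions and
 * cause the device to be ignored.
 */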
const struct ahc_pci_identity *
ahc_find_pci_device(ahc_dev_softc_t pci)
{
uint64_t full_id;
uint16_t device;
uint16_t vendor;
uint16_t subdevice;
uint16_t subvendor;
const struct ahc_pci_identity *entry;
u_int i;
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
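/*
 * ahc_compose_id() packs the fields as
 * device<<48 | vendor<<32 | subdevice<<16 | subvendor.
 */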
/*
* If the second function is not hooked up, ignore it.
* Unfortunately, not all MB vendors implement the
* subdevice ID as per the Adaptec spec, so do our best
* to sanity check it prior to accepting the subdevice
* ID as valid.
*/
if (ahc_get_pci_function(pci) > 0
&& ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
&& SUBID_9005_MFUNCENB(subdevice) == 0)
return (NULL);
for (i = 0; i < ahc_num_pci_devs; i++) {
entry = &ahc_pci_ident_table[i];
if (entry->full_id == (full_id & entry->id_mask)) {
/* Honor exclusion entries. */
if (entry->name == NULL)
return (NULL);
return (entry);
}
}
return (NULL);
}
int
ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
{
u_int command;
u_int our_id;
u_int sxfrctl1;
u_int scsiseq;
u_int dscommand0;
uint32_t devconfig;
int error;
uint8_t sblkctl;
our_id = 0;
error = entry->setup(ahc);
if (error != 0)
return (error);
ahc->chip |= AHC_PCI;
ahc->description = entry->name;
pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
error = ahc_pci_map_registers(ahc);
if (error != 0)
return (error);
/*
* Before we continue probing the card, ensure that
* its interrupts are *disabled*. We don't want
* a misstep to hang the machine in an interrupt
* storm.
*/
ahc_intr_enable(ahc, FALSE);
devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
/*
* If we need to support high memory, enable dual
* address cycles. This bit must be set to enable
* high address bit generation even if we are on a
* 64bit bus (PCI64BIT set in devconfig).
*/
if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
if (bootverbose)
printk("%s: Enabling 39Bit Addressing\n",
ahc_name(ahc));
devconfig |= DACEN;
}
/* Ensure that pci error generation, a test feature, is disabled. */
devconfig |= PCIERRGENDIS;
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
/* Ensure busmastering is enabled */
command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
command |= PCIM_CMD_BUSMASTEREN;
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2);
/* On all PCI adapters, we allow SCB paging */
ahc->flags |= AHC_PAGESCBS;
error = ahc_softc_init(ahc);
if (error != 0)
return (error);
/*
* Disable PCI parity error checking. Users typically
* do this to work around broken PCI chipsets that get
* the parity timing wrong and thus generate lots of spurious
* errors. The chip only allows us to disable *all* parity
* error reporting when doing this, so CIO bus, scb ram, and
* scratch ram parity errors will be ignored too.
*/
if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0)
ahc->seqctl |= FAILDIS;
ahc->bus_intr = ahc_pci_intr;
ahc->bus_chip_init = ahc_pci_chip_init;
/* Remember how the card was setup in case there is no SEEPROM */
if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) {
ahc_pause(ahc);
if ((ahc->features & AHC_ULTRA2) != 0)
our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
else
our_id = ahc_inb(ahc, SCSIID) & OID;
sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN;
scsiseq = ahc_inb(ahc, SCSISEQ);
} else {
sxfrctl1 = STPWEN;
our_id = 7;
scsiseq = 0;
}
error = ahc_reset(ahc, /*reinit*/FALSE);
if (error != 0)
return (ENXIO);
if ((ahc->features & AHC_DT) != 0) {
u_int sfunct;
/* Perform ALT-Mode Setup */
sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
ahc_outb(ahc, OPTIONMODE,
OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS);
ahc_outb(ahc, SFUNCT, sfunct);
/* Normal mode setup */
ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN
|TARGCRCENDEN);
}
dscommand0 = ahc_inb(ahc, DSCOMMAND0);
dscommand0 |= MPARCKEN|CACHETHEN;
if ((ahc->features & AHC_ULTRA2) != 0) {
/*
* DPARCKEN doesn't work correctly on
* some MBs so don't use it.
*/
dscommand0 &= ~DPARCKEN;
}
/*
* Handle chips that must have cache line
* streaming (dis/en)abled.
*/
if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0)
dscommand0 |= CACHETHEN;
if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0)
dscommand0 &= ~CACHETHEN;
ahc_outb(ahc, DSCOMMAND0, dscommand0);
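/*
 * The CACHESIZE field of CSIZE_LATTIME counts 32-bit words, so
 * multiply by four to get the cache line size in bytes.
 */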
ahc->pci_cachesize =
ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME,
/*bytes*/1) & CACHESIZE;
ahc->pci_cachesize *= 4;
if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0
&& ahc->pci_cachesize == 4) {
ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
0, /*bytes*/1);
ahc->pci_cachesize = 0;
}
/*
* We cannot perform ULTRA speeds without the presence
* of the external precision resistor.
*/
if ((ahc->features & AHC_ULTRA) != 0) {
uint32_t devconfig;
devconfig = ahc_pci_read_config(ahc->dev_softc,
DEVCONFIG, /*bytes*/4);
if ((devconfig & REXTVALID) == 0)
ahc->features &= ~AHC_ULTRA;
}
/* See if we have a SEEPROM and perform auto-term */
check_extport(ahc, &sxfrctl1);
/*
* Take the LED out of diagnostic mode
*/
sblkctl = ahc_inb(ahc, SBLKCTL);
ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
if ((ahc->features & AHC_ULTRA2) != 0) {
ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX);
} else {
ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100);
}
if (ahc->flags & AHC_USEDEFAULTS) {
/*
* PCI Adapter default setup
* Should only be used if the adapter does not have
* a SEEPROM.
*/
/* See if someone else set us up already */
if ((ahc->flags & AHC_NO_BIOS_INIT) == 0
&& scsiseq != 0) {
printk("%s: Using left over BIOS settings\n",
ahc_name(ahc));
ahc->flags &= ~AHC_USEDEFAULTS;
ahc->flags |= AHC_BIOS_ENABLED;
} else {
/*
* Assume only one connector and always turn
* on termination.
*/
our_id = 0x07;
sxfrctl1 = STPWEN;
}
ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI);
ahc->our_id = our_id;
}
/*
* Take a look to see if we have external SRAM.
* We currently do not attempt to use SRAM that is
* shared among multiple controllers.
*/
ahc_probe_ext_scbram(ahc);
/*
* Record our termination setting for the
* generic initialization routine.
*/
if ((sxfrctl1 & STPWEN) != 0)
ahc->flags |= AHC_TERM_ENB_A;
/*
* Save chip register configuration data for chip resets
* that occur during runtime and resume events.
*/
ahc->bus_softc.pci_softc.devconfig =
ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
ahc->bus_softc.pci_softc.command =
ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1);
ahc->bus_softc.pci_softc.csize_lattime =
ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1);
ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
if ((ahc->features & AHC_DT) != 0) {
u_int sfunct;
sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE);
ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT);
ahc_outb(ahc, SFUNCT, sfunct);
ahc->bus_softc.pci_softc.crccontrol1 =
ahc_inb(ahc, CRCCONTROL1);
}
if ((ahc->features & AHC_MULTI_FUNC) != 0)
ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR);
if ((ahc->features & AHC_ULTRA2) != 0)
ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
/* Core initialization */
error = ahc_init(ahc);
if (error != 0)
return (error);
ahc->init_level++;
/*
* Allow interrupts now that we are completely setup.
*/
return ahc_pci_map_int(ahc);
}
/*
* Test for the presence of external sram in an
* "unshared" configuration.
*/
static int
ahc_ext_scbram_present(struct ahc_softc *ahc)
{
u_int chip;
int ramps;
int single_user;
uint32_t devconfig;
chip = ahc->chip & AHC_CHIPID_MASK;
devconfig = ahc_pci_read_config(ahc->dev_softc,
DEVCONFIG, /*bytes*/4);
single_user = (devconfig & MPORTMODE) != 0;
if ((ahc->features & AHC_ULTRA2) != 0)
ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0;
else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C)
/*
* External SCBRAM arbitration is flaky
* on these chips. Unfortunately this means
* we don't use the extra SCB ram space on the
* 3940AUW.
*/
ramps = 0;
else if (chip >= AHC_AIC7870)
ramps = (devconfig & RAMPSM) != 0;
else
ramps = 0;
if (ramps && single_user)
return (1);
return (0);
}
/*
* Enable external scbram.
*/
static void
ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck,
int fast, int large)
{
uint32_t devconfig;
if (ahc->features & AHC_MULTI_FUNC) {
/*
* Set the SCB Base addr (highest address bit)
* depending on which channel we are.
*/
ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc));
}
ahc->flags &= ~AHC_LSCBS_ENABLED;
if (large)
ahc->flags |= AHC_LSCBS_ENABLED;
devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
if ((ahc->features & AHC_ULTRA2) != 0) {
u_int dscommand0;
dscommand0 = ahc_inb(ahc, DSCOMMAND0);
if (enable)
dscommand0 &= ~INTSCBRAMSEL;
else
dscommand0 |= INTSCBRAMSEL;
if (large)
dscommand0 &= ~USCBSIZE32;
else
dscommand0 |= USCBSIZE32;
ahc_outb(ahc, DSCOMMAND0, dscommand0);
} else {
if (fast)
devconfig &= ~EXTSCBTIME;
else
devconfig |= EXTSCBTIME;
if (enable)
devconfig &= ~SCBRAMSEL;
else
devconfig |= SCBRAMSEL;
if (large)
devconfig &= ~SCBSIZE32;
else
devconfig |= SCBSIZE32;
}
if (pcheck)
devconfig |= EXTSCBPEN;
else
devconfig &= ~EXTSCBPEN;
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
}
/*
* Take a look to see if we have external SRAM.
* We currently do not attempt to use SRAM that is
* shared among multiple controllers.
*/
static void
ahc_probe_ext_scbram(struct ahc_softc *ahc)
{
int num_scbs;
int test_num_scbs;
int enable;
int pcheck;
int fast;
int large;
enable = FALSE;
pcheck = FALSE;
fast = FALSE;
large = FALSE;
num_scbs = 0;
if (ahc_ext_scbram_present(ahc) == 0)
goto done;
/*
* Probe for the best parameters to use.
*/
ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large);
num_scbs = ahc_probe_scbs(ahc);
if (num_scbs == 0) {
/* The SRAM wasn't really present. */
goto done;
}
enable = TRUE;
/*
* Clear any outstanding parity error
* and ensure that parity error reporting
* is enabled.
*/
ahc_outb(ahc, SEQCTL, 0);
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
/* Now see if we can do parity */
ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large);
num_scbs = ahc_probe_scbs(ahc);
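/*
 * A parity failure would latch a break address interrupt with
 * MPARERR set; if either indication is missing, the external
 * SRAM can handle parity checking.
 */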
if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
|| (ahc_inb(ahc, ERROR) & MPARERR) == 0)
pcheck = TRUE;
/* Clear any resulting parity error */
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
/* Now see if we can do fast timing */
ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large);
test_num_scbs = ahc_probe_scbs(ahc);
if (test_num_scbs == num_scbs
&& ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
|| (ahc_inb(ahc, ERROR) & MPARERR) == 0))
fast = TRUE;
/*
* See if we can use large SCBs and still maintain
* the same overall count of SCBs.
*/
if ((ahc->features & AHC_LARGE_SCBS) != 0) {
ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE);
test_num_scbs = ahc_probe_scbs(ahc);
if (test_num_scbs >= num_scbs) {
large = TRUE;
num_scbs = test_num_scbs;
if (num_scbs >= 64) {
/*
* We have enough space to move the
* "busy targets table" into SCB space
* and make it qualify all the way to the
* lun level.
*/
ahc->flags |= AHC_SCB_BTT;
}
}
}
done:
/*
* Disable parity error reporting until we
* can load instruction ram.
*/
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
/* Clear any latched parity error */
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
if (bootverbose && enable) {
printk("%s: External SRAM, %s access%s, %dbytes/SCB\n",
ahc_name(ahc), fast ? "fast" : "slow",
pcheck ? ", parity checking enabled" : "",
large ? 64 : 32);
}
ahc_scbram_config(ahc, enable, pcheck, fast, large);
}
/*
* Perform some simple tests that should catch situations where
* our registers are invalidly mapped.
*/
int
ahc_pci_test_register_access(struct ahc_softc *ahc)
{
int error;
u_int status1;
uint32_t cmd;
uint8_t hcntrl;
error = EIO;
/*
* Enable PCI error interrupt status, but suppress NMIs
* generated by SERR raised due to target aborts.
*/
cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2);
/*
* First a simple test to see if any
* registers can be read. Reading
* HCNTRL has no side effects and has
* at least one bit that is guaranteed to
* be zero so it is a good register to
* use for this test.
*/
hcntrl = ahc_inb(ahc, HCNTRL);
if (hcntrl == 0xFF)
goto fail;
if ((hcntrl & CHIPRST) != 0) {
/*
* The chip has not been initialized since
* PCI/EISA/VLB bus reset. Don't trust
* "left over BIOS data".
*/
ahc->flags |= AHC_NO_BIOS_INIT;
}
/*
* Next create a situation where write combining
* or read prefetching could be initiated by the
* CPU or host bridge. Our device does not support
* either, so look for data corruption and/or flagged
* PCI errors. First pause without causing another
* chip reset.
*/
hcntrl &= ~CHIPRST;
ahc_outb(ahc, HCNTRL, hcntrl|PAUSE);
while (ahc_is_paused(ahc) == 0)
;
/* Clear any PCI errors that occurred before our driver attached. */
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
status1, /*bytes*/1);
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, SEQCTL, PERRORDIS);
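/*
 * Write a test pattern to SCB storage and read it back; a mismatch
 * or a signaled target abort indicates a bad register mapping.
 */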
ahc_outb(ahc, SCBPTR, 0);
ahc_outl(ahc, SCB_BASE, 0x5aa555aa);
if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa)
goto fail;
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
if ((status1 & STA) != 0)
goto fail;
error = 0;
fail:
/* Silently clear any latched errors. */
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
status1, /*bytes*/1);
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2);
return (error);
}
/*
* Check the external port logic for a serial EEPROM
* and termination/cable detection controls.
*/
static void
check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
{
struct seeprom_descriptor sd;
struct seeprom_config *sc;
int have_seeprom;
int have_autoterm;
sd.sd_ahc = ahc;
sd.sd_control_offset = SEECTL;
sd.sd_status_offset = SEECTL;
sd.sd_dataout_offset = SEECTL;
sc = ahc->seep_config;
/*
* For some multi-channel devices, the c46 is simply too
* small to work. For the other controller types, we can
* get our information from either SEEPROM type. Set the
* type to start our probe with accordingly.
*/
if (ahc->flags & AHC_LARGE_SEEPROM)
sd.sd_chip = C56_66;
else
sd.sd_chip = C46;
sd.sd_MS = SEEMS;
sd.sd_RDY = SEERDY;
sd.sd_CS = SEECS;
sd.sd_CK = SEECK;
sd.sd_DO = SEEDO;
sd.sd_DI = SEEDI;
have_seeprom = ahc_acquire_seeprom(ahc, &sd);
if (have_seeprom) {
if (bootverbose)
printk("%s: Reading SEEPROM...", ahc_name(ahc));
for (;;) {
u_int start_addr;
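/*
 * Each channel's config occupies 32 words (sizeof(*sc)/2)
 * in the SEEPROM.
 */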
start_addr = 32 * (ahc->channel - 'A');
have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
start_addr,
sizeof(*sc)/2);
if (have_seeprom)
have_seeprom = ahc_verify_cksum(sc);
if (have_seeprom != 0 || sd.sd_chip == C56_66) {
if (bootverbose) {
if (have_seeprom == 0)
printk ("checksum error\n");
else
printk ("done.\n");
}
break;
}
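/* Retry once, assuming the larger C56/66 part. */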
sd.sd_chip = C56_66;
}
ahc_release_seeprom(&sd);
/* Remember the SEEPROM type for later */
if (sd.sd_chip == C56_66)
ahc->flags |= AHC_LARGE_SEEPROM;
}
if (!have_seeprom) {
/*
* Pull scratch ram settings and treat them as
* if they were the contents of a SEEPROM when
* the 'ADPT' signature is found in SCB2.
* We manually compose the data as 16bit values
* to avoid endian issues.
*/
ahc_outb(ahc, SCBPTR, 2);
if (ahc_inb(ahc, SCB_BASE) == 'A'
&& ahc_inb(ahc, SCB_BASE + 1) == 'D'
&& ahc_inb(ahc, SCB_BASE + 2) == 'P'
&& ahc_inb(ahc, SCB_BASE + 3) == 'T') {
uint16_t *sc_data;
int i;
sc_data = (uint16_t *)sc;
for (i = 0; i < 32; i++, sc_data++) {
int j;
j = i * 2;
*sc_data = ahc_inb(ahc, SRAM_BASE + j)
| ahc_inb(ahc, SRAM_BASE + j + 1) << 8;
}
have_seeprom = ahc_verify_cksum(sc);
if (have_seeprom)
ahc->flags |= AHC_SCB_CONFIG_USED;
}
/*
* Clear any SCB parity errors in case this data and
* its associated parity was not initialized by the BIOS
*/
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
}
if (!have_seeprom) {
if (bootverbose)
printk("%s: No SEEPROM available.\n", ahc_name(ahc));
ahc->flags |= AHC_USEDEFAULTS;
kfree(ahc->seep_config);
ahc->seep_config = NULL;
sc = NULL;
} else {
ahc_parse_pci_eeprom(ahc, sc);
}
/*
* Cards that have the external logic necessary to talk to
* a SEEPROM are almost certain to have the remaining logic
* necessary for auto-termination control. This assumption
* hasn't failed yet...
*/
have_autoterm = have_seeprom;
/*
* Some low-cost chips have SEEPROM and auto-term control built
* in, instead of using a GAL. They can tell us directly
* if the termination logic is enabled.
*/
if ((ahc->features & AHC_SPIOCAP) != 0) {
if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0)
have_autoterm = FALSE;
}
if (have_autoterm) {
ahc->flags |= AHC_HAS_TERM_LOGIC;
ahc_acquire_seeprom(ahc, &sd);
configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1);
ahc_release_seeprom(&sd);
} else if (have_seeprom) {
*sxfrctl1 &= ~STPWEN;
if ((sc->adapter_control & CFSTERM) != 0)
*sxfrctl1 |= STPWEN;
if (bootverbose)
printk("%s: Low byte termination %sabled\n",
ahc_name(ahc),
(*sxfrctl1 & STPWEN) ? "en" : "dis");
}
}
static void
ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc)
{
/*
* Put the data we've collected down into SRAM
* where ahc_init will find it.
*/
int i;
int max_targ = sc->max_targets & CFMAXTARG;
u_int scsi_conf;
uint16_t discenable;
uint16_t ultraenb;
discenable = 0;
ultraenb = 0;
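/* discenable and ultraenb hold one bit per target ID. */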
if ((sc->adapter_control & CFULTRAEN) != 0) {
/*
* Determine if this adapter has a "newstyle"
* SEEPROM format.
*/
for (i = 0; i < max_targ; i++) {
if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) {
ahc->flags |= AHC_NEWEEPROM_FMT;
break;
}
}
}
for (i = 0; i < max_targ; i++) {
u_int scsirate;
uint16_t target_mask;
target_mask = 0x01 << i;
if (sc->device_flags[i] & CFDISC)
discenable |= target_mask;
if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) {
if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0)
ultraenb |= target_mask;
} else if ((sc->adapter_control & CFULTRAEN) != 0) {
ultraenb |= target_mask;
}
if ((sc->device_flags[i] & CFXFER) == 0x04
&& (ultraenb & target_mask) != 0) {
/* Treat 10MHz as a non-ultra speed */
sc->device_flags[i] &= ~CFXFER;
ultraenb &= ~target_mask;
}
if ((ahc->features & AHC_ULTRA2) != 0) {
u_int offset;
if (sc->device_flags[i] & CFSYNCH)
offset = MAX_OFFSET_ULTRA2;
else
offset = 0;
ahc_outb(ahc, TARG_OFFSET + i, offset);
/*
* The ultra enable bits contain the
* high bit of the ultra2 sync rate
* field.
*/
scsirate = (sc->device_flags[i] & CFXFER)
| ((ultraenb & target_mask) ? 0x8 : 0x0);
if (sc->device_flags[i] & CFWIDEB)
scsirate |= WIDEXFER;
} else {
scsirate = (sc->device_flags[i] & CFXFER) << 4;
if (sc->device_flags[i] & CFSYNCH)
scsirate |= SOFS;
if (sc->device_flags[i] & CFWIDEB)
scsirate |= WIDEXFER;
}
ahc_outb(ahc, TARG_SCSIRATE + i, scsirate);
}
ahc->our_id = sc->brtime_id & CFSCSIID;
scsi_conf = (ahc->our_id & 0x7);
if (sc->adapter_control & CFSPARITY)
scsi_conf |= ENSPCHK;
if (sc->adapter_control & CFRESETB)
scsi_conf |= RESET_SCSI;
ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT;
if (sc->bios_control & CFEXTEND)
ahc->flags |= AHC_EXTENDED_TRANS_A;
if (sc->bios_control & CFBIOSEN)
ahc->flags |= AHC_BIOS_ENABLED;
if (ahc->features & AHC_ULTRA
&& (ahc->flags & AHC_NEWEEPROM_FMT) == 0) {
/* Should we enable Ultra mode? */
if (!(sc->adapter_control & CFULTRAEN))
/* Treat us as a non-ultra card */
ultraenb = 0;
}
if (sc->signature == CFSIGNATURE
|| sc->signature == CFSIGNATURE2) {
uint32_t devconfig;
/* Honor the STPWLEVEL settings */
devconfig = ahc_pci_read_config(ahc->dev_softc,
DEVCONFIG, /*bytes*/4);
devconfig &= ~STPWLEVEL;
if ((sc->bios_control & CFSTPWLEVEL) != 0)
devconfig |= STPWLEVEL;
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
devconfig, /*bytes*/4);
}
/* Set SCSICONF info */
ahc_outb(ahc, SCSICONF, scsi_conf);
ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff));
ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff));
ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff);
ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff);
}
static void
configure_termination(struct ahc_softc *ahc,
struct seeprom_descriptor *sd,
u_int adapter_control,
u_int *sxfrctl1)
{
uint8_t brddat;
brddat = 0;
/*
* Update the settings in sxfrctl1 to match the
* termination settings
*/
*sxfrctl1 = 0;
/*
* SEECS must be on for the GALS to latch
* the data properly. Be sure to leave MS
* on or we will release the seeprom.
*/
SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS);
if ((adapter_control & CFAUTOTERM) != 0
|| (ahc->features & AHC_NEW_TERMCTL) != 0) {
int internal50_present;
int internal68_present;
int externalcable_present;
int eeprom_present;
int enableSEC_low;
int enableSEC_high;
int enablePRI_low;
int enablePRI_high;
int sum;
enableSEC_low = 0;
enableSEC_high = 0;
enablePRI_low = 0;
enablePRI_high = 0;
if ((ahc->features & AHC_NEW_TERMCTL) != 0) {
ahc_new_term_detect(ahc, &enableSEC_low,
&enableSEC_high,
&enablePRI_low,
&enablePRI_high,
&eeprom_present);
if ((adapter_control & CFSEAUTOTERM) == 0) {
if (bootverbose)
printk("%s: Manual SE Termination\n",
ahc_name(ahc));
enableSEC_low = (adapter_control & CFSELOWTERM);
enableSEC_high =
(adapter_control & CFSEHIGHTERM);
}
if ((adapter_control & CFAUTOTERM) == 0) {
if (bootverbose)
printk("%s: Manual LVD Termination\n",
ahc_name(ahc));
enablePRI_low = (adapter_control & CFSTERM);
enablePRI_high = (adapter_control & CFWSTERM);
}
/* Make the table calculations below happy */
internal50_present = 0;
internal68_present = 1;
externalcable_present = 1;
} else if ((ahc->features & AHC_SPIOCAP) != 0) {
aic785X_cable_detect(ahc, &internal50_present,
&externalcable_present,
&eeprom_present);
/* Can never support a wide connector. */
internal68_present = 0;
} else {
aic787X_cable_detect(ahc, &internal50_present,
&internal68_present,
&externalcable_present,
&eeprom_present);
}
if ((ahc->features & AHC_WIDE) == 0)
internal68_present = 0;
if (bootverbose
&& (ahc->features & AHC_ULTRA2) == 0) {
printk("%s: internal 50 cable %s present",
ahc_name(ahc),
internal50_present ? "is":"not");
if ((ahc->features & AHC_WIDE) != 0)
printk(", internal 68 cable %s present",
internal68_present ? "is":"not");
printk("\n%s: external cable %s present\n",
ahc_name(ahc),
externalcable_present ? "is":"not");
}
if (bootverbose)
printk("%s: BIOS eeprom %s present\n",
ahc_name(ahc), eeprom_present ? "is" : "not");
if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) {
/*
* The 50 pin connector is a separate bus,
* so force it to always be terminated.
* In the future, perform current sensing
* to determine if we are in the middle of
* a properly terminated bus.
*/
internal50_present = 0;
}
/*
* Now set the termination based on what
* we found.
* Flash Enable = BRDDAT7
* Secondary High Term Enable = BRDDAT6
* Secondary Low Term Enable = BRDDAT5 (7890)
* Primary High Term Enable = BRDDAT4 (7890)
*/
if ((ahc->features & AHC_ULTRA2) == 0
&& (internal50_present != 0)
&& (internal68_present != 0)
&& (externalcable_present != 0)) {
printk("%s: Illegal cable configuration!!. "
"Only two connectors on the "
"adapter may be used at a "
"time!\n", ahc_name(ahc));
/*
* Pretend there are no cables in the hope
* that having all of the termination on
* gives us a more stable bus.
*/
internal50_present = 0;
internal68_present = 0;
externalcable_present = 0;
}
if ((ahc->features & AHC_WIDE) != 0
&& ((externalcable_present == 0)
|| (internal68_present == 0)
|| (enableSEC_high != 0))) {
brddat |= BRDDAT6;
if (bootverbose) {
if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
printk("%s: 68 pin termination "
"Enabled\n", ahc_name(ahc));
else
printk("%s: %sHigh byte termination "
"Enabled\n", ahc_name(ahc),
enableSEC_high ? "Secondary "
: "");
}
}
sum = internal50_present + internal68_present
+ externalcable_present;
if (sum < 2 || (enableSEC_low != 0)) {
if ((ahc->features & AHC_ULTRA2) != 0)
brddat |= BRDDAT5;
else
*sxfrctl1 |= STPWEN;
if (bootverbose) {
if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
printk("%s: 50 pin termination "
"Enabled\n", ahc_name(ahc));
else
printk("%s: %sLow byte termination "
"Enabled\n", ahc_name(ahc),
enableSEC_low ? "Secondary "
: "");
}
}
if (enablePRI_low != 0) {
*sxfrctl1 |= STPWEN;
if (bootverbose)
printk("%s: Primary Low Byte termination "
"Enabled\n", ahc_name(ahc));
}
/*
* Setup STPWEN before setting up the rest of
* the termination per the tech note on the U160 cards.
*/
ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
if (enablePRI_high != 0) {
brddat |= BRDDAT4;
if (bootverbose)
printk("%s: Primary High Byte "
"termination Enabled\n",
ahc_name(ahc));
}
write_brdctl(ahc, brddat);
} else {
if ((adapter_control & CFSTERM) != 0) {
*sxfrctl1 |= STPWEN;
if (bootverbose)
printk("%s: %sLow byte termination Enabled\n",
ahc_name(ahc),
(ahc->features & AHC_ULTRA2) ? "Primary "
: "");
}
if ((adapter_control & CFWSTERM) != 0
&& (ahc->features & AHC_WIDE) != 0) {
brddat |= BRDDAT6;
if (bootverbose)
printk("%s: %sHigh byte termination Enabled\n",
ahc_name(ahc),
(ahc->features & AHC_ULTRA2)
? "Secondary " : "");
}
/*
* Setup STPWEN before setting up the rest of
* the termination per the tech note on the U160 cards.
*/
ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
if ((ahc->features & AHC_WIDE) != 0)
write_brdctl(ahc, brddat);
}
SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */
}
static void
ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low,
int *enableSEC_high, int *enablePRI_low,
int *enablePRI_high, int *eeprom_present)
{
uint8_t brdctl;
/*
* BRDDAT7 = Eeprom
* BRDDAT6 = Enable Secondary High Byte termination
* BRDDAT5 = Enable Secondary Low Byte termination
* BRDDAT4 = Enable Primary high byte termination
* BRDDAT3 = Enable Primary low byte termination
*/
brdctl = read_brdctl(ahc);
*eeprom_present = brdctl & BRDDAT7;
*enableSEC_high = (brdctl & BRDDAT6);
*enableSEC_low = (brdctl & BRDDAT5);
*enablePRI_high = (brdctl & BRDDAT4);
*enablePRI_low = (brdctl & BRDDAT3);
}
static void
aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *internal68_present, int *externalcable_present,
int *eeprom_present)
{
uint8_t brdctl;
/*
* First read the status of our cables.
* Set the rom bank to 0 since the
* bank setting serves as a multiplexor
* for the cable detection logic.
* BRDDAT5 controls the bank switch.
*/
write_brdctl(ahc, 0);
/*
* Now read the state of the internal
* connectors. BRDDAT6 is INT50 and
* BRDDAT7 is INT68.
*/
brdctl = read_brdctl(ahc);
*internal50_present = (brdctl & BRDDAT6) ? 0 : 1;
*internal68_present = (brdctl & BRDDAT7) ? 0 : 1;
/*
* Set the rom bank to 1 and determine
* the other signals.
*/
write_brdctl(ahc, BRDDAT5);
/*
* Now read the state of the external
* connectors. BRDDAT6 is EXT68 and
* BRDDAT7 is EPROMPS.
*/
brdctl = read_brdctl(ahc);
*externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
*eeprom_present = (brdctl & BRDDAT7) ? 1 : 0;
}
static void
aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *externalcable_present, int *eeprom_present)
{
uint8_t brdctl;
uint8_t spiocap;
spiocap = ahc_inb(ahc, SPIOCAP);
spiocap &= ~SOFTCMDEN;
spiocap |= EXT_BRDCTL;
ahc_outb(ahc, SPIOCAP, spiocap);
ahc_outb(ahc, BRDCTL, BRDRW|BRDCS);
ahc_flush_device_writes(ahc);
ahc_delay(500);
ahc_outb(ahc, BRDCTL, 0);
ahc_flush_device_writes(ahc);
ahc_delay(500);
brdctl = ahc_inb(ahc, BRDCTL);
*internal50_present = (brdctl & BRDDAT5) ? 0 : 1;
*externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
*eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0;
}
int
ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd)
{
int wait;
if ((ahc->features & AHC_SPIOCAP) != 0
&& (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0)
return (0);
/*
* Request access of the memory port. When access is
* granted, SEERDY will go high. We use a 1 second
* timeout, which should be about 1 second more than
* is needed (after a chip reset, there should be
* no contention).
*/
SEEPROM_OUTB(sd, sd->sd_MS);
wait = 1000; /* 1 second timeout in msec */
while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) {
ahc_delay(1000); /* delay 1 msec */
}
if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) {
SEEPROM_OUTB(sd, 0);
return (0);
}
return(1);
}
void
ahc_release_seeprom(struct seeprom_descriptor *sd)
{
/* Release access to the memory port and the serial EEPROM. */
SEEPROM_OUTB(sd, 0);
}
static void
write_brdctl(struct ahc_softc *ahc, uint8_t value)
{
uint8_t brdctl;
if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
brdctl = BRDSTB;
if (ahc->channel == 'B')
brdctl |= BRDCS;
} else if ((ahc->features & AHC_ULTRA2) != 0) {
brdctl = 0;
} else {
brdctl = BRDSTB|BRDCS;
}
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
brdctl |= value;
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
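/* Latch the data; the active strobe edge differs on Ultra2 parts. */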
if ((ahc->features & AHC_ULTRA2) != 0)
brdctl |= BRDSTB_ULTRA2;
else
brdctl &= ~BRDSTB;
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
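/* Deassert the strobe/chip select to complete the cycle. */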
if ((ahc->features & AHC_ULTRA2) != 0)
brdctl = 0;
else
brdctl &= ~BRDCS;
ahc_outb(ahc, BRDCTL, brdctl);
}
static uint8_t
read_brdctl(struct ahc_softc *ahc)
{
uint8_t brdctl;
uint8_t value;
if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
brdctl = BRDRW;
if (ahc->channel == 'B')
brdctl |= BRDCS;
} else if ((ahc->features & AHC_ULTRA2) != 0) {
brdctl = BRDRW_ULTRA2;
} else {
brdctl = BRDRW|BRDCS;
}
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
value = ahc_inb(ahc, BRDCTL);
ahc_outb(ahc, BRDCTL, 0);
return (value);
}
static void
ahc_pci_intr(struct ahc_softc *ahc)
{
u_int error;
u_int status1;
error = ahc_inb(ahc, ERROR);
if ((error & PCIERRSTAT) == 0)
return;
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
printk("%s: PCI error Interrupt at seqaddr = 0x%x\n",
ahc_name(ahc),
ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
if (status1 & DPE) {
ahc->pci_target_perr_count++;
printk("%s: Data Parity Error Detected during address "
"or write data phase\n", ahc_name(ahc));
}
if (status1 & SSE) {
printk("%s: Signal System Error Detected\n", ahc_name(ahc));
}
if (status1 & RMA) {
printk("%s: Received a Master Abort\n", ahc_name(ahc));
}
if (status1 & RTA) {
printk("%s: Received a Target Abort\n", ahc_name(ahc));
}
if (status1 & STA) {
printk("%s: Signaled a Target Abort\n", ahc_name(ahc));
}
if (status1 & DPR) {
printk("%s: Data Parity Error has been reported via PERR#\n",
ahc_name(ahc));
}
/* Clear latched errors. */
ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
status1, /*bytes*/1);
if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) {
printk("%s: Latched PCIERR interrupt with "
"no status bits set\n", ahc_name(ahc));
} else {
ahc_outb(ahc, CLRINT, CLRPARERR);
}
if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) {
printk(
"%s: WARNING WARNING WARNING WARNING\n"
"%s: Too many PCI parity errors observed as a target.\n"
"%s: Some device on this bus is generating bad parity.\n"
"%s: This is an error *observed by*, not *generated by*, this controller.\n"
"%s: PCI parity error checking has been disabled.\n"
"%s: WARNING WARNING WARNING WARNING\n",
ahc_name(ahc), ahc_name(ahc), ahc_name(ahc),
ahc_name(ahc), ahc_name(ahc), ahc_name(ahc));
ahc->seqctl |= FAILDIS;
ahc_outb(ahc, SEQCTL, ahc->seqctl);
}
ahc_unpause(ahc);
}
static int
ahc_pci_chip_init(struct ahc_softc *ahc)
{
ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0);
ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus);
if ((ahc->features & AHC_DT) != 0) {
u_int sfunct;
sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode);
ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt);
ahc_outb(ahc, SFUNCT, sfunct);
ahc_outb(ahc, CRCCONTROL1,
ahc->bus_softc.pci_softc.crccontrol1);
}
if ((ahc->features & AHC_MULTI_FUNC) != 0)
ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr);
if ((ahc->features & AHC_ULTRA2) != 0)
ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh);
return (ahc_chip_init(ahc));
}
#ifdef CONFIG_PM
void
ahc_pci_resume(struct ahc_softc *ahc)
{
/*
* We assume that the OS has restored our register
* mappings, etc. Just update the config space registers
* that the OS doesn't know about and rely on our chip
* reset handler to handle the rest.
*/
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
ahc->bus_softc.pci_softc.command, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
struct seeprom_descriptor sd;
u_int sxfrctl1;
sd.sd_ahc = ahc;
sd.sd_control_offset = SEECTL;
sd.sd_status_offset = SEECTL;
sd.sd_dataout_offset = SEECTL;
ahc_acquire_seeprom(ahc, &sd);
configure_termination(ahc, &sd,
ahc->seep_config->adapter_control,
&sxfrctl1);
ahc_release_seeprom(&sd);
}
}
#endif
static int
ahc_aic785X_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7850;
ahc->features = AHC_AIC7850_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 1)
ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7860_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7860;
ahc->features = AHC_AIC7860_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 1)
ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_apa1480_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7860_setup(ahc);
if (error != 0)
return (error);
ahc->features |= AHC_REMOVABLE;
return (0);
}
static int
ahc_aic7870_setup(struct ahc_softc *ahc)
{
ahc->channel = 'A';
ahc->chip = AHC_AIC7870;
ahc->features = AHC_AIC7870_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7870h_setup(struct ahc_softc *ahc)
{
int error = ahc_aic7870_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha394X_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7870_setup(ahc);
if (error == 0)
error = ahc_aha394XX_setup(ahc);
return (error);
}
static int
ahc_aha394Xh_setup(struct ahc_softc *ahc)
{
int error = ahc_aha394X_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha398X_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7870_setup(ahc);
if (error == 0)
error = ahc_aha398XX_setup(ahc);
return (error);
}
static int
ahc_aha494X_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7870_setup(ahc);
if (error == 0)
error = ahc_aha494XX_setup(ahc);
return (error);
}
static int
ahc_aha494Xh_setup(struct ahc_softc *ahc)
{
int error = ahc_aha494X_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aic7880_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7880;
ahc->features = AHC_AIC7880_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 1) {
ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
} else {
ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
}
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7880h_setup(struct ahc_softc *ahc)
{
int error = ahc_aic7880_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha2940Pro_setup(struct ahc_softc *ahc)
{
ahc->flags |= AHC_INT50_SPEEDFLEX;
return (ahc_aic7880_setup(ahc));
}
static int
ahc_aha394XU_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7880_setup(ahc);
if (error == 0)
error = ahc_aha394XX_setup(ahc);
return (error);
}
static int
ahc_aha394XUh_setup(struct ahc_softc *ahc)
{
int error = ahc_aha394XU_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha398XU_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7880_setup(ahc);
if (error == 0)
error = ahc_aha398XX_setup(ahc);
return (error);
}
static int
ahc_aic7890_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7890;
ahc->features = AHC_AIC7890_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev == 0)
ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG;
ahc->instruction_ram_size = 768;
return (0);
}
static int
ahc_aic7892_setup(struct ahc_softc *ahc)
{
ahc->channel = 'A';
ahc->chip = AHC_AIC7892;
ahc->features = AHC_AIC7892_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
ahc->instruction_ram_size = 1024;
return (0);
}
static int
ahc_aic7895_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
/*
* The 'C' revision of the aic7895 has a few additional features.
*/
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 4) {
ahc->chip = AHC_AIC7895C;
ahc->features = AHC_AIC7895C_FE;
} else {
u_int command;
ahc->chip = AHC_AIC7895;
ahc->features = AHC_AIC7895_FE;
/*
* The BIOS disables the use of MWI transactions
* since it does not have the MWI bug work around
* we have. Disabling MWI reduces performance, so
* turn it on again.
*/
command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1);
command |= PCIM_CMD_MWRICEN;
ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1);
ahc->bugs |= AHC_PCI_MWI_BUG;
}
/*
* XXX Do CACHETHEN and PCI retry really not work on C level
* chips? Need to test, but for now, play it safe.
*/
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG
| AHC_CACHETHEN_BUG;
#if 0
uint32_t devconfig;
/*
* Cachesize must also be zero due to stray DAC
* problem when sitting behind some bridges.
*/
ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1);
devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1);
devconfig |= MRDCEN;
ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1);
#endif
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7895h_setup(struct ahc_softc *ahc)
{
int error = ahc_aic7895_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aic7896_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
ahc->chip = AHC_AIC7896;
ahc->features = AHC_AIC7896_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->bugs |= AHC_CACHETHEN_DIS_BUG;
ahc->instruction_ram_size = 768;
return (0);
}
static int
ahc_aic7899_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
ahc->chip = AHC_AIC7899;
ahc->features = AHC_AIC7899_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
ahc->instruction_ram_size = 1024;
return (0);
}
static int
ahc_aha29160C_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7899_setup(ahc);
if (error != 0)
return (error);
ahc->features |= AHC_REMOVABLE;
return (0);
}
static int
ahc_raid_setup(struct ahc_softc *ahc)
{
printk("RAID functionality unsupported\n");
return (ENXIO);
}
static int
ahc_aha394XX_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
switch (ahc_get_pci_slot(pci)) {
case AHC_394X_SLOT_CHANNEL_A:
ahc->channel = 'A';
break;
case AHC_394X_SLOT_CHANNEL_B:
ahc->channel = 'B';
break;
default:
printk("adapter at unexpected slot %d\n"
"unable to map to a channel\n",
ahc_get_pci_slot(pci));
ahc->channel = 'A';
}
return (0);
}
static int
ahc_aha398XX_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
switch (ahc_get_pci_slot(pci)) {
case AHC_398X_SLOT_CHANNEL_A:
ahc->channel = 'A';
break;
case AHC_398X_SLOT_CHANNEL_B:
ahc->channel = 'B';
break;
case AHC_398X_SLOT_CHANNEL_C:
ahc->channel = 'C';
break;
default:
printk("adapter at unexpected slot %d\n"
"unable to map to a channel\n",
ahc_get_pci_slot(pci));
ahc->channel = 'A';
break;
}
ahc->flags |= AHC_LARGE_SEEPROM;
return (0);
}
static int
ahc_aha494XX_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
switch (ahc_get_pci_slot(pci)) {
case AHC_494X_SLOT_CHANNEL_A:
ahc->channel = 'A';
break;
case AHC_494X_SLOT_CHANNEL_B:
ahc->channel = 'B';
break;
case AHC_494X_SLOT_CHANNEL_C:
ahc->channel = 'C';
break;
case AHC_494X_SLOT_CHANNEL_D:
ahc->channel = 'D';
break;
default:
printk("adapter at unexpected slot %d\n"
"unable to map to a channel\n",
ahc_get_pci_slot(pci));
ahc->channel = 'A';
}
ahc->flags |= AHC_LARGE_SEEPROM;
return (0);
}
| gpl-2.0 |
"Adaptec aic7899 Ultra160 SCSI adapter",
ahc_aic7899_setup
},
{
ID_AIC7810 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7810 RAID memory controller",
ahc_raid_setup
},
{
ID_AIC7815 & ID_DEV_VENDOR_MASK,
ID_DEV_VENDOR_MASK,
"Adaptec aic7815 RAID memory controller",
ahc_raid_setup
}
};
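/*
* Number of entries in ahc_pci_ident_table; bounds the lookup
* loop in ahc_find_pci_device() below.
*/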
static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table);
#define AHC_394X_SLOT_CHANNEL_A 4
#define AHC_394X_SLOT_CHANNEL_B 5
#define AHC_398X_SLOT_CHANNEL_A 4
#define AHC_398X_SLOT_CHANNEL_B 8
#define AHC_398X_SLOT_CHANNEL_C 12
#define AHC_494X_SLOT_CHANNEL_A 4
#define AHC_494X_SLOT_CHANNEL_B 5
#define AHC_494X_SLOT_CHANNEL_C 6
#define AHC_494X_SLOT_CHANNEL_D 7
#define DEVCONFIG 0x40
#define PCIERRGENDIS 0x80000000ul
#define SCBSIZE32 0x00010000ul /* aic789X only */
#define REXTVALID 0x00001000ul /* ultra cards only */
#define MPORTMODE 0x00000400ul /* aic7870+ only */
#define RAMPSM 0x00000200ul /* aic7870+ only */
#define VOLSENSE 0x00000100ul
#define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/
#define SCBRAMSEL 0x00000080ul
#define MRDCEN 0x00000040ul
#define EXTSCBTIME 0x00000020ul /* aic7870 only */
#define EXTSCBPEN 0x00000010ul /* aic7870 only */
#define BERREN 0x00000008ul
#define DACEN 0x00000004ul
#define STPWLEVEL 0x00000002ul
#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
#define CSIZE_LATTIME 0x0c
#define CACHESIZE 0x0000003ful /* only 5 bits */
#define LATTIME 0x0000ff00ul
/* PCI STATUS definitions */
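/*
* These are the upper-byte bits of the PCI status register
* (read via PCIR_STATUS + 1): detected parity error, signaled
* system error, received master abort, received target abort,
* signaled target abort, and master data parity error.
*/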
#define DPE 0x80
#define SSE 0x40
#define RMA 0x20
#define RTA 0x10
#define STA 0x08
#define DPR 0x01
static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
uint16_t subvendor, uint16_t subdevice);
static int ahc_ext_scbram_present(struct ahc_softc *ahc);
static void ahc_scbram_config(struct ahc_softc *ahc, int enable,
int pcheck, int fast, int large);
static void ahc_probe_ext_scbram(struct ahc_softc *ahc);
static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1);
static void ahc_parse_pci_eeprom(struct ahc_softc *ahc,
struct seeprom_config *sc);
static void configure_termination(struct ahc_softc *ahc,
struct seeprom_descriptor *sd,
u_int adapter_control,
u_int *sxfrctl1);
static void ahc_new_term_detect(struct ahc_softc *ahc,
int *enableSEC_low,
int *enableSEC_high,
int *enablePRI_low,
int *enablePRI_high,
int *eeprom_present);
static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *internal68_present,
int *externalcable_present,
int *eeprom_present);
static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *externalcable_present,
int *eeprom_present);
static void write_brdctl(struct ahc_softc *ahc, uint8_t value);
static uint8_t read_brdctl(struct ahc_softc *ahc);
static void ahc_pci_intr(struct ahc_softc *ahc);
static int ahc_pci_chip_init(struct ahc_softc *ahc);
static int
ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
uint16_t subvendor, uint16_t subdevice)
{
int result;
/* Default to invalid. */
result = 0;
if (vendor == 0x9005
&& subvendor == 0x9005
&& subdevice != device
&& SUBID_9005_TYPE_KNOWN(subdevice) != 0) {
switch (SUBID_9005_TYPE(subdevice)) {
case SUBID_9005_TYPE_MB:
break;
case SUBID_9005_TYPE_CARD:
case SUBID_9005_TYPE_LCCARD:
/*
* Currently only trust Adaptec cards to
* get the sub device info correct.
*/
if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA)
result = 1;
break;
case SUBID_9005_TYPE_RAID:
break;
default:
break;
}
}
return (result);
}
const struct ahc_pci_identity *
ahc_find_pci_device(ahc_dev_softc_t pci)
{
uint64_t full_id;
uint16_t device;
uint16_t vendor;
uint16_t subdevice;
uint16_t subvendor;
const struct ahc_pci_identity *entry;
u_int i;
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
* If the second function is not hooked up, ignore it.
* Unfortunately, not all MB vendors implement the
* subdevice ID as per the Adaptec spec, so we do our best
* to sanity check it prior to accepting the subdevice
* ID as valid.
*/
if (ahc_get_pci_function(pci) > 0
&& ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
&& SUBID_9005_MFUNCENB(subdevice) == 0)
return (NULL);
for (i = 0; i < ahc_num_pci_devs; i++) {
entry = &ahc_pci_ident_table[i];
if (entry->full_id == (full_id & entry->id_mask)) {
/* Honor exclusion entries. */
if (entry->name == NULL)
return (NULL);
return (entry);
}
}
return (NULL);
}
int
ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
{
u_int command;
u_int our_id;
u_int sxfrctl1;
u_int scsiseq;
u_int dscommand0;
uint32_t devconfig;
int error;
uint8_t sblkctl;
our_id = 0;
error = entry->setup(ahc);
if (error != 0)
return (error);
ahc->chip |= AHC_PCI;
ahc->description = entry->name;
pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
error = ahc_pci_map_registers(ahc);
if (error != 0)
return (error);
/*
* Before we continue probing the card, ensure that
* its interrupts are *disabled*. We don't want
* a misstep to hang the machine in an interrupt
* storm.
*/
ahc_intr_enable(ahc, FALSE);
devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
/*
* If we need to support high memory, enable dual
* address cycles. This bit must be set to enable
* high address bit generation even if we are on a
* 64bit bus (PCI64BIT set in devconfig).
*/
if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
if (bootverbose)
printk("%s: Enabling 39Bit Addressing\n",
ahc_name(ahc));
devconfig |= DACEN;
}
/* Ensure that pci error generation, a test feature, is disabled. */
devconfig |= PCIERRGENDIS;
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
/* Ensure busmastering is enabled */
command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
command |= PCIM_CMD_BUSMASTEREN;
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2);
/* On all PCI adapters, we allow SCB paging */
ahc->flags |= AHC_PAGESCBS;
error = ahc_softc_init(ahc);
if (error != 0)
return (error);
/*
* Disable PCI parity error checking. Users typically
* do this to work around broken PCI chipsets that get
* the parity timing wrong and thus generate lots of spurious
* errors. The chip only allows us to disable *all* parity
* error reporting when doing this, so CIO bus, scb ram, and
* scratch ram parity errors will be ignored too.
*/
if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0)
ahc->seqctl |= FAILDIS;
ahc->bus_intr = ahc_pci_intr;
ahc->bus_chip_init = ahc_pci_chip_init;
/* Remember how the card was setup in case there is no SEEPROM */
if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) {
ahc_pause(ahc);
if ((ahc->features & AHC_ULTRA2) != 0)
our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
else
our_id = ahc_inb(ahc, SCSIID) & OID;
sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN;
scsiseq = ahc_inb(ahc, SCSISEQ);
} else {
sxfrctl1 = STPWEN;
our_id = 7;
scsiseq = 0;
}
error = ahc_reset(ahc, /*reinit*/FALSE);
if (error != 0)
return (ENXIO);
if ((ahc->features & AHC_DT) != 0) {
u_int sfunct;
/* Perform ALT-Mode Setup */
sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
ahc_outb(ahc, OPTIONMODE,
OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS);
ahc_outb(ahc, SFUNCT, sfunct);
/* Normal mode setup */
ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN
|TARGCRCENDEN);
}
dscommand0 = ahc_inb(ahc, DSCOMMAND0);
dscommand0 |= MPARCKEN|CACHETHEN;
if ((ahc->features & AHC_ULTRA2) != 0) {
/*
* DPARCKEN doesn't work correctly on
* some MBs so don't use it.
*/
dscommand0 &= ~DPARCKEN;
}
/*
* Handle chips that must have cache line
* streaming (dis/en)abled.
*/
if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0)
dscommand0 |= CACHETHEN;
if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0)
dscommand0 &= ~CACHETHEN;
ahc_outb(ahc, DSCOMMAND0, dscommand0);
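/*
* The PCI cache line size field (low bits of CSIZE_LATTIME)
* counts 32-bit words; scale it to a byte count.
*/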
ahc->pci_cachesize =
ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME,
/*bytes*/1) & CACHESIZE;
ahc->pci_cachesize *= 4;
if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0
&& ahc->pci_cachesize == 4) {
ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
0, /*bytes*/1);
ahc->pci_cachesize = 0;
}
/*
* We cannot perform ULTRA speeds without the presence
* of the external precision resistor.
*/
if ((ahc->features & AHC_ULTRA) != 0) {
uint32_t devconfig;
devconfig = ahc_pci_read_config(ahc->dev_softc,
DEVCONFIG, /*bytes*/4);
if ((devconfig & REXTVALID) == 0)
ahc->features &= ~AHC_ULTRA;
}
/* See if we have a SEEPROM and perform auto-term */
check_extport(ahc, &sxfrctl1);
/*
* Take the LED out of diagnostic mode
*/
sblkctl = ahc_inb(ahc, SBLKCTL);
ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
if ((ahc->features & AHC_ULTRA2) != 0) {
ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX);
} else {
ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100);
}
if (ahc->flags & AHC_USEDEFAULTS) {
/*
* PCI Adapter default setup
* Should only be used if the adapter does not have
* a SEEPROM.
*/
/* See if someone else set us up already */
if ((ahc->flags & AHC_NO_BIOS_INIT) == 0
&& scsiseq != 0) {
printk("%s: Using left over BIOS settings\n",
ahc_name(ahc));
ahc->flags &= ~AHC_USEDEFAULTS;
ahc->flags |= AHC_BIOS_ENABLED;
} else {
/*
* Assume only one connector and always turn
* on termination.
*/
our_id = 0x07;
sxfrctl1 = STPWEN;
}
ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI);
ahc->our_id = our_id;
}
/*
* Take a look to see if we have external SRAM.
* We currently do not attempt to use SRAM that is
* shared among multiple controllers.
*/
ahc_probe_ext_scbram(ahc);
/*
* Record our termination setting for the
* generic initialization routine.
*/
if ((sxfrctl1 & STPWEN) != 0)
ahc->flags |= AHC_TERM_ENB_A;
/*
* Save chip register configuration data for chip resets
* that occur during runtime and resume events.
*/
ahc->bus_softc.pci_softc.devconfig =
ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
ahc->bus_softc.pci_softc.command =
ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1);
ahc->bus_softc.pci_softc.csize_lattime =
ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1);
ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
if ((ahc->features & AHC_DT) != 0) {
u_int sfunct;
sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE);
ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT);
ahc_outb(ahc, SFUNCT, sfunct);
ahc->bus_softc.pci_softc.crccontrol1 =
ahc_inb(ahc, CRCCONTROL1);
}
if ((ahc->features & AHC_MULTI_FUNC) != 0)
ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR);
if ((ahc->features & AHC_ULTRA2) != 0)
ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
/* Core initialization */
error = ahc_init(ahc);
if (error != 0)
return (error);
ahc->init_level++;
/*
* Allow interrupts now that we are completely setup.
*/
return ahc_pci_map_int(ahc);
}
/*
* Test for the presence of external sram in an
* "unshared" configuration.
*/
static int
ahc_ext_scbram_present(struct ahc_softc *ahc)
{
u_int chip;
int ramps;
int single_user;
uint32_t devconfig;
chip = ahc->chip & AHC_CHIPID_MASK;
devconfig = ahc_pci_read_config(ahc->dev_softc,
DEVCONFIG, /*bytes*/4);
single_user = (devconfig & MPORTMODE) != 0;
if ((ahc->features & AHC_ULTRA2) != 0)
ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0;
else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C)
/*
* External SCBRAM arbitration is flaky
* on these chips. Unfortunately, this means
* we don't use the extra SCB ram space on the
* 3940AUW.
*/
ramps = 0;
else if (chip >= AHC_AIC7870)
ramps = (devconfig & RAMPSM) != 0;
else
ramps = 0;
if (ramps && single_user)
return (1);
return (0);
}
/*
* Enable external scbram.
*/
static void
ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck,
int fast, int large)
{
uint32_t devconfig;
if (ahc->features & AHC_MULTI_FUNC) {
/*
* Set the SCB Base addr (highest address bit)
* depending on which channel we are.
*/
ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc));
}
ahc->flags &= ~AHC_LSCBS_ENABLED;
if (large)
ahc->flags |= AHC_LSCBS_ENABLED;
devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
if ((ahc->features & AHC_ULTRA2) != 0) {
u_int dscommand0;
dscommand0 = ahc_inb(ahc, DSCOMMAND0);
if (enable)
dscommand0 &= ~INTSCBRAMSEL;
else
dscommand0 |= INTSCBRAMSEL;
if (large)
dscommand0 &= ~USCBSIZE32;
else
dscommand0 |= USCBSIZE32;
ahc_outb(ahc, DSCOMMAND0, dscommand0);
} else {
if (fast)
devconfig &= ~EXTSCBTIME;
else
devconfig |= EXTSCBTIME;
if (enable)
devconfig &= ~SCBRAMSEL;
else
devconfig |= SCBRAMSEL;
if (large)
devconfig &= ~SCBSIZE32;
else
devconfig |= SCBSIZE32;
}
if (pcheck)
devconfig |= EXTSCBPEN;
else
devconfig &= ~EXTSCBPEN;
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
}
/*
* Take a look to see if we have external SRAM.
* We currently do not attempt to use SRAM that is
* shared among multiple controllers.
*/
static void
ahc_probe_ext_scbram(struct ahc_softc *ahc)
{
int num_scbs;
int test_num_scbs;
int enable;
int pcheck;
int fast;
int large;
enable = FALSE;
pcheck = FALSE;
fast = FALSE;
large = FALSE;
num_scbs = 0;
if (ahc_ext_scbram_present(ahc) == 0)
goto done;
/*
* Probe for the best parameters to use.
*/
ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large);
num_scbs = ahc_probe_scbs(ahc);
if (num_scbs == 0) {
/* The SRAM wasn't really present. */
goto done;
}
enable = TRUE;
/*
* Clear any outstanding parity error
* and ensure that parity error reporting
* is enabled.
*/
ahc_outb(ahc, SEQCTL, 0);
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
/* Now see if we can do parity */
ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large);
num_scbs = ahc_probe_scbs(ahc);
if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
|| (ahc_inb(ahc, ERROR) & MPARERR) == 0)
pcheck = TRUE;
/* Clear any resulting parity error */
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
/* Now see if we can do fast timing */
ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large);
test_num_scbs = ahc_probe_scbs(ahc);
if (test_num_scbs == num_scbs
&& ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
|| (ahc_inb(ahc, ERROR) & MPARERR) == 0))
fast = TRUE;
/*
* See if we can use large SCBs and still maintain
* the same overall count of SCBs.
*/
if ((ahc->features & AHC_LARGE_SCBS) != 0) {
ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE);
test_num_scbs = ahc_probe_scbs(ahc);
if (test_num_scbs >= num_scbs) {
large = TRUE;
num_scbs = test_num_scbs;
if (num_scbs >= 64) {
/*
* We have enough space to move the
* "busy targets table" into SCB space
* and make it qualify all the way to the
* lun level.
*/
ahc->flags |= AHC_SCB_BTT;
}
}
}
done:
/*
* Disable parity error reporting until we
* can load instruction ram.
*/
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
/* Clear any latched parity error */
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
if (bootverbose && enable) {
printk("%s: External SRAM, %s access%s, %dbytes/SCB\n",
ahc_name(ahc), fast ? "fast" : "slow",
pcheck ? ", parity checking enabled" : "",
large ? 64 : 32);
}
ahc_scbram_config(ahc, enable, pcheck, fast, large);
}
/*
* Perform some simple tests that should catch situations where
* our registers are incorrectly mapped.
*/
int
ahc_pci_test_register_access(struct ahc_softc *ahc)
{
int error;
u_int status1;
uint32_t cmd;
uint8_t hcntrl;
error = EIO;
/*
* Enable PCI error interrupt status, but suppress NMIs
* generated by SERR raised due to target aborts.
*/
cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2);
/*
* First a simple test to see if any
* registers can be read. Reading
* HCNTRL has no side effects and has
* at least one bit that is guaranteed to
* be zero so it is a good register to
* use for this test.
*/
hcntrl = ahc_inb(ahc, HCNTRL);
if (hcntrl == 0xFF)
goto fail;
if ((hcntrl & CHIPRST) != 0) {
/*
* The chip has not been initialized since
* PCI/EISA/VLB bus reset. Don't trust
* "left over BIOS data".
*/
ahc->flags |= AHC_NO_BIOS_INIT;
}
/*
* Next create a situation where write combining
* or read prefetching could be initiated by the
* CPU or host bridge. Our device does not support
* either, so look for data corruption and/or flagged
* PCI errors. First pause without causing another
* chip reset.
*/
hcntrl &= ~CHIPRST;
ahc_outb(ahc, HCNTRL, hcntrl|PAUSE);
while (ahc_is_paused(ahc) == 0)
;
/* Clear any PCI errors that occurred before our driver attached. */
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
status1, /*bytes*/1);
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, SEQCTL, PERRORDIS);
ahc_outb(ahc, SCBPTR, 0);
ahc_outl(ahc, SCB_BASE, 0x5aa555aa);
if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa)
goto fail;
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
if ((status1 & STA) != 0)
goto fail;
error = 0;
fail:
/* Silently clear any latched errors. */
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
status1, /*bytes*/1);
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2);
return (error);
}
/*
* Check the external port logic for a serial EEPROM
* and termination/cable detection controls.
*/
static void
check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
{
struct seeprom_descriptor sd;
struct seeprom_config *sc;
int have_seeprom;
int have_autoterm;
sd.sd_ahc = ahc;
sd.sd_control_offset = SEECTL;
sd.sd_status_offset = SEECTL;
sd.sd_dataout_offset = SEECTL;
sc = ahc->seep_config;
/*
* For some multi-channel devices, the c46 is simply too
* small to work. For the other controller types, we can
* get our information from either SEEPROM type. Set the
* type to start our probe with accordingly.
*/
if (ahc->flags & AHC_LARGE_SEEPROM)
sd.sd_chip = C56_66;
else
sd.sd_chip = C46;
sd.sd_MS = SEEMS;
sd.sd_RDY = SEERDY;
sd.sd_CS = SEECS;
sd.sd_CK = SEECK;
sd.sd_DO = SEEDO;
sd.sd_DI = SEEDI;
have_seeprom = ahc_acquire_seeprom(ahc, &sd);
if (have_seeprom) {
if (bootverbose)
printk("%s: Reading SEEPROM...", ahc_name(ahc));
for (;;) {
u_int start_addr;
start_addr = 32 * (ahc->channel - 'A');
have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
start_addr,
sizeof(*sc)/2);
if (have_seeprom)
have_seeprom = ahc_verify_cksum(sc);
if (have_seeprom != 0 || sd.sd_chip == C56_66) {
if (bootverbose) {
if (have_seeprom == 0)
printk ("checksum error\n");
else
printk ("done.\n");
}
break;
}
sd.sd_chip = C56_66;
}
ahc_release_seeprom(&sd);
/* Remember the SEEPROM type for later */
if (sd.sd_chip == C56_66)
ahc->flags |= AHC_LARGE_SEEPROM;
}
if (!have_seeprom) {
/*
* Pull scratch ram settings and treat them as
* if they were the contents of a SEEPROM if
* the 'ADPT' signature is found in SCB2.
* We manually compose the data as 16bit values
* to avoid endian issues.
*/
ahc_outb(ahc, SCBPTR, 2);
if (ahc_inb(ahc, SCB_BASE) == 'A'
&& ahc_inb(ahc, SCB_BASE + 1) == 'D'
&& ahc_inb(ahc, SCB_BASE + 2) == 'P'
&& ahc_inb(ahc, SCB_BASE + 3) == 'T') {
uint16_t *sc_data;
int i;
sc_data = (uint16_t *)sc;
for (i = 0; i < 32; i++, sc_data++) {
int j;
j = i * 2;
*sc_data = ahc_inb(ahc, SRAM_BASE + j)
| ahc_inb(ahc, SRAM_BASE + j + 1) << 8;
}
have_seeprom = ahc_verify_cksum(sc);
if (have_seeprom)
ahc->flags |= AHC_SCB_CONFIG_USED;
}
/*
* Clear any SCB parity errors in case this data and
* its associated parity was not initialized by the BIOS
*/
ahc_outb(ahc, CLRINT, CLRPARERR);
ahc_outb(ahc, CLRINT, CLRBRKADRINT);
}
if (!have_seeprom) {
if (bootverbose)
printk("%s: No SEEPROM available.\n", ahc_name(ahc));
ahc->flags |= AHC_USEDEFAULTS;
kfree(ahc->seep_config);
ahc->seep_config = NULL;
sc = NULL;
} else {
ahc_parse_pci_eeprom(ahc, sc);
}
/*
* Cards that have the external logic necessary to talk to
* a SEEPROM are almost certain to have the remaining logic
* necessary for auto-termination control. This assumption
* hasn't failed yet...
*/
have_autoterm = have_seeprom;
/*
* Some low-cost chips have SEEPROM and auto-term control built
* in, instead of using a GAL. They can tell us directly
* if the termination logic is enabled.
*/
if ((ahc->features & AHC_SPIOCAP) != 0) {
if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0)
have_autoterm = FALSE;
}
if (have_autoterm) {
ahc->flags |= AHC_HAS_TERM_LOGIC;
ahc_acquire_seeprom(ahc, &sd);
configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1);
ahc_release_seeprom(&sd);
} else if (have_seeprom) {
*sxfrctl1 &= ~STPWEN;
if ((sc->adapter_control & CFSTERM) != 0)
*sxfrctl1 |= STPWEN;
if (bootverbose)
printk("%s: Low byte termination %sabled\n",
ahc_name(ahc),
(*sxfrctl1 & STPWEN) ? "en" : "dis");
}
}
static void
ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc)
{
/*
* Put the data we've collected down into SRAM
* where ahc_init will find it.
*/
int i;
int max_targ = sc->max_targets & CFMAXTARG;
u_int scsi_conf;
uint16_t discenable;
uint16_t ultraenb;
discenable = 0;
ultraenb = 0;
if ((sc->adapter_control & CFULTRAEN) != 0) {
/*
* Determine if this adapter has a "newstyle"
* SEEPROM format.
*/
for (i = 0; i < max_targ; i++) {
if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) {
ahc->flags |= AHC_NEWEEPROM_FMT;
break;
}
}
}
for (i = 0; i < max_targ; i++) {
u_int scsirate;
uint16_t target_mask;
target_mask = 0x01 << i;
if (sc->device_flags[i] & CFDISC)
discenable |= target_mask;
if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) {
if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0)
ultraenb |= target_mask;
} else if ((sc->adapter_control & CFULTRAEN) != 0) {
ultraenb |= target_mask;
}
if ((sc->device_flags[i] & CFXFER) == 0x04
&& (ultraenb & target_mask) != 0) {
/* Treat 10MHz as a non-ultra speed */
sc->device_flags[i] &= ~CFXFER;
ultraenb &= ~target_mask;
}
if ((ahc->features & AHC_ULTRA2) != 0) {
u_int offset;
if (sc->device_flags[i] & CFSYNCH)
offset = MAX_OFFSET_ULTRA2;
else
offset = 0;
ahc_outb(ahc, TARG_OFFSET + i, offset);
/*
* The ultra enable bits contain the
* high bit of the ultra2 sync rate
* field.
*/
scsirate = (sc->device_flags[i] & CFXFER)
| ((ultraenb & target_mask) ? 0x8 : 0x0);
if (sc->device_flags[i] & CFWIDEB)
scsirate |= WIDEXFER;
} else {
scsirate = (sc->device_flags[i] & CFXFER) << 4;
if (sc->device_flags[i] & CFSYNCH)
scsirate |= SOFS;
if (sc->device_flags[i] & CFWIDEB)
scsirate |= WIDEXFER;
}
ahc_outb(ahc, TARG_SCSIRATE + i, scsirate);
}
ahc->our_id = sc->brtime_id & CFSCSIID;
scsi_conf = (ahc->our_id & 0x7);
if (sc->adapter_control & CFSPARITY)
scsi_conf |= ENSPCHK;
if (sc->adapter_control & CFRESETB)
scsi_conf |= RESET_SCSI;
ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT;
if (sc->bios_control & CFEXTEND)
ahc->flags |= AHC_EXTENDED_TRANS_A;
if (sc->bios_control & CFBIOSEN)
ahc->flags |= AHC_BIOS_ENABLED;
if (ahc->features & AHC_ULTRA
&& (ahc->flags & AHC_NEWEEPROM_FMT) == 0) {
/* Should we enable Ultra mode? */
if (!(sc->adapter_control & CFULTRAEN))
/* Treat us as a non-ultra card */
ultraenb = 0;
}
if (sc->signature == CFSIGNATURE
|| sc->signature == CFSIGNATURE2) {
uint32_t devconfig;
/* Honor the STPWLEVEL settings */
devconfig = ahc_pci_read_config(ahc->dev_softc,
DEVCONFIG, /*bytes*/4);
devconfig &= ~STPWLEVEL;
if ((sc->bios_control & CFSTPWLEVEL) != 0)
devconfig |= STPWLEVEL;
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
devconfig, /*bytes*/4);
}
/* Set SCSICONF info */
ahc_outb(ahc, SCSICONF, scsi_conf);
ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff));
ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff));
ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff);
ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff);
}
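/*
* Drive the termination controls, via *sxfrctl1 and the board
* control lines, based on the cable detection results and the
* adapter_control word taken from the SEEPROM (or defaults).
*/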
static void
configure_termination(struct ahc_softc *ahc,
struct seeprom_descriptor *sd,
u_int adapter_control,
u_int *sxfrctl1)
{
uint8_t brddat;
brddat = 0;
/*
* Update the settings in sxfrctl1 to match the
* termination settings
*/
*sxfrctl1 = 0;
/*
* SEECS must be on for the GALS to latch
* the data properly. Be sure to leave MS
* on or we will release the seeprom.
*/
SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS);
if ((adapter_control & CFAUTOTERM) != 0
|| (ahc->features & AHC_NEW_TERMCTL) != 0) {
int internal50_present;
int internal68_present;
int externalcable_present;
int eeprom_present;
int enableSEC_low;
int enableSEC_high;
int enablePRI_low;
int enablePRI_high;
int sum;
enableSEC_low = 0;
enableSEC_high = 0;
enablePRI_low = 0;
enablePRI_high = 0;
if ((ahc->features & AHC_NEW_TERMCTL) != 0) {
ahc_new_term_detect(ahc, &enableSEC_low,
&enableSEC_high,
&enablePRI_low,
&enablePRI_high,
&eeprom_present);
if ((adapter_control & CFSEAUTOTERM) == 0) {
if (bootverbose)
printk("%s: Manual SE Termination\n",
ahc_name(ahc));
enableSEC_low = (adapter_control & CFSELOWTERM);
enableSEC_high =
(adapter_control & CFSEHIGHTERM);
}
if ((adapter_control & CFAUTOTERM) == 0) {
if (bootverbose)
printk("%s: Manual LVD Termination\n",
ahc_name(ahc));
enablePRI_low = (adapter_control & CFSTERM);
enablePRI_high = (adapter_control & CFWSTERM);
}
/* Make the table calculations below happy */
internal50_present = 0;
internal68_present = 1;
externalcable_present = 1;
} else if ((ahc->features & AHC_SPIOCAP) != 0) {
aic785X_cable_detect(ahc, &internal50_present,
&externalcable_present,
&eeprom_present);
/* Can never support a wide connector. */
internal68_present = 0;
} else {
aic787X_cable_detect(ahc, &internal50_present,
&internal68_present,
&externalcable_present,
&eeprom_present);
}
if ((ahc->features & AHC_WIDE) == 0)
internal68_present = 0;
if (bootverbose
&& (ahc->features & AHC_ULTRA2) == 0) {
printk("%s: internal 50 cable %s present",
ahc_name(ahc),
internal50_present ? "is":"not");
if ((ahc->features & AHC_WIDE) != 0)
printk(", internal 68 cable %s present",
internal68_present ? "is":"not");
printk("\n%s: external cable %s present\n",
ahc_name(ahc),
externalcable_present ? "is":"not");
}
if (bootverbose)
printk("%s: BIOS eeprom %s present\n",
ahc_name(ahc), eeprom_present ? "is" : "not");
if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) {
/*
* The 50 pin connector is a separate bus,
* so force it to always be terminated.
* In the future, perform current sensing
* to determine if we are in the middle of
* a properly terminated bus.
*/
internal50_present = 0;
}
/*
* Now set the termination based on what
* we found.
* Flash Enable = BRDDAT7
* Secondary High Term Enable = BRDDAT6
* Secondary Low Term Enable = BRDDAT5 (7890)
* Primary High Term Enable = BRDDAT4 (7890)
*/
if ((ahc->features & AHC_ULTRA2) == 0
&& (internal50_present != 0)
&& (internal68_present != 0)
&& (externalcable_present != 0)) {
printk("%s: Illegal cable configuration!!. "
"Only two connectors on the "
"adapter may be used at a "
"time!\n", ahc_name(ahc));
/*
* Pretend there are no cables in the hope
* that having all of the termination on
* gives us a more stable bus.
*/
internal50_present = 0;
internal68_present = 0;
externalcable_present = 0;
}
if ((ahc->features & AHC_WIDE) != 0
&& ((externalcable_present == 0)
|| (internal68_present == 0)
|| (enableSEC_high != 0))) {
brddat |= BRDDAT6;
if (bootverbose) {
if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
printk("%s: 68 pin termination "
"Enabled\n", ahc_name(ahc));
else
printk("%s: %sHigh byte termination "
"Enabled\n", ahc_name(ahc),
enableSEC_high ? "Secondary "
: "");
}
}
sum = internal50_present + internal68_present
+ externalcable_present;
if (sum < 2 || (enableSEC_low != 0)) {
if ((ahc->features & AHC_ULTRA2) != 0)
brddat |= BRDDAT5;
else
*sxfrctl1 |= STPWEN;
if (bootverbose) {
if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
printk("%s: 50 pin termination "
"Enabled\n", ahc_name(ahc));
else
printk("%s: %sLow byte termination "
"Enabled\n", ahc_name(ahc),
enableSEC_low ? "Secondary "
: "");
}
}
if (enablePRI_low != 0) {
*sxfrctl1 |= STPWEN;
if (bootverbose)
printk("%s: Primary Low Byte termination "
"Enabled\n", ahc_name(ahc));
}
/*
* Setup STPWEN before setting up the rest of
* the termination per the tech note on the U160 cards.
*/
ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
if (enablePRI_high != 0) {
brddat |= BRDDAT4;
if (bootverbose)
printk("%s: Primary High Byte "
"termination Enabled\n",
ahc_name(ahc));
}
write_brdctl(ahc, brddat);
} else {
if ((adapter_control & CFSTERM) != 0) {
*sxfrctl1 |= STPWEN;
if (bootverbose)
printk("%s: %sLow byte termination Enabled\n",
ahc_name(ahc),
(ahc->features & AHC_ULTRA2) ? "Primary "
: "");
}
if ((adapter_control & CFWSTERM) != 0
&& (ahc->features & AHC_WIDE) != 0) {
brddat |= BRDDAT6;
if (bootverbose)
printk("%s: %sHigh byte termination Enabled\n",
ahc_name(ahc),
(ahc->features & AHC_ULTRA2)
? "Secondary " : "");
}
/*
* Setup STPWEN before setting up the rest of
* the termination per the tech note on the U160 cards.
*/
ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
if ((ahc->features & AHC_WIDE) != 0)
write_brdctl(ahc, brddat);
}
SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */
}
static void
ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low,
int *enableSEC_high, int *enablePRI_low,
int *enablePRI_high, int *eeprom_present)
{
uint8_t brdctl;
/*
* BRDDAT7 = Eeprom
* BRDDAT6 = Enable Secondary High Byte termination
* BRDDAT5 = Enable Secondary Low Byte termination
* BRDDAT4 = Enable Primary high byte termination
* BRDDAT3 = Enable Primary low byte termination
*/
brdctl = read_brdctl(ahc);
*eeprom_present = brdctl & BRDDAT7;
*enableSEC_high = (brdctl & BRDDAT6);
*enableSEC_low = (brdctl & BRDDAT5);
*enablePRI_high = (brdctl & BRDDAT4);
*enablePRI_low = (brdctl & BRDDAT3);
}
static void
aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *internal68_present, int *externalcable_present,
int *eeprom_present)
{
uint8_t brdctl;
/*
* First read the status of our cables.
* Set the rom bank to 0 since the
* bank setting serves as a multiplexor
* for the cable detection logic.
* BRDDAT5 controls the bank switch.
*/
write_brdctl(ahc, 0);
/*
* Now read the state of the internal
* connectors. BRDDAT6 is INT50 and
* BRDDAT7 is INT68.
*/
brdctl = read_brdctl(ahc);
*internal50_present = (brdctl & BRDDAT6) ? 0 : 1;
*internal68_present = (brdctl & BRDDAT7) ? 0 : 1;
/*
* Set the rom bank to 1 and determine
* the other signals.
*/
write_brdctl(ahc, BRDDAT5);
/*
* Now read the state of the external
* connectors. BRDDAT6 is EXT68 and
* BRDDAT7 is EPROMPS.
*/
brdctl = read_brdctl(ahc);
*externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
*eeprom_present = (brdctl & BRDDAT7) ? 1 : 0;
}
static void
aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
int *externalcable_present, int *eeprom_present)
{
uint8_t brdctl;
uint8_t spiocap;
spiocap = ahc_inb(ahc, SPIOCAP);
spiocap &= ~SOFTCMDEN;
spiocap |= EXT_BRDCTL;
ahc_outb(ahc, SPIOCAP, spiocap);
ahc_outb(ahc, BRDCTL, BRDRW|BRDCS);
ahc_flush_device_writes(ahc);
ahc_delay(500);
ahc_outb(ahc, BRDCTL, 0);
ahc_flush_device_writes(ahc);
ahc_delay(500);
brdctl = ahc_inb(ahc, BRDCTL);
*internal50_present = (brdctl & BRDDAT5) ? 0 : 1;
*externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
*eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0;
}
int
ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd)
{
int wait;
if ((ahc->features & AHC_SPIOCAP) != 0
&& (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0)
return (0);
/*
* Request access to the memory port. When access is
* granted, SEERDY will go high. We use a 1 second
* timeout, which should be about 1 second longer than
* is needed. Reason: after the chip reset, there
* should be no contention.
*/
SEEPROM_OUTB(sd, sd->sd_MS);
wait = 1000; /* 1 second timeout in msec */
while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) {
ahc_delay(1000); /* delay 1 msec */
}
if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) {
SEEPROM_OUTB(sd, 0);
return (0);
}
return(1);
}
void
ahc_release_seeprom(struct seeprom_descriptor *sd)
{
/* Release access to the memory port and the serial EEPROM. */
SEEPROM_OUTB(sd, 0);
}
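/*
* Latch a value onto the external board control lines. The
* strobe and chip select bits differ by chip generation:
* aic7895 uses BRDSTB (plus BRDCS for channel B), Ultra2 chips
* use BRDSTB_ULTRA2, and older chips use BRDSTB|BRDCS.
*/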
static void
write_brdctl(struct ahc_softc *ahc, uint8_t value)
{
uint8_t brdctl;
if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
brdctl = BRDSTB;
if (ahc->channel == 'B')
brdctl |= BRDCS;
} else if ((ahc->features & AHC_ULTRA2) != 0) {
brdctl = 0;
} else {
brdctl = BRDSTB|BRDCS;
}
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
brdctl |= value;
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
if ((ahc->features & AHC_ULTRA2) != 0)
brdctl |= BRDSTB_ULTRA2;
else
brdctl &= ~BRDSTB;
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
if ((ahc->features & AHC_ULTRA2) != 0)
brdctl = 0;
else
brdctl &= ~BRDCS;
ahc_outb(ahc, BRDCTL, brdctl);
}
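/*
* Read the external board control lines, using the read strobe
* and chip select bits appropriate to the chip generation.
*/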
static uint8_t
read_brdctl(struct ahc_softc *ahc)
{
uint8_t brdctl;
uint8_t value;
if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
brdctl = BRDRW;
if (ahc->channel == 'B')
brdctl |= BRDCS;
} else if ((ahc->features & AHC_ULTRA2) != 0) {
brdctl = BRDRW_ULTRA2;
} else {
brdctl = BRDRW|BRDCS;
}
ahc_outb(ahc, BRDCTL, brdctl);
ahc_flush_device_writes(ahc);
value = ahc_inb(ahc, BRDCTL);
ahc_outb(ahc, BRDCTL, 0);
return (value);
}
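/*
* PCI error interrupt handler. Decode and report the latched
* PCI status bits, clear them, and disable further parity error
* reporting if too many target-observed parity errors occur.
*/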
static void
ahc_pci_intr(struct ahc_softc *ahc)
{
u_int error;
u_int status1;
error = ahc_inb(ahc, ERROR);
if ((error & PCIERRSTAT) == 0)
return;
status1 = ahc_pci_read_config(ahc->dev_softc,
PCIR_STATUS + 1, /*bytes*/1);
printk("%s: PCI error Interrupt at seqaddr = 0x%x\n",
ahc_name(ahc),
ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
if (status1 & DPE) {
ahc->pci_target_perr_count++;
printk("%s: Data Parity Error Detected during address "
"or write data phase\n", ahc_name(ahc));
}
if (status1 & SSE) {
printk("%s: Signal System Error Detected\n", ahc_name(ahc));
}
if (status1 & RMA) {
printk("%s: Received a Master Abort\n", ahc_name(ahc));
}
if (status1 & RTA) {
printk("%s: Received a Target Abort\n", ahc_name(ahc));
}
if (status1 & STA) {
printk("%s: Signaled a Target Abort\n", ahc_name(ahc));
}
if (status1 & DPR) {
printk("%s: Data Parity Error has been reported via PERR#\n",
ahc_name(ahc));
}
/* Clear latched errors. */
ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
status1, /*bytes*/1);
if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) {
printk("%s: Latched PCIERR interrupt with "
"no status bits set\n", ahc_name(ahc));
} else {
ahc_outb(ahc, CLRINT, CLRPARERR);
}
if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) {
printk(
"%s: WARNING WARNING WARNING WARNING\n"
"%s: Too many PCI parity errors observed as a target.\n"
"%s: Some device on this bus is generating bad parity.\n"
"%s: This is an error *observed by*, not *generated by*, this controller.\n"
"%s: PCI parity error checking has been disabled.\n"
"%s: WARNING WARNING WARNING WARNING\n",
ahc_name(ahc), ahc_name(ahc), ahc_name(ahc),
ahc_name(ahc), ahc_name(ahc), ahc_name(ahc));
ahc->seqctl |= FAILDIS;
ahc_outb(ahc, SEQCTL, ahc->seqctl);
}
ahc_unpause(ahc);
}
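/*
* Reload the PCI-specific register state saved by
* ahc_pci_config() before handing off to the common chip
* initialization.
*/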
static int
ahc_pci_chip_init(struct ahc_softc *ahc)
{
ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0);
ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus);
if ((ahc->features & AHC_DT) != 0) {
u_int sfunct;
sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode);
ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt);
ahc_outb(ahc, SFUNCT, sfunct);
ahc_outb(ahc, CRCCONTROL1,
ahc->bus_softc.pci_softc.crccontrol1);
}
if ((ahc->features & AHC_MULTI_FUNC) != 0)
ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr);
if ((ahc->features & AHC_ULTRA2) != 0)
ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh);
return (ahc_chip_init(ahc));
}
#ifdef CONFIG_PM
void
ahc_pci_resume(struct ahc_softc *ahc)
{
/*
* We assume that the OS has restored our register
* mappings, etc. Just update the config space registers
* that the OS doesn't know about and rely on our chip
* reset handler to handle the rest.
*/
ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
ahc->bus_softc.pci_softc.command, /*bytes*/1);
ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
struct seeprom_descriptor sd;
u_int sxfrctl1;
sd.sd_ahc = ahc;
sd.sd_control_offset = SEECTL;
sd.sd_status_offset = SEECTL;
sd.sd_dataout_offset = SEECTL;
ahc_acquire_seeprom(ahc, &sd);
configure_termination(ahc, &sd,
ahc->seep_config->adapter_control,
&sxfrctl1);
ahc_release_seeprom(&sd);
}
}
#endif
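/*
* Per-chip setup handlers, referenced from ahc_pci_ident_table.
* Each fills in the channel, chip type, feature flags, bug
* workarounds, and instruction RAM size for its device family.
*/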
static int
ahc_aic785X_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7850;
ahc->features = AHC_AIC7850_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 1)
ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7860_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7860;
ahc->features = AHC_AIC7860_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 1)
ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_apa1480_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7860_setup(ahc);
if (error != 0)
return (error);
ahc->features |= AHC_REMOVABLE;
return (0);
}
static int
ahc_aic7870_setup(struct ahc_softc *ahc)
{
ahc->channel = 'A';
ahc->chip = AHC_AIC7870;
ahc->features = AHC_AIC7870_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7870h_setup(struct ahc_softc *ahc)
{
int error = ahc_aic7870_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha394X_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7870_setup(ahc);
if (error == 0)
error = ahc_aha394XX_setup(ahc);
return (error);
}
static int
ahc_aha394Xh_setup(struct ahc_softc *ahc)
{
int error = ahc_aha394X_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha398X_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7870_setup(ahc);
if (error == 0)
error = ahc_aha398XX_setup(ahc);
return (error);
}
static int
ahc_aha494X_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7870_setup(ahc);
if (error == 0)
error = ahc_aha494XX_setup(ahc);
return (error);
}
static int
ahc_aha494Xh_setup(struct ahc_softc *ahc)
{
int error = ahc_aha494X_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aic7880_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7880;
ahc->features = AHC_AIC7880_FE;
ahc->bugs |= AHC_TMODE_WIDEODD_BUG;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 1) {
ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
} else {
ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
}
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7880h_setup(struct ahc_softc *ahc)
{
int error = ahc_aic7880_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha2940Pro_setup(struct ahc_softc *ahc)
{
ahc->flags |= AHC_INT50_SPEEDFLEX;
return (ahc_aic7880_setup(ahc));
}
static int
ahc_aha394XU_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7880_setup(ahc);
if (error == 0)
error = ahc_aha394XX_setup(ahc);
return (error);
}
static int
ahc_aha394XUh_setup(struct ahc_softc *ahc)
{
int error = ahc_aha394XU_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aha398XU_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7880_setup(ahc);
if (error == 0)
error = ahc_aha398XX_setup(ahc);
return (error);
}
static int
ahc_aic7890_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = 'A';
ahc->chip = AHC_AIC7890;
ahc->features = AHC_AIC7890_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev == 0)
ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG;
ahc->instruction_ram_size = 768;
return (0);
}
static int
ahc_aic7892_setup(struct ahc_softc *ahc)
{
ahc->channel = 'A';
ahc->chip = AHC_AIC7892;
ahc->features = AHC_AIC7892_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
ahc->instruction_ram_size = 1024;
return (0);
}
static int
ahc_aic7895_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
uint8_t rev;
pci = ahc->dev_softc;
ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
/*
* The 'C' revision of the aic7895 has a few additional features.
*/
rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
if (rev >= 4) {
ahc->chip = AHC_AIC7895C;
ahc->features = AHC_AIC7895C_FE;
} else {
u_int command;
ahc->chip = AHC_AIC7895;
ahc->features = AHC_AIC7895_FE;
/*
* The BIOS disables the use of MWI transactions
* since it does not have the MWI bug work around
* we have. Disabling MWI reduces performance, so
* turn it on again.
*/
command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1);
command |= PCIM_CMD_MWRICEN;
ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1);
ahc->bugs |= AHC_PCI_MWI_BUG;
}
/*
* XXX Does CACHETHEN really not work??? What about PCI retry?
* on C level chips. Need to test, but for now, play it safe.
*/
ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG
| AHC_CACHETHEN_BUG;
#if 0
uint32_t devconfig;
/*
* Cachesize must also be zero due to stray DAC
* problem when sitting behind some bridges.
*/
ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1);
devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1);
devconfig |= MRDCEN;
ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1);
#endif
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->instruction_ram_size = 512;
return (0);
}
static int
ahc_aic7895h_setup(struct ahc_softc *ahc)
{
int error = ahc_aic7895_setup(ahc);
ahc->features |= AHC_HVD;
return error;
}
static int
ahc_aic7896_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
ahc->chip = AHC_AIC7896;
ahc->features = AHC_AIC7896_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->bugs |= AHC_CACHETHEN_DIS_BUG;
ahc->instruction_ram_size = 768;
return (0);
}
static int
ahc_aic7899_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
ahc->chip = AHC_AIC7899;
ahc->features = AHC_AIC7899_FE;
ahc->flags |= AHC_NEWEEPROM_FMT;
ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
ahc->instruction_ram_size = 1024;
return (0);
}
static int
ahc_aha29160C_setup(struct ahc_softc *ahc)
{
int error;
error = ahc_aic7899_setup(ahc);
if (error != 0)
return (error);
ahc->features |= AHC_REMOVABLE;
return (0);
}
static int
ahc_raid_setup(struct ahc_softc *ahc)
{
printk("RAID functionality unsupported\n");
return (ENXIO);
}
static int
ahc_aha394XX_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
switch (ahc_get_pci_slot(pci)) {
case AHC_394X_SLOT_CHANNEL_A:
ahc->channel = 'A';
break;
case AHC_394X_SLOT_CHANNEL_B:
ahc->channel = 'B';
break;
default:
printk("adapter at unexpected slot %d\n"
"unable to map to a channel\n",
ahc_get_pci_slot(pci));
ahc->channel = 'A';
}
return (0);
}
static int
ahc_aha398XX_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
switch (ahc_get_pci_slot(pci)) {
case AHC_398X_SLOT_CHANNEL_A:
ahc->channel = 'A';
break;
case AHC_398X_SLOT_CHANNEL_B:
ahc->channel = 'B';
break;
case AHC_398X_SLOT_CHANNEL_C:
ahc->channel = 'C';
break;
default:
printk("adapter at unexpected slot %d\n"
"unable to map to a channel\n",
ahc_get_pci_slot(pci));
ahc->channel = 'A';
break;
}
ahc->flags |= AHC_LARGE_SEEPROM;
return (0);
}
static int
ahc_aha494XX_setup(struct ahc_softc *ahc)
{
ahc_dev_softc_t pci;
pci = ahc->dev_softc;
switch (ahc_get_pci_slot(pci)) {
case AHC_494X_SLOT_CHANNEL_A:
ahc->channel = 'A';
break;
case AHC_494X_SLOT_CHANNEL_B:
ahc->channel = 'B';
break;
case AHC_494X_SLOT_CHANNEL_C:
ahc->channel = 'C';
break;
case AHC_494X_SLOT_CHANNEL_D:
ahc->channel = 'D';
break;
default:
printk("adapter at unexpected slot %d\n"
"unable to map to a channel\n",
ahc_get_pci_slot(pci));
ahc->channel = 'A';
}
ahc->flags |= AHC_LARGE_SEEPROM;
return (0);
}
| gpl-2.0 |
DirtyUnicorns/android_kernel_htc_flounder | net/irda/iriap_event.c | 12525 | 12753 | /*********************************************************************
*
* Filename: iriap_event.c
* Version: 0.1
* Description: IAP Finite State Machine
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
* Modified at: Wed Mar 1 11:28:34 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1999-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/slab.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/iriap.h>
#include <net/irda/iriap_event.h>
static void state_s_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_connecting (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_call (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_make_call (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_calling (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_outstanding (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_replying (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_s_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_disconnect (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_call (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_waiting (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_wait_active (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_receiving (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_execute (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
static void state_r_returning (struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb);
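/*
* Handler dispatch table, indexed by IRIAP_STATE; the entry
* order must match the state enumeration, since the dispatch
* helpers below index it directly with the current state.
*/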
static void (*iriap_state[])(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb) = {
/* Client FSM */
state_s_disconnect,
state_s_connecting,
state_s_call,
/* S-Call FSM */
state_s_make_call,
state_s_calling,
state_s_outstanding,
state_s_replying,
state_s_wait_for_call,
state_s_wait_active,
/* Server FSM */
state_r_disconnect,
state_r_call,
/* R-Connect FSM */
state_r_waiting,
state_r_wait_active,
state_r_receiving,
state_r_execute,
state_r_returning,
};
void iriap_next_client_state(struct iriap_cb *self, IRIAP_STATE state)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
self->client_state = state;
}
void iriap_next_call_state(struct iriap_cb *self, IRIAP_STATE state)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
self->call_state = state;
}
void iriap_next_server_state(struct iriap_cb *self, IRIAP_STATE state)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
self->server_state = state;
}
void iriap_next_r_connect_state(struct iriap_cb *self, IRIAP_STATE state)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
self->r_connect_state = state;
}
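/*
* Event dispatch helpers: each forwards an event to the handler
* for the current state of the corresponding sub-state machine
* (client, call, server, and r-connect).
*/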
void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
(*iriap_state[self->client_state])(self, event, skb);
}
void iriap_do_call_event(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
(*iriap_state[self->call_state])(self, event, skb);
}
void iriap_do_server_event(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
(*iriap_state[self->server_state])(self, event, skb);
}
void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
(*iriap_state[self->r_connect_state])(self, event, skb);
}
/*
* Function state_s_disconnect (event, skb)
*
* S-Disconnect, The device has no LSAP connection to a particular
* remote device.
*/
static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
switch (event) {
case IAP_CALL_REQUEST_GVBC:
iriap_next_client_state(self, S_CONNECTING);
IRDA_ASSERT(self->request_skb == NULL, return;);
/* Don't forget to refcount it -
* see iriap_getvaluebyclass_request(). */
skb_get(skb);
self->request_skb = skb;
iriap_connect_request(self);
break;
case IAP_LM_DISCONNECT_INDICATION:
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
break;
}
}
/*
* Function state_s_connecting (self, event, skb)
*
* S-Connecting
*
*/
static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
switch (event) {
case IAP_LM_CONNECT_CONFIRM:
/*
* Jump to S-Call FSM
*/
iriap_do_call_event(self, IAP_CALL_REQUEST, skb);
/* iriap_call_request(self, 0,0,0); */
iriap_next_client_state(self, S_CALL);
break;
case IAP_LM_DISCONNECT_INDICATION:
/* Abort calls */
iriap_next_call_state(self, S_MAKE_CALL);
iriap_next_client_state(self, S_DISCONNECT);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
break;
}
}
/*
* Function state_s_call (self, event, skb)
*
* S-Call, The device can process calls to a specific remote
* device. Whenever the LSAP connection is disconnected, this state
* catches that event and cleans up.
*/
static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
switch (event) {
case IAP_LM_DISCONNECT_INDICATION:
/* Abort calls */
iriap_next_call_state(self, S_MAKE_CALL);
iriap_next_client_state(self, S_DISCONNECT);
break;
default:
IRDA_DEBUG(0, "state_s_call: Unknown event %d\n", event);
break;
}
}
/*
* Function state_s_make_call (event, skb)
*
* S-Make-Call
*
*/
static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
struct sk_buff *tx_skb;
IRDA_ASSERT(self != NULL, return;);
switch (event) {
case IAP_CALL_REQUEST:
/* Already refcounted - see state_s_disconnect() */
tx_skb = self->request_skb;
self->request_skb = NULL;
irlmp_data_request(self->lsap, tx_skb);
iriap_next_call_state(self, S_OUTSTANDING);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
break;
}
}
/*
* Function state_s_calling (event, skb)
*
* S-Calling
*
*/
static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
}
/*
* Function state_s_outstanding (event, skb)
*
* S-Outstanding, The device is waiting for a response to a command
*
*/
static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_ASSERT(self != NULL, return;);
switch (event) {
case IAP_RECV_F_LST:
/*iriap_send_ack(self);*/
/*LM_Idle_request(idle); */
iriap_next_call_state(self, S_WAIT_FOR_CALL);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
break;
}
}
/*
* Function state_s_replying (event, skb)
*
* S-Replying, The device is collecting a multiple part response
*/
static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
}
/*
* Function state_s_wait_for_call (event, skb)
*
* S-Wait-for-Call
*
*/
static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
}
/*
* Function state_s_wait_active (event, skb)
*
* S-Wait-Active
*
*/
static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
}
/**************************************************************************
*
* Server FSM
*
**************************************************************************/
/*
* Function state_r_disconnect (self, event, skb)
*
* LM-IAS server is disconnected (not processing any requests!)
*
*/
static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
struct sk_buff *tx_skb;
switch (event) {
case IAP_LM_CONNECT_INDICATION:
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
if (tx_skb == NULL) {
IRDA_WARNING("%s: unable to malloc!\n", __func__);
return;
}
/* Reserve space for MUX_CONTROL and LAP header */
skb_reserve(tx_skb, LMP_MAX_HEADER);
irlmp_connect_response(self->lsap, tx_skb);
/*LM_Idle_request(idle); */
iriap_next_server_state(self, R_CALL);
/*
* Jump to R-Connect FSM, we skip R-Waiting since we do not
* care about LM_Idle_request()!
*/
iriap_next_r_connect_state(self, R_RECEIVING);
break;
default:
IRDA_DEBUG(0, "%s(), unknown event %d\n", __func__, event);
break;
}
}
/*
* Function state_r_call (self, event, skb)
*/
static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(4, "%s()\n", __func__);
switch (event) {
case IAP_LM_DISCONNECT_INDICATION:
/* Abort call */
iriap_next_server_state(self, R_DISCONNECT);
iriap_next_r_connect_state(self, R_WAITING);
break;
default:
IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
break;
}
}
/*
* R-Connect FSM
*/
/*
* Function state_r_waiting (self, event, skb)
*/
static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
}
static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
}
/*
* Function state_r_receiving (self, event, skb)
*
* We are receiving a command
*
*/
static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(4, "%s()\n", __func__);
switch (event) {
case IAP_RECV_F_LST:
iriap_next_r_connect_state(self, R_EXECUTE);
iriap_call_indication(self, skb);
break;
default:
IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
break;
}
}
/*
* Function state_r_execute (self, event, skb)
*
* The server is processing the request
*
*/
static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
switch (event) {
case IAP_CALL_RESPONSE:
/*
* Since we don't implement the Waiting state, we return
* to state Receiving instead, DB.
*/
iriap_next_r_connect_state(self, R_RECEIVING);
/* Don't forget to refcount it - see
* iriap_getvaluebyclass_response(). */
skb_get(skb);
irlmp_data_request(self->lsap, skb);
break;
default:
IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
break;
}
}
static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
IRDA_DEBUG(0, "%s(), event=%d\n", __func__, event);
switch (event) {
case IAP_RECV_F_LST:
break;
default:
break;
}
}
| gpl-2.0 |
MoKee/android_kernel_zte_x9180 | sound/pci/echoaudio/echo3g_dsp.c | 12525 | 4228 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int load_asic(struct echoaudio *chip);
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int check_asic_status(struct echoaudio *chip);
static int set_sample_rate(struct echoaudio *chip, u32 rate);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_phantom_power(struct echoaudio *chip, char on);
static int write_control_reg(struct echoaudio *chip, u32 ctl, u32 frq,
char force);
#include <linux/interrupt.h>
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
local_irq_enable();
DE_INIT(("init_hw() - Echo3G\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != ECHO3G))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
DE_INIT(("init_hw - could not initialize DSP comm page\n"));
return err;
}
chip->comm_page->e3g_frq_register =
cpu_to_le32((E3G_MAGIC_NUMBER / 48000) - 2);
chip->device_id = device_id;
chip->subdevice_id = subdevice_id;
chip->bad_board = TRUE;
chip->has_midi = TRUE;
chip->dsp_code_to_load = FW_ECHO3G_DSP;
/* Load the DSP code and the ASIC on the PCI card, and determine
what type of external box is attached */
err = load_firmware(chip);
if (err < 0) {
return err;
} else if (err == E3G_GINA3G_BOX_TYPE) {
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
ECHO_CLOCK_BIT_SPDIF |
ECHO_CLOCK_BIT_ADAT;
chip->card_name = "Gina3G";
chip->px_digital_out = chip->bx_digital_out = 6;
chip->px_analog_in = chip->bx_analog_in = 14;
chip->px_digital_in = chip->bx_digital_in = 16;
chip->px_num = chip->bx_num = 24;
chip->has_phantom_power = TRUE;
chip->hasnt_input_nominal_level = TRUE;
} else if (err == E3G_LAYLA3G_BOX_TYPE) {
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
ECHO_CLOCK_BIT_SPDIF |
ECHO_CLOCK_BIT_ADAT |
ECHO_CLOCK_BIT_WORD;
chip->card_name = "Layla3G";
chip->px_digital_out = chip->bx_digital_out = 8;
chip->px_analog_in = chip->bx_analog_in = 16;
chip->px_digital_in = chip->bx_digital_in = 24;
chip->px_num = chip->bx_num = 32;
} else {
return -ENODEV;
}
chip->digital_modes = ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
ECHOCAPS_HAS_DIGITAL_MODE_ADAT;
DE_INIT(("init_hw done\n"));
return err;
}
static int set_mixer_defaults(struct echoaudio *chip)
{
chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
chip->professional_spdif = FALSE;
chip->non_audio_spdif = FALSE;
chip->bad_board = FALSE;
chip->phantom_power = FALSE;
return init_line_levels(chip);
}
static int set_phantom_power(struct echoaudio *chip, char on)
{
u32 control_reg = le32_to_cpu(chip->comm_page->control_register);
if (on)
control_reg |= E3G_PHANTOM_POWER;
else
control_reg &= ~E3G_PHANTOM_POWER;
chip->phantom_power = on;
return write_control_reg(chip, control_reg,
le32_to_cpu(chip->comm_page->e3g_frq_register),
0);
}
| gpl-2.0 |
charles1018/kernel_blu_spark | drivers/video/msm/mdss/mdp3.c | 238 | 28996 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/msm_kgsl.h>
#include <mach/board.h>
#include <mach/clk.h>
#include <mach/hardware.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_memtypes.h>
#include "mdp3.h"
#include "mdss_fb.h"
#include "mdp3_hwio.h"
#include "mdp3_ctrl.h"
#include "mdp3_ppp.h"
#define MDP_CORE_HW_VERSION 0x03040310
struct mdp3_hw_resource *mdp3_res;
#define MDP_BUS_VECTOR_ENTRY_DMA(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_MDP_PORT0, \
.dst = MSM_BUS_SLAVE_EBI_CH0, \
.ab = (ab_val), \
.ib = (ib_val), \
}
static struct msm_bus_vectors mdp_bus_dma_vectors[] = {
MDP_BUS_VECTOR_ENTRY_DMA(0, 0),
MDP_BUS_VECTOR_ENTRY_DMA(SZ_128M, SZ_256M),
MDP_BUS_VECTOR_ENTRY_DMA(SZ_256M, SZ_512M),
};
static struct msm_bus_paths
mdp_bus_dma_usecases[ARRAY_SIZE(mdp_bus_dma_vectors)];
static struct msm_bus_scale_pdata mdp_bus_dma_scale_table = {
.usecase = mdp_bus_dma_usecases,
.num_usecases = ARRAY_SIZE(mdp_bus_dma_usecases),
.name = "mdp3",
};
#define MDP_BUS_VECTOR_ENTRY_PPP(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_MDPE, \
.dst = MSM_BUS_SLAVE_EBI_CH0, \
.ab = (ab_val), \
.ib = (ib_val), \
}
static struct msm_bus_vectors mdp_bus_ppp_vectors[] = {
MDP_BUS_VECTOR_ENTRY_PPP(0, 0),
MDP_BUS_VECTOR_ENTRY_PPP(SZ_128M, SZ_256M),
MDP_BUS_VECTOR_ENTRY_PPP(SZ_256M, SZ_512M),
};
static struct msm_bus_paths
mdp_bus_ppp_usecases[ARRAY_SIZE(mdp_bus_ppp_vectors)];
static struct msm_bus_scale_pdata mdp_bus_ppp_scale_table = {
.usecase = mdp_bus_ppp_usecases,
.num_usecases = ARRAY_SIZE(mdp_bus_ppp_usecases),
.name = "mdp3_ppp",
};
struct mdp3_bus_handle_map mdp3_bus_handle[MDP3_BUS_HANDLE_MAX] = {
[MDP3_BUS_HANDLE_DMA] = {
.bus_vector = mdp_bus_dma_vectors,
.usecases = mdp_bus_dma_usecases,
.scale_pdata = &mdp_bus_dma_scale_table,
.current_bus_idx = 0,
.handle = 0,
},
[MDP3_BUS_HANDLE_PPP] = {
.bus_vector = mdp_bus_ppp_vectors,
.usecases = mdp_bus_ppp_usecases,
.scale_pdata = &mdp_bus_ppp_scale_table,
.current_bus_idx = 0,
.handle = 0,
},
};
struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
[MDP3_IOMMU_DOMAIN] = {
.domain_type = MDP3_IOMMU_DOMAIN,
.client_name = "mdp_dma",
.partitions = {
{
.start = SZ_128K,
.size = SZ_1G - SZ_128K,
},
},
.npartitions = 1,
},
};
struct mdp3_iommu_ctx_map mdp3_iommu_contexts[MDP3_IOMMU_CTX_MAX] = {
[MDP3_IOMMU_CTX_PPP_0] = {
.ctx_type = MDP3_IOMMU_CTX_PPP_0,
.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
.ctx_name = "mdpe_0",
.attached = 0,
},
[MDP3_IOMMU_CTX_PPP_1] = {
.ctx_type = MDP3_IOMMU_CTX_PPP_1,
.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
.ctx_name = "mdpe_1",
.attached = 0,
},
[MDP3_IOMMU_CTX_DMA_0] = {
.ctx_type = MDP3_IOMMU_CTX_DMA_0,
.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
.ctx_name = "mdps_0",
.attached = 0,
},
[MDP3_IOMMU_CTX_DMA_1] = {
.ctx_type = MDP3_IOMMU_CTX_DMA_1,
.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
.ctx_name = "mdps_1",
.attached = 0,
},
};
static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
{
int i = 0;
struct mdp3_hw_resource *mdata = (struct mdp3_hw_resource *)ptr;
u32 mdp_interrupt = 0;
spin_lock(&mdata->irq_lock);
if (!mdata->irq_mask) {
pr_err("spurious interrupt\n");
spin_unlock(&mdata->irq_lock);
return IRQ_HANDLED;
}
mdp_interrupt = MDP3_REG_READ(MDP3_REG_INTR_STATUS);
MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_interrupt);
pr_debug("mdp3_irq_handler irq=%d\n", mdp_interrupt);
mdp_interrupt &= mdata->irq_mask;
while (mdp_interrupt && i < MDP3_MAX_INTR) {
if ((mdp_interrupt & 0x1) && mdata->callbacks[i].cb)
mdata->callbacks[i].cb(i, mdata->callbacks[i].data);
mdp_interrupt = mdp_interrupt >> 1;
i++;
}
spin_unlock(&mdata->irq_lock);
return IRQ_HANDLED;
}
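/*
 * Illustrative walk of the dispatch loop above (not from the original
 * source): if the masked status were 0x5 (0b101), iteration 0 would fire
 * callbacks[0], iteration 1 would skip because the shifted bit is clear,
 * iteration 2 would fire callbacks[2], and the loop would stop once the
 * shifted value reached zero.
 */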
void mdp3_irq_enable(int type)
{
unsigned long flag;
pr_debug("mdp3_irq_enable type=%d\n", type);
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
mdp3_res->irq_ref_count[type] += 1;
if (mdp3_res->irq_ref_count[type] > 1) {
pr_debug("interrupt %d already enabled\n", type);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
return;
}
mdp3_res->irq_mask |= BIT(type);
MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
}
void mdp3_irq_disable(int type)
{
unsigned long flag;
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
mdp3_irq_disable_nosync(type);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
}
void mdp3_irq_disable_nosync(int type)
{
if (mdp3_res->irq_ref_count[type] <= 0) {
pr_debug("interrupt %d not enabled\n", type);
return;
}
mdp3_res->irq_ref_count[type] -= 1;
if (mdp3_res->irq_ref_count[type] == 0) {
mdp3_res->irq_mask &= ~BIT(type);
MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
}
}
int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb)
{
unsigned long flag;
pr_debug("interrupt %d callback\n", type);
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
if (cb)
mdp3_res->callbacks[type] = *cb;
else
mdp3_res->callbacks[type].cb = NULL;
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
return 0;
}
void mdp3_irq_register(void)
{
unsigned long flag;
pr_debug("mdp3_irq_register\n");
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
enable_irq(mdp3_res->irq);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
}
void mdp3_irq_deregister(void)
{
unsigned long flag;
pr_debug("mdp3_irq_deregister\n");
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
memset(mdp3_res->irq_ref_count, 0, sizeof(u32) * MDP3_MAX_INTR);
mdp3_res->irq_mask = 0;
MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
disable_irq_nosync(mdp3_res->irq);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
}
static int mdp3_bus_scale_register(void)
{
int i;
if (!mdp3_res->bus_handle) {
pr_err("No bus handle\n");
return -EINVAL;
}
for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) {
struct mdp3_bus_handle_map *bus_handle =
&mdp3_res->bus_handle[i];
if (!bus_handle->handle) {
int j;
struct msm_bus_scale_pdata *bus_pdata =
bus_handle->scale_pdata;
for (j = 0; j < bus_pdata->num_usecases; j++) {
bus_handle->usecases[j].num_paths = 1;
bus_handle->usecases[j].vectors =
&bus_handle->bus_vector[j];
}
bus_handle->handle =
msm_bus_scale_register_client(bus_pdata);
if (!bus_handle->handle) {
pr_err("not able to get bus scale i=%d\n", i);
return -ENOMEM;
}
pr_debug("register bus_hdl=%x\n",
bus_handle->handle);
}
}
return 0;
}
static void mdp3_bus_scale_unregister(void)
{
int i;
if (!mdp3_res->bus_handle)
return;
for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) {
pr_debug("unregister index=%d bus_handle=%x\n",
i, mdp3_res->bus_handle[i].handle);
if (mdp3_res->bus_handle[i].handle) {
msm_bus_scale_unregister_client(
mdp3_res->bus_handle[i].handle);
mdp3_res->bus_handle[i].handle = 0;
}
}
}
int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
{
struct mdp3_bus_handle_map *bus_handle;
int cur_bus_idx;
int bus_idx;
int client_idx;
int rc;
if (client == MDP3_CLIENT_DMA_P) {
client_idx = MDP3_BUS_HANDLE_DMA;
} else if (client == MDP3_CLIENT_PPP) {
client_idx = MDP3_BUS_HANDLE_PPP;
} else {
pr_err("invalid client %d\n", client);
return -EINVAL;
}
bus_handle = &mdp3_res->bus_handle[client_idx];
cur_bus_idx = bus_handle->current_bus_idx;
if (bus_handle->handle < 1) {
pr_err("invalid bus handle %d\n", bus_handle->handle);
return -EINVAL;
}
if ((ab_quota | ib_quota) == 0) {
bus_idx = 0;
} else {
int num_cases = bus_handle->scale_pdata->num_usecases;
struct msm_bus_vectors *vect = NULL;
bus_idx = (cur_bus_idx % (num_cases - 1)) + 1;
/* aligning to avoid performing updates for small changes */
ab_quota = ALIGN(ab_quota, SZ_64M);
ib_quota = ALIGN(ib_quota, SZ_64M);
vect = bus_handle->scale_pdata->usecase[cur_bus_idx].vectors;
if ((ab_quota == vect->ab) && (ib_quota == vect->ib)) {
pr_debug("skip bus scaling, no change in vectors\n");
return 0;
}
vect = bus_handle->scale_pdata->usecase[bus_idx].vectors;
vect->ab = ab_quota;
vect->ib = ib_quota;
pr_debug("bus scale idx=%d ab=%llu ib=%llu\n", bus_idx,
vect->ab, vect->ib);
}
bus_handle->current_bus_idx = bus_idx;
rc = msm_bus_scale_client_update_request(bus_handle->handle, bus_idx);
return rc;
}
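/*
 * Illustrative arithmetic for the SZ_64M quota alignment above (not from
 * the original source): ALIGN(x, a) rounds x up to the next multiple of a,
 * so an ab_quota of 100 MB (104857600 bytes) becomes 128 MB (134217728
 * bytes). Any request that lands on the same aligned ab/ib pair as the
 * current vector is skipped, avoiding bus-scale updates for small changes.
 */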
static int mdp3_clk_update(u32 clk_idx, u32 enable)
{
int ret = -EINVAL;
struct clk *clk;
int count = 0;
if (clk_idx >= MDP3_MAX_CLK || !mdp3_res->clocks[clk_idx])
return -ENODEV;
clk = mdp3_res->clocks[clk_idx];
if (enable)
mdp3_res->clock_ref_count[clk_idx]++;
else
mdp3_res->clock_ref_count[clk_idx]--;
count = mdp3_res->clock_ref_count[clk_idx];
if (count == 1 && enable) {
pr_debug("clk=%d en=%d\n", clk_idx, enable);
ret = clk_prepare_enable(clk);
} else if (count == 0) {
pr_debug("clk=%d disable\n", clk_idx);
clk_disable_unprepare(clk);
ret = 0;
} else if (count < 0) {
pr_err("clk=%d count=%d\n", clk_idx, count);
ret = -EINVAL;
}
return ret;
}
int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate,
int client)
{
int ret = 0;
unsigned long rounded_rate;
struct clk *clk = mdp3_res->clocks[clk_type];
if (clk) {
mutex_lock(&mdp3_res->res_mutex);
rounded_rate = clk_round_rate(clk, clk_rate);
if (IS_ERR_VALUE(rounded_rate)) {
pr_err("unable to round rate err=%ld\n", rounded_rate);
mutex_unlock(&mdp3_res->res_mutex);
return -EINVAL;
}
if (clk_type == MDP3_CLK_CORE) {
if (client == MDP3_CLIENT_DMA_P) {
mdp3_res->dma_core_clk_request = rounded_rate;
} else if (client == MDP3_CLIENT_PPP) {
mdp3_res->ppp_core_clk_request = rounded_rate;
} else {
pr_err("unrecognized client=%d\n", client);
mutex_unlock(&mdp3_res->res_mutex);
return -EINVAL;
}
rounded_rate = max(mdp3_res->dma_core_clk_request,
mdp3_res->ppp_core_clk_request);
}
if (rounded_rate != clk_get_rate(clk)) {
ret = clk_set_rate(clk, rounded_rate);
if (ret)
pr_err("clk_set_rate failed ret=%d\n", ret);
else
pr_debug("mdp clk rate=%lu\n", rounded_rate);
}
mutex_unlock(&mdp3_res->res_mutex);
} else {
pr_err("mdp src clk not setup properly\n");
ret = -EINVAL;
}
return ret;
}
unsigned long mdp3_get_clk_rate(u32 clk_idx)
{
unsigned long clk_rate = 0;
struct clk *clk;
if (clk_idx >= MDP3_MAX_CLK)
return -ENODEV;
clk = mdp3_res->clocks[clk_idx];
if (clk) {
mutex_lock(&mdp3_res->res_mutex);
clk_rate = clk_get_rate(clk);
mutex_unlock(&mdp3_res->res_mutex);
}
return clk_rate;
}
static int mdp3_clk_register(char *clk_name, int clk_idx)
{
struct clk *tmp;
if (clk_idx >= MDP3_MAX_CLK) {
pr_err("invalid clk index %d\n", clk_idx);
return -EINVAL;
}
tmp = devm_clk_get(&mdp3_res->pdev->dev, clk_name);
if (IS_ERR(tmp)) {
pr_err("unable to get clk: %s\n", clk_name);
return PTR_ERR(tmp);
}
mdp3_res->clocks[clk_idx] = tmp;
return 0;
}
static int mdp3_clk_setup(void)
{
int rc;
rc = mdp3_clk_register("iface_clk", MDP3_CLK_AHB);
if (rc)
return rc;
rc = mdp3_clk_register("core_clk", MDP3_CLK_CORE);
if (rc)
return rc;
rc = mdp3_clk_register("vsync_clk", MDP3_CLK_VSYNC);
if (rc)
return rc;
rc = mdp3_clk_register("lcdc_clk", MDP3_CLK_LCDC);
if (rc)
return rc;
rc = mdp3_clk_register("dsi_clk", MDP3_CLK_DSI);
if (rc)
return rc;
return rc;
}
static void mdp3_clk_remove(void)
{
if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AHB]))
clk_put(mdp3_res->clocks[MDP3_CLK_AHB]);
if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_CORE]))
clk_put(mdp3_res->clocks[MDP3_CLK_CORE]);
if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_VSYNC]))
clk_put(mdp3_res->clocks[MDP3_CLK_VSYNC]);
if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_LCDC]))
clk_put(mdp3_res->clocks[MDP3_CLK_LCDC]);
if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_DSI]))
clk_put(mdp3_res->clocks[MDP3_CLK_DSI]);
}
int mdp3_clk_enable(int enable)
{
int rc;
pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
mutex_lock(&mdp3_res->res_mutex);
rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
rc |= mdp3_clk_update(MDP3_CLK_CORE, enable);
rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
rc |= mdp3_clk_update(MDP3_CLK_DSI, enable);
mutex_unlock(&mdp3_res->res_mutex);
return rc;
}
static int mdp3_irq_setup(void)
{
int ret;
ret = devm_request_irq(&mdp3_res->pdev->dev,
mdp3_res->irq,
mdp3_irq_handler,
IRQF_DISABLED, "MDP", mdp3_res);
if (ret) {
pr_err("mdp request_irq() failed!\n");
return ret;
}
disable_irq(mdp3_res->irq);
mdp3_res->irq_registered = true;
return 0;
}
int mdp3_iommu_attach(int context)
{
struct mdp3_iommu_ctx_map *context_map;
struct mdp3_iommu_domain_map *domain_map;
if (context >= MDP3_IOMMU_CTX_MAX)
return -EINVAL;
context_map = mdp3_res->iommu_contexts + context;
if (context_map->attached) {
pr_warn("mdp iommu already attached\n");
return 0;
}
domain_map = context_map->domain;
iommu_attach_device(domain_map->domain, context_map->ctx);
context_map->attached = true;
return 0;
}
int mdp3_iommu_dettach(int context)
{
struct mdp3_iommu_ctx_map *context_map;
struct mdp3_iommu_domain_map *domain_map;
if (!mdp3_res->iommu_contexts ||
context >= MDP3_IOMMU_CTX_MAX)
return -EINVAL;
context_map = mdp3_res->iommu_contexts + context;
if (!context_map->attached) {
pr_warn("mdp iommu not attached\n");
return 0;
}
domain_map = context_map->domain;
iommu_detach_device(domain_map->domain, context_map->ctx);
context_map->attached = false;
return 0;
}
int mdp3_iommu_domain_init(void)
{
struct msm_iova_layout layout;
int i;
if (mdp3_res->domains) {
pr_warn("iommu domain already initialized\n");
return 0;
}
for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
int domain_idx;
layout.client_name = mdp3_iommu_domains[i].client_name;
layout.partitions = mdp3_iommu_domains[i].partitions;
layout.npartitions = mdp3_iommu_domains[i].npartitions;
layout.is_secure = false;
domain_idx = msm_register_domain(&layout);
if (IS_ERR_VALUE(domain_idx))
return -EINVAL;
mdp3_iommu_domains[i].domain_idx = domain_idx;
mdp3_iommu_domains[i].domain = msm_get_iommu_domain(domain_idx);
if (IS_ERR_OR_NULL(mdp3_iommu_domains[i].domain)) {
pr_err("unable to get iommu domain(%d)\n",
domain_idx);
if (!mdp3_iommu_domains[i].domain)
return -EINVAL;
else
return PTR_ERR(mdp3_iommu_domains[i].domain);
}
}
mdp3_res->domains = mdp3_iommu_domains;
return 0;
}
int mdp3_iommu_context_init(void)
{
int i;
if (mdp3_res->iommu_contexts) {
pr_warn("iommu context already initialized\n");
return 0;
}
for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
mdp3_iommu_contexts[i].ctx =
msm_iommu_get_ctx(mdp3_iommu_contexts[i].ctx_name);
if (IS_ERR_OR_NULL(mdp3_iommu_contexts[i].ctx)) {
pr_warn("unable to get iommu ctx(%s)\n",
mdp3_iommu_contexts[i].ctx_name);
if (!mdp3_iommu_contexts[i].ctx)
return -EINVAL;
else
return PTR_ERR(mdp3_iommu_contexts[i].ctx);
}
}
mdp3_res->iommu_contexts = mdp3_iommu_contexts;
return 0;
}
int mdp3_iommu_init(void)
{
int ret;
ret = mdp3_iommu_domain_init();
if (ret) {
pr_err("mdp3 iommu domain init fails\n");
return ret;
}
ret = mdp3_iommu_context_init();
if (ret) {
pr_err("mdp3 iommu context init fails\n");
return ret;
}
return ret;
}
void mdp3_iommu_deinit(void)
{
int i;
if (!mdp3_res->domains)
return;
for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
if (!IS_ERR_OR_NULL(mdp3_res->domains[i].domain))
msm_unregister_domain(mdp3_res->domains[i].domain);
}
}
static int mdp3_check_version(void)
{
int rc;
rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
rc |= mdp3_clk_update(MDP3_CLK_CORE, 1);
if (rc)
return rc;
mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
rc |= mdp3_clk_update(MDP3_CLK_CORE, 0);
if (rc)
pr_err("fail to turn off the MDP3_CLK_AHB clk\n");
if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) {
pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev);
rc = -ENODEV;
}
return rc;
}
static int mdp3_hw_init(void)
{
int i;
for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
mdp3_res->dma[i].dma_sel = i;
mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL;
mdp3_res->dma[i].in_use = 0;
mdp3_res->dma[i].available = 1;
}
mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER;
mdp3_res->dma[MDP3_DMA_E].available = 0;
for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
mdp3_res->intf[i].cfg.type = i;
mdp3_res->intf[i].active = 0;
mdp3_res->intf[i].in_use = 0;
mdp3_res->intf[i].available = 1;
}
mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0;
mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0;
return 0;
}
static int mdp3_res_init(void)
{
int rc = 0;
rc = mdp3_irq_setup();
if (rc)
return rc;
rc = mdp3_clk_setup();
if (rc)
return rc;
mdp3_res->ion_client = msm_ion_client_create(-1, mdp3_res->pdev->name);
if (IS_ERR_OR_NULL(mdp3_res->ion_client)) {
pr_err("msm_ion_client_create() return error (%p)\n",
mdp3_res->ion_client);
mdp3_res->ion_client = NULL;
return -EINVAL;
}
rc = mdp3_iommu_init();
if (rc)
return rc;
mdp3_res->bus_handle = mdp3_bus_handle;
rc = mdp3_bus_scale_register();
if (rc) {
pr_err("unable to register bus scaling\n");
return rc;
}
rc = mdp3_hw_init();
return rc;
}
static void mdp3_res_deinit(void)
{
mdp3_bus_scale_unregister();
mdp3_iommu_dettach(MDP3_IOMMU_CTX_DMA_0);
mdp3_iommu_deinit();
if (!IS_ERR_OR_NULL(mdp3_res->ion_client))
ion_client_destroy(mdp3_res->ion_client);
mdp3_clk_remove();
if (mdp3_res->irq_registered)
devm_free_irq(&mdp3_res->pdev->dev, mdp3_res->irq, mdp3_res);
}
static int mdp3_parse_dt(struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
if (!res) {
pr_err("unable to get MDP base address\n");
return -EINVAL;
}
mdp3_res->mdp_reg_size = resource_size(res);
mdp3_res->mdp_base = devm_ioremap(&pdev->dev, res->start,
mdp3_res->mdp_reg_size);
if (unlikely(!mdp3_res->mdp_base)) {
pr_err("unable to map MDP base\n");
return -ENOMEM;
}
pr_debug("MDP HW Base phy_Address=0x%x virt=0x%x\n",
(int) res->start,
(int) mdp3_res->mdp_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
pr_err("unable to get MDSS irq\n");
return -EINVAL;
}
mdp3_res->irq = res->start;
return 0;
}
int mdp3_put_img(struct mdp3_img_data *data)
{
struct ion_client *iclient = mdp3_res->ion_client;
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
pr_info("mdp3_put_img fb mem buf=0x%x\n", data->addr);
fput_light(data->srcp_file, data->p_need);
data->srcp_file = NULL;
} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
ion_unmap_iommu(iclient, data->srcp_ihdl, dom, 0);
ion_free(iclient, data->srcp_ihdl);
data->srcp_ihdl = NULL;
} else {
return -EINVAL;
}
return 0;
}
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data)
{
struct file *file;
int ret = -EINVAL;
int fb_num;
unsigned long *start, *len;
struct ion_client *iclient = mdp3_res->ion_client;
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
start = (unsigned long *) &data->addr;
len = (unsigned long *) &data->len;
data->flags = img->flags;
data->p_need = 0;
if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
file = fget_light(img->memory_id, &data->p_need);
if (file == NULL) {
pr_err("invalid framebuffer file (%d)\n",
img->memory_id);
return -EINVAL;
}
if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
ret = mdss_fb_get_phys_info(start, len, fb_num);
if (ret) {
pr_err("mdss_fb_get_phys_info() failed\n");
fput_light(file, data->p_need);
file = NULL;
}
} else {
pr_err("invalid FB_MAJOR\n");
fput_light(file, data->p_need);
file = NULL;
ret = -EINVAL;
}
data->srcp_file = file;
if (!ret)
goto done;
} else if (iclient) {
data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
if (IS_ERR_OR_NULL(data->srcp_ihdl)) {
pr_err("error on ion_import_fd\n");
if (!data->srcp_ihdl)
ret = -EINVAL;
else
ret = PTR_ERR(data->srcp_ihdl);
data->srcp_ihdl = NULL;
return ret;
}
ret = ion_map_iommu(iclient, data->srcp_ihdl, dom,
0, SZ_4K, 0, start, len, 0, 0);
if (IS_ERR_VALUE(ret)) {
ion_free(iclient, data->srcp_ihdl);
pr_err("failed to map ion handle (%d)\n", ret);
return ret;
}
}
done:
if (!ret && (img->offset < data->len)) {
data->addr += img->offset;
data->len -= img->offset;
pr_debug("mem=%d ihdl=%p buf=0x%x len=0x%x\n", img->memory_id,
data->srcp_ihdl, data->addr, data->len);
} else {
mdp3_put_img(data);
return -EINVAL;
}
return ret;
}
int mdp3_iommu_enable(int client)
{
int rc;
if (client == MDP3_CLIENT_DMA_P) {
rc = mdp3_iommu_attach(MDP3_IOMMU_CTX_DMA_0);
} else {
rc = mdp3_iommu_attach(MDP3_IOMMU_CTX_PPP_0);
rc |= mdp3_iommu_attach(MDP3_IOMMU_CTX_PPP_1);
}
return rc;
}
int mdp3_iommu_disable(int client)
{
int rc;
if (client == MDP3_CLIENT_DMA_P) {
rc = mdp3_iommu_dettach(MDP3_IOMMU_CTX_DMA_0);
} else {
rc = mdp3_iommu_dettach(MDP3_IOMMU_CTX_PPP_0);
rc |= mdp3_iommu_dettach(MDP3_IOMMU_CTX_PPP_1);
}
return rc;
}
static int mdp3_init(struct msm_fb_data_type *mfd)
{
int rc;
rc = mdp3_ctrl_init(mfd);
rc |= mdp3_ppp_res_init(mfd);
return rc;
}
u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp)
{
/*
* The adreno GPU hardware requires that the pitch be aligned to
* 32 pixels for color buffers, so for the cases where the GPU
* is writing directly to fb0, the framebuffer pitch
* also needs to be 32-pixel aligned.
*/
if (fb_index == 0)
return ALIGN(xres, 32) * bpp;
else
return xres * bpp;
}
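/*
 * Worked example for the stride rule above (assumed values, not from the
 * original source): for fb0 with xres = 540 and bpp = 4, ALIGN(540, 32)
 * yields 544, so the stride is 544 * 4 = 2176 bytes; any other fb index
 * simply gets 540 * 4 = 2160 bytes.
 */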
static int mdp3_fbmem_alloc(struct msm_fb_data_type *mfd)
{
int ret = -ENOMEM, dom;
void *virt = NULL;
unsigned long phys = 0;
size_t size;
u32 yres = mfd->fbi->var.yres_virtual;
size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
if (mfd->index != 0) {
mfd->fbi->screen_base = virt;
mfd->fbi->fix.smem_start = phys;
mfd->fbi->fix.smem_len = 0;
return 0;
}
mdp3_res->ion_handle = ion_alloc(mdp3_res->ion_client, size,
SZ_1M,
ION_HEAP(ION_QSECOM_HEAP_ID), 0);
if (!IS_ERR_OR_NULL(mdp3_res->ion_handle)) {
virt = ion_map_kernel(mdp3_res->ion_client,
mdp3_res->ion_handle);
if (IS_ERR(virt)) {
pr_err("%s map kernel error\n", __func__);
goto ion_map_kernel_err;
}
ret = ion_phys(mdp3_res->ion_client, mdp3_res->ion_handle,
&phys, &size);
if (ret) {
pr_err("%s ion_phys error\n", __func__);
goto ion_map_phys_err;
}
} else {
pr_err("%s ion alloc fail\n", __func__);
mdp3_res->ion_handle = NULL;
return -ENOMEM;
}
dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
ret = ion_map_iommu(mdp3_res->ion_client, mdp3_res->ion_handle,
dom, 0, SZ_4K, 0, &mfd->iova,
(unsigned long *)&size, 0, 0);
if (ret) {
pr_err("%s map IOMMU error\n", __func__);
goto ion_map_phys_err;
}
pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
size, virt, phys, mfd->index);
mfd->fbi->screen_base = virt;
mfd->fbi->fix.smem_start = phys;
mfd->fbi->fix.smem_len = size;
return 0;
ion_map_phys_err:
ion_unmap_kernel(mdp3_res->ion_client, mdp3_res->ion_handle);
ion_map_kernel_err:
ion_free(mdp3_res->ion_client, mdp3_res->ion_handle);
mdp3_res->ion_handle = NULL;
return -ENOMEM;
}
void mdp3_fbmem_free(struct msm_fb_data_type *mfd)
{
pr_info("mdp3_fbmem_free\n");
if (mdp3_res->ion_handle) {
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
ion_unmap_kernel(mdp3_res->ion_client, mdp3_res->ion_handle);
ion_unmap_iommu(mdp3_res->ion_client, mdp3_res->ion_handle,
dom, 0);
ion_free(mdp3_res->ion_client, mdp3_res->ion_handle);
mdp3_res->ion_handle = NULL;
mfd->fbi->screen_base = 0;
mfd->fbi->fix.smem_start = 0;
mfd->fbi->fix.smem_len = 0;
mfd->iova = 0;
}
}
struct mdp3_dma *mdp3_get_dma_pipe(int capability)
{
int i;
for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
if (!mdp3_res->dma[i].in_use && mdp3_res->dma[i].available &&
mdp3_res->dma[i].capability & capability) {
mdp3_res->dma[i].in_use = true;
return &mdp3_res->dma[i];
}
}
return NULL;
}
struct mdp3_intf *mdp3_get_display_intf(int type)
{
int i;
for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
if (!mdp3_res->intf[i].in_use && mdp3_res->intf[i].available &&
mdp3_res->intf[i].cfg.type == type) {
mdp3_res->intf[i].in_use = true;
return &mdp3_res->intf[i];
}
}
return NULL;
}
static int mdp3_fb_mem_get_iommu_domain(void)
{
if (!mdp3_res)
return -ENODEV;
return mdp3_res->domains[MDP3_IOMMU_DOMAIN].domain_idx;
}
static int mdp3_probe(struct platform_device *pdev)
{
int rc;
static struct msm_mdp_interface mdp3_interface = {
.init_fnc = mdp3_init,
.fb_mem_get_iommu_domain = mdp3_fb_mem_get_iommu_domain,
.fb_mem_alloc_fnc = mdp3_fbmem_alloc,
.fb_stride = mdp3_fb_stride,
};
if (!pdev->dev.of_node) {
pr_err("MDP driver only supports device tree probe\n");
return -ENOTSUPP;
}
if (mdp3_res) {
pr_err("MDP already initialized\n");
return -EINVAL;
}
mdp3_res = devm_kzalloc(&pdev->dev, sizeof(struct mdp3_hw_resource),
GFP_KERNEL);
if (mdp3_res == NULL)
return -ENOMEM;
pdev->id = 0;
mdp3_res->pdev = pdev;
mutex_init(&mdp3_res->res_mutex);
spin_lock_init(&mdp3_res->irq_lock);
platform_set_drvdata(pdev, mdp3_res);
rc = mdp3_parse_dt(pdev);
if (rc)
goto probe_done;
rc = mdp3_res_init();
if (rc) {
pr_err("unable to initialize mdp3 resources\n");
goto probe_done;
}
rc = mdp3_check_version();
if (rc) {
pr_err("mdp3 check version failed\n");
goto probe_done;
}
rc = mdss_fb_register_mdp_instance(&mdp3_interface);
if (rc)
pr_err("unable to register mdp instance\n");
probe_done:
if (IS_ERR_VALUE(rc)) {
mdp3_res_deinit();
if (mdp3_res->mdp_base)
devm_iounmap(&pdev->dev, mdp3_res->mdp_base);
devm_kfree(&pdev->dev, mdp3_res);
mdp3_res = NULL;
}
return rc;
}
static int mdp3_suspend_sub(struct mdp3_hw_resource *mdata)
{
return 0;
}
static int mdp3_resume_sub(struct mdp3_hw_resource *mdata)
{
return 0;
}
static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
if (!mdata)
return -ENODEV;
pr_debug("display suspend\n");
return mdp3_suspend_sub(mdata);
}
static int mdp3_resume(struct platform_device *pdev)
{
struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
if (!mdata)
return -ENODEV;
pr_debug("display resume\n");
return mdp3_resume_sub(mdata);
}
static int mdp3_remove(struct platform_device *pdev)
{
struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
if (!mdata)
return -ENODEV;
pm_runtime_disable(&pdev->dev);
mdp3_bus_scale_unregister();
mdp3_clk_remove();
return 0;
}
static const struct of_device_id mdp3_dt_match[] = {
{ .compatible = "qcom,mdss_mdp3",},
{}
};
MODULE_DEVICE_TABLE(of, mdp3_dt_match);
EXPORT_COMPAT("qcom,mdss_mdp3");
static struct platform_driver mdp3_driver = {
.probe = mdp3_probe,
.remove = mdp3_remove,
.suspend = mdp3_suspend,
.resume = mdp3_resume,
.shutdown = NULL,
.driver = {
.name = "mdp3",
.of_match_table = mdp3_dt_match,
},
};
static int __init mdp3_driver_init(void)
{
int ret;
ret = platform_driver_register(&mdp3_driver);
if (ret) {
pr_err("register mdp3 driver failed!\n");
return ret;
}
return 0;
}
module_init(mdp3_driver_init);
| gpl-2.0 |
kernel13D/linux-rpi | arch/arm/kernel/time.c | 494 | 2904 | /*
* linux/arch/arm/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for ARM (C) 1994-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains the ARM-specific time handling details:
* reading the RTC at bootup, etc...
*/
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/timer.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/stacktrace.h>
#include <asm/thread_info.h>
#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \
defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
/* this needs a better home */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
#endif /* pc-style 'CMOS' RTC support */
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY (1000000/HZ)
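/*
 * Worked example (illustrative only): with HZ = 100 this evaluates to
 * 1000000 / 100 = 10000, i.e. 10 ms per jiffy; with HZ = 250 it is
 * 4000 us, and with HZ = 1000 it is 1000 us.
 */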
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
struct stackframe frame;
if (!in_lock_functions(regs->ARM_pc))
return regs->ARM_pc;
arm_get_current_stackframe(regs, &frame);
do {
int ret = unwind_frame(&frame);
if (ret < 0)
return 0;
} while (in_lock_functions(frame.pc));
return frame.pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
* Kernel system timer support.
*/
void timer_tick(void)
{
profile_tick(CPU_PROFILING);
xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
}
#endif
static void dummy_clock_access(struct timespec64 *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
void read_boot_clock64(struct timespec64 *ts)
{
__read_boot_clock(ts);
}
int __init register_persistent_clock(clock_access_fn read_boot,
clock_access_fn read_persistent)
{
/* Only allow the clock access functions to be registered once */
if (__read_persistent_clock == dummy_clock_access &&
__read_boot_clock == dummy_clock_access) {
if (read_boot)
__read_boot_clock = read_boot;
if (read_persistent)
__read_persistent_clock = read_persistent;
return 0;
}
return -EINVAL;
}
void __init time_init(void)
{
if (machine_desc->init_time) {
machine_desc->init_time();
} else {
#ifdef CONFIG_COMMON_CLK
of_clk_init(NULL);
#endif
clocksource_probe();
}
}
| gpl-2.0 |
ziozzang/kernel-rhel6 | arch/sparc/kernel/ldc.c | 494 | 49707 | /* ldc.c: Logical Domain Channel link-layer protocol driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>
#define DRV_MODULE_NAME "ldc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE 64
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
* with no headers.
*/
struct ldc_packet {
u8 type;
#define LDC_CTRL 0x01
#define LDC_DATA 0x02
#define LDC_ERR 0x10
u8 stype;
#define LDC_INFO 0x01
#define LDC_ACK 0x02
#define LDC_NACK 0x04
u8 ctrl;
#define LDC_VERS 0x01 /* Link Version */
#define LDC_RTS 0x02 /* Request To Send */
#define LDC_RTR 0x03 /* Ready To Receive */
#define LDC_RDX 0x04 /* Ready for Data eXchange */
#define LDC_CTRL_MSK 0x0f
u8 env;
#define LDC_LEN 0x3f
#define LDC_FRAG_MASK 0xc0
#define LDC_START 0x40
#define LDC_STOP 0x80
u32 seqid;
union {
u8 u_data[LDC_PACKET_SIZE - 8];
struct {
u32 pad;
u32 ackid;
u8 r_data[LDC_PACKET_SIZE - 8 - 8];
} r;
} u;
};
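/*
 * Layout summary derived from the struct above (an illustration, not part
 * of the original source): every packet is 64 bytes. The common header
 * (type, stype, ctrl, env, seqid) occupies 8 bytes, leaving 56 bytes of
 * u_data in unreliable mode; reliable/stream frames additionally carry
 * pad + ackid (8 more bytes), leaving 48 bytes of r_data. RAW mode skips
 * the header entirely and uses the full 64 bytes as payload, which is why
 * the mss values chosen in ldc_alloc() are 64, 64 - 8 and 64 - 8 - 8.
 */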
struct ldc_version {
u16 major;
u16 minor;
};
/* Ordered from largest major to lowest. */
static struct ldc_version ver_arr[] = {
{ .major = 1, .minor = 0 },
};
#define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
struct ldc_channel;
struct ldc_mode_ops {
int (*write)(struct ldc_channel *, const void *, unsigned int);
int (*read)(struct ldc_channel *, void *, unsigned int);
};
static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
/* Protects arena alloc/free. */
spinlock_t lock;
struct iommu_arena arena;
struct ldc_mtable_entry *page_table;
};
struct ldc_channel {
/* Protects all operations that depend upon channel state. */
spinlock_t lock;
unsigned long id;
u8 *mssbuf;
u32 mssbuf_len;
u32 mssbuf_off;
struct ldc_packet *tx_base;
unsigned long tx_head;
unsigned long tx_tail;
unsigned long tx_num_entries;
unsigned long tx_ra;
unsigned long tx_acked;
struct ldc_packet *rx_base;
unsigned long rx_head;
unsigned long rx_tail;
unsigned long rx_num_entries;
unsigned long rx_ra;
u32 rcv_nxt;
u32 snd_nxt;
unsigned long chan_state;
struct ldc_channel_config cfg;
void *event_arg;
const struct ldc_mode_ops *mops;
struct ldc_iommu iommu;
struct ldc_version ver;
u8 hs_state;
#define LDC_HS_CLOSED 0x00
#define LDC_HS_OPEN 0x01
#define LDC_HS_GOTVERS 0x02
#define LDC_HS_SENTRTR 0x03
#define LDC_HS_GOTRTR 0x04
#define LDC_HS_COMPLETE 0x10
u8 flags;
#define LDC_FLAG_ALLOCED_QUEUES 0x01
#define LDC_FLAG_REGISTERED_QUEUES 0x02
#define LDC_FLAG_REGISTERED_IRQS 0x04
#define LDC_FLAG_RESET 0x10
u8 mss;
u8 state;
#define LDC_IRQ_NAME_MAX 32
char rx_irq_name[LDC_IRQ_NAME_MAX];
char tx_irq_name[LDC_IRQ_NAME_MAX];
struct hlist_head mh_list;
struct hlist_node list;
};
#define ldcdbg(TYPE, f, a...) \
do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
static const char *state_to_str(u8 state)
{
switch (state) {
case LDC_STATE_INVALID:
return "INVALID";
case LDC_STATE_INIT:
return "INIT";
case LDC_STATE_BOUND:
return "BOUND";
case LDC_STATE_READY:
return "READY";
case LDC_STATE_CONNECTED:
return "CONNECTED";
default:
return "<UNKNOWN>";
}
}
static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
state_to_str(lp->state),
state_to_str(state));
lp->state = state;
}
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
if (off == (num_entries * LDC_PACKET_SIZE))
off = 0;
return off;
}
static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
return __advance(off, lp->rx_num_entries);
}
static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
return __advance(off, lp->tx_num_entries);
}
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
unsigned long *new_tail)
{
struct ldc_packet *p;
unsigned long t;
t = tx_advance(lp, lp->tx_tail);
if (t == lp->tx_head)
return NULL;
*new_tail = t;
p = lp->tx_base;
return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
/* When we are in reliable or stream mode, we have to track the next packet
* we haven't gotten an ACK for in the TX queue using tx_acked. We have
* to be careful not to stomp over the queue past that point. During
* the handshake, we don't have TX data packets pending in the queue
* and that's why handshake_get_tx_packet() need not be mindful of
* lp->tx_acked.
*/
static unsigned long head_for_data(struct ldc_channel *lp)
{
if (lp->cfg.mode == LDC_MODE_STREAM)
return lp->tx_acked;
return lp->tx_head;
}
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
unsigned long limit, tail, new_tail, diff;
unsigned int mss;
limit = head_for_data(lp);
tail = lp->tx_tail;
new_tail = tx_advance(lp, tail);
if (new_tail == limit)
return 0;
if (limit > new_tail)
diff = limit - new_tail;
else
diff = (limit +
((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
diff /= LDC_PACKET_SIZE;
mss = lp->mss;
if (diff * mss < size)
return 0;
return 1;
}
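/*
 * Worked example (illustrative, assuming a 4 KB queue page): with
 * tx_num_entries = 64, mss = 48 (stream mode), limit = 0 and tail = 0,
 * new_tail advances to 64 and diff = (0 + 64 * 64 - 64) / 64 = 63 free
 * slots, so any write of up to 63 * 48 = 3024 bytes fits.
 */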
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
unsigned long *new_tail)
{
struct ldc_packet *p;
unsigned long h, t;
h = head_for_data(lp);
t = tx_advance(lp, lp->tx_tail);
if (t == h)
return NULL;
*new_tail = t;
p = lp->tx_base;
return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
unsigned long orig_tail = lp->tx_tail;
int limit = 1000;
lp->tx_tail = tail;
while (limit-- > 0) {
unsigned long err;
err = sun4v_ldc_tx_set_qtail(lp->id, tail);
if (!err)
return 0;
if (err != HV_EWOULDBLOCK) {
lp->tx_tail = orig_tail;
return -EINVAL;
}
udelay(1);
}
lp->tx_tail = orig_tail;
return -EBUSY;
}
/* This just updates the head value in the hypervisor using
* a polling loop with a timeout. The caller takes care of
* updating the software state representing the head change, if any.
*/
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
int limit = 1000;
while (limit-- > 0) {
unsigned long err;
err = sun4v_ldc_rx_set_qhead(lp->id, head);
if (!err)
return 0;
if (err != HV_EWOULDBLOCK)
return -EINVAL;
udelay(1);
}
return -EBUSY;
}
static int send_tx_packet(struct ldc_channel *lp,
struct ldc_packet *p,
unsigned long new_tail)
{
BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
return set_tx_tail(lp, new_tail);
}
static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
u8 stype, u8 ctrl,
void *data, int dlen,
unsigned long *new_tail)
{
struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
if (p) {
memset(p, 0, sizeof(*p));
p->type = LDC_CTRL;
p->stype = stype;
p->ctrl = ctrl;
if (data)
memcpy(p->u.u_data, data, dlen);
}
return p;
}
static int start_handshake(struct ldc_channel *lp)
{
struct ldc_packet *p;
struct ldc_version *ver;
unsigned long new_tail;
ver = &ver_arr[0];
ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
ver->major, ver->minor);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
ver, sizeof(*ver), &new_tail);
if (p) {
int err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->flags &= ~LDC_FLAG_RESET;
return err;
}
return -EBUSY;
}
static int send_version_nack(struct ldc_channel *lp,
u16 major, u16 minor)
{
struct ldc_packet *p;
struct ldc_version ver;
unsigned long new_tail;
ver.major = major;
ver.minor = minor;
p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
&ver, sizeof(ver), &new_tail);
if (p) {
ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
ver.major, ver.minor);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_version_ack(struct ldc_channel *lp,
struct ldc_version *vp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
vp, sizeof(*vp), &new_tail);
if (p) {
ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
vp->major, vp->minor);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rts(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
&new_tail);
if (p) {
p->env = lp->cfg.mode;
p->seqid = 0;
lp->rcv_nxt = 0;
ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
p->env, p->seqid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rtr(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
&new_tail);
if (p) {
p->env = lp->cfg.mode;
p->seqid = 0;
ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
p->env, p->seqid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rdx(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
&new_tail);
if (p) {
p->env = 0;
p->seqid = ++lp->snd_nxt;
p->u.r.ackid = lp->rcv_nxt;
ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
p->env, p->seqid, p->u.r.ackid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
struct ldc_packet *p;
unsigned long new_tail;
int err;
p = data_get_tx_packet(lp, &new_tail);
if (!p)
return -EBUSY;
memset(p, 0, sizeof(*p));
p->type = data_pkt->type;
p->stype = LDC_NACK;
p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
p->seqid = lp->snd_nxt + 1;
p->u.r.ackid = lp->rcv_nxt;
ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
p->type, p->ctrl, p->seqid, p->u.r.ackid);
err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->snd_nxt++;
return err;
}
static int ldc_abort(struct ldc_channel *lp)
{
unsigned long hv_err;
ldcdbg(STATE, "ABORT\n");
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
*/
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
lp->id, hv_err);
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);
/* Refetch the RX queue state as well, because we could be invoked
* here in the queue processing context.
*/
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
lp->id, hv_err);
return -ECONNRESET;
}
static struct ldc_version *find_by_major(u16 major)
{
struct ldc_version *ret = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
struct ldc_version *v = &ver_arr[i];
if (v->major <= major) {
ret = v;
break;
}
}
return ret;
}
static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
struct ldc_version *vap;
int err;
ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
vp->major, vp->minor);
if (lp->hs_state == LDC_HS_GOTVERS) {
lp->hs_state = LDC_HS_OPEN;
memset(&lp->ver, 0, sizeof(lp->ver));
}
vap = find_by_major(vp->major);
if (!vap) {
err = send_version_nack(lp, 0, 0);
} else if (vap->major != vp->major) {
err = send_version_nack(lp, vap->major, vap->minor);
} else {
struct ldc_version ver = *vp;
if (ver.minor > vap->minor)
ver.minor = vap->minor;
err = send_version_ack(lp, &ver);
if (!err) {
lp->ver = ver;
lp->hs_state = LDC_HS_GOTVERS;
}
}
if (err)
return ldc_abort(lp);
return 0;
}
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
vp->major, vp->minor);
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
return ldc_abort(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
return ldc_abort(lp);
return 0;
}
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
struct ldc_version *vap;
struct ldc_packet *p;
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
return ldc_abort(lp);
vap = find_by_major(vp->major);
if (!vap)
return ldc_abort(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
return ldc_abort(lp);
return send_tx_packet(lp, p, new_tail);
}
static int process_version(struct ldc_channel *lp,
struct ldc_packet *p)
{
struct ldc_version *vp;
vp = (struct ldc_version *) p->u.u_data;
switch (p->stype) {
case LDC_INFO:
return process_ver_info(lp, vp);
case LDC_ACK:
return process_ver_ack(lp, vp);
case LDC_NACK:
return process_ver_nack(lp, vp);
default:
return ldc_abort(lp);
}
}
static int process_rts(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
p->stype, p->seqid, p->env);
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
return ldc_abort(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
return ldc_abort(lp);
return 0;
}
static int process_rtr(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
p->stype, p->seqid, p->env);
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
return ldc_abort(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
send_rdx(lp);
return LDC_EVENT_UP;
}
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
return lp->rcv_nxt + 1 == seqid;
}
static int process_rdx(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
p->stype, p->seqid, p->env, p->u.r.ackid);
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
return ldc_abort(lp);
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
return LDC_EVENT_UP;
}
static int process_control_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
switch (p->ctrl) {
case LDC_VERS:
return process_version(lp, p);
case LDC_RTS:
return process_rts(lp, p);
case LDC_RTR:
return process_rtr(lp, p);
case LDC_RDX:
return process_rdx(lp, p);
default:
return ldc_abort(lp);
}
}
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
return ldc_abort(lp);
}
static int process_data_ack(struct ldc_channel *lp,
struct ldc_packet *ack)
{
unsigned long head = lp->tx_acked;
u32 ackid = ack->u.r.ackid;
while (1) {
struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
head = tx_advance(lp, head);
if (p->seqid == ackid) {
lp->tx_acked = head;
return 0;
}
if (head == lp->tx_tail)
return ldc_abort(lp);
}
return 0;
}
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
if (event_mask & LDC_EVENT_RESET)
lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
if (event_mask & LDC_EVENT_UP)
lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
if (event_mask & LDC_EVENT_DATA_READY)
lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long orig_state, hv_err, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
event_mask = 0;
if (lp->cfg.mode == LDC_MODE_RAW &&
lp->chan_state == LDC_CHANNEL_UP) {
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
(void) __set_rx_head(lp, lp->rx_tail);
goto out;
}
/* Once we finish the handshake, we let the ldc_read()
* paths do all of the control frame and state management.
* Just trigger the callback.
*/
if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
if (lp->chan_state != orig_state) {
unsigned int event = LDC_EVENT_RESET;
if (lp->chan_state == LDC_CHANNEL_UP)
event = LDC_EVENT_UP;
event_mask |= event;
}
if (lp->rx_head != lp->rx_tail)
event_mask |= LDC_EVENT_DATA_READY;
goto out;
}
if (lp->chan_state != orig_state)
goto out;
while (lp->rx_head != lp->rx_tail) {
struct ldc_packet *p;
unsigned long new;
int err;
p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
switch (p->type) {
case LDC_CTRL:
err = process_control_frame(lp, p);
if (err > 0)
event_mask |= err;
break;
case LDC_DATA:
event_mask |= LDC_EVENT_DATA_READY;
err = 0;
break;
case LDC_ERR:
err = process_error_frame(lp, p);
break;
default:
err = ldc_abort(lp);
break;
}
if (err < 0)
break;
new = lp->rx_head;
new += LDC_PACKET_SIZE;
if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
new = 0;
lp->rx_head = new;
err = __set_rx_head(lp, new);
if (err < 0) {
(void) ldc_abort(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
goto handshake_complete;
}
out:
spin_unlock_irqrestore(&lp->lock, flags);
send_events(lp, event_mask);
return IRQ_HANDLED;
}
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long flags, hv_err, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
if (lp->cfg.mode == LDC_MODE_RAW &&
lp->chan_state == LDC_CHANNEL_UP) {
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
event_mask |= LDC_EVENT_UP;
}
spin_unlock_irqrestore(&lp->lock, flags);
send_events(lp, event_mask);
return IRQ_HANDLED;
}
/* XXX ldc_alloc() and ldc_free() needs to run under a mutex so
* XXX that addition and removal from the ldc_channel_list has
* XXX atomicity, otherwise the __ldc_channel_exists() check is
* XXX totally pointless as another thread can slip into ldc_alloc()
* XXX and add a channel with the same ID. There also needs to be
* XXX a spinlock for ldc_channel_list.
*/
static HLIST_HEAD(ldc_channel_list);
static int __ldc_channel_exists(unsigned long id)
{
struct ldc_channel *lp;
struct hlist_node *n;
hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
if (lp->id == id)
return 1;
}
return 0;
}
static int alloc_queue(const char *name, unsigned long num_entries,
struct ldc_packet **base, unsigned long *ra)
{
unsigned long size, order;
void *q;
size = num_entries * LDC_PACKET_SIZE;
order = get_order(size);
q = (void *) __get_free_pages(GFP_KERNEL, order);
if (!q) {
printk(KERN_ERR PFX "Alloc of %s queue failed with "
"size=%lu order=%lu\n", name, size, order);
return -ENOMEM;
}
memset(q, 0, PAGE_SIZE << order);
*base = q;
*ra = __pa(q);
return 0;
}
static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
unsigned long size, order;
if (!q)
return;
size = num_entries * LDC_PACKET_SIZE;
order = get_order(size);
free_pages((unsigned long)q, order);
}
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
static int ldc_iommu_init(struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
struct ldc_iommu *iommu = &lp->iommu;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
spin_lock_init(&iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->arena.map = kzalloc(sz, GFP_KERNEL);
if (!iommu->arena.map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
iommu->arena.limit = num_tsb_entries;
order = get_order(tsbsize);
table = (struct ldc_mtable_entry *)
__get_free_pages(GFP_KERNEL, order);
err = -ENOMEM;
if (!table) {
printk(KERN_ERR PFX "Alloc of MTE table failed, "
"size=%lu order=%lu\n", tsbsize, order);
goto out_free_map;
}
memset(table, 0, PAGE_SIZE << order);
iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
err = -EINVAL;
if (hv_err)
goto out_free_table;
return 0;
out_free_table:
free_pages((unsigned long) table, order);
iommu->page_table = NULL;
out_free_map:
kfree(iommu->arena.map);
iommu->arena.map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
struct ldc_iommu *iommu = &lp->iommu;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
num_tsb_entries = iommu->arena.limit;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
free_pages((unsigned long) iommu->page_table, order);
iommu->page_table = NULL;
kfree(iommu->arena.map);
iommu->arena.map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
const struct ldc_channel_config *cfgp,
void *event_arg)
{
struct ldc_channel *lp;
const struct ldc_mode_ops *mops;
unsigned long dummy1, dummy2, hv_err;
u8 mss, *mssbuf;
int err;
err = -ENODEV;
if (!ldom_domaining_enabled)
goto out_err;
err = -EINVAL;
if (!cfgp)
goto out_err;
switch (cfgp->mode) {
case LDC_MODE_RAW:
mops = &raw_ops;
mss = LDC_PACKET_SIZE;
break;
case LDC_MODE_UNRELIABLE:
mops = &nonraw_ops;
mss = LDC_PACKET_SIZE - 8;
break;
case LDC_MODE_STREAM:
mops = &stream_ops;
mss = LDC_PACKET_SIZE - 8 - 8;
break;
default:
goto out_err;
}
if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
goto out_err;
hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
err = -ENODEV;
if (hv_err == HV_ECHANNEL)
goto out_err;
err = -EEXIST;
if (__ldc_channel_exists(id))
goto out_err;
mssbuf = NULL;
lp = kzalloc(sizeof(*lp), GFP_KERNEL);
err = -ENOMEM;
if (!lp)
goto out_err;
spin_lock_init(&lp->lock);
lp->id = id;
err = ldc_iommu_init(lp);
if (err)
goto out_free_ldc;
lp->mops = mops;
lp->mss = mss;
lp->cfg = *cfgp;
if (!lp->cfg.mtu)
lp->cfg.mtu = LDC_DEFAULT_MTU;
if (lp->cfg.mode == LDC_MODE_STREAM) {
mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
if (!mssbuf) {
err = -ENOMEM;
goto out_free_iommu;
}
lp->mssbuf = mssbuf;
}
lp->event_arg = event_arg;
/* XXX allow setting via ldc_channel_config to override defaults
* XXX or use some formula based upon mtu
*/
lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
err = alloc_queue("TX", lp->tx_num_entries,
&lp->tx_base, &lp->tx_ra);
if (err)
goto out_free_mssbuf;
err = alloc_queue("RX", lp->rx_num_entries,
&lp->rx_base, &lp->rx_ra);
if (err)
goto out_free_txq;
lp->flags |= LDC_FLAG_ALLOCED_QUEUES;
lp->hs_state = LDC_HS_CLOSED;
ldc_set_state(lp, LDC_STATE_INIT);
INIT_HLIST_NODE(&lp->list);
hlist_add_head(&lp->list, &ldc_channel_list);
INIT_HLIST_HEAD(&lp->mh_list);
return lp;
out_free_txq:
free_queue(lp->tx_num_entries, lp->tx_base);
out_free_mssbuf:
kfree(mssbuf);
out_free_iommu:
ldc_iommu_release(lp);
out_free_ldc:
kfree(lp);
out_err:
return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
void ldc_free(struct ldc_channel *lp)
{
if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
free_irq(lp->cfg.rx_irq, lp);
free_irq(lp->cfg.tx_irq, lp);
}
if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
sun4v_ldc_tx_qconf(lp->id, 0, 0);
sun4v_ldc_rx_qconf(lp->id, 0, 0);
lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
}
if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
free_queue(lp->tx_num_entries, lp->tx_base);
free_queue(lp->rx_num_entries, lp->rx_base);
lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
}
hlist_del(&lp->list);
kfree(lp->mssbuf);
ldc_iommu_release(lp);
kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
/* Bind the channel. This registers the LDC queues with
* the hypervisor and puts the channel into a pseudo-listening
* state. This does not initiate a handshake, ldc_connect() does
* that.
*/
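/* Typical client usage of the calls documented above; a hedged sketch
* only (chan_id, cfg, my_event_cookie and the "my-chan" name are made
* up, and error handling is abbreviated):
*
*	lp = ldc_alloc(chan_id, &cfg, my_event_cookie);
*	if (IS_ERR(lp))
*		return PTR_ERR(lp);
*	err = ldc_bind(lp, "my-chan");	(registers queues, listens)
*	if (!err)
*		err = ldc_connect(lp);	(now starts the handshake)
*/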
int ldc_bind(struct ldc_channel *lp, const char *name)
{
unsigned long hv_err, flags;
int err = -EINVAL;
if (!name ||
(lp->state != LDC_STATE_INIT))
return -EINVAL;
snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
return err;
}
spin_lock_irqsave(&lp->lock, flags);
enable_irq(lp->cfg.rx_irq);
enable_irq(lp->cfg.tx_irq);
lp->flags |= LDC_FLAG_REGISTERED_IRQS;
err = -ENODEV;
hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_free_irqs;
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
goto out_free_irqs;
hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_unmap_tx;
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
goto out_unmap_tx;
lp->flags |= LDC_FLAG_REGISTERED_QUEUES;
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
err = -EBUSY;
if (hv_err)
goto out_unmap_rx;
lp->tx_acked = lp->tx_head;
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
out_unmap_rx:
lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
sun4v_ldc_rx_qconf(lp->id, 0, 0);
out_unmap_tx:
sun4v_ldc_tx_qconf(lp->id, 0, 0);
out_free_irqs:
lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
free_irq(lp->cfg.tx_irq, lp);
free_irq(lp->cfg.rx_irq, lp);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_bind);
int ldc_connect(struct ldc_channel *lp)
{
unsigned long flags;
int err;
if (lp->cfg.mode == LDC_MODE_RAW)
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
err = -EINVAL;
else
err = start_handshake(lp);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_connect);
int ldc_disconnect(struct ldc_channel *lp)
{
unsigned long hv_err, flags;
int err;
if (lp->cfg.mode == LDC_MODE_RAW)
return -EINVAL;
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
err = -ENODEV;
hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
goto out_err;
ldc_set_state(lp, LDC_STATE_BOUND);
lp->hs_state = LDC_HS_OPEN;
lp->flags |= LDC_FLAG_RESET;
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
out_err:
sun4v_ldc_tx_qconf(lp->id, 0, 0);
sun4v_ldc_rx_qconf(lp->id, 0, 0);
free_irq(lp->cfg.tx_irq, lp);
free_irq(lp->cfg.rx_irq, lp);
lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
LDC_FLAG_REGISTERED_QUEUES);
ldc_set_state(lp, LDC_STATE_INIT);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_disconnect);
int ldc_state(struct ldc_channel *lp)
{
return lp->state;
}
EXPORT_SYMBOL(ldc_state);
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long new_tail;
int err;
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
p = data_get_tx_packet(lp, &new_tail);
if (!p)
return -EAGAIN;
memcpy(p, buf, size);
err = send_tx_packet(lp, p, new_tail);
if (!err)
err = size;
return err;
}
static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long hv_err, new;
int err;
if (size < LDC_PACKET_SIZE)
return -EINVAL;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (lp->rx_head == lp->rx_tail)
return 0;
p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
memcpy(buf, p, LDC_PACKET_SIZE);
new = rx_advance(lp, lp->rx_head);
lp->rx_head = new;
err = __set_rx_head(lp, new);
if (err < 0)
err = -ECONNRESET;
else
err = LDC_PACKET_SIZE;
return err;
}
static const struct ldc_mode_ops raw_ops = {
.write = write_raw,
.read = read_raw,
};
static int write_nonraw(struct ldc_channel *lp, const void *buf,
unsigned int size)
{
unsigned long hv_err, tail;
unsigned int copied;
u32 seq;
int err;
hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
&lp->chan_state);
if (unlikely(hv_err))
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
return ldc_abort(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
seq = lp->snd_nxt;
copied = 0;
tail = lp->tx_tail;
while (copied < size) {
struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
p->u.u_data :
p->u.r.r_data);
int data_len;
p->type = LDC_DATA;
p->stype = LDC_INFO;
p->ctrl = 0;
data_len = size - copied;
if (data_len > lp->mss)
data_len = lp->mss;
BUG_ON(data_len > LDC_LEN);
p->env = (data_len |
(copied == 0 ? LDC_START : 0) |
(data_len == size - copied ? LDC_STOP : 0));
p->seqid = ++seq;
ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
p->type,
p->stype,
p->ctrl,
p->env,
p->seqid);
memcpy(data, buf, data_len);
buf += data_len;
copied += data_len;
tail = tx_advance(lp, tail);
}
err = set_tx_tail(lp, tail);
if (!err) {
lp->snd_nxt = seq;
err = size;
}
return err;
}
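/* Fragmentation example for the write loop above: a 100-byte write in
* UNRELIABLE mode (mss == LDC_PACKET_SIZE - 8 == 56) is split into two
* packets, env == 56 | LDC_START == 0x78 for the first and
* env == 44 | LDC_STOP == 0xac for the second.
*/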
static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
struct ldc_packet *first_frag)
{
int err;
if (first_frag)
lp->rcv_nxt = first_frag->seqid - 1;
err = send_data_nack(lp, p);
if (err)
return err;
err = __set_rx_head(lp, lp->rx_tail);
if (err < 0)
return ldc_abort(lp);
return 0;
}
static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
if (p->stype & LDC_ACK) {
int err = process_data_ack(lp, p);
if (err)
return err;
}
if (p->stype & LDC_NACK)
return ldc_abort(lp);
return 0;
}
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
unsigned long dummy;
int limit = 1000;
ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
cur_head, lp->rx_head, lp->rx_tail);
while (limit-- > 0) {
unsigned long hv_err;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&dummy,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (cur_head != lp->rx_tail) {
ldcdbg(DATA, "DATA WAIT DONE "
"head[%lx] tail[%lx] chan_state[%lx]\n",
dummy, lp->rx_tail, lp->chan_state);
return 0;
}
udelay(1);
}
return -EAGAIN;
}
static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
int err = __set_rx_head(lp, head);
if (err < 0)
return ldc_abort(lp);
lp->rx_head = head;
return 0;
}
static void send_data_ack(struct ldc_channel *lp)
{
unsigned long new_tail;
struct ldc_packet *p;
p = data_get_tx_packet(lp, &new_tail);
if (likely(p)) {
int err;
memset(p, 0, sizeof(*p));
p->type = LDC_DATA;
p->stype = LDC_ACK;
p->ctrl = 0;
p->seqid = lp->snd_nxt + 1;
p->u.r.ackid = lp->rcv_nxt;
err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->snd_nxt++;
}
}
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
struct ldc_packet *first_frag;
unsigned long hv_err, new;
int err, copied;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (lp->rx_head == lp->rx_tail)
return 0;
first_frag = NULL;
copied = err = 0;
new = lp->rx_head;
while (1) {
struct ldc_packet *p;
int pkt_len;
BUG_ON(new == lp->rx_tail);
p = lp->rx_base + (new / LDC_PACKET_SIZE);
ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
"rcv_nxt[%08x]\n",
p->type,
p->stype,
p->ctrl,
p->env,
p->seqid,
p->u.r.ackid,
lp->rcv_nxt);
if (unlikely(!rx_seq_ok(lp, p->seqid))) {
err = rx_bad_seq(lp, p, first_frag);
copied = 0;
break;
}
if (p->type & LDC_CTRL) {
err = process_control_frame(lp, p);
if (err < 0)
break;
err = 0;
}
lp->rcv_nxt = p->seqid;
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
goto no_data;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
if (err)
break;
}
if (!(p->stype & LDC_INFO)) {
new = rx_advance(lp, new);
err = rx_set_head(lp, new);
if (err)
break;
goto no_data;
}
pkt_len = p->env & LDC_LEN;
/* Every initial packet starts with the START bit set.
*
* Singleton packets will have both START+STOP set.
*
* Fragments will have START set in the first frame, STOP
* set in the last frame, and neither bit set in middle
* frames of the packet.
*
* Therefore if we are at the beginning of a packet and
* we don't see START, or we are in the middle of a fragmented
* packet and do see START, we are unsynchronized and should
* flush the RX queue.
*/
if ((first_frag == NULL && !(p->env & LDC_START)) ||
(first_frag != NULL && (p->env & LDC_START))) {
if (!first_frag)
new = rx_advance(lp, new);
err = rx_set_head(lp, new);
if (err)
break;
if (!first_frag)
goto no_data;
}
if (!first_frag)
first_frag = p;
if (pkt_len > size - copied) {
/* User didn't give us a big enough buffer,
* what to do? This is a pretty serious error.
*
* Since we haven't updated the RX ring head to
* consume any of the packets, signal the error
* to the user and just leave the RX ring alone.
*
* This seems the best behavior because this allows
* a user of the LDC layer to start with a small
* RX buffer for ldc_read() calls and use -EMSGSIZE
* as a cue to enlarge its read buffer.
*/
err = -EMSGSIZE;
break;
}
/* Ok, we are gonna eat this one. */
new = rx_advance(lp, new);
memcpy(buf,
(lp->cfg.mode == LDC_MODE_UNRELIABLE ?
p->u.u_data : p->u.r.r_data), pkt_len);
buf += pkt_len;
copied += pkt_len;
if (p->env & LDC_STOP)
break;
no_data:
if (new == lp->rx_tail) {
err = rx_data_wait(lp, new);
if (err)
break;
}
}
if (!err)
err = rx_set_head(lp, new);
if (err && first_frag)
lp->rcv_nxt = first_frag->seqid - 1;
if (!err) {
err = copied;
if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
send_data_ack(lp);
}
return err;
}
static const struct ldc_mode_ops nonraw_ops = {
.write = write_nonraw,
.read = read_nonraw,
};
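/* A hedged sketch of the -EMSGSIZE recovery that the comment inside
* read_nonraw() suggests; grow_buf() is a made-up helper that
* reallocates the buffer and updates buf_len:
*
*	n = ldc_read(lp, buf, buf_len);
*	if (n == -EMSGSIZE) {
*		buf = grow_buf(buf, &buf_len);
*		n = ldc_read(lp, buf, buf_len);
*	}
*
* This works because read_nonraw() leaves the RX ring head untouched
* when it signals -EMSGSIZE, so the oversized packet is still there
* for the retry.
*/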
static int write_stream(struct ldc_channel *lp, const void *buf,
unsigned int size)
{
if (size > lp->cfg.mtu)
size = lp->cfg.mtu;
return write_nonraw(lp, buf, size);
}
static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
if (!lp->mssbuf_len) {
int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
if (err < 0)
return err;
lp->mssbuf_len = err;
lp->mssbuf_off = 0;
}
if (size > lp->mssbuf_len)
size = lp->mssbuf_len;
memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);
lp->mssbuf_off += size;
lp->mssbuf_len -= size;
return size;
}
static const struct ldc_mode_ops stream_ops = {
.write = write_stream,
.read = read_stream,
};
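/* In stream mode one reassembled record of up to cfg.mtu bytes is
* pulled into lp->mssbuf and then handed out piecewise; e.g. three
* ldc_read() calls of 100 bytes against a 256-byte record return
* 100, 100 and 56 bytes before the next record is fetched.
*/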
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
unsigned long flags;
int err;
if (!buf)
return -EINVAL;
if (!size)
return 0;
spin_lock_irqsave(&lp->lock, flags);
if (lp->hs_state != LDC_HS_COMPLETE)
err = -ENOTCONN;
else
err = lp->mops->write(lp, buf, size);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_write);
int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
unsigned long flags;
int err;
if (!buf)
return -EINVAL;
if (!size)
return 0;
spin_lock_irqsave(&lp->lock, flags);
if (lp->hs_state != LDC_HS_COMPLETE)
err = -ENOTCONN;
else
err = lp->mops->read(lp, buf, size);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_read);
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long n, i, start, end, limit;
int pass;
limit = arena->limit;
start = arena->hint;
pass = 0;
again:
n = find_next_zero_bit(arena->map, limit, start);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
limit = start;
start = 0;
pass++;
goto again;
} else {
/* Scanned the whole thing, give up. */
return -1;
}
}
for (i = n; i < end; i++) {
if (test_bit(i, arena->map)) {
start = i + 1;
goto again;
}
}
for (i = n; i < end; i++)
__set_bit(i, arena->map);
arena->hint = end;
return n;
}
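/* The allocator above is a two-pass first-fit bitmap scan: pass 0
* searches [hint, limit) and, on failure, pass 1 wraps around to
* [0, hint) before giving up.  The hint therefore acts as a rotor,
* spreading allocations around the arena instead of repeatedly
* rescanning its front.
*/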
#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT 60ULL
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
default:
case (8ULL * 1024ULL):
return 0;
case (64ULL * 1024ULL):
return 1;
case (512ULL * 1024ULL):
return 2;
case (4ULL * 1024ULL * 1024ULL):
return 3;
case (32ULL * 1024ULL * 1024ULL):
return 4;
case (256ULL * 1024ULL * 1024ULL):
return 5;
}
}
static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
(index << PAGE_SHIFT) |
page_offset);
}
static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
cookie &= ~COOKIE_PGSZ_CODE;
*shift = szcode * 3;
return (cookie >> (13ULL + (szcode * 3ULL)));
}
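/* Worked example of the cookie encoding above, assuming 8K pages
* (PAGE_SHIFT == 13, pagesize_code() == 0):
*
*	make_cookie(5, 0, 0x40) == (5 << 13) | 0x40 == 0xa040
*
* and cookie_to_index(0xa040, &shift) recovers index 5 via
* 0xa040 >> (13 + 0 * 3), with *shift set to 0.  Each step in
* pagesize_code() multiplies the page size by 8 == 2**3, which is
* why the recovery shift grows by 3 per size code.
*/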
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
entry = arena_alloc(iommu, npages);
if (unlikely(entry < 0))
return NULL;
return iommu->page_table + entry;
}
static u64 perm_to_mte(unsigned int map_perm)
{
u64 mte_base;
mte_base = pagesize_code();
if (map_perm & LDC_MAP_SHADOW) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_COPY_R;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_COPY_W;
}
if (map_perm & LDC_MAP_DIRECT) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_READ;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_WRITE;
if (map_perm & LDC_MAP_X)
mte_base |= LDC_MTE_EXEC;
}
if (map_perm & LDC_MAP_IO) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_IOMMU_R;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_IOMMU_W;
}
return mte_base;
}
static int pages_in_region(unsigned long base, long len)
{
int count = 0;
do {
unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;
len -= (new - base);
base = new;
count++;
} while (len > 0);
return count;
}
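/* E.g. with 8K pages, a region starting 0x100 bytes into a page with
* len == PAGE_SIZE straddles a page boundary: the first iteration
* consumes PAGE_SIZE - 0x100 bytes up to the boundary, the second
* the remaining 0x100, so pages_in_region() returns 2.
*/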
struct cookie_state {
struct ldc_mtable_entry *page_table;
struct ldc_trans_cookie *cookies;
u64 mte_base;
u64 prev_cookie;
u32 pte_idx;
u32 nc;
};
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
unsigned long off, unsigned long len)
{
do {
unsigned long tlen, new = pa + PAGE_SIZE;
u64 this_cookie;
sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;
tlen = PAGE_SIZE;
if (off)
tlen = PAGE_SIZE - off;
if (tlen > len)
tlen = len;
this_cookie = make_cookie(sp->pte_idx,
pagesize_code(), off);
off = 0;
if (this_cookie == sp->prev_cookie) {
sp->cookies[sp->nc - 1].cookie_size += tlen;
} else {
sp->cookies[sp->nc].cookie_addr = this_cookie;
sp->cookies[sp->nc].cookie_size = tlen;
sp->nc++;
}
sp->prev_cookie = this_cookie + tlen;
sp->pte_idx++;
len -= tlen;
pa = new;
} while (len > 0);
}
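/* Cookie coalescing in fill_cookies(): cookies encode the PTE index,
* so when a full page lands in the next consecutive map-table slot,
* its cookie starts exactly at prev_cookie (the previous cookie plus
* its length) and the bytes are folded into cookies[nc - 1] instead of
* consuming a new slot.  A multi-page region mapped into consecutive
* PTEs therefore needs only a single cookie.
*/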
static int sg_count_one(struct scatterlist *sg)
{
unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
long len = sg->length;
if ((sg->offset | len) & (8UL - 1))
return -EFAULT;
return pages_in_region(base + sg->offset, len);
}
static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
int count;
int i;
count = 0;
for (i = 0; i < num_sg; i++) {
int err = sg_count_one(sg + i);
if (err < 0)
return err;
count += err;
}
return count;
}
int ldc_map_sg(struct ldc_channel *lp,
struct scatterlist *sg, int num_sg,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
unsigned long i, npages, flags;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
int err;
if (map_perm & ~LDC_MAP_ALL)
return -EINVAL;
err = sg_count_pages(sg, num_sg);
if (err < 0)
return err;
npages = err;
if (err > ncookies)
return -EMSGSIZE;
iommu = &lp->iommu;
spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
state.page_table = iommu->page_table;
state.cookies = cookies;
state.mte_base = perm_to_mte(map_perm);
state.prev_cookie = ~(u64)0;
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
for (i = 0; i < num_sg; i++)
fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
sg[i].offset, sg[i].length);
return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
int ldc_map_single(struct ldc_channel *lp,
void *buf, unsigned int len,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
unsigned long npages, pa, flags;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
return -EINVAL;
pa = __pa(buf);
if ((pa | len) & (8UL - 1))
return -EFAULT;
npages = pages_in_region(pa, len);
iommu = &lp->iommu;
spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
state.page_table = iommu->page_table;
state.cookies = cookies;
state.mte_base = perm_to_mte(map_perm);
state.prev_cookie = ~(u64)0;
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
BUG_ON(state.nc != 1);
return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long i, shift, index, npages;
struct ldc_mtable_entry *base;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
index = cookie_to_index(cookie, &shift);
base = iommu->page_table + index;
BUG_ON(index > arena->limit ||
(index + npages) > arena->limit);
for (i = 0; i < npages; i++) {
if (base->cookie)
sun4v_ldc_revoke(id, cookie + (i << shift),
base->cookie);
base->mte = 0;
__clear_bit(index + i, arena->map);
}
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
unsigned long flags;
int i;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
u64 addr = cookies[i].cookie_addr;
u64 size = cookies[i].cookie_size;
free_npages(lp->id, iommu, addr, size);
}
spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);
int ldc_copy(struct ldc_channel *lp, int copy_dir,
void *buf, unsigned int len, unsigned long offset,
struct ldc_trans_cookie *cookies, int ncookies)
{
unsigned int orig_len;
unsigned long ra;
int i;
if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
lp->id, copy_dir);
return -EINVAL;
}
ra = __pa(buf);
if ((ra | len | offset) & (8UL - 1)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
"ra[%lx] len[%x] offset[%lx]\n",
lp->id, ra, len, offset);
return -EFAULT;
}
if (lp->hs_state != LDC_HS_COMPLETE ||
(lp->flags & LDC_FLAG_RESET)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
"flags[%x]\n", lp->id, lp->hs_state, lp->flags);
return -ECONNRESET;
}
orig_len = len;
for (i = 0; i < ncookies; i++) {
unsigned long cookie_raddr = cookies[i].cookie_addr;
unsigned long this_len = cookies[i].cookie_size;
unsigned long actual_len;
if (unlikely(offset)) {
unsigned long this_off = offset;
if (this_off > this_len)
this_off = this_len;
offset -= this_off;
this_len -= this_off;
if (!this_len)
continue;
cookie_raddr += this_off;
}
if (this_len > len)
this_len = len;
while (1) {
unsigned long hv_err;
hv_err = sun4v_ldc_copy(lp->id, copy_dir,
cookie_raddr, ra,
this_len, &actual_len);
if (unlikely(hv_err)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
"HV error %lu\n",
lp->id, hv_err);
if (lp->hs_state != LDC_HS_COMPLETE ||
(lp->flags & LDC_FLAG_RESET))
return -ECONNRESET;
else
return -EFAULT;
}
cookie_raddr += actual_len;
ra += actual_len;
len -= actual_len;
if (actual_len == this_len)
break;
this_len -= actual_len;
}
if (!len)
break;
}
/* It is caller policy what to do about short copies.
* For example, a networking driver can declare the
* packet a runt and drop it.
*/
return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
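/* Short-copy handling is caller policy, per the comment above; a
* hedged sketch in the networking style it mentions, where dlen and
* drop_and_count() are made up for illustration (and skb->data is
* assumed to satisfy the 8-byte alignment rule):
*
*	err = ldc_copy(lp, LDC_COPY_IN, skb->data, dlen, 0,
*		       cookies, ncookies);
*	if (err != dlen)
*		drop_and_count(skb);	(declare it a runt)
*/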
void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
struct ldc_trans_cookie *cookies, int *ncookies,
unsigned int map_perm)
{
void *buf;
int err;
if (len & (8UL - 1))
return ERR_PTR(-EINVAL);
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
if (err < 0) {
kfree(buf);
return ERR_PTR(err);
}
*ncookies = err;
return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);
void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
struct ldc_trans_cookie *cookies, int ncookies)
{
ldc_unmap(lp, cookies, ncookies);
kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
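/* Typical export-ring setup built on the two helpers above; a hedged
* sketch where RING_LEN and NCOOKIES are made-up constants (RING_LEN
* must be a multiple of 8):
*
*	struct ldc_trans_cookie cookies[NCOOKIES];
*	int ncookies = NCOOKIES;
*	void *ring;
*
*	ring = ldc_alloc_exp_dring(lp, RING_LEN, cookies, &ncookies,
*				   LDC_MAP_DIRECT | LDC_MAP_R | LDC_MAP_W);
*	if (IS_ERR(ring))
*		return PTR_ERR(ring);
*	(on success, ncookies says how many cookies describe the ring)
*/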
static int __init ldc_init(void)
{
unsigned long major, minor;
struct mdesc_handle *hp;
const u64 *v;
int err;
u64 mp;
hp = mdesc_grab();
if (!hp)
return -ENODEV;
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
err = -ENODEV;
if (mp == MDESC_NODE_NULL)
goto out;
v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
if (!v)
goto out;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
goto out;
}
printk(KERN_INFO "%s", version);
if (!*v) {
printk(KERN_INFO PFX "Domaining disabled.\n");
goto out;
}
ldom_domaining_enabled = 1;
err = 0;
out:
mdesc_release(hp);
return err;
}
core_initcall(ldc_init);
| gpl-2.0 |
liquidware/liquidware_beagleboard_android_kernel | arch/sparc/kernel/ldc.c | 494 | 49707 | /* ldc.c: Logical Domain Channel link-layer protocol driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>
#define DRV_MODULE_NAME "ldc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE 64
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
* with no headers.
*/
struct ldc_packet {
u8 type;
#define LDC_CTRL 0x01
#define LDC_DATA 0x02
#define LDC_ERR 0x10
u8 stype;
#define LDC_INFO 0x01
#define LDC_ACK 0x02
#define LDC_NACK 0x04
u8 ctrl;
#define LDC_VERS 0x01 /* Link Version */
#define LDC_RTS 0x02 /* Request To Send */
#define LDC_RTR 0x03 /* Ready To Receive */
#define LDC_RDX 0x04 /* Ready for Data eXchange */
#define LDC_CTRL_MSK 0x0f
u8 env;
#define LDC_LEN 0x3f
#define LDC_FRAG_MASK 0xc0
#define LDC_START 0x40
#define LDC_STOP 0x80
u32 seqid;
union {
u8 u_data[LDC_PACKET_SIZE - 8];
struct {
u32 pad;
u32 ackid;
u8 r_data[LDC_PACKET_SIZE - 8 - 8];
} r;
} u;
};
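/* Example: a singleton 48-byte data packet in UNRELIABLE mode carries
* type == LDC_DATA, stype == LDC_INFO and env == 48 | LDC_START |
* LDC_STOP == 0xf0, i.e. both fragment bits set plus the payload
* length in the low LDC_LEN bits.
*/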
struct ldc_version {
u16 major;
u16 minor;
};
/* Ordered from largest major version to smallest. */
static struct ldc_version ver_arr[] = {
{ .major = 1, .minor = 0 },
};
#define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
struct ldc_channel;
struct ldc_mode_ops {
int (*write)(struct ldc_channel *, const void *, unsigned int);
int (*read)(struct ldc_channel *, void *, unsigned int);
};
static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
/* Protects arena alloc/free. */
spinlock_t lock;
struct iommu_arena arena;
struct ldc_mtable_entry *page_table;
};
struct ldc_channel {
/* Protects all operations that depend upon channel state. */
spinlock_t lock;
unsigned long id;
u8 *mssbuf;
u32 mssbuf_len;
u32 mssbuf_off;
struct ldc_packet *tx_base;
unsigned long tx_head;
unsigned long tx_tail;
unsigned long tx_num_entries;
unsigned long tx_ra;
unsigned long tx_acked;
struct ldc_packet *rx_base;
unsigned long rx_head;
unsigned long rx_tail;
unsigned long rx_num_entries;
unsigned long rx_ra;
u32 rcv_nxt;
u32 snd_nxt;
unsigned long chan_state;
struct ldc_channel_config cfg;
void *event_arg;
const struct ldc_mode_ops *mops;
struct ldc_iommu iommu;
struct ldc_version ver;
u8 hs_state;
#define LDC_HS_CLOSED 0x00
#define LDC_HS_OPEN 0x01
#define LDC_HS_GOTVERS 0x02
#define LDC_HS_SENTRTR 0x03
#define LDC_HS_GOTRTR 0x04
#define LDC_HS_COMPLETE 0x10
u8 flags;
#define LDC_FLAG_ALLOCED_QUEUES 0x01
#define LDC_FLAG_REGISTERED_QUEUES 0x02
#define LDC_FLAG_REGISTERED_IRQS 0x04
#define LDC_FLAG_RESET 0x10
u8 mss;
u8 state;
#define LDC_IRQ_NAME_MAX 32
char rx_irq_name[LDC_IRQ_NAME_MAX];
char tx_irq_name[LDC_IRQ_NAME_MAX];
struct hlist_head mh_list;
struct hlist_node list;
};
#define ldcdbg(TYPE, f, a...) \
do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
static const char *state_to_str(u8 state)
{
switch (state) {
case LDC_STATE_INVALID:
return "INVALID";
case LDC_STATE_INIT:
return "INIT";
case LDC_STATE_BOUND:
return "BOUND";
case LDC_STATE_READY:
return "READY";
case LDC_STATE_CONNECTED:
return "CONNECTED";
default:
return "<UNKNOWN>";
}
}
static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
state_to_str(lp->state),
state_to_str(state));
lp->state = state;
}
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
if (off == (num_entries * LDC_PACKET_SIZE))
off = 0;
return off;
}
static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
return __advance(off, lp->rx_num_entries);
}
static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
return __advance(off, lp->tx_num_entries);
}
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
unsigned long *new_tail)
{
struct ldc_packet *p;
unsigned long t;
t = tx_advance(lp, lp->tx_tail);
if (t == lp->tx_head)
return NULL;
*new_tail = t;
p = lp->tx_base;
return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
/* When we are in reliable or stream mode, we have to track the next packet
* we haven't gotten an ACK for in the TX queue using tx_acked. We have
* to be careful not to stomp over the queue past that point. During
* the handshake, we don't have TX data packets pending in the queue
* and that's why handshake_get_tx_packet() need not be mindful of
* lp->tx_acked.
*/
static unsigned long head_for_data(struct ldc_channel *lp)
{
if (lp->cfg.mode == LDC_MODE_STREAM)
return lp->tx_acked;
return lp->tx_head;
}
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
unsigned long limit, tail, new_tail, diff;
unsigned int mss;
limit = head_for_data(lp);
tail = lp->tx_tail;
new_tail = tx_advance(lp, tail);
if (new_tail == limit)
return 0;
if (limit > new_tail)
diff = limit - new_tail;
else
diff = (limit +
((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
diff /= LDC_PACKET_SIZE;
mss = lp->mss;
if (diff * mss < size)
return 0;
return 1;
}
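/* Worked example for the space check above: with 64 queue entries and
* head == tail, tx_advance() reserves one slot to distinguish full
* from empty, leaving diff == 63 packets.  In stream mode (mss == 48)
* the largest write tx_has_space_for() accepts is therefore
* 63 * 48 == 3024 bytes.
*/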
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
unsigned long *new_tail)
{
struct ldc_packet *p;
unsigned long h, t;
h = head_for_data(lp);
t = tx_advance(lp, lp->tx_tail);
if (t == h)
return NULL;
*new_tail = t;
p = lp->tx_base;
return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
unsigned long orig_tail = lp->tx_tail;
int limit = 1000;
lp->tx_tail = tail;
while (limit-- > 0) {
unsigned long err;
err = sun4v_ldc_tx_set_qtail(lp->id, tail);
if (!err)
return 0;
if (err != HV_EWOULDBLOCK) {
lp->tx_tail = orig_tail;
return -EINVAL;
}
udelay(1);
}
lp->tx_tail = orig_tail;
return -EBUSY;
}
/* This just updates the head value in the hypervisor using
* a polling loop with a timeout. The caller takes care of
* updating software state representing the head change, if any.
*/
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
int limit = 1000;
while (limit-- > 0) {
unsigned long err;
err = sun4v_ldc_rx_set_qhead(lp->id, head);
if (!err)
return 0;
if (err != HV_EWOULDBLOCK)
return -EINVAL;
udelay(1);
}
return -EBUSY;
}
static int send_tx_packet(struct ldc_channel *lp,
struct ldc_packet *p,
unsigned long new_tail)
{
BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
return set_tx_tail(lp, new_tail);
}
static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
u8 stype, u8 ctrl,
void *data, int dlen,
unsigned long *new_tail)
{
struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
if (p) {
memset(p, 0, sizeof(*p));
p->type = LDC_CTRL;
p->stype = stype;
p->ctrl = ctrl;
if (data)
memcpy(p->u.u_data, data, dlen);
}
return p;
}
static int start_handshake(struct ldc_channel *lp)
{
struct ldc_packet *p;
struct ldc_version *ver;
unsigned long new_tail;
ver = &ver_arr[0];
ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
ver->major, ver->minor);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
ver, sizeof(*ver), &new_tail);
if (p) {
int err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->flags &= ~LDC_FLAG_RESET;
return err;
}
return -EBUSY;
}
static int send_version_nack(struct ldc_channel *lp,
u16 major, u16 minor)
{
struct ldc_packet *p;
struct ldc_version ver;
unsigned long new_tail;
ver.major = major;
ver.minor = minor;
p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
&ver, sizeof(ver), &new_tail);
if (p) {
ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
ver.major, ver.minor);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_version_ack(struct ldc_channel *lp,
struct ldc_version *vp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
vp, sizeof(*vp), &new_tail);
if (p) {
ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
vp->major, vp->minor);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rts(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
&new_tail);
if (p) {
p->env = lp->cfg.mode;
p->seqid = 0;
lp->rcv_nxt = 0;
ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
p->env, p->seqid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rtr(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
&new_tail);
if (p) {
p->env = lp->cfg.mode;
p->seqid = 0;
ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
p->env, p->seqid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rdx(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
&new_tail);
if (p) {
p->env = 0;
p->seqid = ++lp->snd_nxt;
p->u.r.ackid = lp->rcv_nxt;
ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
p->env, p->seqid, p->u.r.ackid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
struct ldc_packet *p;
unsigned long new_tail;
int err;
p = data_get_tx_packet(lp, &new_tail);
if (!p)
return -EBUSY;
memset(p, 0, sizeof(*p));
p->type = data_pkt->type;
p->stype = LDC_NACK;
p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
p->seqid = lp->snd_nxt + 1;
p->u.r.ackid = lp->rcv_nxt;
ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
p->type, p->ctrl, p->seqid, p->u.r.ackid);
err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->snd_nxt++;
return err;
}
static int ldc_abort(struct ldc_channel *lp)
{
unsigned long hv_err;
ldcdbg(STATE, "ABORT\n");
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
*/
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
lp->id, hv_err);
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);
/* Refetch the RX queue state as well, because we could be invoked
* here in the queue processing context.
*/
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
lp->id, hv_err);
return -ECONNRESET;
}
static struct ldc_version *find_by_major(u16 major)
{
struct ldc_version *ret = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
struct ldc_version *v = &ver_arr[i];
if (v->major <= major) {
ret = v;
break;
}
}
return ret;
}
static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
struct ldc_version *vap;
int err;
ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
vp->major, vp->minor);
if (lp->hs_state == LDC_HS_GOTVERS) {
lp->hs_state = LDC_HS_OPEN;
memset(&lp->ver, 0, sizeof(lp->ver));
}
vap = find_by_major(vp->major);
if (!vap) {
err = send_version_nack(lp, 0, 0);
} else if (vap->major != vp->major) {
err = send_version_nack(lp, vap->major, vap->minor);
} else {
struct ldc_version ver = *vp;
if (ver.minor > vap->minor)
ver.minor = vap->minor;
err = send_version_ack(lp, &ver);
if (!err) {
lp->ver = ver;
lp->hs_state = LDC_HS_GOTVERS;
}
}
if (err)
return ldc_abort(lp);
return 0;
}
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
vp->major, vp->minor);
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
return ldc_abort(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
return ldc_abort(lp);
return 0;
}
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
struct ldc_version *vap;
struct ldc_packet *p;
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
return ldc_abort(lp);
vap = find_by_major(vp->major);
if (!vap)
return ldc_abort(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
return ldc_abort(lp);
return send_tx_packet(lp, p, new_tail);
}
static int process_version(struct ldc_channel *lp,
struct ldc_packet *p)
{
struct ldc_version *vp;
vp = (struct ldc_version *) p->u.u_data;
switch (p->stype) {
case LDC_INFO:
return process_ver_info(lp, vp);
case LDC_ACK:
return process_ver_ack(lp, vp);
case LDC_NACK:
return process_ver_nack(lp, vp);
default:
return ldc_abort(lp);
}
}
static int process_rts(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
p->stype, p->seqid, p->env);
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
return ldc_abort(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
return ldc_abort(lp);
return 0;
}
static int process_rtr(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
p->stype, p->seqid, p->env);
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
return ldc_abort(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
send_rdx(lp);
return LDC_EVENT_UP;
}
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
return lp->rcv_nxt + 1 == seqid;
}
static int process_rdx(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
p->stype, p->seqid, p->env, p->u.r.ackid);
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
return ldc_abort(lp);
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
return LDC_EVENT_UP;
}
static int process_control_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
switch (p->ctrl) {
case LDC_VERS:
return process_version(lp, p);
case LDC_RTS:
return process_rts(lp, p);
case LDC_RTR:
return process_rtr(lp, p);
case LDC_RDX:
return process_rdx(lp, p);
default:
return ldc_abort(lp);
}
}
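/* The complete non-RAW handshake as driven by the helpers above,
* with the initiating side on the left:
*
*	VERS INFO -->			(start_handshake)
*	          <-- VERS ACK		(peer: process_ver_info)
*	RTS       -->			(process_ver_ack)
*	          <-- RTR		(peer: process_rts)
*	RDX       -->			(process_rtr, LDC_EVENT_UP)
*
* The peer's process_rdx() then raises LDC_EVENT_UP on its side and
* both channels enter LDC_STATE_CONNECTED.
*/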
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
return ldc_abort(lp);
}
static int process_data_ack(struct ldc_channel *lp,
struct ldc_packet *ack)
{
unsigned long head = lp->tx_acked;
u32 ackid = ack->u.r.ackid;
while (1) {
struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
head = tx_advance(lp, head);
if (p->seqid == ackid) {
lp->tx_acked = head;
return 0;
}
if (head == lp->tx_tail)
return ldc_abort(lp);
}
return 0;
}
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
if (event_mask & LDC_EVENT_RESET)
lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
if (event_mask & LDC_EVENT_UP)
lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
if (event_mask & LDC_EVENT_DATA_READY)
lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long orig_state, hv_err, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
event_mask = 0;
if (lp->cfg.mode == LDC_MODE_RAW &&
lp->chan_state == LDC_CHANNEL_UP) {
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
(void) __set_rx_head(lp, lp->rx_tail);
goto out;
}
/* Once we finish the handshake, we let the ldc_read()
* paths do all of the control frame and state management.
* Just trigger the callback.
*/
if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
if (lp->chan_state != orig_state) {
unsigned int event = LDC_EVENT_RESET;
if (lp->chan_state == LDC_CHANNEL_UP)
event = LDC_EVENT_UP;
event_mask |= event;
}
if (lp->rx_head != lp->rx_tail)
event_mask |= LDC_EVENT_DATA_READY;
goto out;
}
if (lp->chan_state != orig_state)
goto out;
while (lp->rx_head != lp->rx_tail) {
struct ldc_packet *p;
unsigned long new;
int err;
p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
switch (p->type) {
case LDC_CTRL:
err = process_control_frame(lp, p);
if (err > 0)
event_mask |= err;
break;
case LDC_DATA:
event_mask |= LDC_EVENT_DATA_READY;
err = 0;
break;
case LDC_ERR:
err = process_error_frame(lp, p);
break;
default:
err = ldc_abort(lp);
break;
}
if (err < 0)
break;
new = lp->rx_head;
new += LDC_PACKET_SIZE;
if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
new = 0;
lp->rx_head = new;
err = __set_rx_head(lp, new);
if (err < 0) {
(void) ldc_abort(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
goto handshake_complete;
}
out:
spin_unlock_irqrestore(&lp->lock, flags);
send_events(lp, event_mask);
return IRQ_HANDLED;
}
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long flags, hv_err, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
if (lp->cfg.mode == LDC_MODE_RAW &&
lp->chan_state == LDC_CHANNEL_UP) {
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
event_mask |= LDC_EVENT_UP;
}
spin_unlock_irqrestore(&lp->lock, flags);
send_events(lp, event_mask);
return IRQ_HANDLED;
}
/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
* XXX that addition and removal from the ldc_channel_list has
* XXX atomicity, otherwise the __ldc_channel_exists() check is
* XXX totally pointless as another thread can slip into ldc_alloc()
* XXX and add a channel with the same ID. There also needs to be
* XXX a spinlock for ldc_channel_list.
*/
static HLIST_HEAD(ldc_channel_list);
static int __ldc_channel_exists(unsigned long id)
{
struct ldc_channel *lp;
struct hlist_node *n;
hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
if (lp->id == id)
return 1;
}
return 0;
}
static int alloc_queue(const char *name, unsigned long num_entries,
struct ldc_packet **base, unsigned long *ra)
{
unsigned long size, order;
void *q;
size = num_entries * LDC_PACKET_SIZE;
order = get_order(size);
q = (void *) __get_free_pages(GFP_KERNEL, order);
if (!q) {
printk(KERN_ERR PFX "Alloc of %s queue failed with "
"size=%lu order=%lu\n", name, size, order);
return -ENOMEM;
}
memset(q, 0, PAGE_SIZE << order);
*base = q;
*ra = __pa(q);
return 0;
}
static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
unsigned long size, order;
if (!q)
return;
size = num_entries * LDC_PACKET_SIZE;
order = get_order(size);
free_pages((unsigned long)q, order);
}
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
static int ldc_iommu_init(struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
struct ldc_iommu *iommu = &lp->iommu;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
spin_lock_init(&iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->arena.map = kzalloc(sz, GFP_KERNEL);
if (!iommu->arena.map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
iommu->arena.limit = num_tsb_entries;
order = get_order(tsbsize);
table = (struct ldc_mtable_entry *)
__get_free_pages(GFP_KERNEL, order);
err = -ENOMEM;
if (!table) {
printk(KERN_ERR PFX "Alloc of MTE table failed, "
"size=%lu order=%lu\n", tsbsize, order);
goto out_free_map;
}
memset(table, 0, PAGE_SIZE << order);
iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
err = -EINVAL;
if (hv_err)
goto out_free_table;
return 0;
out_free_table:
free_pages((unsigned long) table, order);
iommu->page_table = NULL;
out_free_map:
kfree(iommu->arena.map);
iommu->arena.map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
struct ldc_iommu *iommu = &lp->iommu;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
num_tsb_entries = iommu->arena.limit;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
free_pages((unsigned long) iommu->page_table, order);
iommu->page_table = NULL;
kfree(iommu->arena.map);
iommu->arena.map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
const struct ldc_channel_config *cfgp,
void *event_arg)
{
struct ldc_channel *lp;
const struct ldc_mode_ops *mops;
unsigned long dummy1, dummy2, hv_err;
u8 mss, *mssbuf;
int err;
err = -ENODEV;
if (!ldom_domaining_enabled)
goto out_err;
err = -EINVAL;
if (!cfgp)
goto out_err;
switch (cfgp->mode) {
case LDC_MODE_RAW:
mops = &raw_ops;
mss = LDC_PACKET_SIZE;
break;
case LDC_MODE_UNRELIABLE:
mops = &nonraw_ops;
mss = LDC_PACKET_SIZE - 8;
break;
case LDC_MODE_STREAM:
mops = &stream_ops;
mss = LDC_PACKET_SIZE - 8 - 8;
break;
default:
goto out_err;
}
if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
goto out_err;
hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
err = -ENODEV;
if (hv_err == HV_ECHANNEL)
goto out_err;
err = -EEXIST;
if (__ldc_channel_exists(id))
goto out_err;
mssbuf = NULL;
lp = kzalloc(sizeof(*lp), GFP_KERNEL);
err = -ENOMEM;
if (!lp)
goto out_err;
spin_lock_init(&lp->lock);
lp->id = id;
err = ldc_iommu_init(lp);
if (err)
goto out_free_ldc;
lp->mops = mops;
lp->mss = mss;
lp->cfg = *cfgp;
if (!lp->cfg.mtu)
lp->cfg.mtu = LDC_DEFAULT_MTU;
if (lp->cfg.mode == LDC_MODE_STREAM) {
mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
if (!mssbuf) {
err = -ENOMEM;
goto out_free_iommu;
}
lp->mssbuf = mssbuf;
}
lp->event_arg = event_arg;
/* XXX allow setting via ldc_channel_config to override defaults
* XXX or use some formula based upon mtu
*/
lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
err = alloc_queue("TX", lp->tx_num_entries,
&lp->tx_base, &lp->tx_ra);
if (err)
goto out_free_mssbuf;
err = alloc_queue("RX", lp->rx_num_entries,
&lp->rx_base, &lp->rx_ra);
if (err)
goto out_free_txq;
lp->flags |= LDC_FLAG_ALLOCED_QUEUES;
lp->hs_state = LDC_HS_CLOSED;
ldc_set_state(lp, LDC_STATE_INIT);
INIT_HLIST_NODE(&lp->list);
hlist_add_head(&lp->list, &ldc_channel_list);
INIT_HLIST_HEAD(&lp->mh_list);
return lp;
out_free_txq:
free_queue(lp->tx_num_entries, lp->tx_base);
out_free_mssbuf:
kfree(mssbuf);
out_free_iommu:
ldc_iommu_release(lp);
out_free_ldc:
kfree(lp);
out_err:
return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
void ldc_free(struct ldc_channel *lp)
{
if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
free_irq(lp->cfg.rx_irq, lp);
free_irq(lp->cfg.tx_irq, lp);
}
if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
sun4v_ldc_tx_qconf(lp->id, 0, 0);
sun4v_ldc_rx_qconf(lp->id, 0, 0);
lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
}
if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
free_queue(lp->tx_num_entries, lp->tx_base);
free_queue(lp->rx_num_entries, lp->rx_base);
lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
}
hlist_del(&lp->list);
kfree(lp->mssbuf);
ldc_iommu_release(lp);
kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
/* Bind the channel. This registers the LDC queues with
* the hypervisor and puts the channel into a pseudo-listening
* state. This does not initiate a handshake, ldc_connect() does
* that.
*/
int ldc_bind(struct ldc_channel *lp, const char *name)
{
unsigned long hv_err, flags;
int err = -EINVAL;
if (!name ||
(lp->state != LDC_STATE_INIT))
return -EINVAL;
snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
return err;
}
spin_lock_irqsave(&lp->lock, flags);
enable_irq(lp->cfg.rx_irq);
enable_irq(lp->cfg.tx_irq);
lp->flags |= LDC_FLAG_REGISTERED_IRQS;
err = -ENODEV;
hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_free_irqs;
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
goto out_free_irqs;
hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_unmap_tx;
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
goto out_unmap_tx;
lp->flags |= LDC_FLAG_REGISTERED_QUEUES;
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
err = -EBUSY;
if (hv_err)
goto out_unmap_rx;
lp->tx_acked = lp->tx_head;
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
out_unmap_rx:
lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
sun4v_ldc_rx_qconf(lp->id, 0, 0);
out_unmap_tx:
sun4v_ldc_tx_qconf(lp->id, 0, 0);
out_free_irqs:
lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
free_irq(lp->cfg.tx_irq, lp);
free_irq(lp->cfg.rx_irq, lp);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_bind);
int ldc_connect(struct ldc_channel *lp)
{
unsigned long flags;
int err;
if (lp->cfg.mode == LDC_MODE_RAW)
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
err = -EINVAL;
else
err = start_handshake(lp);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_connect);
int ldc_disconnect(struct ldc_channel *lp)
{
unsigned long hv_err, flags;
int err;
if (lp->cfg.mode == LDC_MODE_RAW)
return -EINVAL;
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
err = -ENODEV;
hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
goto out_err;
ldc_set_state(lp, LDC_STATE_BOUND);
lp->hs_state = LDC_HS_OPEN;
lp->flags |= LDC_FLAG_RESET;
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
out_err:
sun4v_ldc_tx_qconf(lp->id, 0, 0);
sun4v_ldc_rx_qconf(lp->id, 0, 0);
free_irq(lp->cfg.tx_irq, lp);
free_irq(lp->cfg.rx_irq, lp);
lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
LDC_FLAG_REGISTERED_QUEUES);
ldc_set_state(lp, LDC_STATE_INIT);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_disconnect);
int ldc_state(struct ldc_channel *lp)
{
return lp->state;
}
EXPORT_SYMBOL(ldc_state);
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long new_tail;
int err;
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
p = data_get_tx_packet(lp, &new_tail);
if (!p)
return -EAGAIN;
memcpy(p, buf, size);
err = send_tx_packet(lp, p, new_tail);
if (!err)
err = size;
return err;
}
static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long hv_err, new;
int err;
if (size < LDC_PACKET_SIZE)
return -EINVAL;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (lp->rx_head == lp->rx_tail)
return 0;
p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
memcpy(buf, p, LDC_PACKET_SIZE);
new = rx_advance(lp, lp->rx_head);
lp->rx_head = new;
err = __set_rx_head(lp, new);
if (err < 0)
err = -ECONNRESET;
else
err = LDC_PACKET_SIZE;
return err;
}
static const struct ldc_mode_ops raw_ops = {
.write = write_raw,
.read = read_raw,
};
static int write_nonraw(struct ldc_channel *lp, const void *buf,
unsigned int size)
{
unsigned long hv_err, tail;
unsigned int copied;
u32 seq;
int err;
hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
&lp->chan_state);
if (unlikely(hv_err))
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
return ldc_abort(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
seq = lp->snd_nxt;
copied = 0;
tail = lp->tx_tail;
while (copied < size) {
struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
p->u.u_data :
p->u.r.r_data);
int data_len;
p->type = LDC_DATA;
p->stype = LDC_INFO;
p->ctrl = 0;
data_len = size - copied;
if (data_len > lp->mss)
data_len = lp->mss;
BUG_ON(data_len > LDC_LEN);
p->env = (data_len |
(copied == 0 ? LDC_START : 0) |
(data_len == size - copied ? LDC_STOP : 0));
p->seqid = ++seq;
ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
p->type,
p->stype,
p->ctrl,
p->env,
p->seqid);
memcpy(data, buf, data_len);
buf += data_len;
copied += data_len;
tail = tx_advance(lp, tail);
}
err = set_tx_tail(lp, tail);
if (!err) {
lp->snd_nxt = seq;
err = size;
}
return err;
}
static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
struct ldc_packet *first_frag)
{
int err;
if (first_frag)
lp->rcv_nxt = first_frag->seqid - 1;
err = send_data_nack(lp, p);
if (err)
return err;
err = __set_rx_head(lp, lp->rx_tail);
if (err < 0)
return ldc_abort(lp);
return 0;
}
static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
if (p->stype & LDC_ACK) {
int err = process_data_ack(lp, p);
if (err)
return err;
}
if (p->stype & LDC_NACK)
return ldc_abort(lp);
return 0;
}
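/* Poll for up to roughly a millisecond (1000 iterations of udelay(1))
 * for more packets to land in the RX queue, e.g. for the remaining
 * fragments of a framed message.
 */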
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
unsigned long dummy;
int limit = 1000;
ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
cur_head, lp->rx_head, lp->rx_tail);
while (limit-- > 0) {
unsigned long hv_err;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&dummy,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (cur_head != lp->rx_tail) {
ldcdbg(DATA, "DATA WAIT DONE "
"head[%lx] tail[%lx] chan_state[%lx]\n",
dummy, lp->rx_tail, lp->chan_state);
return 0;
}
udelay(1);
}
return -EAGAIN;
}
static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
int err = __set_rx_head(lp, head);
if (err < 0)
return ldc_abort(lp);
lp->rx_head = head;
return 0;
}
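/* Queue a standalone DATA/ACK packet acknowledging everything received
 * up to lp->rcv_nxt.  Best effort: if no TX queue slot is free, the
 * ACK is silently skipped.
 */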
static void send_data_ack(struct ldc_channel *lp)
{
unsigned long new_tail;
struct ldc_packet *p;
p = data_get_tx_packet(lp, &new_tail);
if (likely(p)) {
int err;
memset(p, 0, sizeof(*p));
p->type = LDC_DATA;
p->stype = LDC_ACK;
p->ctrl = 0;
p->seqid = lp->snd_nxt + 1;
p->u.r.ackid = lp->rcv_nxt;
err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->snd_nxt++;
}
}
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
struct ldc_packet *first_frag;
unsigned long hv_err, new;
int err, copied;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (lp->rx_head == lp->rx_tail)
return 0;
first_frag = NULL;
copied = err = 0;
new = lp->rx_head;
while (1) {
struct ldc_packet *p;
int pkt_len;
BUG_ON(new == lp->rx_tail);
p = lp->rx_base + (new / LDC_PACKET_SIZE);
ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
"rcv_nxt[%08x]\n",
p->type,
p->stype,
p->ctrl,
p->env,
p->seqid,
p->u.r.ackid,
lp->rcv_nxt);
if (unlikely(!rx_seq_ok(lp, p->seqid))) {
err = rx_bad_seq(lp, p, first_frag);
copied = 0;
break;
}
if (p->type & LDC_CTRL) {
err = process_control_frame(lp, p);
if (err < 0)
break;
err = 0;
}
lp->rcv_nxt = p->seqid;
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
goto no_data;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
if (err)
break;
}
if (!(p->stype & LDC_INFO)) {
new = rx_advance(lp, new);
err = rx_set_head(lp, new);
if (err)
break;
goto no_data;
}
pkt_len = p->env & LDC_LEN;
/* Every initial packet starts with the START bit set.
*
* Singleton packets will have both START+STOP set.
*
* Fragments will have START set in the first frame, STOP
* set in the last frame, and neither bit set in middle
* frames of the packet.
*
* Therefore if we are at the beginning of a packet and
* we don't see START, or we are in the middle of a fragmented
* packet and do see START, we are unsynchronized and should
* flush the RX queue.
*/
if ((first_frag == NULL && !(p->env & LDC_START)) ||
(first_frag != NULL && (p->env & LDC_START))) {
if (!first_frag)
new = rx_advance(lp, new);
err = rx_set_head(lp, new);
if (err)
break;
if (!first_frag)
goto no_data;
}
if (!first_frag)
first_frag = p;
if (pkt_len > size - copied) {
/* User didn't give us a big enough buffer,
* what to do? This is a pretty serious error.
*
* Since we haven't updated the RX ring head to
* consume any of the packets, signal the error
* to the user and just leave the RX ring alone.
*
* This seems the best behavior because this allows
* a user of the LDC layer to start with a small
* RX buffer for ldc_read() calls and use -EMSGSIZE
* as a cue to enlarge its read buffer.
*/
err = -EMSGSIZE;
break;
}
/* Ok, we are gonna eat this one. */
new = rx_advance(lp, new);
memcpy(buf,
(lp->cfg.mode == LDC_MODE_UNRELIABLE ?
p->u.u_data : p->u.r.r_data), pkt_len);
buf += pkt_len;
copied += pkt_len;
if (p->env & LDC_STOP)
break;
no_data:
if (new == lp->rx_tail) {
err = rx_data_wait(lp, new);
if (err)
break;
}
}
if (!err)
err = rx_set_head(lp, new);
if (err && first_frag)
lp->rcv_nxt = first_frag->seqid - 1;
if (!err) {
err = copied;
if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
send_data_ack(lp);
}
return err;
}
static const struct ldc_mode_ops nonraw_ops = {
.write = write_nonraw,
.read = read_nonraw,
};
static int write_stream(struct ldc_channel *lp, const void *buf,
unsigned int size)
{
if (size > lp->cfg.mtu)
size = lp->cfg.mtu;
return write_nonraw(lp, buf, size);
}
static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
if (!lp->mssbuf_len) {
int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
if (err < 0)
return err;
lp->mssbuf_len = err;
lp->mssbuf_off = 0;
}
if (size > lp->mssbuf_len)
size = lp->mssbuf_len;
memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);
lp->mssbuf_off += size;
lp->mssbuf_len -= size;
return size;
}
static const struct ldc_mode_ops stream_ops = {
.write = write_stream,
.read = read_stream,
};
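/* Illustrative only (not from this file): assuming lp's handshake has
 * completed, a consumer typically polls ldc_read() below until a
 * message arrives, since an empty RX queue reads as 0 and a
 * still-incomplete fragmented message as -EAGAIN:
 *
 *	char buf[64];
 *	int n;
 *
 *	do {
 *		n = ldc_read(lp, buf, sizeof(buf));
 *	} while (n == 0 || n == -EAGAIN);
 *	if (n < 0)
 *		pr_err("ldc_read failed: %d\n", n);
 */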
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
unsigned long flags;
int err;
if (!buf)
return -EINVAL;
if (!size)
return 0;
spin_lock_irqsave(&lp->lock, flags);
if (lp->hs_state != LDC_HS_COMPLETE)
err = -ENOTCONN;
else
err = lp->mops->write(lp, buf, size);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_write);
int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
unsigned long flags;
int err;
if (!buf)
return -EINVAL;
if (!size)
return 0;
spin_lock_irqsave(&lp->lock, flags);
if (lp->hs_state != LDC_HS_COMPLETE)
err = -ENOTCONN;
else
err = lp->mops->read(lp, buf, size);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_read);
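/* First-fit allocator for the per-channel IOMMU arena: scan the bitmap
 * from the rotating hint, and if the tail of the map is exhausted, wrap
 * around once (pass 1) before giving up.  Returns the first entry index
 * or -1 on failure; the caller holds iommu->lock.
 */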
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long n, i, start, end, limit;
int pass;
limit = arena->limit;
start = arena->hint;
pass = 0;
again:
n = find_next_zero_bit(arena->map, limit, start);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
limit = start;
start = 0;
pass++;
goto again;
} else {
/* Scanned the whole thing, give up. */
return -1;
}
}
for (i = n; i < end; i++) {
if (test_bit(i, arena->map)) {
start = i + 1;
goto again;
}
}
for (i = n; i < end; i++)
__set_bit(i, arena->map);
arena->hint = end;
return n;
}
#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT 60ULL
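/* An LDC cookie packs the page-size code into bits 63:60 and the
 * mapping-table index above the in-page offset.  pagesize_code() is
 * chosen so that 13 + 3 * code == PAGE_SHIFT, which keeps make_cookie()
 * and cookie_to_index() symmetric.  With 8K pages (code 0), index 5 and
 * offset 0x100:
 *
 *	make_cookie(5, 0, 0x100) == (5 << 13) | 0x100 == 0xa100
 *	cookie_to_index(0xa100, &shift) == 0xa100 >> 13 == 5
 */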
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
default:
case (8ULL * 1024ULL):
return 0;
case (64ULL * 1024ULL):
return 1;
case (512ULL * 1024ULL):
return 2;
case (4ULL * 1024ULL * 1024ULL):
return 3;
case (32ULL * 1024ULL * 1024ULL):
return 4;
case (256ULL * 1024ULL * 1024ULL):
return 5;
}
}
static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
(index << PAGE_SHIFT) |
page_offset);
}
static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
cookie &= ~COOKIE_PGSZ_CODE;
*shift = szcode * 3;
return (cookie >> (13ULL + (szcode * 3ULL)));
}
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
entry = arena_alloc(iommu, npages);
if (unlikely(entry < 0))
return NULL;
return iommu->page_table + entry;
}
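/* Translate LDC_MAP_* permission flags into map-table-entry bits.
 * Shadow, direct and IOMMU mappings each select a different set of MTE
 * permission bits, and the page-size code is folded into the result.
 */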
static u64 perm_to_mte(unsigned int map_perm)
{
u64 mte_base;
mte_base = pagesize_code();
if (map_perm & LDC_MAP_SHADOW) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_COPY_R;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_COPY_W;
}
if (map_perm & LDC_MAP_DIRECT) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_READ;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_WRITE;
if (map_perm & LDC_MAP_X)
mte_base |= LDC_MTE_EXEC;
}
if (map_perm & LDC_MAP_IO) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_IOMMU_R;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_IOMMU_W;
}
return mte_base;
}
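/* Count how many pages a byte range touches.  For example, with 8K
 * pages a 16-byte region starting 8 bytes before a page boundary spans
 * 2 pages.
 */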
static int pages_in_region(unsigned long base, long len)
{
int count = 0;
do {
unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;
len -= (new - base);
base = new;
count++;
} while (len > 0);
return count;
}
struct cookie_state {
struct ldc_mtable_entry *page_table;
struct ldc_trans_cookie *cookies;
u64 mte_base;
u64 prev_cookie;
u32 pte_idx;
u32 nc;
};
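/* Fill map-table entries for one physically contiguous run and emit
 * transfer cookies for it.  A page whose cookie abuts the previous one
 * is coalesced by growing cookie_size, which is why a multi-page
 * mapping can need only a single cookie.
 */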
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
unsigned long off, unsigned long len)
{
do {
unsigned long tlen, new = pa + PAGE_SIZE;
u64 this_cookie;
sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;
tlen = PAGE_SIZE;
if (off)
tlen = PAGE_SIZE - off;
if (tlen > len)
tlen = len;
this_cookie = make_cookie(sp->pte_idx,
pagesize_code(), off);
off = 0;
if (this_cookie == sp->prev_cookie) {
sp->cookies[sp->nc - 1].cookie_size += tlen;
} else {
sp->cookies[sp->nc].cookie_addr = this_cookie;
sp->cookies[sp->nc].cookie_size = tlen;
sp->nc++;
}
sp->prev_cookie = this_cookie + tlen;
sp->pte_idx++;
len -= tlen;
pa = new;
} while (len > 0);
}
static int sg_count_one(struct scatterlist *sg)
{
unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
long len = sg->length;
if ((sg->offset | len) & (8UL - 1))
return -EFAULT;
return pages_in_region(base + sg->offset, len);
}
static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
int count;
int i;
count = 0;
for (i = 0; i < num_sg; i++) {
int err = sg_count_one(sg + i);
if (err < 0)
return err;
count += err;
}
return count;
}
int ldc_map_sg(struct ldc_channel *lp,
struct scatterlist *sg, int num_sg,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
unsigned long i, npages, flags;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
int err;
if (map_perm & ~LDC_MAP_ALL)
return -EINVAL;
err = sg_count_pages(sg, num_sg);
if (err < 0)
return err;
npages = err;
if (err > ncookies)
return -EMSGSIZE;
iommu = &lp->iommu;
spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
state.page_table = iommu->page_table;
state.cookies = cookies;
state.mte_base = perm_to_mte(map_perm);
state.prev_cookie = ~(u64)0;
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
for (i = 0; i < num_sg; i++)
fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
sg[i].offset, sg[i].length);
return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
int ldc_map_single(struct ldc_channel *lp,
void *buf, unsigned int len,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
unsigned long npages, pa, flags;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
return -EINVAL;
pa = __pa(buf);
if ((pa | len) & (8UL - 1))
return -EFAULT;
npages = pages_in_region(pa, len);
iommu = &lp->iommu;
spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
state.page_table = iommu->page_table;
state.cookies = cookies;
state.mte_base = perm_to_mte(map_perm);
state.prev_cookie = ~(u64)0;
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
BUG_ON(state.nc != 1);
return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
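/* Illustrative only (not from this file): exporting a local buffer to
 * the peer and tearing the mapping down again might look like this,
 * with error handling trimmed:
 *
 *	struct ldc_trans_cookie cookies[2];
 *	int nc;
 *
 *	nc = ldc_map_single(lp, buf, len, cookies, 2,
 *			    LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
 *	if (nc < 0)
 *		return nc;
 *	... hand cookies[0..nc-1] to the peer, e.g. in a descriptor ...
 *	ldc_unmap(lp, cookies, nc);
 */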
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long i, shift, index, npages;
struct ldc_mtable_entry *base;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
index = cookie_to_index(cookie, &shift);
base = iommu->page_table + index;
BUG_ON(index > arena->limit ||
(index + npages) > arena->limit);
for (i = 0; i < npages; i++) {
if (base->cookie)
sun4v_ldc_revoke(id, cookie + (i << shift),
base->cookie);
base->mte = 0;
__clear_bit(index + i, arena->map);
}
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
unsigned long flags;
int i;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
u64 addr = cookies[i].cookie_addr;
u64 size = cookies[i].cookie_size;
free_npages(lp->id, iommu, addr, size);
}
spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);
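/* Copy up to @len bytes between the local buffer and the peer memory
 * named by @cookies, in the direction given by @copy_dir.  Returns the
 * number of bytes actually copied, which may be short; see the comment
 * at the end of the function.
 */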
int ldc_copy(struct ldc_channel *lp, int copy_dir,
void *buf, unsigned int len, unsigned long offset,
struct ldc_trans_cookie *cookies, int ncookies)
{
unsigned int orig_len;
unsigned long ra;
int i;
if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
lp->id, copy_dir);
return -EINVAL;
}
ra = __pa(buf);
if ((ra | len | offset) & (8UL - 1)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
"ra[%lx] len[%x] offset[%lx]\n",
lp->id, ra, len, offset);
return -EFAULT;
}
if (lp->hs_state != LDC_HS_COMPLETE ||
(lp->flags & LDC_FLAG_RESET)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
"flags[%x]\n", lp->id, lp->hs_state, lp->flags);
return -ECONNRESET;
}
orig_len = len;
for (i = 0; i < ncookies; i++) {
unsigned long cookie_raddr = cookies[i].cookie_addr;
unsigned long this_len = cookies[i].cookie_size;
unsigned long actual_len;
if (unlikely(offset)) {
unsigned long this_off = offset;
if (this_off > this_len)
this_off = this_len;
offset -= this_off;
this_len -= this_off;
if (!this_len)
continue;
cookie_raddr += this_off;
}
if (this_len > len)
this_len = len;
while (1) {
unsigned long hv_err;
hv_err = sun4v_ldc_copy(lp->id, copy_dir,
cookie_raddr, ra,
this_len, &actual_len);
if (unlikely(hv_err)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
"HV error %lu\n",
lp->id, hv_err);
if (lp->hs_state != LDC_HS_COMPLETE ||
(lp->flags & LDC_FLAG_RESET))
return -ECONNRESET;
else
return -EFAULT;
}
cookie_raddr += actual_len;
ra += actual_len;
len -= actual_len;
if (actual_len == this_len)
break;
this_len -= actual_len;
}
if (!len)
break;
}
/* It is caller policy what to do about short copies.
* For example, a networking driver can declare the
* packet a runt and drop it.
*/
return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
struct ldc_trans_cookie *cookies, int *ncookies,
unsigned int map_perm)
{
void *buf;
int err;
if (len & (8UL - 1))
return ERR_PTR(-EINVAL);
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
if (err < 0) {
kfree(buf);
return ERR_PTR(err);
}
*ncookies = err;
return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);
void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
struct ldc_trans_cookie *cookies, int ncookies)
{
ldc_unmap(lp, cookies, ncookies);
kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
static int __init ldc_init(void)
{
unsigned long major, minor;
struct mdesc_handle *hp;
const u64 *v;
int err;
u64 mp;
hp = mdesc_grab();
if (!hp)
return -ENODEV;
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
err = -ENODEV;
if (mp == MDESC_NODE_NULL)
goto out;
v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
if (!v)
goto out;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
goto out;
}
printk(KERN_INFO "%s", version);
if (!*v) {
printk(KERN_INFO PFX "Domaining disabled.\n");
goto out;
}
ldom_domaining_enabled = 1;
err = 0;
out:
mdesc_release(hp);
return err;
}
core_initcall(ldc_init);
| gpl-2.0 |
fastos/fastsocket | kernel/fs/9p/fid.c | 750 | 4696 | /*
* V9FS FID Management
*
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2005, 2006 by Eric Van Hensbergen <ericvh@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
/**
* v9fs_fid_add - add a fid to a dentry
* @dentry: dentry that the fid is being added to
* @fid: fid to add
*
*/
int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
{
struct v9fs_dentry *dent;
P9_DPRINTK(P9_DEBUG_VFS, "fid %d dentry %s\n",
fid->fid, dentry->d_name.name);
dent = dentry->d_fsdata;
if (!dent) {
dent = kmalloc(sizeof(struct v9fs_dentry), GFP_KERNEL);
if (!dent)
return -ENOMEM;
spin_lock_init(&dent->lock);
INIT_LIST_HEAD(&dent->fidlist);
dentry->d_fsdata = dent;
}
spin_lock(&dent->lock);
list_add(&fid->dlist, &dent->fidlist);
spin_unlock(&dent->lock);
return 0;
}
/**
* v9fs_fid_find - retrieve a fid that belongs to the specified uid
* @dentry: dentry to look for fid in
* @uid: return fid that belongs to the specified user
* @any: if non-zero, return any fid associated with the dentry
*
*/
static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any)
{
struct v9fs_dentry *dent;
struct p9_fid *fid, *ret;
P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
dentry->d_name.name, dentry, uid, any);
dent = (struct v9fs_dentry *) dentry->d_fsdata;
ret = NULL;
if (dent) {
spin_lock(&dent->lock);
list_for_each_entry(fid, &dent->fidlist, dlist) {
if (any || fid->uid == uid) {
ret = fid;
break;
}
}
spin_unlock(&dent->lock);
}
return ret;
}
/**
* v9fs_fid_lookup - lookup for a fid, try to walk if not found
* @dentry: dentry to look for fid in
*
* Look for a fid in the specified dentry for the current user.
* If no fid is found, try to create one walking from a fid from the parent
* dentry (if it has one), or the root dentry. If the user hasn't accessed
* the fs yet, attach now and walk from the root.
*/
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
{
int i, n, l, clone, any, access;
u32 uid;
struct p9_fid *fid;
struct dentry *d, *ds;
struct v9fs_session_info *v9ses;
char **wnames, *uname;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
access = v9ses->flags & V9FS_ACCESS_MASK;
switch (access) {
case V9FS_ACCESS_SINGLE:
case V9FS_ACCESS_USER:
uid = current_fsuid();
any = 0;
break;
case V9FS_ACCESS_ANY:
uid = v9ses->uid;
any = 1;
break;
default:
uid = ~0;
any = 0;
break;
}
fid = v9fs_fid_find(dentry, uid, any);
if (fid)
return fid;
ds = dentry->d_parent;
fid = v9fs_fid_find(ds, uid, any);
if (!fid) { /* walk from the root */
n = 0;
for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent)
n++;
fid = v9fs_fid_find(ds, uid, any);
if (!fid) { /* the user is not attached to the fs yet */
if (access == V9FS_ACCESS_SINGLE)
return ERR_PTR(-EPERM);
if (v9fs_extended(v9ses))
uname = NULL;
else
uname = v9ses->uname;
fid = p9_client_attach(v9ses->clnt, NULL, uname, uid,
v9ses->aname);
if (IS_ERR(fid))
return fid;
v9fs_fid_add(ds, fid);
}
} else /* walk from the parent */
n = 1;
if (ds == dentry)
return fid;
wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL);
if (!wnames)
return ERR_PTR(-ENOMEM);
for (d = dentry, i = (n-1); i >= 0; i--, d = d->d_parent)
wnames[i] = (char *) d->d_name.name;
clone = 1;
i = 0;
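	/* Walk in chunks of at most P9_MAXWELEM path components,
	 * cloning the starting fid only on the first walk so it
	 * stays usable by its owner.
	 */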
while (i < n) {
l = min(n - i, P9_MAXWELEM);
fid = p9_client_walk(fid, l, &wnames[i], clone);
if (IS_ERR(fid)) {
kfree(wnames);
return fid;
}
i += l;
clone = 0;
}
kfree(wnames);
v9fs_fid_add(dentry, fid);
return fid;
}
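/**
 * v9fs_fid_clone - lookup the fid for a dentry and clone it
 * @dentry: dentry to clone the fid for
 *
 * A zero-element walk of the dentry's fid yields a fresh fid for the
 * same file that the caller may use and release independently.
 */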
struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
{
struct p9_fid *fid, *ret;
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return fid;
ret = p9_client_walk(fid, 0, NULL, 1);
return ret;
}
| gpl-2.0 |
archos-sa/archos-gpl-gen9-kernel | fs/xfs/quota/xfs_qm.c | 750 | 63558 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
/*
* The global quota manager. There is only one of these for the entire
* system, _not_ one per file system. XQM keeps track of the overall
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
struct mutex xfs_Gqm_lock;
struct xfs_qm *xfs_Gqm;
uint ndquot;
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
static cred_t xfs_zerocr;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t);
static struct shrinker xfs_qm_shaker = {
.shrink = xfs_qm_shake,
.seeks = DEFAULT_SEEKS,
};
#ifdef DEBUG
extern struct mutex qcheck_lock;
#endif
#ifdef QUOTADEBUG
static void
xfs_qm_dquot_list_print(
struct xfs_mount *mp)
{
xfs_dquot_t *dqp;
int i = 0;
list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist, q_mplist) {
cmn_err(CE_DEBUG, " %d. \"%d (%s)\" "
"bcnt = %lld, icnt = %lld, refs = %d",
i++, be32_to_cpu(dqp->q_core.d_id),
DQFLAGTO_TYPESTR(dqp),
(long long)be64_to_cpu(dqp->q_core.d_bcount),
(long long)be64_to_cpu(dqp->q_core.d_icount),
dqp->q_nrefs);
}
}
#else
static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
#endif
/*
* Initialize the XQM structure.
* Note that there is not one quota manager per file system.
*/
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
xfs_dqhash_t *udqhash, *gdqhash;
xfs_qm_t *xqm;
size_t hsize;
uint i;
/*
* Initialize the dquot hash tables.
*/
udqhash = kmem_zalloc_greedy(&hsize,
XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
if (!udqhash)
goto out;
gdqhash = kmem_zalloc_large(hsize);
if (!gdqhash)
goto out_free_udqhash;
hsize /= sizeof(xfs_dqhash_t);
ndquot = hsize << 8;
xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
xqm->qm_dqhashmask = hsize - 1;
xqm->qm_usr_dqhtable = udqhash;
xqm->qm_grp_dqhtable = gdqhash;
ASSERT(xqm->qm_usr_dqhtable != NULL);
ASSERT(xqm->qm_grp_dqhtable != NULL);
for (i = 0; i < hsize; i++) {
xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
}
/*
* Freelist of all dquots of all file systems
*/
INIT_LIST_HEAD(&xqm->qm_dqfrlist);
xqm->qm_dqfrlist_cnt = 0;
mutex_init(&xqm->qm_dqfrlist_lock);
/*
* dquot zone. we register our own low-memory callback.
*/
if (!qm_dqzone) {
xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
"xfs_dquots");
qm_dqzone = xqm->qm_dqzone;
} else
xqm->qm_dqzone = qm_dqzone;
register_shrinker(&xfs_qm_shaker);
/*
* The t_dqinfo portion of transactions.
*/
if (!qm_dqtrxzone) {
xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
"xfs_dqtrx");
qm_dqtrxzone = xqm->qm_dqtrxzone;
} else
xqm->qm_dqtrxzone = qm_dqtrxzone;
atomic_set(&xqm->qm_totaldquots, 0);
xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
xqm->qm_nrefs = 0;
#ifdef DEBUG
mutex_init(&qcheck_lock);
#endif
return xqm;
out_free_udqhash:
kmem_free_large(udqhash);
out:
return NULL;
}
/*
* Destroy the global quota manager when its reference count goes to zero.
*/
STATIC void
xfs_qm_destroy(
struct xfs_qm *xqm)
{
struct xfs_dquot *dqp, *n;
int hsize, i;
ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0);
unregister_shrinker(&xfs_qm_shaker);
hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
}
kmem_free_large(xqm->qm_usr_dqhtable);
kmem_free_large(xqm->qm_grp_dqhtable);
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
/* frlist cleanup */
mutex_lock(&xqm->qm_dqfrlist_lock);
list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
xfs_dqlock(dqp);
#ifdef QUOTADEBUG
cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
#endif
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
xfs_dqunlock(dqp);
xfs_qm_dqdestroy(dqp);
}
mutex_unlock(&xqm->qm_dqfrlist_lock);
mutex_destroy(&xqm->qm_dqfrlist_lock);
#ifdef DEBUG
mutex_destroy(&qcheck_lock);
#endif
kmem_free(xqm);
}
/*
* Called at mount time to let XQM know that another file system is
* starting quotas. This isn't crucial information as the individual mount
* structures are pretty independent, but it helps the XQM keep a
* global view of what's going on.
*/
/* ARGSUSED */
STATIC int
xfs_qm_hold_quotafs_ref(
struct xfs_mount *mp)
{
/*
* Need to lock the xfs_Gqm structure for things like this. For example,
* the structure could disappear between the entry to this routine and
* a HOLD operation if not locked.
*/
mutex_lock(&xfs_Gqm_lock);
if (!xfs_Gqm) {
xfs_Gqm = xfs_Gqm_init();
if (!xfs_Gqm) {
mutex_unlock(&xfs_Gqm_lock);
return ENOMEM;
}
}
/*
* We can keep a list of all filesystems with quotas mounted for
* debugging and statistical purposes, but ...
* Just take a reference and get out.
*/
xfs_Gqm->qm_nrefs++;
mutex_unlock(&xfs_Gqm_lock);
return 0;
}
/*
* Release the reference that a filesystem took at mount time,
* so that we know when we need to destroy the entire quota manager.
*/
/* ARGSUSED */
STATIC void
xfs_qm_rele_quotafs_ref(
struct xfs_mount *mp)
{
xfs_dquot_t *dqp, *n;
ASSERT(xfs_Gqm);
ASSERT(xfs_Gqm->qm_nrefs > 0);
/*
* Go thru the freelist and destroy all inactive dquots.
*/
mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
xfs_dqlock(dqp);
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
ASSERT(dqp->q_mount == NULL);
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
ASSERT(list_empty(&dqp->q_hashlist));
ASSERT(list_empty(&dqp->q_mplist));
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
xfs_dqunlock(dqp);
xfs_qm_dqdestroy(dqp);
} else {
xfs_dqunlock(dqp);
}
}
mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
/*
* Destroy the entire XQM. If somebody mounts with quotaon, this'll
* be restarted.
*/
mutex_lock(&xfs_Gqm_lock);
if (--xfs_Gqm->qm_nrefs == 0) {
xfs_qm_destroy(xfs_Gqm);
xfs_Gqm = NULL;
}
mutex_unlock(&xfs_Gqm_lock);
}
/*
* Just destroy the quotainfo structure.
*/
void
xfs_qm_unmount(
struct xfs_mount *mp)
{
if (mp->m_quotainfo) {
xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
xfs_qm_destroy_quotainfo(mp);
}
}
/*
* This is called from xfs_mountfs to start quotas and initialize all
* necessary data structures like quotainfo. This is also responsible for
* running a quotacheck as necessary. We are guaranteed that the superblock
* is consistently read in at this point.
*
* If we fail here, the mount will continue with quota turned off. We don't
* need to indicate success or failure at all.
*/
void
xfs_qm_mount_quotas(
xfs_mount_t *mp)
{
int error = 0;
uint sbf;
/*
* If quotas on realtime volumes is not supported, we disable
* quotas immediately.
*/
if (mp->m_sb.sb_rextents) {
cmn_err(CE_NOTE,
"Cannot turn on quotas for realtime filesystem %s",
mp->m_fsname);
mp->m_qflags = 0;
goto write_changes;
}
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/*
* Allocate the quotainfo structure inside the mount struct, and
* create quotainode(s), and change/rev superblock if necessary.
*/
error = xfs_qm_init_quotainfo(mp);
if (error) {
/*
* We must turn off quotas.
*/
ASSERT(mp->m_quotainfo == NULL);
mp->m_qflags = 0;
goto write_changes;
}
/*
* If any of the quotas are not consistent, do a quotacheck.
*/
if (XFS_QM_NEED_QUOTACHECK(mp)) {
error = xfs_qm_quotacheck(mp);
if (error) {
/* Quotacheck failed and disabled quotas. */
return;
}
}
/*
* If one type of quotas is off, then it will lose its
* quotachecked status, since we won't be doing accounting for
* that type anymore.
*/
if (!XFS_IS_UQUOTA_ON(mp))
mp->m_qflags &= ~XFS_UQUOTA_CHKD;
if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
mp->m_qflags &= ~XFS_OQUOTA_CHKD;
write_changes:
/*
* We actually don't have to acquire the m_sb_lock at all.
* This can only be called from mount, and that's single threaded. XXX
*/
spin_lock(&mp->m_sb_lock);
sbf = mp->m_sb.sb_qflags;
mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
spin_unlock(&mp->m_sb_lock);
if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
/*
* We could only have been turning quotas off.
* We aren't in very good shape actually because
* the incore structures are convinced that quotas are
* off, but the on-disk superblock doesn't know that!
*/
ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
xfs_fs_cmn_err(CE_ALERT, mp,
"XFS mount_quotas: Superblock update failed!");
}
}
if (error) {
xfs_fs_cmn_err(CE_WARN, mp,
"Failed to initialize disk quotas.");
return;
}
#ifdef QUOTADEBUG
if (XFS_IS_QUOTA_ON(mp))
xfs_qm_internalqcheck(mp);
#endif
}
/*
* Called from the vfsops layer.
*/
void
xfs_qm_unmount_quotas(
xfs_mount_t *mp)
{
/*
* Release the dquots that root inode, et al might be holding,
* before we flush quotas and blow away the quotainfo structure.
*/
ASSERT(mp->m_rootip);
xfs_qm_dqdetach(mp->m_rootip);
if (mp->m_rbmip)
xfs_qm_dqdetach(mp->m_rbmip);
if (mp->m_rsumip)
xfs_qm_dqdetach(mp->m_rsumip);
/*
* Release the quota inodes.
*/
if (mp->m_quotainfo) {
if (mp->m_quotainfo->qi_uquotaip) {
IRELE(mp->m_quotainfo->qi_uquotaip);
mp->m_quotainfo->qi_uquotaip = NULL;
}
if (mp->m_quotainfo->qi_gquotaip) {
IRELE(mp->m_quotainfo->qi_gquotaip);
mp->m_quotainfo->qi_gquotaip = NULL;
}
}
}
/*
* Flush all dquots of the given file system to disk. The dquots are
* _not_ purged from memory here, just their data written to disk.
*/
STATIC int
xfs_qm_dqflush_all(
struct xfs_mount *mp,
int sync_mode)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
int recl;
struct xfs_dquot *dqp;
int niters;
int error;
if (!q)
return 0;
niters = 0;
again:
mutex_lock(&q->qi_dqlist_lock);
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
if (! XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqunlock(dqp);
continue;
}
/* XXX a sentinel would be better */
recl = q->qi_dqreclaims;
if (!xfs_dqflock_nowait(dqp)) {
/*
* If we can't grab the flush lock then check
* to see if the dquot has been flushed as a
* delayed write. If so, grab its buffer and send it
* out immediately. We'll be able to acquire
* the flush lock when the I/O completes.
*/
xfs_qm_dqflock_pushbuf_wait(dqp);
}
/*
* Let go of the mplist lock. We don't want to hold it
* across a disk write.
*/
mutex_unlock(&q->qi_dqlist_lock);
error = xfs_qm_dqflush(dqp, sync_mode);
xfs_dqunlock(dqp);
if (error)
return error;
mutex_lock(&q->qi_dqlist_lock);
if (recl != q->qi_dqreclaims) {
mutex_unlock(&q->qi_dqlist_lock);
/* XXX restart limit */
goto again;
}
}
mutex_unlock(&q->qi_dqlist_lock);
/* return ! busy */
return 0;
}
/*
* Release the group dquot pointers the user dquots may be
* carrying around as a hint. mplist is locked on entry and exit.
*/
STATIC void
xfs_qm_detach_gdquots(
struct xfs_mount *mp)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_dquot *dqp, *gdqp;
int nrecl;
again:
ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
if ((gdqp = dqp->q_gdquot)) {
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
xfs_dqunlock(dqp);
if (gdqp) {
/*
* Can't hold the mplist lock across a dqput.
* XXXmust convert to marker based iterations here.
*/
nrecl = q->qi_dqreclaims;
mutex_unlock(&q->qi_dqlist_lock);
xfs_qm_dqput(gdqp);
mutex_lock(&q->qi_dqlist_lock);
if (nrecl != q->qi_dqreclaims)
goto again;
}
}
}
/*
* Go through all the incore dquots of this file system and take them
* off the mplist and hashlist, if the dquot type matches the dqtype
* parameter. This is used when turning off quota accounting for
* users and/or groups, as well as when the filesystem is unmounting.
*/
STATIC int
xfs_qm_dqpurge_int(
struct xfs_mount *mp,
uint flags)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_dquot *dqp, *n;
uint dqtype;
int nrecl;
int nmisses;
if (!q)
return 0;
dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
mutex_lock(&q->qi_dqlist_lock);
/*
* In the first pass through all incore dquots of this filesystem,
* we release the group dquot pointers the user dquots may be
* carrying around as a hint. We need to do this irrespective of
* what's being turned off.
*/
xfs_qm_detach_gdquots(mp);
again:
nmisses = 0;
ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
/*
* Try to get rid of all of the unwanted dquots. The idea is to
* get them off mplist and hashlist, but leave them on freelist.
*/
list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
/*
* It's OK to look at the type without taking dqlock here.
* We're holding the mplist lock here, and that's needed for
* a dqreclaim.
*/
if ((dqp->dq_flags & dqtype) == 0)
continue;
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
nrecl = q->qi_dqreclaims;
mutex_unlock(&q->qi_dqlist_lock);
mutex_lock(&dqp->q_hash->qh_lock);
mutex_lock(&q->qi_dqlist_lock);
/*
* XXXTheoretically, we can get into a very long
* ping pong game here.
* No one can be adding dquots to the mplist at
* this point, but somebody might be taking things off.
*/
if (nrecl != q->qi_dqreclaims) {
mutex_unlock(&dqp->q_hash->qh_lock);
goto again;
}
}
/*
* Take the dquot off the mplist and hashlist. It may remain on
* freelist in INACTIVE state.
*/
nmisses += xfs_qm_dqpurge(dqp);
}
mutex_unlock(&q->qi_dqlist_lock);
return nmisses;
}
int
xfs_qm_dqpurge_all(
xfs_mount_t *mp,
uint flags)
{
int ndquots;
/*
* Purge the dquot cache.
* None of the dquots should really be busy at this point.
*/
if (mp->m_quotainfo) {
while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
delay(ndquots * 10);
}
}
return 0;
}
STATIC int
xfs_qm_dqattach_one(
xfs_inode_t *ip,
xfs_dqid_t id,
uint type,
uint doalloc,
xfs_dquot_t *udqhint, /* hint */
xfs_dquot_t **IO_idqpp)
{
xfs_dquot_t *dqp;
int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
error = 0;
/*
* See if we already have it in the inode itself. IO_idqpp is
* &i_udquot or &i_gdquot. This made the code look weird, but
* made the logic a lot simpler.
*/
dqp = *IO_idqpp;
if (dqp) {
trace_xfs_dqattach_found(dqp);
return 0;
}
/*
* udqhint is the i_udquot field in inode, and is non-NULL only
* when the type arg is group/project. Its purpose is to save a
* lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
* the user dquot.
*/
if (udqhint) {
ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
xfs_dqlock(udqhint);
/*
* No need to take dqlock to look at the id.
*
* The ID can't change until it gets reclaimed, and it won't
* be reclaimed as long as we have a ref from inode and we
* hold the ilock.
*/
dqp = udqhint->q_gdquot;
if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
xfs_dqlock(dqp);
XFS_DQHOLD(dqp);
ASSERT(*IO_idqpp == NULL);
*IO_idqpp = dqp;
xfs_dqunlock(dqp);
xfs_dqunlock(udqhint);
return 0;
}
/*
* We can't hold a dquot lock when we call the dqget code.
* We'll deadlock in no time, because of (not conforming to)
* lock ordering - the inodelock comes before any dquot lock,
* and we may drop and reacquire the ilock in xfs_qm_dqget().
*/
xfs_dqunlock(udqhint);
}
/*
* Find the dquot from somewhere. This bumps the
* reference count of dquot and returns it locked.
* This can return ENOENT if dquot didn't exist on
* disk and we didn't ask it to allocate;
* ESRCH if quotas got turned off suddenly.
*/
error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
if (error)
return error;
trace_xfs_dqattach_get(dqp);
/*
* dqget may have dropped and re-acquired the ilock, but it guarantees
* that the dquot returned is the one that should go in the inode.
*/
*IO_idqpp = dqp;
xfs_dqunlock(dqp);
return 0;
}
/*
* Given a udquot and gdquot, attach a ptr to the group dquot in the
* udquot as a hint for future lookups. The idea sounds simple, but the
* execution isn't, because the udquot might have a group dquot attached
* already and getting rid of that gets us into lock ordering constraints.
* The process is complicated more by the fact that the dquots may or may not
* be locked on entry.
*/
STATIC void
xfs_qm_dqattach_grouphint(
xfs_dquot_t *udq,
xfs_dquot_t *gdq)
{
xfs_dquot_t *tmp;
xfs_dqlock(udq);
if ((tmp = udq->q_gdquot)) {
if (tmp == gdq) {
xfs_dqunlock(udq);
return;
}
udq->q_gdquot = NULL;
/*
* We can't keep any dqlocks when calling dqrele,
* because the freelist lock comes before dqlocks.
*/
xfs_dqunlock(udq);
/*
* we took a hard reference once upon a time in dqget,
* so give it back when the udquot no longer points at it
* dqput() does the unlocking of the dquot.
*/
xfs_qm_dqrele(tmp);
xfs_dqlock(udq);
xfs_dqlock(gdq);
} else {
ASSERT(XFS_DQ_IS_LOCKED(udq));
xfs_dqlock(gdq);
}
ASSERT(XFS_DQ_IS_LOCKED(udq));
ASSERT(XFS_DQ_IS_LOCKED(gdq));
/*
* Somebody could have attached a gdquot here,
* when we dropped the uqlock. If so, just do nothing.
*/
if (udq->q_gdquot == NULL) {
XFS_DQHOLD(gdq);
udq->q_gdquot = gdq;
}
xfs_dqunlock(gdq);
xfs_dqunlock(udq);
}
/*
* Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
* into account.
* If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
* Inode may get unlocked and relocked in here, and the caller must deal with
* the consequences.
*/
int
xfs_qm_dqattach_locked(
xfs_inode_t *ip,
uint flags)
{
xfs_mount_t *mp = ip->i_mount;
uint nquotas = 0;
int error = 0;
if (!XFS_IS_QUOTA_RUNNING(mp) ||
!XFS_IS_QUOTA_ON(mp) ||
!XFS_NOT_DQATTACHED(mp, ip) ||
ip->i_ino == mp->m_sb.sb_uquotino ||
ip->i_ino == mp->m_sb.sb_gquotino)
return 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (XFS_IS_UQUOTA_ON(mp)) {
error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
flags & XFS_QMOPT_DQALLOC,
NULL, &ip->i_udquot);
if (error)
goto done;
nquotas++;
}
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (XFS_IS_OQUOTA_ON(mp)) {
error = XFS_IS_GQUOTA_ON(mp) ?
xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot) :
xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot);
/*
* Don't worry about the udquot that we may have
* attached above. It'll get detached, if not already.
*/
if (error)
goto done;
nquotas++;
}
/*
* Attach this group quota to the user quota as a hint.
* This WON'T, in general, result in a thrash.
*/
if (nquotas == 2) {
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(ip->i_udquot);
ASSERT(ip->i_gdquot);
/*
* We may or may not have the i_udquot locked at this point,
* but this check is OK since we don't depend on the i_gdquot to
* be 100% accurate all the time. It is just a hint, and this
* will succeed in general.
*/
if (ip->i_udquot->q_gdquot == ip->i_gdquot)
goto done;
/*
* Attach i_gdquot to the gdquot hint inside the i_udquot.
*/
xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
}
done:
#ifdef QUOTADEBUG
if (! error) {
if (XFS_IS_UQUOTA_ON(mp))
ASSERT(ip->i_udquot);
if (XFS_IS_OQUOTA_ON(mp))
ASSERT(ip->i_gdquot);
}
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
return error;
}
int
xfs_qm_dqattach(
struct xfs_inode *ip,
uint flags)
{
int error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_qm_dqattach_locked(ip, flags);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* Release dquots (and their references) if any.
* The inode should be locked EXCL except when this is called by
* xfs_ireclaim.
*/
void
xfs_qm_dqdetach(
xfs_inode_t *ip)
{
if (!(ip->i_udquot || ip->i_gdquot))
return;
trace_xfs_dquot_dqdetach(ip);
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
if (ip->i_udquot) {
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
if (ip->i_gdquot) {
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
}
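/*
 * Write back the dirty dquots of this filesystem. With SYNC_TRYLOCK we
 * skip any dquot we cannot lock immediately; otherwise we block until
 * each dirty dquot has been flushed.
 */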
int
xfs_qm_sync(
struct xfs_mount *mp,
int flags)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
int recl, restarts;
struct xfs_dquot *dqp;
int error;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
restarts = 0;
again:
mutex_lock(&q->qi_dqlist_lock);
/*
* dqpurge_all() also takes the mplist lock and iterates through all dquots
* in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
* when we have the mplist lock, we know that dquots will be consistent
* as long as we have it locked.
*/
if (!XFS_IS_QUOTA_ON(mp)) {
mutex_unlock(&q->qi_dqlist_lock);
return 0;
}
ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
/*
* If this is vfs_sync calling, then skip the dquots that
* don't 'seem' to be dirty. ie. don't acquire dqlock.
* This is very similar to what xfs_sync does with inodes.
*/
if (flags & SYNC_TRYLOCK) {
if (!XFS_DQ_IS_DIRTY(dqp))
continue;
if (!xfs_qm_dqlock_nowait(dqp))
continue;
} else {
xfs_dqlock(dqp);
}
/*
* Now, find out for sure if this dquot is dirty or not.
*/
if (! XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqunlock(dqp);
continue;
}
/* XXX a sentinel would be better */
recl = q->qi_dqreclaims;
if (!xfs_dqflock_nowait(dqp)) {
if (flags & SYNC_TRYLOCK) {
xfs_dqunlock(dqp);
continue;
}
/*
* If we can't grab the flush lock then if the caller
* really wanted us to give this our best shot, so
* see if we can give a push to the buffer before we wait
* on the flush lock. At this point, we know that
* even though the dquot is being flushed,
* it has (new) dirty data.
*/
xfs_qm_dqflock_pushbuf_wait(dqp);
}
/*
* Let go of the mplist lock. We don't want to hold it
* across a disk write
*/
mutex_unlock(&q->qi_dqlist_lock);
error = xfs_qm_dqflush(dqp, flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
return 0; /* Need to prevent umount failure */
else if (error)
return error;
mutex_lock(&q->qi_dqlist_lock);
if (recl != q->qi_dqreclaims) {
if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
break;
mutex_unlock(&q->qi_dqlist_lock);
goto again;
}
}
mutex_unlock(&q->qi_dqlist_lock);
return 0;
}
/*
* The hash chains and the mplist use the same xfs_dqhash structure as
* their list head, but we can take the mplist qh_lock and one of the
* hash qh_locks at the same time without any problem as they aren't
* related.
*/
static struct lock_class_key xfs_quota_mplist_class;
/*
* This initializes all the quota information that's kept in the
* mount structure
*/
STATIC int
xfs_qm_init_quotainfo(
xfs_mount_t *mp)
{
xfs_quotainfo_t *qinf;
int error;
xfs_dquot_t *dqp;
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/*
* Tell XQM that we exist as soon as possible.
*/
if ((error = xfs_qm_hold_quotafs_ref(mp))) {
return error;
}
qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
/*
* See if quotainodes are setup, and if not, allocate them,
* and change the superblock accordingly.
*/
if ((error = xfs_qm_init_quotainos(mp))) {
kmem_free(qinf);
mp->m_quotainfo = NULL;
return error;
}
INIT_LIST_HEAD(&qinf->qi_dqlist);
mutex_init(&qinf->qi_dqlist_lock);
lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
qinf->qi_dqreclaims = 0;
/* mutex used to serialize quotaoffs */
mutex_init(&qinf->qi_quotaofflock);
/* Precalc some constants */
qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
ASSERT(qinf->qi_dqchunklen);
qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
/*
* We try to get the limits from the superuser's limits fields.
* This is quite hacky, but it is standard quota practice.
* We look at the USR dquot with id == 0 first, but if user quotas
* are not enabled we goto the GRP dquot with id == 0.
* We don't really care to keep separate default limits for user
* and group quotas, at least not at this point.
*/
error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
(XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
XFS_DQ_PROJ),
XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
&dqp);
if (! error) {
xfs_disk_dquot_t *ddqp = &dqp->q_core;
/*
* The warnings and timers set the grace period given to
* a user or group before writes are no longer allowed.
* If it is zero, a default is used.
*/
qinf->qi_btimelimit = ddqp->d_btimer ?
be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
qinf->qi_itimelimit = ddqp->d_itimer ?
be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
qinf->qi_bwarnlimit = ddqp->d_bwarns ?
be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
qinf->qi_iwarnlimit = ddqp->d_iwarns ?
be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
/*
* We sent the XFS_QMOPT_DQSUSER flag to dqget because
* we don't want this dquot cached. We haven't done a
* quotacheck yet, and quotacheck doesn't like incore dquots.
*/
xfs_qm_dqdestroy(dqp);
} else {
qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
}
return 0;
}
/*
* Gets called when unmounting a filesystem or when all quotas get
* turned off.
* This purges the quota inodes, destroys locks and frees itself.
*/
void
xfs_qm_destroy_quotainfo(
xfs_mount_t *mp)
{
xfs_quotainfo_t *qi;
qi = mp->m_quotainfo;
ASSERT(qi != NULL);
ASSERT(xfs_Gqm != NULL);
/*
* Release the reference that XQM kept, so that we know
* when the XQM structure should be freed. We cannot assume
* that xfs_Gqm is non-null after this point.
*/
xfs_qm_rele_quotafs_ref(mp);
ASSERT(list_empty(&qi->qi_dqlist));
mutex_destroy(&qi->qi_dqlist_lock);
if (qi->qi_uquotaip) {
IRELE(qi->qi_uquotaip);
qi->qi_uquotaip = NULL; /* paranoia */
}
if (qi->qi_gquotaip) {
IRELE(qi->qi_gquotaip);
qi->qi_gquotaip = NULL;
}
mutex_destroy(&qi->qi_quotaofflock);
kmem_free(qi);
mp->m_quotainfo = NULL;
}
/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
/* ARGSUSED */
STATIC void
xfs_qm_list_init(
xfs_dqlist_t *list,
char *str,
int n)
{
mutex_init(&list->qh_lock);
INIT_LIST_HEAD(&list->qh_list);
list->qh_version = 0;
list->qh_nelems = 0;
}
STATIC void
xfs_qm_list_destroy(
xfs_dqlist_t *list)
{
mutex_destroy(&(list->qh_lock));
}
/*
* Stripped down version of dqattach. This doesn't attach, or even look at the
* dquots attached to the inode. The rationale is that there won't be any
* attached at the time this is called from quotacheck.
*/
STATIC int
xfs_qm_dqget_noattach(
xfs_inode_t *ip,
xfs_dquot_t **O_udqpp,
xfs_dquot_t **O_gdqpp)
{
int error;
xfs_mount_t *mp;
xfs_dquot_t *udqp, *gdqp;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
mp = ip->i_mount;
udqp = NULL;
gdqp = NULL;
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
/*
* We want the dquot allocated if it doesn't exist.
*/
if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
&udqp))) {
/*
* Shouldn't be able to turn off quotas here.
*/
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
ASSERT(udqp);
}
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
if (udqp)
xfs_dqunlock(udqp);
error = XFS_IS_GQUOTA_ON(mp) ?
xfs_qm_dqget(mp, ip,
ip->i_d.di_gid, XFS_DQ_GROUP,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp) :
xfs_qm_dqget(mp, ip,
ip->i_d.di_projid, XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp);
if (error) {
if (udqp)
xfs_qm_dqrele(udqp);
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
ASSERT(gdqp);
/* Reacquire the locks in the right order */
if (udqp) {
if (! xfs_qm_dqlock_nowait(udqp)) {
xfs_dqunlock(gdqp);
xfs_dqlock(udqp);
xfs_dqlock(gdqp);
}
}
}
*O_udqpp = udqp;
*O_gdqpp = gdqp;
#ifdef QUOTADEBUG
if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
#endif
return 0;
}
/*
* Create an inode and return with a reference already taken, but unlocked
* This is how we create quota inodes
*/
STATIC int
xfs_qm_qino_alloc(
xfs_mount_t *mp,
xfs_inode_t **ip,
__int64_t sbfields,
uint flags)
{
xfs_trans_t *tp;
int error;
int committed;
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
if ((error = xfs_trans_reserve(tp,
XFS_QM_QINOCREATE_SPACE_RES(mp),
XFS_CREATE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
XFS_CREATE_LOG_COUNT))) {
xfs_trans_cancel(tp, 0);
return error;
}
if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
&xfs_zerocr, 0, 1, ip, &committed))) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
return error;
}
/*
* Keep an extra reference to this quota inode. This inode is
* locked exclusively and joined to the transaction already.
*/
ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
IHOLD(*ip);
/*
* Make the changes in the superblock, and log those too.
* sbfields arg may contain fields other than *QUOTINO;
* VERSIONNUM for example.
*/
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
xfs_sb_version_addquota(&mp->m_sb);
mp->m_sb.sb_uquotino = NULLFSINO;
mp->m_sb.sb_gquotino = NULLFSINO;
/* qflags will get updated _after_ quotacheck */
mp->m_sb.sb_qflags = 0;
}
if (flags & XFS_QMOPT_UQUOTA)
mp->m_sb.sb_uquotino = (*ip)->i_ino;
else
mp->m_sb.sb_gquotino = (*ip)->i_ino;
spin_unlock(&mp->m_sb_lock);
xfs_mod_sb(tp, sbfields);
if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
return error;
}
return 0;
}
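/*
 * Zero the counters and timers of every on-disk dquot in this buffer,
 * repairing obviously broken dqblks on the way, so that quotacheck can
 * rebuild the usage figures from scratch.
 */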
STATIC void
xfs_qm_reset_dqcounts(
xfs_mount_t *mp,
xfs_buf_t *bp,
xfs_dqid_t id,
uint type)
{
xfs_disk_dquot_t *ddq;
int j;
trace_xfs_reset_dqcounts(bp, _RET_IP_);
/*
* Reset all counters and timers. They'll be
* started afresh by xfs_qm_quotacheck.
*/
#ifdef DEBUG
j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
do_div(j, sizeof(xfs_dqblk_t));
ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
* find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
*/
(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
"xfs_quotacheck");
ddq->d_bcount = 0;
ddq->d_icount = 0;
ddq->d_rtbcount = 0;
ddq->d_btimer = 0;
ddq->d_itimer = 0;
ddq->d_rtbtimer = 0;
ddq->d_bwarns = 0;
ddq->d_iwarns = 0;
ddq->d_rtbwarns = 0;
ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
}
}
STATIC int
xfs_qm_dqiter_bufs(
xfs_mount_t *mp,
xfs_dqid_t firstid,
xfs_fsblock_t bno,
xfs_filblks_t blkcnt,
uint flags)
{
xfs_buf_t *bp;
int error;
int notcommitted;
int incr;
int type;
ASSERT(blkcnt > 0);
notcommitted = 0;
incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
error = 0;
/*
* Blkcnt arg can be a very big number, and might even be
* larger than the log itself. So, we have to break it up into
* manageable-sized transactions.
* Note that we don't start a permanent transaction here; we might
* not be able to get a log reservation for the whole thing up front,
* and we don't really care to either, because we just discard
* everything if we were to crash in the middle of this loop.
*/
while (blkcnt--) {
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, bno),
mp->m_quotainfo->qi_dqchunklen, 0, &bp);
if (error)
break;
xfs_qm_reset_dqcounts(mp, bp, firstid, type);
xfs_bdwrite(mp, bp);
/*
* Go to the next block.
*/
bno++;
firstid += mp->m_quotainfo->qi_dqperchunk;
}
return error;
}
/*
* Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
* caller supplied function for every chunk of dquots that we find.
*/
STATIC int
xfs_qm_dqiterate(
xfs_mount_t *mp,
xfs_inode_t *qip,
uint flags)
{
xfs_bmbt_irec_t *map;
int i, nmaps; /* number of map entries */
int error; /* return value */
xfs_fileoff_t lblkno;
xfs_filblks_t maxlblkcnt;
xfs_dqid_t firstid;
xfs_fsblock_t rablkno;
xfs_filblks_t rablkcnt;
error = 0;
/*
* This looks racy, but we can't keep an inode lock across a
* trans_reserve. But, this gets called during quotacheck, and that
* happens only at mount time which is single threaded.
*/
if (qip->i_d.di_nblocks == 0)
return 0;
map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
lblkno = 0;
maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
do {
nmaps = XFS_DQITER_MAP_SIZE;
/*
* We aren't changing the inode itself. Just changing
* some of its data. No new blocks are added here, and
* the inode is never added to the transaction.
*/
xfs_ilock(qip, XFS_ILOCK_SHARED);
error = xfs_bmapi(NULL, qip, lblkno,
maxlblkcnt - lblkno,
XFS_BMAPI_METADATA,
NULL,
0, map, &nmaps, NULL, NULL);
xfs_iunlock(qip, XFS_ILOCK_SHARED);
if (error)
break;
ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
for (i = 0; i < nmaps; i++) {
ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
ASSERT(map[i].br_blockcount);
lblkno += map[i].br_blockcount;
if (map[i].br_startblock == HOLESTARTBLOCK)
continue;
firstid = (xfs_dqid_t) map[i].br_startoff *
mp->m_quotainfo->qi_dqperchunk;
/*
* Do a read-ahead on the next extent.
*/
if ((i+1 < nmaps) &&
(map[i+1].br_startblock != HOLESTARTBLOCK)) {
rablkcnt = map[i+1].br_blockcount;
rablkno = map[i+1].br_startblock;
while (rablkcnt--) {
xfs_baread(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, rablkno),
mp->m_quotainfo->qi_dqchunklen);
rablkno++;
}
}
/*
* Iterate thru all the blks in the extent and
* reset the counters of all the dquots inside them.
*/
if ((error = xfs_qm_dqiter_bufs(mp,
firstid,
map[i].br_startblock,
map[i].br_blockcount,
flags))) {
break;
}
}
if (error)
break;
} while (nmaps > 0);
kmem_free(map);
return error;
}
/*
* Called by dqusage_adjust in doing a quotacheck.
* Given the inode, and a dquot (either USR or GRP, doesn't matter),
* this updates its incore copy as well as the buffer copy. This is
* so that once the quotacheck is done, we can just log all the buffers,
* as opposed to logging numerous updates to individual dquots.
*/
STATIC void
xfs_qm_quotacheck_dqadjust(
xfs_dquot_t *dqp,
xfs_qcnt_t nblks,
xfs_qcnt_t rtblks)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
trace_xfs_dqadjust(dqp);
/*
* Adjust the inode count and the block count to reflect this inode's
* resource usage.
*/
be64_add_cpu(&dqp->q_core.d_icount, 1);
dqp->q_res_icount++;
if (nblks) {
be64_add_cpu(&dqp->q_core.d_bcount, nblks);
dqp->q_res_bcount += nblks;
}
if (rtblks) {
be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
dqp->q_res_rtbcount += rtblks;
}
/*
* Set default limits, adjust timers (since we changed usages)
*
* There are no timers for the default values set in the root dquot.
*/
if (dqp->q_core.d_id) {
xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
}
dqp->dq_flags |= XFS_DQ_DIRTY;
}
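/*
 * Count the realtime blocks allocated to an inode by walking the extent
 * records of its data fork, reading them in first if necessary.
 */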
STATIC int
xfs_qm_get_rtblks(
xfs_inode_t *ip,
xfs_qcnt_t *O_rtblks)
{
xfs_filblks_t rtblks; /* total rt blks */
xfs_extnum_t idx; /* extent record index */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_extnum_t nextents; /* number of extent entries */
int error;
ASSERT(XFS_IS_REALTIME_INODE(ip));
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
return error;
}
rtblks = 0;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
for (idx = 0; idx < nextents; idx++)
rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
*O_rtblks = (xfs_qcnt_t)rtblks;
return 0;
}
/*
* callback routine supplied to bulkstat(). Given an inumber, find its
* dquots and update them to account for resources taken by that inode.
*/
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
xfs_mount_t *mp, /* mount point for filesystem */
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
int *ubused, /* not used */
int *res) /* result code value */
{
xfs_inode_t *ip;
xfs_dquot_t *udqp, *gdqp;
xfs_qcnt_t nblks, rtblks;
int error;
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/*
* rootino must have its resources accounted for; not so with the quota
* inodes.
*/
if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
*res = BULKSTAT_RV_NOTHING;
return XFS_ERROR(EINVAL);
}
/*
* We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
* interface expects the inode to be exclusively locked because that's
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
*res = BULKSTAT_RV_NOTHING;
return error;
}
/*
* Obtain the locked dquots. In case of an error (e.g. allocation
* fails due to ENOSPC), we return the negative of the error number
* to bulkstat, so that it can get propagated to quotacheck() and
* cause us to disable quotas for the file system.
*/
if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
xfs_iput(ip, XFS_ILOCK_EXCL);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
rtblks = 0;
if (! XFS_IS_REALTIME_INODE(ip)) {
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
} else {
/*
* Walk thru the extent list and count the realtime blocks.
*/
if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
xfs_iput(ip, XFS_ILOCK_EXCL);
if (udqp)
xfs_qm_dqput(udqp);
if (gdqp)
xfs_qm_dqput(gdqp);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
}
ASSERT(ip->i_delayed_blks == 0);
/*
* We can't release the inode while holding its dquot locks.
* The inode can go into inactive and might try to acquire the dquot locks.
* So, just unlock here and do a vn_rele at the end.
*/
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/*
* Add the (disk blocks and inode) resources occupied by this
* inode to its dquots. We do this adjustment in the incore dquot,
* and also copy the changes to its buffer.
* We don't care about putting these changes in a transaction
* envelope because if we crash in the middle of a 'quotacheck'
* we have to start from the beginning anyway.
* Once we're done, we'll log all the dquot bufs.
*
* The *QUOTA_ON checks below may look pretty racy, but quotachecks
* and quotaoffs don't race. (Quotachecks happen at mount time only).
*/
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(udqp);
xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
xfs_qm_dqput(udqp);
}
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(gdqp);
xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
xfs_qm_dqput(gdqp);
}
/*
* Now release the inode. This will send it to 'inactive', and
* possibly even free blocks.
*/
IRELE(ip);
/*
* Go to the next inode.
*/
*res = BULKSTAT_RV_DIDONE;
return 0;
}
/*
* Walk thru all the filesystem inodes and construct a consistent view
* of the disk quota world. If the quotacheck fails, disable quotas.
*/
int
xfs_qm_quotacheck(
xfs_mount_t *mp)
{
int done, count, error;
xfs_ino_t lastino;
size_t structsz;
xfs_inode_t *uip, *gip;
uint flags;
count = INT_MAX;
structsz = 1;
lastino = 0;
flags = 0;
ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/*
* There should be no cached dquots. The (simplistic) quotacheck
* algorithm doesn't like that.
*/
ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
/*
* First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
* their counters to zero. We need a clean slate.
* We don't log our changes till later.
*/
uip = mp->m_quotainfo->qi_uquotaip;
if (uip) {
error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
if (error)
goto error_return;
flags |= XFS_UQUOTA_CHKD;
}
gip = mp->m_quotainfo->qi_gquotaip;
if (gip) {
error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
if (error)
goto error_return;
flags |= XFS_OQUOTA_CHKD;
}
do {
/*
* Iterate thru all the inodes in the file system,
* adjusting the corresponding dquot counters in core.
*/
error = xfs_bulkstat(mp, &lastino, &count,
xfs_qm_dqusage_adjust,
structsz, NULL, &done);
if (error)
break;
} while (!done);
/*
* We've made all the changes that we need to make incore.
* Flush them down to disk buffers if everything was updated
* successfully.
*/
if (!error)
error = xfs_qm_dqflush_all(mp, 0);
/*
* We can get this error if we couldn't do a dquot allocation inside
* xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
* dirty dquots that might be cached, we just want to get rid of them
* and turn quotaoff. The dquots won't be attached to any of the inodes
* at this point (because we intentionally didn't in dqget_noattach).
*/
if (error) {
xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
goto error_return;
}
/*
* We didn't log anything, because if we crashed, we'll have to
* start the quotacheck from scratch anyway. However, we must make
* sure that our dquot changes are secure before we put the
* quotacheck'd stamp on the superblock. So, here we do a synchronous
* flush.
*/
XFS_bflush(mp->m_ddev_targp);
/*
* If one type of quotas is off, then it will lose its
* quotachecked status, since we won't be doing accounting for
* that type anymore.
*/
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags |= flags;
xfs_qm_dquot_list_print(mp);
error_return:
if (error) {
cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
"Disabling quotas.",
mp->m_fsname, error);
/*
* We must turn off quotas.
*/
ASSERT(mp->m_quotainfo != NULL);
ASSERT(xfs_Gqm != NULL);
xfs_qm_destroy_quotainfo(mp);
if (xfs_mount_reset_sbqflags(mp)) {
cmn_err(CE_WARN, "XFS quotacheck %s: "
"Failed to reset quota flags.", mp->m_fsname);
}
} else {
cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
}
return (error);
}
/*
* This is called after the superblock has been read in and we're ready to
* iget the quota inodes.
*/
STATIC int
xfs_qm_init_quotainos(
xfs_mount_t *mp)
{
xfs_inode_t *uip, *gip;
int error;
__int64_t sbflags;
uint flags;
ASSERT(mp->m_quotainfo);
uip = gip = NULL;
sbflags = 0;
flags = 0;
/*
* Get the uquota and gquota inodes
*/
if (xfs_sb_version_hasquota(&mp->m_sb)) {
if (XFS_IS_UQUOTA_ON(mp) &&
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, 0, &uip)))
return XFS_ERROR(error);
}
if (XFS_IS_OQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
0, 0, &gip))) {
if (uip)
IRELE(uip);
return XFS_ERROR(error);
}
}
} else {
flags |= XFS_QMOPT_SBVERSION;
sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
}
/*
* Create the two inodes, if they don't exist already. The changes
* made above will get added to a transaction and logged in one of
* the qino_alloc calls below. If the device is readonly,
* temporarily switch to read-write to do this.
*/
if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
if ((error = xfs_qm_qino_alloc(mp, &uip,
sbflags | XFS_SB_UQUOTINO,
flags | XFS_QMOPT_UQUOTA)))
return XFS_ERROR(error);
flags &= ~XFS_QMOPT_SBVERSION;
}
if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
flags |= (XFS_IS_GQUOTA_ON(mp) ?
XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
error = xfs_qm_qino_alloc(mp, &gip,
sbflags | XFS_SB_GQUOTINO, flags);
if (error) {
if (uip)
IRELE(uip);
return XFS_ERROR(error);
}
}
mp->m_quotainfo->qi_uquotaip = uip;
mp->m_quotainfo->qi_gquotaip = gip;
return 0;
}
/*
* Just pop the least recently used dquot off the freelist and
* recycle it. The returned dquot is locked.
*/
STATIC xfs_dquot_t *
xfs_qm_dqreclaim_one(void)
{
xfs_dquot_t *dqpout;
xfs_dquot_t *dqp;
int restarts;
restarts = 0;
dqpout = NULL;
/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
startagain:
mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
struct xfs_mount *mp = dqp->q_mount;
xfs_dqlock(dqp);
/*
* We are racing with dqlookup here. Naturally we don't
* want to reclaim a dquot that lookup wants. We release the
* freelist lock and start over, so that lookup will grab
* both the dquot and the freelistlock.
*/
if (dqp->dq_flags & XFS_DQ_WANT) {
ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
trace_xfs_dqreclaim_want(dqp);
xfs_dqunlock(dqp);
mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return NULL;
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
goto startagain;
}
/*
* If the dquot is inactive, we are assured that it is
* not on the mplist or the hashlist, and that makes our
* life easier.
*/
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
ASSERT(mp == NULL);
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
ASSERT(list_empty(&dqp->q_hashlist));
ASSERT(list_empty(&dqp->q_mplist));
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
xfs_dqunlock(dqp);
dqpout = dqp;
XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
break;
}
ASSERT(dqp->q_hash);
ASSERT(!list_empty(&dqp->q_mplist));
/*
* Try to grab the flush lock. If this dquot is in the process of
* getting flushed to disk, we don't want to reclaim it.
*/
if (!xfs_dqflock_nowait(dqp)) {
xfs_dqunlock(dqp);
continue;
}
/*
* We have the flush lock so we know that this is not in the
* process of being flushed. So, if this is dirty, flush it
* DELWRI so that we don't get a freelist infested with
* dirty dquots.
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
trace_xfs_dqreclaim_dirty(dqp);
/*
* We flush it delayed write, so don't bother
* releasing the freelist lock.
*/
error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, mp,
"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
}
xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
continue;
}
/*
* We're trying to get the hashlock out of order. This races
* with dqlookup; so, we give up and go to the next dquot if
* we can't get the hashlock. This way, we won't starve
* a dqlookup process that holds the hashlock that is
* waiting for the freelist lock.
*/
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
restarts++;
goto dqfunlock;
}
/*
* This races with dquot allocation code as well as dqflush_all
* and reclaim code. So, if we fail to grab the mplist lock,
* give up everything and start over.
*/
if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
restarts++;
mutex_unlock(&dqp->q_hash->qh_lock);
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return NULL;
goto startagain;
}
ASSERT(dqp->q_nrefs == 0);
list_del_init(&dqp->q_mplist);
mp->m_quotainfo->qi_dquots--;
mp->m_quotainfo->qi_dqreclaims++;
list_del_init(&dqp->q_hashlist);
dqp->q_hash->qh_version++;
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
dqpout = dqp;
mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
mutex_unlock(&dqp->q_hash->qh_lock);
dqfunlock:
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
if (dqpout)
break;
if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return NULL;
}
mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
return dqpout;
}
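/*
 * Illustrative sketch, not part of XFS: the lock-order discipline used in
 * the reclaim path above. When a lock must be taken against the documented
 * order, only trylock it; on failure drop everything, bound the retries and
 * restart from the top. A standalone userspace sketch with hypothetical
 * names:
 */
#if 0
#include <pthread.h>
#define MAX_RESTARTS 4
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* order: inner, then outer */
static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static int do_work(void)
{
int restarts = 0;
again:
pthread_mutex_lock(&outer);
/* inner would normally be taken first, so we may only trylock it here */
if (pthread_mutex_trylock(&inner) != 0) {
pthread_mutex_unlock(&outer);
if (++restarts >= MAX_RESTARTS)
return -1; /* give up rather than spin forever */
goto again;
}
/* ... both locks held without risking deadlock, do the work ... */
pthread_mutex_unlock(&inner);
pthread_mutex_unlock(&outer);
return 0;
}
#endif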
/*
* Traverse the freelist of dquots and attempt to reclaim a maximum of
* 'howmany' dquots. This operation races with dqlookup(), and attempts to
* favor the lookup function ...
*/
STATIC int
xfs_qm_shake_freelist(
int howmany)
{
int nreclaimed = 0;
xfs_dquot_t *dqp;
if (howmany <= 0)
return 0;
while (nreclaimed < howmany) {
dqp = xfs_qm_dqreclaim_one();
if (!dqp)
return nreclaimed;
xfs_qm_dqdestroy(dqp);
nreclaimed++;
}
return nreclaimed;
}
/*
* The kmem_shake interface is invoked when memory is running low.
*/
/* ARGSUSED */
STATIC int
xfs_qm_shake(
struct shrinker *shrink,
int nr_to_scan,
gfp_t gfp_mask)
{
int ndqused, nfree, n;
if (!kmem_shake_allow(gfp_mask))
return 0;
if (!xfs_Gqm)
return 0;
nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
/* incore dquots in all f/s's */
ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
ASSERT(ndqused >= 0);
if (nfree <= ndqused && nfree < ndquot)
return 0;
ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */
n = nfree - ndqused - ndquot; /* # over target */
return xfs_qm_shake_freelist(MAX(nfree, n));
}
/*------------------------------------------------------------------*/
/*
* Return a new incore dquot. Depending on the number of
* dquots in the system, we either allocate a new one on the kernel heap,
* or reclaim a free one.
* Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
* to reclaim an existing one from the freelist.
*/
boolean_t
xfs_qm_dqalloc_incore(
xfs_dquot_t **O_dqpp)
{
xfs_dquot_t *dqp;
/*
* Check against high water mark to see if we want to pop
* a nincompoop dquot off the freelist.
*/
if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
/*
* Try to recycle a dquot from the freelist.
*/
if ((dqp = xfs_qm_dqreclaim_one())) {
XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
/*
* Just zero the core here. The rest will get
* reinitialized by caller. XXX we shouldn't even
* do this zero ...
*/
memset(&dqp->q_core, 0, sizeof(dqp->q_core));
*O_dqpp = dqp;
return B_FALSE;
}
XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
}
/*
* Allocate a brand new dquot on the kernel heap and return it
* to the caller to initialize.
*/
ASSERT(xfs_Gqm->qm_dqzone != NULL);
*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
atomic_inc(&xfs_Gqm->qm_totaldquots);
return B_TRUE;
}
/*
* Start a transaction and write the incore superblock changes to
* disk. flags parameter indicates which fields have changed.
*/
int
xfs_qm_write_sb_changes(
xfs_mount_t *mp,
__int64_t flags)
{
xfs_trans_t *tp;
int error;
#ifdef QUOTADEBUG
cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if ((error = xfs_trans_reserve(tp, 0,
mp->m_sb.sb_sectsize + 128, 0,
0,
XFS_DEFAULT_LOG_COUNT))) {
xfs_trans_cancel(tp, 0);
return error;
}
xfs_mod_sb(tp, flags);
error = xfs_trans_commit(tp, 0);
return error;
}
/* --------------- utility functions for vnodeops ---------------- */
/*
* Given an inode, a uid and gid (from cred_t), make sure that we have
* allocated relevant dquot(s) on disk, and that we won't exceed inode
* quotas by creating this file.
* This also attaches dquot(s) to the given inode after locking it,
* and returns the dquots corresponding to the uid and/or gid.
*
* in : inode (unlocked)
* out : udquot, gdquot with references taken and unlocked
*/
int
xfs_qm_vop_dqalloc(
struct xfs_inode *ip,
uid_t uid,
gid_t gid,
prid_t prid,
uint flags,
struct xfs_dquot **O_udqpp,
struct xfs_dquot **O_gdqpp)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_dquot *uq, *gq;
int error;
uint lockflags;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
lockflags = XFS_ILOCK_EXCL;
xfs_ilock(ip, lockflags);
if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
gid = ip->i_d.di_gid;
/*
* Attach the dquot(s) to this inode, doing a dquot allocation
* if necessary. The dquot(s) will not be locked.
*/
if (XFS_NOT_DQATTACHED(mp, ip)) {
error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
if (error) {
xfs_iunlock(ip, lockflags);
return error;
}
}
uq = gq = NULL;
if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
if (ip->i_d.di_uid != uid) {
/*
* What we need is the dquot that has this uid, and
* if we send the inode to dqget, the uid of the inode
* takes priority over what's sent in the uid argument.
* We must unlock inode here before calling dqget if
* we're not sending the inode, because otherwise
* we'll deadlock by doing trans_reserve while
* holding ilock.
*/
xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
XFS_DQ_USER,
XFS_QMOPT_DQALLOC |
XFS_QMOPT_DOWARN,
&uq))) {
ASSERT(error != ENOENT);
return error;
}
/*
* Get the ilock in the right order.
*/
xfs_dqunlock(uq);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
/*
* Take an extra reference, because we'll return
* this to the caller.
*/
ASSERT(ip->i_udquot);
uq = ip->i_udquot;
xfs_dqlock(uq);
XFS_DQHOLD(uq);
xfs_dqunlock(uq);
}
}
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
if (ip->i_d.di_gid != gid) {
xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
XFS_DQ_GROUP,
XFS_QMOPT_DQALLOC |
XFS_QMOPT_DOWARN,
&gq))) {
if (uq)
xfs_qm_dqrele(uq);
ASSERT(error != ENOENT);
return error;
}
xfs_dqunlock(gq);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_gdquot);
gq = ip->i_gdquot;
xfs_dqlock(gq);
XFS_DQHOLD(gq);
xfs_dqunlock(gq);
}
} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
if (ip->i_d.di_projid != prid) {
xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC |
XFS_QMOPT_DOWARN,
&gq))) {
if (uq)
xfs_qm_dqrele(uq);
ASSERT(error != ENOENT);
return (error);
}
xfs_dqunlock(gq);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_gdquot);
gq = ip->i_gdquot;
xfs_dqlock(gq);
XFS_DQHOLD(gq);
xfs_dqunlock(gq);
}
}
if (uq)
trace_xfs_dquot_dqalloc(ip);
xfs_iunlock(ip, lockflags);
if (O_udqpp)
*O_udqpp = uq;
else if (uq)
xfs_qm_dqrele(uq);
if (O_gdqpp)
*O_gdqpp = gq;
else if (gq)
xfs_qm_dqrele(gq);
return 0;
}
/*
* Actually transfer ownership, and do dquot modifications.
* These were already reserved.
*/
xfs_dquot_t *
xfs_qm_vop_chown(
xfs_trans_t *tp,
xfs_inode_t *ip,
xfs_dquot_t **IO_olddq,
xfs_dquot_t *newdq)
{
xfs_dquot_t *prevdq;
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
/* old dquot */
prevdq = *IO_olddq;
ASSERT(prevdq);
ASSERT(prevdq != newdq);
xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
/* the sparkling new dquot */
xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
* Take an extra reference, because the inode
* is going to keep this dquot pointer even
* after the trans_commit.
*/
xfs_dqlock(newdq);
XFS_DQHOLD(newdq);
xfs_dqunlock(newdq);
*IO_olddq = newdq;
return prevdq;
}
/*
* Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
*/
int
xfs_qm_vop_chown_reserve(
xfs_trans_t *tp,
xfs_inode_t *ip,
xfs_dquot_t *udqp,
xfs_dquot_t *gdqp,
uint flags)
{
xfs_mount_t *mp = ip->i_mount;
uint delblks, blkflags, prjflags = 0;
xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
delblks = ip->i_delayed_blks;
delblksudq = delblksgdq = unresudq = unresgdq = NULL;
blkflags = XFS_IS_REALTIME_INODE(ip) ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
if (XFS_IS_UQUOTA_ON(mp) && udqp &&
ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
delblksudq = udqp;
/*
* If there are delayed allocation blocks, then we have to
* unreserve those from the old dquot, and add them to the
* new dquot.
*/
if (delblks) {
ASSERT(ip->i_udquot);
unresudq = ip->i_udquot;
}
}
if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
prjflags = XFS_QMOPT_ENOSPC;
if (prjflags ||
(XFS_IS_GQUOTA_ON(ip->i_mount) &&
ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
delblksgdq = gdqp;
if (delblks) {
ASSERT(ip->i_gdquot);
unresgdq = ip->i_gdquot;
}
}
}
if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
flags | blkflags | prjflags)))
return (error);
/*
* Do the delayed blks reservations/unreservations now. Since these
* are done without the help of a transaction, if a reservation fails,
* its previous reservations won't be automatically undone by the
* transaction code. So, we have to do it manually here.
*/
if (delblks) {
/*
* Do the reservations first. Unreservation can't fail.
*/
ASSERT(delblksudq || delblksgdq);
ASSERT(unresudq || unresgdq);
if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
flags | blkflags | prjflags)))
return (error);
xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
blkflags);
}
return (0);
}
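/*
 * Illustrative sketch, not part of XFS: the ordering rule used for the
 * delayed blocks above. When moving a reservation between two accounts
 * without a transaction, charge the new account first (the step that can
 * fail) and only then release the old one (which cannot fail), so no manual
 * rollback is ever needed. All names below are hypothetical:
 */
#if 0
#include <errno.h>
struct account {
long reserved;
long limit;
};
static int transfer_reservation(struct account *from, struct account *to,
long nblks)
{
/* step 1: the fallible part first */
if (to->reserved + nblks > to->limit)
return -EDQUOT;
to->reserved += nblks;
/* step 2: releasing cannot fail, so step 1 never needs undoing */
from->reserved -= nblks;
return 0;
}
#endif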
int
xfs_qm_vop_rename_dqattach(
struct xfs_inode **i_tab)
{
struct xfs_mount *mp = i_tab[0]->i_mount;
int i;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
for (i = 0; (i < 4 && i_tab[i]); i++) {
struct xfs_inode *ip = i_tab[i];
int error;
/*
* Watch out for duplicate entries in the table.
*/
if (i == 0 || ip != i_tab[i-1]) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
}
}
}
return 0;
}
void
xfs_qm_vop_create_dqattach(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp)
{
struct xfs_mount *mp = tp->t_mountp;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
if (udqp) {
xfs_dqlock(udqp);
XFS_DQHOLD(udqp);
xfs_dqunlock(udqp);
ASSERT(ip->i_udquot == NULL);
ip->i_udquot = udqp;
ASSERT(XFS_IS_UQUOTA_ON(mp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (gdqp) {
xfs_dqlock(gdqp);
XFS_DQHOLD(gdqp);
xfs_dqunlock(gdqp);
ASSERT(ip->i_gdquot == NULL);
ip->i_gdquot = gdqp;
ASSERT(XFS_IS_OQUOTA_ON(mp));
ASSERT((XFS_IS_GQUOTA_ON(mp) ?
ip->i_d.di_gid : ip->i_d.di_projid) ==
be32_to_cpu(gdqp->q_core.d_id));
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
}
| gpl-2.0 |
aborche/cx-919-radxa-rbox-linux-rockchip | fs/ubifs/sb.c | 1006 | 23411 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Artem Bityutskiy (Битюцкий Артём)
* Adrian Hunter
*/
/*
* This file implements UBIFS superblock. The superblock is stored at the first
* LEB of the volume and is never changed by UBIFS. Only user-space tools may
* change it. The superblock node mostly contains geometry information.
*/
#include "ubifs.h"
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/math64.h>
/*
* Default journal size in logical eraseblocks as a percent of total
* flash size.
*/
#define DEFAULT_JNL_PERCENT 5
/* Default maximum journal size in bytes */
#define DEFAULT_MAX_JNL (32*1024*1024)
/* Default indexing tree fanout */
#define DEFAULT_FANOUT 8
/* Default number of data journal heads */
#define DEFAULT_JHEADS_CNT 1
/* Default positions of different LEBs in the main area */
#define DEFAULT_IDX_LEB 0
#define DEFAULT_DATA_LEB 1
#define DEFAULT_GC_LEB 2
/* Default number of LEB numbers in LPT's save table */
#define DEFAULT_LSAVE_CNT 256
/* Default reserved pool size as a percent of maximum free space */
#define DEFAULT_RP_PERCENT 5
/* The default maximum size of reserved pool in bytes */
#define DEFAULT_MAX_RP_SIZE (5*1024*1024)
/* Default time granularity in nanoseconds */
#define DEFAULT_TIME_GRAN 1000000000
/**
* create_default_filesystem - format empty UBI volume.
* @c: UBIFS file-system description object
*
* This function creates a default empty file-system. Returns zero in case of
* success and a negative error code in case of failure.
*/
static int create_default_filesystem(struct ubifs_info *c)
{
struct ubifs_sb_node *sup;
struct ubifs_mst_node *mst;
struct ubifs_idx_node *idx;
struct ubifs_branch *br;
struct ubifs_ino_node *ino;
struct ubifs_cs_node *cs;
union ubifs_key key;
int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first;
int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
int min_leb_cnt = UBIFS_MIN_LEB_CNT;
long long tmp64, main_bytes;
__le64 tmp_le64;
/* Some functions called from here depend on the @c->key_len field */
c->key_len = UBIFS_SK_LEN;
/*
* First of all, we have to calculate default file-system geometry -
* log size, journal size, etc.
*/
if (c->leb_cnt < 0x7FFFFFFF / DEFAULT_JNL_PERCENT)
/* We can first multiply then divide and have no overflow */
jnl_lebs = c->leb_cnt * DEFAULT_JNL_PERCENT / 100;
else
jnl_lebs = (c->leb_cnt / 100) * DEFAULT_JNL_PERCENT;
if (jnl_lebs < UBIFS_MIN_JNL_LEBS)
jnl_lebs = UBIFS_MIN_JNL_LEBS;
if (jnl_lebs * c->leb_size > DEFAULT_MAX_JNL)
jnl_lebs = DEFAULT_MAX_JNL / c->leb_size;
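/*
 * Illustrative sketch (excluded from the build): the overflow-safe
 * percentage computed just above. "x * pct / 100" is exact but can overflow
 * 32-bit arithmetic, while "x / 100 * pct" cannot overflow for small
 * percentages but rounds down. The helper name is hypothetical:
 */
#if 0
static int percent_of(int x, int pct)
{
if (x < 0x7FFFFFFF / pct)
return x * pct / 100; /* exact, no overflow possible */
return (x / 100) * pct; /* never overflows, rounds down */
}
#endif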
/*
* The log should be large enough to fit reference nodes for all bud
* LEBs. Because buds do not have to start from the beginning of LEBs
* (half of the LEB may contain committed data), the log should
* generally be larger, so make it twice as large.
*/
tmp = 2 * (c->ref_node_alsz * jnl_lebs) + c->leb_size - 1;
log_lebs = tmp / c->leb_size;
/* Plus one LEB reserved for commit */
log_lebs += 1;
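/*
 * Illustrative note (excluded from the build): the "+ c->leb_size - 1"
 * above is the usual ceiling-division idiom, i.e. the equivalent of the
 * kernel's DIV_ROUND_UP(bytes, leb_size). Hypothetical standalone form:
 */
#if 0
static int leb_count_for(int bytes, int leb_size)
{
return (bytes + leb_size - 1) / leb_size;
}
#endif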
if (c->leb_cnt - min_leb_cnt > 8) {
/* And some extra space to allow writes while committing */
log_lebs += 1;
min_leb_cnt += 1;
}
max_buds = jnl_lebs - log_lebs;
if (max_buds < UBIFS_MIN_BUD_LEBS)
max_buds = UBIFS_MIN_BUD_LEBS;
/*
* Orphan nodes are stored in a separate area. One node can store a lot
* of orphan inode numbers, but when new orphan comes we just add a new
* orphan node. At some point the nodes are consolidated into one
* orphan node.
*/
orph_lebs = UBIFS_MIN_ORPH_LEBS;
#ifdef CONFIG_UBIFS_FS_DEBUG
if (c->leb_cnt - min_leb_cnt > 1)
/*
* For debugging purposes it is better to have at least 2
* orphan LEBs, because the orphan subsystem would need to do
* consolidations and would be stressed more.
*/
orph_lebs += 1;
#endif
main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs;
main_lebs -= orph_lebs;
lpt_first = UBIFS_LOG_LNUM + log_lebs;
c->lsave_cnt = DEFAULT_LSAVE_CNT;
c->max_leb_cnt = c->leb_cnt;
err = ubifs_create_dflt_lpt(c, &main_lebs, lpt_first, &lpt_lebs,
&big_lpt);
if (err)
return err;
dbg_gen("LEB Properties Tree created (LEBs %d-%d)", lpt_first,
lpt_first + lpt_lebs - 1);
main_first = c->leb_cnt - main_lebs;
/* Create default superblock */
tmp = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
sup = kzalloc(tmp, GFP_KERNEL);
if (!sup)
return -ENOMEM;
tmp64 = (long long)max_buds * c->leb_size;
if (big_lpt)
sup_flags |= UBIFS_FLG_BIGLPT;
sup->ch.node_type = UBIFS_SB_NODE;
sup->key_hash = UBIFS_KEY_HASH_R5;
sup->flags = cpu_to_le32(sup_flags);
sup->min_io_size = cpu_to_le32(c->min_io_size);
sup->leb_size = cpu_to_le32(c->leb_size);
sup->leb_cnt = cpu_to_le32(c->leb_cnt);
sup->max_leb_cnt = cpu_to_le32(c->max_leb_cnt);
sup->max_bud_bytes = cpu_to_le64(tmp64);
sup->log_lebs = cpu_to_le32(log_lebs);
sup->lpt_lebs = cpu_to_le32(lpt_lebs);
sup->orph_lebs = cpu_to_le32(orph_lebs);
sup->jhead_cnt = cpu_to_le32(DEFAULT_JHEADS_CNT);
sup->fanout = cpu_to_le32(DEFAULT_FANOUT);
sup->lsave_cnt = cpu_to_le32(c->lsave_cnt);
sup->fmt_version = cpu_to_le32(UBIFS_FORMAT_VERSION);
sup->time_gran = cpu_to_le32(DEFAULT_TIME_GRAN);
if (c->mount_opts.override_compr)
sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
else
sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
generate_random_uuid(sup->uuid);
main_bytes = (long long)main_lebs * c->leb_size;
tmp64 = div_u64(main_bytes * DEFAULT_RP_PERCENT, 100);
if (tmp64 > DEFAULT_MAX_RP_SIZE)
tmp64 = DEFAULT_MAX_RP_SIZE;
sup->rp_size = cpu_to_le64(tmp64);
sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION);
err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0, UBI_LONGTERM);
kfree(sup);
if (err)
return err;
dbg_gen("default superblock created at LEB 0:0");
/* Create default master node */
mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
if (!mst)
return -ENOMEM;
mst->ch.node_type = UBIFS_MST_NODE;
mst->log_lnum = cpu_to_le32(UBIFS_LOG_LNUM);
mst->highest_inum = cpu_to_le64(UBIFS_FIRST_INO);
mst->cmt_no = 0;
mst->root_lnum = cpu_to_le32(main_first + DEFAULT_IDX_LEB);
mst->root_offs = 0;
tmp = ubifs_idx_node_sz(c, 1);
mst->root_len = cpu_to_le32(tmp);
mst->gc_lnum = cpu_to_le32(main_first + DEFAULT_GC_LEB);
mst->ihead_lnum = cpu_to_le32(main_first + DEFAULT_IDX_LEB);
mst->ihead_offs = cpu_to_le32(ALIGN(tmp, c->min_io_size));
mst->index_size = cpu_to_le64(ALIGN(tmp, 8));
mst->lpt_lnum = cpu_to_le32(c->lpt_lnum);
mst->lpt_offs = cpu_to_le32(c->lpt_offs);
mst->nhead_lnum = cpu_to_le32(c->nhead_lnum);
mst->nhead_offs = cpu_to_le32(c->nhead_offs);
mst->ltab_lnum = cpu_to_le32(c->ltab_lnum);
mst->ltab_offs = cpu_to_le32(c->ltab_offs);
mst->lsave_lnum = cpu_to_le32(c->lsave_lnum);
mst->lsave_offs = cpu_to_le32(c->lsave_offs);
mst->lscan_lnum = cpu_to_le32(main_first);
mst->empty_lebs = cpu_to_le32(main_lebs - 2);
mst->idx_lebs = cpu_to_le32(1);
mst->leb_cnt = cpu_to_le32(c->leb_cnt);
/* Calculate lprops statistics */
tmp64 = main_bytes;
tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size);
tmp64 -= ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
mst->total_free = cpu_to_le64(tmp64);
tmp64 = ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size);
ino_waste = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size) -
UBIFS_INO_NODE_SZ;
tmp64 += ino_waste;
tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), 8);
mst->total_dirty = cpu_to_le64(tmp64);
/* The indexing LEB does not contribute to dark space */
tmp64 = (c->main_lebs - 1) * c->dark_wm;
mst->total_dark = cpu_to_le64(tmp64);
mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);
err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0,
UBI_UNKNOWN);
if (err) {
kfree(mst);
return err;
}
err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, 0,
UBI_UNKNOWN);
kfree(mst);
if (err)
return err;
dbg_gen("default master node created at LEB %d:0", UBIFS_MST_LNUM);
/* Create the root indexing node */
tmp = ubifs_idx_node_sz(c, 1);
idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
if (!idx)
return -ENOMEM;
c->key_fmt = UBIFS_SIMPLE_KEY_FMT;
c->key_hash = key_r5_hash;
idx->ch.node_type = UBIFS_IDX_NODE;
idx->child_cnt = cpu_to_le16(1);
ino_key_init(c, &key, UBIFS_ROOT_INO);
br = ubifs_idx_branch(c, idx, 0);
key_write_idx(c, &key, &br->key);
br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB);
br->len = cpu_to_le32(UBIFS_INO_NODE_SZ);
err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0,
UBI_UNKNOWN);
kfree(idx);
if (err)
return err;
dbg_gen("default root indexing node created LEB %d:0",
main_first + DEFAULT_IDX_LEB);
/* Create default root inode */
tmp = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
ino = kzalloc(tmp, GFP_KERNEL);
if (!ino)
return -ENOMEM;
ino_key_init_flash(c, &ino->key, UBIFS_ROOT_INO);
ino->ch.node_type = UBIFS_INO_NODE;
ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
ino->nlink = cpu_to_le32(2);
tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
ino->atime_sec = tmp_le64;
ino->ctime_sec = tmp_le64;
ino->mtime_sec = tmp_le64;
ino->atime_nsec = 0;
ino->ctime_nsec = 0;
ino->mtime_nsec = 0;
ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);
/* Set compression enabled by default */
ino->flags = cpu_to_le32(UBIFS_COMPR_FL);
err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ,
main_first + DEFAULT_DATA_LEB, 0,
UBI_UNKNOWN);
kfree(ino);
if (err)
return err;
dbg_gen("root inode created at LEB %d:0",
main_first + DEFAULT_DATA_LEB);
/*
* The first node in the log has to be the commit start node. This is
* always the case during normal file-system operation. Write a fake
* commit start node to the log.
*/
tmp = ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size);
cs = kzalloc(tmp, GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->ch.node_type = UBIFS_CS_NODE;
err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM,
0, UBI_UNKNOWN);
kfree(cs);
ubifs_msg("default file-system created");
return 0;
}
/**
* validate_sb - validate superblock node.
* @c: UBIFS file-system description object
* @sup: superblock node
*
* This function validates superblock node @sup. Since most of the data was read
* from the superblock and stored in @c, the function validates fields in @c
* instead. Returns zero in case of success and %-EINVAL in case of validation
* failure.
*/
static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
{
long long max_bytes;
int err = 1, min_leb_cnt;
if (!c->key_hash) {
err = 2;
goto failed;
}
if (sup->key_fmt != UBIFS_SIMPLE_KEY_FMT) {
err = 3;
goto failed;
}
if (le32_to_cpu(sup->min_io_size) != c->min_io_size) {
ubifs_err("min. I/O unit mismatch: %d in superblock, %d real",
le32_to_cpu(sup->min_io_size), c->min_io_size);
goto failed;
}
if (le32_to_cpu(sup->leb_size) != c->leb_size) {
ubifs_err("LEB size mismatch: %d in superblock, %d real",
le32_to_cpu(sup->leb_size), c->leb_size);
goto failed;
}
if (c->log_lebs < UBIFS_MIN_LOG_LEBS ||
c->lpt_lebs < UBIFS_MIN_LPT_LEBS ||
c->orph_lebs < UBIFS_MIN_ORPH_LEBS ||
c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
err = 4;
goto failed;
}
/*
* Calculate the minimum allowed number of main area LEBs. This is very
* similar to %UBIFS_MIN_LEB_CNT, but here we take into account what we
* have just read from the superblock.
*/
min_leb_cnt = UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs;
min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;
if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, "
"%d minimum required", c->leb_cnt, c->vi.size,
min_leb_cnt);
goto failed;
}
if (c->max_leb_cnt < c->leb_cnt) {
ubifs_err("max. LEB count %d less than LEB count %d",
c->max_leb_cnt, c->leb_cnt);
goto failed;
}
if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
err = 7;
goto failed;
}
if (c->max_bud_bytes < (long long)c->leb_size * UBIFS_MIN_BUD_LEBS ||
c->max_bud_bytes > (long long)c->leb_size * c->main_lebs) {
err = 8;
goto failed;
}
if (c->jhead_cnt < NONDATA_JHEADS_CNT + 1 ||
c->jhead_cnt > NONDATA_JHEADS_CNT + UBIFS_MAX_JHEADS) {
err = 9;
goto failed;
}
if (c->fanout < UBIFS_MIN_FANOUT ||
ubifs_idx_node_sz(c, c->fanout) > c->leb_size) {
err = 10;
goto failed;
}
if (c->lsave_cnt < 0 || (c->lsave_cnt > DEFAULT_LSAVE_CNT &&
c->lsave_cnt > c->max_leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS -
c->log_lebs - c->lpt_lebs - c->orph_lebs)) {
err = 11;
goto failed;
}
if (UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs + c->lpt_lebs +
c->orph_lebs + c->main_lebs != c->leb_cnt) {
err = 12;
goto failed;
}
if (c->default_compr < 0 || c->default_compr >= UBIFS_COMPR_TYPES_CNT) {
err = 13;
goto failed;
}
max_bytes = c->main_lebs * (long long)c->leb_size;
if (c->rp_size < 0 || max_bytes < c->rp_size) {
err = 14;
goto failed;
}
if (le32_to_cpu(sup->time_gran) > 1000000000 ||
le32_to_cpu(sup->time_gran) < 1) {
err = 15;
goto failed;
}
return 0;
failed:
ubifs_err("bad superblock, error %d", err);
dbg_dump_node(c, sup);
return -EINVAL;
}
/**
* ubifs_read_sb_node - read superblock node.
* @c: UBIFS file-system description object
*
* This function returns a pointer to the superblock node or a negative error
* code. Note, the user of this function is responsible for kfree()'ing the
* returned superblock buffer.
*/
struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
{
struct ubifs_sb_node *sup;
int err;
sup = kmalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_NOFS);
if (!sup)
return ERR_PTR(-ENOMEM);
err = ubifs_read_node(c, sup, UBIFS_SB_NODE, UBIFS_SB_NODE_SZ,
UBIFS_SB_LNUM, 0);
if (err) {
kfree(sup);
return ERR_PTR(err);
}
return sup;
}
/**
* ubifs_write_sb_node - write superblock node.
* @c: UBIFS file-system description object
* @sup: superblock node read with 'ubifs_read_sb_node()'
*
* This function returns %0 on success and a negative error code on failure.
*/
int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup)
{
int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1);
return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len, UBI_LONGTERM);
}
/**
* ubifs_read_superblock - read superblock.
* @c: UBIFS file-system description object
*
* This function finds, reads and checks the superblock. If an empty UBI volume
* is being mounted, this function creates a default superblock. Returns zero in
* case of success, and a negative error code in case of failure.
*/
int ubifs_read_superblock(struct ubifs_info *c)
{
int err, sup_flags;
struct ubifs_sb_node *sup;
if (c->empty) {
err = create_default_filesystem(c);
if (err)
return err;
}
sup = ubifs_read_sb_node(c);
if (IS_ERR(sup))
return PTR_ERR(sup);
c->fmt_version = le32_to_cpu(sup->fmt_version);
c->ro_compat_version = le32_to_cpu(sup->ro_compat_version);
/*
* The software supports all previous versions but not future versions,
* due to the unavailability of time-travelling equipment.
*/
if (c->fmt_version > UBIFS_FORMAT_VERSION) {
ubifs_assert(!c->ro_media || c->ro_mount);
if (!c->ro_mount ||
c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
ubifs_err("on-flash format version is w%d/r%d, but "
"software only supports up to version "
"w%d/r%d", c->fmt_version,
c->ro_compat_version, UBIFS_FORMAT_VERSION,
UBIFS_RO_COMPAT_VERSION);
if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
ubifs_msg("only R/O mounting is possible");
err = -EROFS;
} else
err = -EINVAL;
goto out;
}
/*
* The FS is mounted R/O, and the media format is
* R/O-compatible with the UBIFS implementation, so we can
* mount.
*/
c->rw_incompat = 1;
}
if (c->fmt_version < 3) {
ubifs_err("on-flash format version %d is not supported",
c->fmt_version);
err = -EINVAL;
goto out;
}
switch (sup->key_hash) {
case UBIFS_KEY_HASH_R5:
c->key_hash = key_r5_hash;
c->key_hash_type = UBIFS_KEY_HASH_R5;
break;
case UBIFS_KEY_HASH_TEST:
c->key_hash = key_test_hash;
c->key_hash_type = UBIFS_KEY_HASH_TEST;
break;
}
c->key_fmt = sup->key_fmt;
switch (c->key_fmt) {
case UBIFS_SIMPLE_KEY_FMT:
c->key_len = UBIFS_SK_LEN;
break;
default:
ubifs_err("unsupported key format");
err = -EINVAL;
goto out;
}
c->leb_cnt = le32_to_cpu(sup->leb_cnt);
c->max_leb_cnt = le32_to_cpu(sup->max_leb_cnt);
c->max_bud_bytes = le64_to_cpu(sup->max_bud_bytes);
c->log_lebs = le32_to_cpu(sup->log_lebs);
c->lpt_lebs = le32_to_cpu(sup->lpt_lebs);
c->orph_lebs = le32_to_cpu(sup->orph_lebs);
c->jhead_cnt = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT;
c->fanout = le32_to_cpu(sup->fanout);
c->lsave_cnt = le32_to_cpu(sup->lsave_cnt);
c->rp_size = le64_to_cpu(sup->rp_size);
c->rp_uid = le32_to_cpu(sup->rp_uid);
c->rp_gid = le32_to_cpu(sup->rp_gid);
sup_flags = le32_to_cpu(sup->flags);
if (!c->mount_opts.override_compr)
c->default_compr = le16_to_cpu(sup->default_compr);
c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran);
memcpy(&c->uuid, &sup->uuid, 16);
c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT);
c->space_fixup = !!(sup_flags & UBIFS_FLG_SPACE_FIXUP);
/* Automatically increase file system size to the maximum size */
c->old_leb_cnt = c->leb_cnt;
if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) {
c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size);
if (c->ro_mount)
dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs",
c->old_leb_cnt, c->leb_cnt);
else {
dbg_mnt("Auto resizing (sb) from %d LEBs to %d LEBs",
c->old_leb_cnt, c->leb_cnt);
sup->leb_cnt = cpu_to_le32(c->leb_cnt);
err = ubifs_write_sb_node(c, sup);
if (err)
goto out;
c->old_leb_cnt = c->leb_cnt;
}
}
c->log_bytes = (long long)c->log_lebs * c->leb_size;
c->log_last = UBIFS_LOG_LNUM + c->log_lebs - 1;
c->lpt_first = UBIFS_LOG_LNUM + c->log_lebs;
c->lpt_last = c->lpt_first + c->lpt_lebs - 1;
c->orph_first = c->lpt_last + 1;
c->orph_last = c->orph_first + c->orph_lebs - 1;
c->main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS;
c->main_lebs -= c->log_lebs + c->lpt_lebs + c->orph_lebs;
c->main_first = c->leb_cnt - c->main_lebs;
err = validate_sb(c, sup);
out:
kfree(sup);
return err;
}
/**
* fixup_leb - fixup/unmap an LEB containing free space.
* @c: UBIFS file-system description object
* @lnum: the LEB number to fix up
* @len: number of used bytes in LEB (starting at offset 0)
*
* This function reads the contents of the given LEB number @lnum, then fixes
* it up, so that empty min. I/O units at the end of the LEB are actually erased
* on flash (rather than merely containing all-0xff data). If the LEB is completely
* empty, it is simply unmapped.
*/
static int fixup_leb(struct ubifs_info *c, int lnum, int len)
{
int err;
ubifs_assert(len >= 0);
ubifs_assert(len % c->min_io_size == 0);
ubifs_assert(len < c->leb_size);
if (len == 0) {
dbg_mnt("unmap empty LEB %d", lnum);
return ubi_leb_unmap(c->ubi, lnum);
}
dbg_mnt("fixup LEB %d, data len %d", lnum, len);
err = ubi_read(c->ubi, lnum, c->sbuf, 0, len);
if (err)
return err;
return ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
}
/**
* fixup_free_space - find & remap all LEBs containing free space.
* @c: UBIFS file-system description object
*
* This function walks through all LEBs in the filesystem and fixes up those
* containing free/empty space.
*/
static int fixup_free_space(struct ubifs_info *c)
{
int lnum, err = 0;
struct ubifs_lprops *lprops;
ubifs_get_lprops(c);
/* Fixup LEBs in the master area */
for (lnum = UBIFS_MST_LNUM; lnum < UBIFS_LOG_LNUM; lnum++) {
err = fixup_leb(c, lnum, c->mst_offs + c->mst_node_alsz);
if (err)
goto out;
}
/* Unmap unused log LEBs */
lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
while (lnum != c->ltail_lnum) {
err = fixup_leb(c, lnum, 0);
if (err)
goto out;
lnum = ubifs_next_log_lnum(c, lnum);
}
/*
* Fix up the log head, which contains only a CS node at the
* beginning.
*/
err = fixup_leb(c, c->lhead_lnum,
ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
if (err)
goto out;
/* Fixup LEBs in the LPT area */
for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
int free = c->ltab[lnum - c->lpt_first].free;
if (free > 0) {
err = fixup_leb(c, lnum, c->leb_size - free);
if (err)
goto out;
}
}
/* Unmap LEBs in the orphans area */
for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
err = fixup_leb(c, lnum, 0);
if (err)
goto out;
}
/* Fixup LEBs in the main area */
for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
lprops = ubifs_lpt_lookup(c, lnum);
if (IS_ERR(lprops)) {
err = PTR_ERR(lprops);
goto out;
}
if (lprops->free > 0) {
err = fixup_leb(c, lnum, c->leb_size - lprops->free);
if (err)
goto out;
}
}
out:
ubifs_release_lprops(c);
return err;
}
/**
* ubifs_fixup_free_space - find & fix all LEBs with free space.
* @c: UBIFS file-system description object
*
* This function fixes up LEBs containing free space on first mount, if the
* appropriate flag was set when the FS was created. Each LEB with one or more
* empty min. I/O units (i.e. free-space-count > 0) is re-written, to make sure
* the free space is actually erased. E.g., this is necessary for some NAND
* chips, since the free space may have been programmed like real "0xff" data
* (generating a non-0xff ECC), causing future writes to the not-really-erased
* NAND pages to behave badly. After the space is fixed up, the superblock flag
* is cleared, so that this is skipped for all future mounts.
*/
int ubifs_fixup_free_space(struct ubifs_info *c)
{
int err;
struct ubifs_sb_node *sup;
ubifs_assert(c->space_fixup);
ubifs_assert(!c->ro_mount);
ubifs_msg("start fixing up free space");
err = fixup_free_space(c);
if (err)
return err;
sup = ubifs_read_sb_node(c);
if (IS_ERR(sup))
return PTR_ERR(sup);
/* Free-space fixup is no longer required */
c->space_fixup = 0;
sup->flags &= cpu_to_le32(~UBIFS_FLG_SPACE_FIXUP);
err = ubifs_write_sb_node(c, sup);
kfree(sup);
if (err)
return err;
ubifs_msg("free space fixup complete");
return err;
}
| gpl-2.0 |
s2plus/android_kernel_samsung_galaxys2plus | drivers/media/dvb/frontends/s5h1420.c | 2542 | 25938 | /*
* Driver for
* Samsung S5H1420 and
* PnpNetwork PN1010 QPSK Demodulator
*
* Copyright (C) 2005 Andrew de Quincey <adq_dvb@lidskialf.net>
* Copyright (C) 2005-8 Patrick Boettcher <pb@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#include "s5h1420.h"
#include "s5h1420_priv.h"
#define TONE_FREQ 22000
struct s5h1420_state {
struct i2c_adapter* i2c;
const struct s5h1420_config* config;
struct dvb_frontend frontend;
struct i2c_adapter tuner_i2c_adapter;
u8 CON_1_val;
u8 postlocked:1;
u32 fclk;
u32 tunedfreq;
fe_code_rate_t fec_inner;
u32 symbol_rate;
/* FIXME: ugly workaround for flexcop's incapable i2c-controller;
* it does not support repeated-start. Workaround: write addr-1
* and then read.
*/
u8 shadow[256];
};
static u32 s5h1420_getsymbolrate(struct s5h1420_state* state);
static int s5h1420_get_tune_settings(struct dvb_frontend* fe,
struct dvb_frontend_tune_settings* fesettings);
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debugging");
#define dprintk(x...) do { \
if (debug) \
printk(KERN_DEBUG "S5H1420: " x); \
} while (0)
static u8 s5h1420_readreg(struct s5h1420_state *state, u8 reg)
{
int ret;
u8 b[2];
struct i2c_msg msg[] = {
{ .addr = state->config->demod_address, .flags = 0, .buf = b, .len = 2 },
{ .addr = state->config->demod_address, .flags = 0, .buf = ®, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b, .len = 1 },
};
b[0] = (reg - 1) & 0xff;
b[1] = state->shadow[(reg - 1) & 0xff];
if (state->config->repeated_start_workaround) {
ret = i2c_transfer(state->i2c, msg, 3);
if (ret != 3)
return ret;
} else {
ret = i2c_transfer(state->i2c, &msg[1], 1);
if (ret != 1)
return ret;
ret = i2c_transfer(state->i2c, &msg[2], 1);
if (ret != 1)
return ret;
}
/* dprintk("rd(%02x): %02x %02x\n", state->config->demod_address, reg, b[0]); */
return b[0];
}
static int s5h1420_writereg (struct s5h1420_state* state, u8 reg, u8 data)
{
u8 buf[] = { reg, data };
struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
int err;
/* dprintk("wr(%02x): %02x %02x\n", state->config->demod_address, reg, data); */
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
dprintk("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n", __func__, err, reg, data);
return -EREMOTEIO;
}
state->shadow[reg] = data;
return 0;
}
static int s5h1420_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltage)
{
struct s5h1420_state* state = fe->demodulator_priv;
dprintk("enter %s\n", __func__);
switch(voltage) {
case SEC_VOLTAGE_13:
s5h1420_writereg(state, 0x3c,
(s5h1420_readreg(state, 0x3c) & 0xfe) | 0x02);
break;
case SEC_VOLTAGE_18:
s5h1420_writereg(state, 0x3c, s5h1420_readreg(state, 0x3c) | 0x03);
break;
case SEC_VOLTAGE_OFF:
s5h1420_writereg(state, 0x3c, s5h1420_readreg(state, 0x3c) & 0xfd);
break;
}
dprintk("leave %s\n", __func__);
return 0;
}
static int s5h1420_set_tone (struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
{
struct s5h1420_state* state = fe->demodulator_priv;
dprintk("enter %s\n", __func__);
switch(tone) {
case SEC_TONE_ON:
s5h1420_writereg(state, 0x3b,
(s5h1420_readreg(state, 0x3b) & 0x74) | 0x08);
break;
case SEC_TONE_OFF:
s5h1420_writereg(state, 0x3b,
(s5h1420_readreg(state, 0x3b) & 0x74) | 0x01);
break;
}
dprintk("leave %s\n", __func__);
return 0;
}
static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
struct dvb_diseqc_master_cmd* cmd)
{
struct s5h1420_state* state = fe->demodulator_priv;
u8 val;
int i;
unsigned long timeout;
int result = 0;
dprintk("enter %s\n", __func__);
if (cmd->msg_len > 8)
return -EINVAL;
/* setup for DISEQC */
val = s5h1420_readreg(state, 0x3b);
s5h1420_writereg(state, 0x3b, 0x02);
msleep(15);
/* write the DISEQC command bytes */
for(i=0; i< cmd->msg_len; i++) {
s5h1420_writereg(state, 0x3d + i, cmd->msg[i]);
}
/* kick off transmission */
s5h1420_writereg(state, 0x3b, s5h1420_readreg(state, 0x3b) |
((cmd->msg_len-1) << 4) | 0x08);
/* wait for transmission to complete */
timeout = jiffies + ((100*HZ) / 1000);
while(time_before(jiffies, timeout)) {
if (!(s5h1420_readreg(state, 0x3b) & 0x08))
break;
msleep(5);
}
if (time_after(jiffies, timeout))
result = -ETIMEDOUT;
/* restore original settings */
s5h1420_writereg(state, 0x3b, val);
msleep(15);
dprintk("leave %s\n", __func__);
return result;
}
static int s5h1420_recv_slave_reply (struct dvb_frontend* fe,
struct dvb_diseqc_slave_reply* reply)
{
struct s5h1420_state* state = fe->demodulator_priv;
u8 val;
int i;
int length;
unsigned long timeout;
int result = 0;
/* setup for DISEQC receive */
val = s5h1420_readreg(state, 0x3b);
s5h1420_writereg(state, 0x3b, 0x82); /* FIXME: guess - do we need to set DIS_RDY(0x08) in receive mode? */
msleep(15);
/* wait for reception to complete */
timeout = jiffies + ((reply->timeout*HZ) / 1000);
while(time_before(jiffies, timeout)) {
if (!(s5h1420_readreg(state, 0x3b) & 0x80)) /* FIXME: do we test DIS_RDY(0x08) or RCV_EN(0x80)? */
break;
msleep(5);
}
if (time_after(jiffies, timeout)) {
result = -ETIMEDOUT;
goto exit;
}
/* check error flag - FIXME: not sure what this does - docs do not describe
* anything beyond "error flag for diseqc receive data" :( */
if (s5h1420_readreg(state, 0x49)) {
result = -EIO;
goto exit;
}
/* check length */
length = (s5h1420_readreg(state, 0x3b) & 0x70) >> 4;
if (length > sizeof(reply->msg)) {
result = -EOVERFLOW;
goto exit;
}
reply->msg_len = length;
/* extract data */
for(i=0; i< length; i++) {
reply->msg[i] = s5h1420_readreg(state, 0x3d + i);
}
exit:
/* restore original settings */
s5h1420_writereg(state, 0x3b, val);
msleep(15);
return result;
}
static int s5h1420_send_burst (struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd)
{
struct s5h1420_state* state = fe->demodulator_priv;
u8 val;
int result = 0;
unsigned long timeout;
/* setup for tone burst */
val = s5h1420_readreg(state, 0x3b);
s5h1420_writereg(state, 0x3b, (s5h1420_readreg(state, 0x3b) & 0x70) | 0x01);
/* set value for B position if requested */
if (minicmd == SEC_MINI_B) {
s5h1420_writereg(state, 0x3b, s5h1420_readreg(state, 0x3b) | 0x04);
}
msleep(15);
/* start transmission */
s5h1420_writereg(state, 0x3b, s5h1420_readreg(state, 0x3b) | 0x08);
/* wait for transmission to complete */
timeout = jiffies + ((100*HZ) / 1000);
while(time_before(jiffies, timeout)) {
if (!(s5h1420_readreg(state, 0x3b) & 0x08))
break;
msleep(5);
}
if (time_after(jiffies, timeout))
result = -ETIMEDOUT;
/* restore original settings */
s5h1420_writereg(state, 0x3b, val);
msleep(15);
return result;
}
static fe_status_t s5h1420_get_status_bits(struct s5h1420_state* state)
{
u8 val;
fe_status_t status = 0;
val = s5h1420_readreg(state, 0x14);
if (val & 0x02)
status |= FE_HAS_SIGNAL;
if (val & 0x01)
status |= FE_HAS_CARRIER;
val = s5h1420_readreg(state, 0x36);
if (val & 0x01)
status |= FE_HAS_VITERBI;
if (val & 0x20)
status |= FE_HAS_SYNC;
if (status == (FE_HAS_SIGNAL|FE_HAS_CARRIER|FE_HAS_VITERBI|FE_HAS_SYNC))
status |= FE_HAS_LOCK;
return status;
}
static int s5h1420_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
struct s5h1420_state* state = fe->demodulator_priv;
u8 val;
dprintk("enter %s\n", __func__);
if (status == NULL)
return -EINVAL;
/* determine lock state */
*status = s5h1420_get_status_bits(state);
/* fix for FEC 5/6 inversion issue - if it doesn't quite lock, invert
the inversion, wait a bit and check again */
if (*status == (FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI)) {
val = s5h1420_readreg(state, Vit10);
if ((val & 0x07) == 0x03) {
if (val & 0x08)
s5h1420_writereg(state, Vit09, 0x13);
else
s5h1420_writereg(state, Vit09, 0x1b);
/* wait a bit then update lock status */
mdelay(200);
*status = s5h1420_get_status_bits(state);
}
}
/* perform post lock setup */
if ((*status & FE_HAS_LOCK) && !state->postlocked) {
/* calculate the data rate */
u32 tmp = s5h1420_getsymbolrate(state);
switch (s5h1420_readreg(state, Vit10) & 0x07) {
case 0: tmp = (tmp * 2 * 1) / 2; break;
case 1: tmp = (tmp * 2 * 2) / 3; break;
case 2: tmp = (tmp * 2 * 3) / 4; break;
case 3: tmp = (tmp * 2 * 5) / 6; break;
case 4: tmp = (tmp * 2 * 6) / 7; break;
case 5: tmp = (tmp * 2 * 7) / 8; break;
}
if (tmp == 0) {
printk(KERN_ERR "s5h1420: avoided division by 0\n");
tmp = 1;
}
tmp = state->fclk / tmp;
/* set the MPEG_CLK_INTL for the calculated data rate */
if (tmp < 2)
val = 0x00;
else if (tmp < 5)
val = 0x01;
else if (tmp < 9)
val = 0x02;
else if (tmp < 13)
val = 0x03;
else if (tmp < 17)
val = 0x04;
else if (tmp < 25)
val = 0x05;
else if (tmp < 33)
val = 0x06;
else
val = 0x07;
dprintk("for MPEG_CLK_INTL %d %x\n", tmp, val);
s5h1420_writereg(state, FEC01, 0x18);
s5h1420_writereg(state, FEC01, 0x10);
s5h1420_writereg(state, FEC01, val);
/* Enable "MPEG_Out" */
val = s5h1420_readreg(state, Mpeg02);
s5h1420_writereg(state, Mpeg02, val | (1 << 6));
/* kicker disable */
val = s5h1420_readreg(state, QPSK01) & 0x7f;
s5h1420_writereg(state, QPSK01, val);
/* DC freeze - TODO: this was never activated by default, or perhaps it can stay activated */
if (s5h1420_getsymbolrate(state) >= 20000000) {
s5h1420_writereg(state, Loop04, 0x8a);
s5h1420_writereg(state, Loop05, 0x6a);
} else {
s5h1420_writereg(state, Loop04, 0x58);
s5h1420_writereg(state, Loop05, 0x27);
}
/* post-lock processing has been done! */
state->postlocked = 1;
}
dprintk("leave %s\n", __func__);
return 0;
}
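/*
 * Illustrative sketch, not part of the driver: the post-lock data-rate
 * arithmetic above as a standalone helper. The bit rate is the symbol rate
 * times 2 (QPSK bits/symbol) times the Viterbi code rate; fclk divided by
 * that bit rate selects MPEG_CLK_INTL. The helper name and the threshold
 * table form are hypothetical; the values mirror the switch/if chain above.
 */
#if 0
static unsigned char mpeg_clk_intl(unsigned int fclk, unsigned int symrate,
unsigned int fec_num, unsigned int fec_den)
{
static const unsigned int thresh[] = { 2, 5, 9, 13, 17, 25, 33 };
unsigned int bitrate = symrate * 2 * fec_num / fec_den;
unsigned int ratio;
unsigned char val;
if (bitrate == 0) /* same division-by-zero guard as the code above */
bitrate = 1;
ratio = fclk / bitrate;
for (val = 0; val < 7; val++)
if (ratio < thresh[val])
break;
return val; /* 0x00..0x07, as written to FEC01 above */
}
#endif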
static int s5h1420_read_ber(struct dvb_frontend* fe, u32* ber)
{
struct s5h1420_state* state = fe->demodulator_priv;
s5h1420_writereg(state, 0x46, 0x1d);
mdelay(25);
*ber = (s5h1420_readreg(state, 0x48) << 8) | s5h1420_readreg(state, 0x47);
return 0;
}
static int s5h1420_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
struct s5h1420_state* state = fe->demodulator_priv;
u8 val = s5h1420_readreg(state, 0x15);
*strength = (u16) ((val << 8) | val);
return 0;
}
static int s5h1420_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
struct s5h1420_state* state = fe->demodulator_priv;
s5h1420_writereg(state, 0x46, 0x1f);
mdelay(25);
*ucblocks = (s5h1420_readreg(state, 0x48) << 8) | s5h1420_readreg(state, 0x47);
return 0;
}
static void s5h1420_reset(struct s5h1420_state* state)
{
dprintk("%s\n", __func__);
s5h1420_writereg (state, 0x01, 0x08);
s5h1420_writereg (state, 0x01, 0x00);
udelay(10);
}
static void s5h1420_setsymbolrate(struct s5h1420_state* state,
struct dvb_frontend_parameters *p)
{
u8 v;
u64 val;
dprintk("enter %s\n", __func__);
val = ((u64) p->u.qpsk.symbol_rate / 1000ULL) * (1ULL<<24);
if (p->u.qpsk.symbol_rate < 29000000)
val *= 2;
do_div(val, (state->fclk / 1000));
dprintk("symbol rate register: %06llx\n", (unsigned long long)val);
v = s5h1420_readreg(state, Loop01);
s5h1420_writereg(state, Loop01, v & 0x7f);
s5h1420_writereg(state, Tnco01, val >> 16);
s5h1420_writereg(state, Tnco02, val >> 8);
s5h1420_writereg(state, Tnco03, val & 0xff);
s5h1420_writereg(state, Loop01, v | 0x80);
dprintk("leave %s\n", __func__);
}
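/*
 * Illustrative sketch, not part of the driver: the timing-NCO word
 * programmed above, as standalone C. The register value is symbol_rate /
 * fclk scaled by 2^24, with both rates pre-divided by 1000 to keep the
 * intermediate small, and doubled below 29 Msym/s. The helper name and the
 * use of plain 64-bit division (instead of do_div()) are illustrative only.
 */
#if 0
#include <stdint.h>
static uint32_t tnco_word(uint32_t symbol_rate, uint32_t fclk)
{
uint64_t val = (uint64_t)(symbol_rate / 1000) << 24;
if (symbol_rate < 29000000)
val *= 2;
return (uint32_t)(val / (fclk / 1000)); /* 24 bits for Tnco01..Tnco03 */
}
#endif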
static u32 s5h1420_getsymbolrate(struct s5h1420_state* state)
{
return state->symbol_rate;
}
static void s5h1420_setfreqoffset(struct s5h1420_state* state, int freqoffset)
{
int val;
u8 v;
dprintk("enter %s\n", __func__);
/* remember freqoffset is in kHz, but the chip wants the offset in Hz, so
* divide fclk by 1000000 to get the correct value. */
val = -(int) ((freqoffset * (1<<24)) / (state->fclk / 1000000));
dprintk("phase rotator/freqoffset: %d %06x\n", freqoffset, val);
v = s5h1420_readreg(state, Loop01);
s5h1420_writereg(state, Loop01, v & 0xbf);
s5h1420_writereg(state, Pnco01, val >> 16);
s5h1420_writereg(state, Pnco02, val >> 8);
s5h1420_writereg(state, Pnco03, val & 0xff);
s5h1420_writereg(state, Loop01, v | 0x40);
dprintk("leave %s\n", __func__);
}
static int s5h1420_getfreqoffset(struct s5h1420_state* state)
{
int val;
s5h1420_writereg(state, 0x06, s5h1420_readreg(state, 0x06) | 0x08);
val = s5h1420_readreg(state, 0x0e) << 16;
val |= s5h1420_readreg(state, 0x0f) << 8;
val |= s5h1420_readreg(state, 0x10);
s5h1420_writereg(state, 0x06, s5h1420_readreg(state, 0x06) & 0xf7);
if (val & 0x800000)
val |= 0xff000000;
/* remember freqoffset is in kHz, but the chip wants the offset in Hz, so
* divide fclk by 1000000 to get the correct value. */
val = (((-val) * (state->fclk/1000000)) / (1<<24));
return val;
}
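/*
 * Note (illustrative): the readback above is a 24-bit two's complement
 * value, so the 0xff000000 fill sign-extends it to 32 bits; e.g. a raw
 * 0x800000 becomes 0xff800000 = -8388608 before the fclk scaling.
 */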
static void s5h1420_setfec_inversion(struct s5h1420_state* state,
struct dvb_frontend_parameters *p)
{
u8 inversion = 0;
u8 vit08, vit09;
dprintk("enter %s\n", __func__);
if (p->inversion == INVERSION_OFF)
inversion = state->config->invert ? 0x08 : 0;
else if (p->inversion == INVERSION_ON)
inversion = state->config->invert ? 0 : 0x08;
if ((p->u.qpsk.fec_inner == FEC_AUTO) || (p->inversion == INVERSION_AUTO)) {
vit08 = 0x3f;
vit09 = 0;
} else {
switch(p->u.qpsk.fec_inner) {
case FEC_1_2:
vit08 = 0x01; vit09 = 0x10;
break;
case FEC_2_3:
vit08 = 0x02; vit09 = 0x11;
break;
case FEC_3_4:
vit08 = 0x04; vit09 = 0x12;
break;
case FEC_5_6:
vit08 = 0x08; vit09 = 0x13;
break;
case FEC_6_7:
vit08 = 0x10; vit09 = 0x14;
break;
case FEC_7_8:
vit08 = 0x20; vit09 = 0x15;
break;
default:
return;
}
}
vit09 |= inversion;
dprintk("fec: %02x %02x\n", vit08, vit09);
s5h1420_writereg(state, Vit08, vit08);
s5h1420_writereg(state, Vit09, vit09);
dprintk("leave %s\n", __func__);
}
static fe_code_rate_t s5h1420_getfec(struct s5h1420_state* state)
{
switch(s5h1420_readreg(state, 0x32) & 0x07) {
case 0:
return FEC_1_2;
case 1:
return FEC_2_3;
case 2:
return FEC_3_4;
case 3:
return FEC_5_6;
case 4:
return FEC_6_7;
case 5:
return FEC_7_8;
}
return FEC_NONE;
}
static fe_spectral_inversion_t s5h1420_getinversion(struct s5h1420_state* state)
{
if (s5h1420_readreg(state, 0x32) & 0x08)
return INVERSION_ON;
return INVERSION_OFF;
}
static int s5h1420_set_frontend(struct dvb_frontend* fe,
struct dvb_frontend_parameters *p)
{
struct s5h1420_state* state = fe->demodulator_priv;
int frequency_delta;
struct dvb_frontend_tune_settings fesettings;
uint8_t clock_setting;
dprintk("enter %s\n", __func__);
/* check if we should do a fast-tune */
memcpy(&fesettings.parameters, p, sizeof(struct dvb_frontend_parameters));
s5h1420_get_tune_settings(fe, &fesettings);
frequency_delta = p->frequency - state->tunedfreq;
if ((frequency_delta > -fesettings.max_drift) &&
(frequency_delta < fesettings.max_drift) &&
(frequency_delta != 0) &&
(state->fec_inner == p->u.qpsk.fec_inner) &&
(state->symbol_rate == p->u.qpsk.symbol_rate)) {
if (fe->ops.tuner_ops.set_params) {
fe->ops.tuner_ops.set_params(fe, p);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
if (fe->ops.tuner_ops.get_frequency) {
u32 tmp;
fe->ops.tuner_ops.get_frequency(fe, &tmp);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
s5h1420_setfreqoffset(state, p->frequency - tmp);
} else {
s5h1420_setfreqoffset(state, 0);
}
dprintk("simple tune\n");
return 0;
}
dprintk("tuning demod\n");
/* first of all, software reset */
s5h1420_reset(state);
/* set s5h1420 fclk PLL according to desired symbol rate */
if (p->u.qpsk.symbol_rate > 33000000)
state->fclk = 80000000;
else if (p->u.qpsk.symbol_rate > 28500000)
state->fclk = 59000000;
else if (p->u.qpsk.symbol_rate > 25000000)
state->fclk = 86000000;
else if (p->u.qpsk.symbol_rate > 1900000)
state->fclk = 88000000;
else
state->fclk = 44000000;
/* Clock */
switch (state->fclk) {
default:
case 88000000:
clock_setting = 80;
break;
case 86000000:
clock_setting = 78;
break;
case 80000000:
clock_setting = 72;
break;
case 59000000:
clock_setting = 51;
break;
case 44000000:
clock_setting = 36;
break;
}
dprintk("pll01: %d, ToneFreq: %d\n", state->fclk/1000000 - 8, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
s5h1420_writereg(state, PLL01, state->fclk/1000000 - 8);
s5h1420_writereg(state, PLL02, 0x40);
s5h1420_writereg(state, DiS01, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
/* TODO DC offset removal, config parameter ? */
if (p->u.qpsk.symbol_rate > 29000000)
s5h1420_writereg(state, QPSK01, 0xae | 0x10);
else
s5h1420_writereg(state, QPSK01, 0xac | 0x10);
/* set misc registers */
s5h1420_writereg(state, CON_1, 0x00);
s5h1420_writereg(state, QPSK02, 0x00);
s5h1420_writereg(state, Pre01, 0xb0);
s5h1420_writereg(state, Loop01, 0xF0);
s5h1420_writereg(state, Loop02, 0x2a); /* e7 for s5h1420 */
s5h1420_writereg(state, Loop03, 0x79); /* 78 for s5h1420 */
if (p->u.qpsk.symbol_rate > 20000000)
s5h1420_writereg(state, Loop04, 0x79);
else
s5h1420_writereg(state, Loop04, 0x58);
s5h1420_writereg(state, Loop05, 0x6b);
if (p->u.qpsk.symbol_rate >= 8000000)
s5h1420_writereg(state, Post01, (0 << 6) | 0x10);
else if (p->u.qpsk.symbol_rate >= 4000000)
s5h1420_writereg(state, Post01, (1 << 6) | 0x10);
else
s5h1420_writereg(state, Post01, (3 << 6) | 0x10);
s5h1420_writereg(state, Monitor12, 0x00); /* unfreeze DC compensation */
s5h1420_writereg(state, Sync01, 0x33);
s5h1420_writereg(state, Mpeg01, state->config->cdclk_polarity);
s5h1420_writereg(state, Mpeg02, 0x3d); /* Parallel output mode, disabled -> enabled later */
s5h1420_writereg(state, Err01, 0x03); /* 0x1d for s5h1420 */
s5h1420_writereg(state, Vit06, 0x6e); /* 0x8e for s5h1420 */
s5h1420_writereg(state, DiS03, 0x00);
s5h1420_writereg(state, Rf01, 0x61); /* Tuner i2c address - for the gate controller */
/* set tuner PLL */
if (fe->ops.tuner_ops.set_params) {
fe->ops.tuner_ops.set_params(fe, p);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
s5h1420_setfreqoffset(state, 0);
}
/* set the rest of the parameters */
s5h1420_setsymbolrate(state, p);
s5h1420_setfec_inversion(state, p);
/* start QPSK */
s5h1420_writereg(state, QPSK01, s5h1420_readreg(state, QPSK01) | 1);
state->fec_inner = p->u.qpsk.fec_inner;
state->symbol_rate = p->u.qpsk.symbol_rate;
state->postlocked = 0;
state->tunedfreq = p->frequency;
dprintk("leave %s\n", __func__);
return 0;
}
static int s5h1420_get_frontend(struct dvb_frontend* fe,
struct dvb_frontend_parameters *p)
{
struct s5h1420_state* state = fe->demodulator_priv;
p->frequency = state->tunedfreq + s5h1420_getfreqoffset(state);
p->inversion = s5h1420_getinversion(state);
p->u.qpsk.symbol_rate = s5h1420_getsymbolrate(state);
p->u.qpsk.fec_inner = s5h1420_getfec(state);
return 0;
}
static int s5h1420_get_tune_settings(struct dvb_frontend* fe,
struct dvb_frontend_tune_settings* fesettings)
{
if (fesettings->parameters.u.qpsk.symbol_rate > 20000000) {
fesettings->min_delay_ms = 50;
fesettings->step_size = 2000;
fesettings->max_drift = 8000;
} else if (fesettings->parameters.u.qpsk.symbol_rate > 12000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 1500;
fesettings->max_drift = 9000;
} else if (fesettings->parameters.u.qpsk.symbol_rate > 8000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 1000;
fesettings->max_drift = 8000;
} else if (fesettings->parameters.u.qpsk.symbol_rate > 4000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 500;
fesettings->max_drift = 7000;
} else if (fesettings->parameters.u.qpsk.symbol_rate > 2000000) {
fesettings->min_delay_ms = 200;
fesettings->step_size = (fesettings->parameters.u.qpsk.symbol_rate / 8000);
fesettings->max_drift = 14 * fesettings->step_size;
} else {
fesettings->min_delay_ms = 200;
fesettings->step_size = (fesettings->parameters.u.qpsk.symbol_rate / 8000);
fesettings->max_drift = 18 * fesettings->step_size;
}
return 0;
}
static int s5h1420_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
struct s5h1420_state* state = fe->demodulator_priv;
if (enable)
return s5h1420_writereg(state, 0x02, state->CON_1_val | 1);
else
return s5h1420_writereg(state, 0x02, state->CON_1_val & 0xfe);
}
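/*
 * Sketch of the gate protocol (illustrative, not a verbatim call site):
 * bit 0 of CON_1 (register 0x02) opens the demod's I2C repeater, so a
 * raw access to the tuner behind the gate would look like:
 *
 *	fe->ops.i2c_gate_ctrl(fe, 1);	(open repeater)
 *	... i2c_transfer() to the tuner ...
 *	fe->ops.i2c_gate_ctrl(fe, 0);	(close repeater)
 */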
static int s5h1420_init (struct dvb_frontend* fe)
{
struct s5h1420_state* state = fe->demodulator_priv;
/* disable power down and do reset */
state->CON_1_val = state->config->serial_mpeg << 4;
s5h1420_writereg(state, 0x02, state->CON_1_val);
msleep(10);
s5h1420_reset(state);
return 0;
}
static int s5h1420_sleep(struct dvb_frontend* fe)
{
struct s5h1420_state* state = fe->demodulator_priv;
state->CON_1_val = 0x12;
return s5h1420_writereg(state, 0x02, state->CON_1_val);
}
static void s5h1420_release(struct dvb_frontend* fe)
{
struct s5h1420_state* state = fe->demodulator_priv;
i2c_del_adapter(&state->tuner_i2c_adapter);
kfree(state);
}
static u32 s5h1420_tuner_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct s5h1420_state *state = i2c_get_adapdata(i2c_adap);
struct i2c_msg m[1 + num];
u8 tx_open[2] = { CON_1, state->CON_1_val | 1 }; /* repeater stops once there was a stop condition */
memset(m, 0, sizeof(struct i2c_msg) * (1 + num));
m[0].addr = state->config->demod_address;
m[0].buf = tx_open;
m[0].len = 2;
memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
return i2c_transfer(state->i2c, m, 1+num) == 1 + num ? num : -EIO;
}
static struct i2c_algorithm s5h1420_tuner_i2c_algo = {
.master_xfer = s5h1420_tuner_i2c_tuner_xfer,
.functionality = s5h1420_tuner_i2c_func,
};
struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe)
{
struct s5h1420_state *state = fe->demodulator_priv;
return &state->tuner_i2c_adapter;
}
EXPORT_SYMBOL(s5h1420_get_tuner_i2c_adapter);
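/*
 * Usage sketch (illustrative; "my_tuner_attach" is a hypothetical helper,
 * not a real API): a card driver attaches the demod first and then hangs
 * its tuner off the gated bus exported here:
 *
 *	fe = s5h1420_attach(&cfg, i2c);
 *	if (fe)
 *		my_tuner_attach(fe, s5h1420_get_tuner_i2c_adapter(fe));
 */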
static struct dvb_frontend_ops s5h1420_ops;
struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
struct i2c_adapter *i2c)
{
/* allocate memory for the internal state */
struct s5h1420_state *state = kzalloc(sizeof(struct s5h1420_state), GFP_KERNEL);
u8 i;
if (state == NULL)
goto error;
/* setup the state */
state->config = config;
state->i2c = i2c;
state->postlocked = 0;
state->fclk = 88000000;
state->tunedfreq = 0;
state->fec_inner = FEC_NONE;
state->symbol_rate = 0;
/* check if the demod is there + identify it */
i = s5h1420_readreg(state, ID01);
if (i != 0x03)
goto error;
memset(state->shadow, 0xff, sizeof(state->shadow));
for (i = 0; i < 0x50; i++)
state->shadow[i] = s5h1420_readreg(state, i);
/* create dvb_frontend */
memcpy(&state->frontend.ops, &s5h1420_ops, sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
/* create tuner i2c adapter */
strlcpy(state->tuner_i2c_adapter.name, "S5H1420-PN1010 tuner I2C bus",
sizeof(state->tuner_i2c_adapter.name));
state->tuner_i2c_adapter.algo = &s5h1420_tuner_i2c_algo;
state->tuner_i2c_adapter.algo_data = NULL;
i2c_set_adapdata(&state->tuner_i2c_adapter, state);
if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) {
printk(KERN_ERR "S5H1420/PN1010: tuner i2c bus could not be initialized\n");
goto error;
}
return &state->frontend;
error:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(s5h1420_attach);
static struct dvb_frontend_ops s5h1420_ops = {
.info = {
.name = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
.type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
.frequency_tolerance = 29500,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* .symbol_rate_tolerance = ???,*/
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK
},
.release = s5h1420_release,
.init = s5h1420_init,
.sleep = s5h1420_sleep,
.i2c_gate_ctrl = s5h1420_i2c_gate_ctrl,
.set_frontend = s5h1420_set_frontend,
.get_frontend = s5h1420_get_frontend,
.get_tune_settings = s5h1420_get_tune_settings,
.read_status = s5h1420_read_status,
.read_ber = s5h1420_read_ber,
.read_signal_strength = s5h1420_read_signal_strength,
.read_ucblocks = s5h1420_read_ucblocks,
.diseqc_send_master_cmd = s5h1420_send_master_cmd,
.diseqc_recv_slave_reply = s5h1420_recv_slave_reply,
.diseqc_send_burst = s5h1420_send_burst,
.set_tone = s5h1420_set_tone,
.set_voltage = s5h1420_set_voltage,
};
MODULE_DESCRIPTION("Samsung S5H1420/PnpNetwork PN1010 DVB-S Demodulator driver");
MODULE_AUTHOR("Andrew de Quincey, Patrick Boettcher");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bmsitech/linux-imx6 | arch/sh/kernel/cpu/sh2a/clock-sh7264.c | 2798 | 3964 | /*
* arch/sh/kernel/cpu/sh2a/clock-sh7264.c
*
* SH7264 clock framework support
*
* Copyright (C) 2012 Phil Edworthy
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
/* SH7264 registers */
#define FRQCR 0xfffe0010
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
#define STBCR5 0xfffe0410
#define STBCR6 0xfffe0414
#define STBCR7 0xfffe0418
#define STBCR8 0xfffe041c
static const unsigned int pll1rate[] = {8, 12};
static unsigned int pll1_div;
/* Fixed 32 KHz root clock for RTC */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 18000000,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long rate = clk->parent->rate / pll1_div;
return rate * pll1rate[(__raw_readw(FRQCR) >> 8) & 1];
}
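/*
 * Worked example (illustrative, assuming the default 18 MHz EXTAL and
 * pll1_div = 1): with FRQCR bit 8 set, pll_recalc() returns
 * 18000000 / 1 * pll1rate[1] = 18 MHz * 12 = 216 MHz.
 */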
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&pll_clk,
};
static int div2[] = { 1, 2, 3, 4, 6, 8, 12 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_P,
DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
/* The mask field specifies the div2 entries that are valid */
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 4, 0x7, CLK_ENABLE_REG_16BIT
| CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCR, 0, 0x78, CLK_ENABLE_REG_16BIT),
};
enum { MSTP77, MSTP74, MSTP72,
MSTP60,
MSTP35, MSTP34, MSTP33, MSTP32, MSTP30,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
[MSTP77] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 7, 0), /* SCIF */
[MSTP74] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 4, 0), /* VDC */
[MSTP72] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 2, 0), /* CMT */
[MSTP60] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR6, 0, 0), /* USB */
[MSTP35] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 6, 0), /* MTU2 */
[MSTP34] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 4, 0), /* SDHI0 */
[MSTP33] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 3, 0), /* SDHI1 */
[MSTP32] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 2, 0), /* ADC */
[MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
/* MSTP clocks */
CLKDEV_CON_ID("sci_ick", &mstp_clks[MSTP77]),
CLKDEV_CON_ID("vdc3", &mstp_clks[MSTP74]),
CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP72]),
CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]),
CLKDEV_CON_ID("mtu2_fck", &mstp_clks[MSTP35]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP34]),
CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP33]),
CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]),
CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]),
};
int __init arch_clk_init(void)
{
int k, ret = 0;
if (test_mode_pin(MODE_PIN0)) {
if (test_mode_pin(MODE_PIN1))
pll1_div = 3;
else
pll1_div = 4;
} else
pll1_div = 1;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| gpl-2.0 |
hamayun/linux-kvm | drivers/media/common/tuners/tda827x.c | 4078 | 26582 | /*
*
* (c) 2005 Hartmut Hackmann
* (c) 2007 Michael Krufky
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/types.h>
#include <linux/dvb/frontend.h>
#include <linux/videodev2.h>
#include "tda827x.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "tda827x: " args); \
} while (0)
struct tda827x_priv {
int i2c_addr;
struct i2c_adapter *i2c_adap;
struct tda827x_config *cfg;
unsigned int sgIF;
unsigned char lpsel;
u32 frequency;
u32 bandwidth;
};
static void tda827x_set_std(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda827x_priv *priv = fe->tuner_priv;
char *mode;
priv->lpsel = 0;
if (params->std & V4L2_STD_MN) {
priv->sgIF = 92;
priv->lpsel = 1;
mode = "MN";
} else if (params->std & V4L2_STD_B) {
priv->sgIF = 108;
mode = "B";
} else if (params->std & V4L2_STD_GH) {
priv->sgIF = 124;
mode = "GH";
} else if (params->std & V4L2_STD_PAL_I) {
priv->sgIF = 124;
mode = "I";
} else if (params->std & V4L2_STD_DK) {
priv->sgIF = 124;
mode = "DK";
} else if (params->std & V4L2_STD_SECAM_L) {
priv->sgIF = 124;
mode = "L";
} else if (params->std & V4L2_STD_SECAM_LC) {
priv->sgIF = 20;
mode = "LC";
} else {
priv->sgIF = 124;
mode = "xx";
}
if (params->mode == V4L2_TUNER_RADIO) {
priv->sgIF = 88; /* IF frequency is 5.5 MHz */
dprintk("setting tda827x to radio FM\n");
} else
dprintk("setting tda827x to system %s\n", mode);
}
/* ------------------------------------------------------------------ */
struct tda827x_data {
u32 lomax;
u8 spd;
u8 bs;
u8 bp;
u8 cp;
u8 gc3;
u8 div1p5;
};
static const struct tda827x_data tda827x_table[] = {
{ .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
{ .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
{ .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
{ .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
{ .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
{ .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
{ .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0},
{ .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
{ .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
{ .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
{ .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
{ .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
{ .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
{ .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
{ .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
{ .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
{ .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
{ .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0}
};
static int tuner_transfer(struct dvb_frontend *fe,
struct i2c_msg *msg,
const int size)
{
int rc;
struct tda827x_priv *priv = fe->tuner_priv;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
rc = i2c_transfer(priv->i2c_adap, msg, size);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (rc >= 0 && rc != size)
return -EIO;
return rc;
}
static int tda827xo_set_params(struct dvb_frontend *fe,
struct dvb_frontend_parameters *params)
{
struct tda827x_priv *priv = fe->tuner_priv;
u8 buf[14];
int rc;
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
.buf = buf, .len = sizeof(buf) };
int i, tuner_freq, if_freq;
u32 N;
dprintk("%s:\n", __func__);
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
if_freq = 4000000;
break;
case BANDWIDTH_7_MHZ:
if_freq = 4500000;
break;
default: /* 8 MHz or Auto */
if_freq = 5000000;
break;
}
tuner_freq = params->frequency + if_freq;
i = 0;
while (tda827x_table[i].lomax < tuner_freq) {
if (tda827x_table[i + 1].lomax == 0)
break;
i++;
}
N = ((tuner_freq + 125000) / 250000) << (tda827x_table[i].spd + 2);
buf[0] = 0;
buf[1] = (N>>8) | 0x40;
buf[2] = N & 0xff;
buf[3] = 0;
buf[4] = 0x52;
buf[5] = (tda827x_table[i].spd << 6) + (tda827x_table[i].div1p5 << 5) +
(tda827x_table[i].bs << 3) +
tda827x_table[i].bp;
buf[6] = (tda827x_table[i].gc3 << 4) + 0x8f;
buf[7] = 0xbf;
buf[8] = 0x2a;
buf[9] = 0x05;
buf[10] = 0xff;
buf[11] = 0x00;
buf[12] = 0x00;
buf[13] = 0x40;
msg.len = 14;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
msleep(500);
/* correct CP value */
buf[0] = 0x30;
buf[1] = 0x50 + tda827x_table[i].cp;
msg.len = 2;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
priv->frequency = params->frequency;
priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
return 0;
err:
printk(KERN_ERR "%s: could not write to tuner at addr: 0x%02x\n",
__func__, priv->i2c_addr << 1);
return rc;
}
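/*
 * Worked example (illustrative, assuming an 8 MHz DVB-T channel at
 * 474 MHz): tuner_freq = 474 MHz + 5 MHz IF = 479 MHz, which selects the
 * 493 MHz table row (spd = 0), so
 * N = ((479000000 + 125000) / 250000) << (0 + 2) = 1916 << 2 = 7664.
 */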
static int tda827xo_sleep(struct dvb_frontend *fe)
{
struct tda827x_priv *priv = fe->tuner_priv;
static u8 buf[] = { 0x30, 0xd0 };
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
.buf = buf, .len = sizeof(buf) };
dprintk("%s:\n", __func__);
tuner_transfer(fe, &msg, 1);
if (priv->cfg && priv->cfg->sleep)
priv->cfg->sleep(fe);
return 0;
}
/* ------------------------------------------------------------------ */
static int tda827xo_set_analog_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
unsigned char tuner_reg[8];
unsigned char reg2[2];
u32 N;
int i;
struct tda827x_priv *priv = fe->tuner_priv;
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0 };
unsigned int freq = params->frequency;
tda827x_set_std(fe, params);
if (params->mode == V4L2_TUNER_RADIO)
freq = freq / 1000;
N = freq + priv->sgIF;
i = 0;
while (tda827x_table[i].lomax < N * 62500) {
if (tda827x_table[i + 1].lomax == 0)
break;
i++;
}
N = N << tda827x_table[i].spd;
tuner_reg[0] = 0;
tuner_reg[1] = (unsigned char)(N>>8);
tuner_reg[2] = (unsigned char) N;
tuner_reg[3] = 0x40;
tuner_reg[4] = 0x52 + (priv->lpsel << 5);
tuner_reg[5] = (tda827x_table[i].spd << 6) +
(tda827x_table[i].div1p5 << 5) +
(tda827x_table[i].bs << 3) + tda827x_table[i].bp;
tuner_reg[6] = 0x8f + (tda827x_table[i].gc3 << 4);
tuner_reg[7] = 0x8f;
msg.buf = tuner_reg;
msg.len = 8;
tuner_transfer(fe, &msg, 1);
msg.buf = reg2;
msg.len = 2;
reg2[0] = 0x80;
reg2[1] = 0;
tuner_transfer(fe, &msg, 1);
reg2[0] = 0x60;
reg2[1] = 0xbf;
tuner_transfer(fe, &msg, 1);
reg2[0] = 0x30;
reg2[1] = tuner_reg[4] + 0x80;
tuner_transfer(fe, &msg, 1);
msleep(1);
reg2[0] = 0x30;
reg2[1] = tuner_reg[4] + 4;
tuner_transfer(fe, &msg, 1);
msleep(1);
reg2[0] = 0x30;
reg2[1] = tuner_reg[4];
tuner_transfer(fe, &msg, 1);
msleep(550);
reg2[0] = 0x30;
reg2[1] = (tuner_reg[4] & 0xfc) + tda827x_table[i].cp;
tuner_transfer(fe, &msg, 1);
reg2[0] = 0x60;
reg2[1] = 0x3f;
tuner_transfer(fe, &msg, 1);
reg2[0] = 0x80;
reg2[1] = 0x08; /* Vsync en */
tuner_transfer(fe, &msg, 1);
priv->frequency = params->frequency;
return 0;
}
static void tda827xo_agcf(struct dvb_frontend *fe)
{
struct tda827x_priv *priv = fe->tuner_priv;
unsigned char data[] = { 0x80, 0x0c };
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
.buf = data, .len = 2};
tuner_transfer(fe, &msg, 1);
}
/* ------------------------------------------------------------------ */
struct tda827xa_data {
u32 lomax;
u8 svco;
u8 spd;
u8 scr;
u8 sbs;
u8 gc3;
};
static struct tda827xa_data tda827xa_dvbt[] = {
{ .lomax = 56875000, .svco = 3, .spd = 4, .scr = 0, .sbs = 0, .gc3 = 1},
{ .lomax = 67250000, .svco = 0, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
{ .lomax = 81250000, .svco = 1, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
{ .lomax = 97500000, .svco = 2, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
{ .lomax = 113750000, .svco = 3, .spd = 3, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 134500000, .svco = 0, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 154000000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 162500000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 183000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 195000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
{ .lomax = 227500000, .svco = 3, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
{ .lomax = 269000000, .svco = 0, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
{ .lomax = 290000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
{ .lomax = 325000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 390000000, .svco = 2, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 455000000, .svco = 3, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 520000000, .svco = 0, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 538000000, .svco = 0, .spd = 0, .scr = 1, .sbs = 3, .gc3 = 1},
{ .lomax = 550000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 620000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
{ .lomax = 650000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
{ .lomax = 700000000, .svco = 2, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
{ .lomax = 780000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
{ .lomax = 820000000, .svco = 3, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
{ .lomax = 870000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
{ .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 0},
{ .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0}
};
static struct tda827xa_data tda827xa_dvbc[] = {
{ .lomax = 50125000, .svco = 2, .spd = 4, .scr = 2, .sbs = 0, .gc3 = 3},
{ .lomax = 58500000, .svco = 3, .spd = 4, .scr = 2, .sbs = 0, .gc3 = 3},
{ .lomax = 69250000, .svco = 0, .spd = 3, .scr = 2, .sbs = 0, .gc3 = 3},
{ .lomax = 83625000, .svco = 1, .spd = 3, .scr = 2, .sbs = 0, .gc3 = 3},
{ .lomax = 97500000, .svco = 2, .spd = 3, .scr = 2, .sbs = 0, .gc3 = 3},
{ .lomax = 100250000, .svco = 2, .spd = 3, .scr = 2, .sbs = 1, .gc3 = 1},
{ .lomax = 117000000, .svco = 3, .spd = 3, .scr = 2, .sbs = 1, .gc3 = 1},
{ .lomax = 138500000, .svco = 0, .spd = 2, .scr = 2, .sbs = 1, .gc3 = 1},
{ .lomax = 167250000, .svco = 1, .spd = 2, .scr = 2, .sbs = 1, .gc3 = 1},
{ .lomax = 187000000, .svco = 2, .spd = 2, .scr = 2, .sbs = 1, .gc3 = 1},
{ .lomax = 200500000, .svco = 2, .spd = 2, .scr = 2, .sbs = 2, .gc3 = 1},
{ .lomax = 234000000, .svco = 3, .spd = 2, .scr = 2, .sbs = 2, .gc3 = 3},
{ .lomax = 277000000, .svco = 0, .spd = 1, .scr = 2, .sbs = 2, .gc3 = 3},
{ .lomax = 325000000, .svco = 1, .spd = 1, .scr = 2, .sbs = 2, .gc3 = 1},
{ .lomax = 334500000, .svco = 1, .spd = 1, .scr = 2, .sbs = 3, .gc3 = 3},
{ .lomax = 401000000, .svco = 2, .spd = 1, .scr = 2, .sbs = 3, .gc3 = 3},
{ .lomax = 468000000, .svco = 3, .spd = 1, .scr = 2, .sbs = 3, .gc3 = 1},
{ .lomax = 535000000, .svco = 0, .spd = 0, .scr = 1, .sbs = 3, .gc3 = 1},
{ .lomax = 554000000, .svco = 0, .spd = 0, .scr = 2, .sbs = 3, .gc3 = 1},
{ .lomax = 638000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1},
{ .lomax = 669000000, .svco = 1, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 1},
{ .lomax = 720000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1},
{ .lomax = 802000000, .svco = 2, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 1},
{ .lomax = 835000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1},
{ .lomax = 885000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1},
{ .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 1},
{ .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0}
};
static struct tda827xa_data tda827xa_analog[] = {
{ .lomax = 56875000, .svco = 3, .spd = 4, .scr = 0, .sbs = 0, .gc3 = 3},
{ .lomax = 67250000, .svco = 0, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 3},
{ .lomax = 81250000, .svco = 1, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 3},
{ .lomax = 97500000, .svco = 2, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 3},
{ .lomax = 113750000, .svco = 3, .spd = 3, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 134500000, .svco = 0, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 154000000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 162500000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 183000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
{ .lomax = 195000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
{ .lomax = 227500000, .svco = 3, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 3},
{ .lomax = 269000000, .svco = 0, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 3},
{ .lomax = 325000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
{ .lomax = 390000000, .svco = 2, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 3},
{ .lomax = 455000000, .svco = 3, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 3},
{ .lomax = 520000000, .svco = 0, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 538000000, .svco = 0, .spd = 0, .scr = 1, .sbs = 3, .gc3 = 1},
{ .lomax = 554000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
{ .lomax = 620000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
{ .lomax = 650000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
{ .lomax = 700000000, .svco = 2, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
{ .lomax = 780000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
{ .lomax = 820000000, .svco = 3, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
{ .lomax = 870000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
{ .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 0},
{ .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0}
};
static int tda827xa_sleep(struct dvb_frontend *fe)
{
struct tda827x_priv *priv = fe->tuner_priv;
static u8 buf[] = { 0x30, 0x90 };
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
.buf = buf, .len = sizeof(buf) };
dprintk("%s:\n", __func__);
tuner_transfer(fe, &msg, 1);
if (priv->cfg && priv->cfg->sleep)
priv->cfg->sleep(fe);
return 0;
}
static void tda827xa_lna_gain(struct dvb_frontend *fe, int high,
struct analog_parameters *params)
{
struct tda827x_priv *priv = fe->tuner_priv;
unsigned char buf[] = {0x22, 0x01};
int arg;
int gp_func;
struct i2c_msg msg = { .flags = 0, .buf = buf, .len = sizeof(buf) };
if (NULL == priv->cfg) {
dprintk("tda827x_config not defined, cannot set LNA gain!\n");
return;
}
msg.addr = priv->cfg->switch_addr;
if (priv->cfg->config) {
if (high)
dprintk("setting LNA to high gain\n");
else
dprintk("setting LNA to low gain\n");
}
switch (priv->cfg->config) {
case 0: /* no LNA */
break;
case 1: /* switch is GPIO 0 of tda8290 */
case 2:
if (params == NULL) {
gp_func = 0;
arg = 0;
} else {
/* turn Vsync on */
gp_func = 1;
if (params->std & V4L2_STD_MN)
arg = 1;
else
arg = 0;
}
if (fe->callback)
fe->callback(priv->i2c_adap->algo_data,
DVB_FRONTEND_COMPONENT_TUNER,
gp_func, arg);
buf[1] = high ? 0 : 1;
if (priv->cfg->config == 2)
buf[1] = high ? 1 : 0;
tuner_transfer(fe, &msg, 1);
break;
case 3: /* switch with GPIO of saa713x */
if (fe->callback)
fe->callback(priv->i2c_adap->algo_data,
DVB_FRONTEND_COMPONENT_TUNER, 0, high);
break;
}
}
static int tda827xa_set_params(struct dvb_frontend *fe,
struct dvb_frontend_parameters *params)
{
struct tda827x_priv *priv = fe->tuner_priv;
struct tda827xa_data *frequency_map = tda827xa_dvbt;
u8 buf[11];
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
.buf = buf, .len = sizeof(buf) };
int i, tuner_freq, if_freq, rc;
u32 N;
dprintk("%s:\n", __func__);
tda827xa_lna_gain(fe, 1, NULL);
msleep(20);
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
if_freq = 4000000;
break;
case BANDWIDTH_7_MHZ:
if_freq = 4500000;
break;
default: /* 8 MHz or Auto */
if_freq = 5000000;
break;
}
tuner_freq = params->frequency + if_freq;
if (fe->ops.info.type == FE_QAM) {
dprintk("%s select tda827xa_dvbc\n", __func__);
frequency_map = tda827xa_dvbc;
}
i = 0;
while (frequency_map[i].lomax < tuner_freq) {
if (frequency_map[i + 1].lomax == 0)
break;
i++;
}
N = ((tuner_freq + 31250) / 62500) << frequency_map[i].spd;
buf[0] = 0; // subaddress
buf[1] = N >> 8;
buf[2] = N & 0xff;
buf[3] = 0;
buf[4] = 0x16;
buf[5] = (frequency_map[i].spd << 5) + (frequency_map[i].svco << 3) +
frequency_map[i].sbs;
buf[6] = 0x4b + (frequency_map[i].gc3 << 4);
buf[7] = 0x1c;
buf[8] = 0x06;
buf[9] = 0x24;
buf[10] = 0x00;
msg.len = 11;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
buf[0] = 0x90;
buf[1] = 0xff;
buf[2] = 0x60;
buf[3] = 0x00;
buf[4] = 0x59; // lpsel, for 6MHz + 2
msg.len = 5;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
buf[0] = 0xa0;
buf[1] = 0x40;
msg.len = 2;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
msleep(11);
msg.flags = I2C_M_RD;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
msg.flags = 0;
buf[1] >>= 4;
dprintk("tda8275a AGC2 gain is: %d\n", buf[1]);
if ((buf[1]) < 2) {
tda827xa_lna_gain(fe, 0, NULL);
buf[0] = 0x60;
buf[1] = 0x0c;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
}
buf[0] = 0xc0;
buf[1] = 0x99; // lpsel, for 6MHz + 2
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
buf[0] = 0x60;
buf[1] = 0x3c;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
/* correct CP value */
buf[0] = 0x30;
buf[1] = 0x10 + frequency_map[i].scr;
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
msleep(163);
buf[0] = 0xc0;
buf[1] = 0x39; // lpsel, for 6MHz + 2
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
msleep(3);
/* freeze AGC1 */
buf[0] = 0x50;
buf[1] = 0x4f + (frequency_map[i].gc3 << 4);
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0)
goto err;
priv->frequency = params->frequency;
priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
return 0;
err:
printk(KERN_ERR "%s: could not write to tuner at addr: 0x%02x\n",
__func__, priv->i2c_addr << 1);
return rc;
}
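/*
 * Note (illustrative): the TDA827xA PLL steps in 62.5 kHz units, hence
 * the ((tuner_freq + 31250) / 62500) rounding above; for the same 479 MHz
 * LO the 520 MHz row gives spd = 0, so N = 7664 << 0 = 7664, i.e.
 * exactly 479 MHz / 62.5 kHz.
 */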
static int tda827xa_set_analog_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
unsigned char tuner_reg[11];
u32 N;
int i;
struct tda827x_priv *priv = fe->tuner_priv;
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
.buf = tuner_reg, .len = sizeof(tuner_reg) };
unsigned int freq = params->frequency;
tda827x_set_std(fe, params);
tda827xa_lna_gain(fe, 1, params);
msleep(10);
if (params->mode == V4L2_TUNER_RADIO)
freq = freq / 1000;
N = freq + priv->sgIF;
i = 0;
while (tda827xa_analog[i].lomax < N * 62500) {
if (tda827xa_analog[i + 1].lomax == 0)
break;
i++;
}
N = N << tda827xa_analog[i].spd;
tuner_reg[0] = 0;
tuner_reg[1] = (unsigned char)(N>>8);
tuner_reg[2] = (unsigned char) N;
tuner_reg[3] = 0;
tuner_reg[4] = 0x16;
tuner_reg[5] = (tda827xa_analog[i].spd << 5) +
(tda827xa_analog[i].svco << 3) +
tda827xa_analog[i].sbs;
tuner_reg[6] = 0x8b + (tda827xa_analog[i].gc3 << 4);
tuner_reg[7] = 0x1c;
tuner_reg[8] = 4;
tuner_reg[9] = 0x20;
tuner_reg[10] = 0x00;
msg.len = 11;
tuner_transfer(fe, &msg, 1);
tuner_reg[0] = 0x90;
tuner_reg[1] = 0xff;
tuner_reg[2] = 0xe0;
tuner_reg[3] = 0;
tuner_reg[4] = 0x99 + (priv->lpsel << 1);
msg.len = 5;
tuner_transfer(fe, &msg, 1);
tuner_reg[0] = 0xa0;
tuner_reg[1] = 0xc0;
msg.len = 2;
tuner_transfer(fe, &msg, 1);
tuner_reg[0] = 0x30;
tuner_reg[1] = 0x10 + tda827xa_analog[i].scr;
tuner_transfer(fe, &msg, 1);
msg.flags = I2C_M_RD;
tuner_transfer(fe, &msg, 1);
msg.flags = 0;
tuner_reg[1] >>= 4;
dprintk("AGC2 gain is: %d\n", tuner_reg[1]);
if (tuner_reg[1] < 1)
tda827xa_lna_gain(fe, 0, params);
msleep(100);
tuner_reg[0] = 0x60;
tuner_reg[1] = 0x3c;
tuner_transfer(fe, &msg, 1);
msleep(163);
tuner_reg[0] = 0x50;
tuner_reg[1] = 0x8f + (tda827xa_analog[i].gc3 << 4);
tuner_transfer(fe, &msg, 1);
tuner_reg[0] = 0x80;
tuner_reg[1] = 0x28;
tuner_transfer(fe, &msg, 1);
tuner_reg[0] = 0xb0;
tuner_reg[1] = 0x01;
tuner_transfer(fe, &msg, 1);
tuner_reg[0] = 0xc0;
tuner_reg[1] = 0x19 + (priv->lpsel << 1);
tuner_transfer(fe, &msg, 1);
priv->frequency = params->frequency;
return 0;
}
static void tda827xa_agcf(struct dvb_frontend *fe)
{
struct tda827x_priv *priv = fe->tuner_priv;
unsigned char data[] = {0x80, 0x2c};
struct i2c_msg msg = {.addr = priv->i2c_addr, .flags = 0,
.buf = data, .len = 2};
tuner_transfer(fe, &msg, 1);
}
/* ------------------------------------------------------------------ */
static int tda827x_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
return 0;
}
static int tda827x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct tda827x_priv *priv = fe->tuner_priv;
*frequency = priv->frequency;
return 0;
}
static int tda827x_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
struct tda827x_priv *priv = fe->tuner_priv;
*bandwidth = priv->bandwidth;
return 0;
}
static int tda827x_init(struct dvb_frontend *fe)
{
struct tda827x_priv *priv = fe->tuner_priv;
dprintk("%s:\n", __func__);
if (priv->cfg && priv->cfg->init)
priv->cfg->init(fe);
return 0;
}
static int tda827x_probe_version(struct dvb_frontend *fe);
static int tda827x_initial_init(struct dvb_frontend *fe)
{
int ret;
ret = tda827x_probe_version(fe);
if (ret)
return ret;
return fe->ops.tuner_ops.init(fe);
}
static int tda827x_initial_sleep(struct dvb_frontend *fe)
{
int ret;
ret = tda827x_probe_version(fe);
if (ret)
return ret;
return fe->ops.tuner_ops.sleep(fe);
}
static struct dvb_tuner_ops tda827xo_tuner_ops = {
.info = {
.name = "Philips TDA827X",
.frequency_min = 55000000,
.frequency_max = 860000000,
.frequency_step = 250000
},
.release = tda827x_release,
.init = tda827x_initial_init,
.sleep = tda827x_initial_sleep,
.set_params = tda827xo_set_params,
.set_analog_params = tda827xo_set_analog_params,
.get_frequency = tda827x_get_frequency,
.get_bandwidth = tda827x_get_bandwidth,
};
static struct dvb_tuner_ops tda827xa_tuner_ops = {
.info = {
.name = "Philips TDA827XA",
.frequency_min = 44000000,
.frequency_max = 906000000,
.frequency_step = 62500
},
.release = tda827x_release,
.init = tda827x_init,
.sleep = tda827xa_sleep,
.set_params = tda827xa_set_params,
.set_analog_params = tda827xa_set_analog_params,
.get_frequency = tda827x_get_frequency,
.get_bandwidth = tda827x_get_bandwidth,
};
static int tda827x_probe_version(struct dvb_frontend *fe)
{
u8 data;
int rc;
struct tda827x_priv *priv = fe->tuner_priv;
struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = I2C_M_RD,
.buf = &data, .len = 1 };
rc = tuner_transfer(fe, &msg, 1);
if (rc < 0) {
printk("%s: could not read from tuner at addr: 0x%02x\n",
__func__, msg.addr << 1);
return rc;
}
if ((data & 0x3c) == 0) {
dprintk("tda827x tuner found\n");
fe->ops.tuner_ops.init = tda827x_init;
fe->ops.tuner_ops.sleep = tda827xo_sleep;
if (priv->cfg)
priv->cfg->agcf = tda827xo_agcf;
} else {
dprintk("tda827xa tuner found\n");
memcpy(&fe->ops.tuner_ops, &tda827xa_tuner_ops, sizeof(struct dvb_tuner_ops));
if (priv->cfg)
priv->cfg->agcf = tda827xa_agcf;
}
return 0;
}
struct dvb_frontend *tda827x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
struct tda827x_config *cfg)
{
struct tda827x_priv *priv = NULL;
dprintk("%s:\n", __func__);
priv = kzalloc(sizeof(struct tda827x_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
priv->i2c_addr = addr;
priv->i2c_adap = i2c;
priv->cfg = cfg;
memcpy(&fe->ops.tuner_ops, &tda827xo_tuner_ops, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = priv;
dprintk("type set to %s\n", fe->ops.tuner_ops.info.name);
return fe;
}
EXPORT_SYMBOL_GPL(tda827x_attach);
MODULE_DESCRIPTION("DVB TDA827x driver");
MODULE_AUTHOR("Hartmut Hackmann <hartmut.hackmann@t-online.de>");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
C-Aniruddh/vortex_condor | arch/powerpc/kernel/of_platform.c | 4590 | 3009 | /*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
* and Arnd Bergmann, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#undef DEBUG
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/atomic.h>
#include <asm/errno.h>
#include <asm/topology.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
#ifdef CONFIG_PPC_OF_PLATFORM_PCI
/* The probing of PCI controllers from of_platform is currently
* 64 bits only, mostly due to gratuitous differences between
* the 32 and 64 bits PCI code on PowerPC and the 32 bits one
* lacking some bits needed here.
*/
static int __devinit of_pci_phb_probe(struct platform_device *dev)
{
struct pci_controller *phb;
/* Check if we can do that ... */
if (ppc_md.pci_setup_phb == NULL)
return -ENODEV;
pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name);
/* Alloc and setup PHB data structure */
phb = pcibios_alloc_controller(dev->dev.of_node);
if (!phb)
return -ENODEV;
/* Setup parent in sysfs */
phb->parent = &dev->dev;
/* Setup the PHB using arch provided callback */
if (ppc_md.pci_setup_phb(phb)) {
pcibios_free_controller(phb);
return -ENODEV;
}
/* Process "ranges" property */
pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
/* Create EEH devices for the PHB */
eeh_dev_phb_init_dynamic(phb);
/* Register devices with EEH */
#ifdef CONFIG_EEH
if (dev->dev.of_node->child)
eeh_add_device_tree_early(dev->dev.of_node);
#endif /* CONFIG_EEH */
/* Scan the bus */
pcibios_scan_phb(phb);
if (phb->bus == NULL)
return -ENXIO;
/* Claim resources. This might need some rework as well depending
* on whether we are doing probe-only or not, like assigning unassigned
* resources etc...
*/
pcibios_claim_one_bus(phb->bus);
/* Finish EEH setup */
#ifdef CONFIG_EEH
eeh_add_device_tree_late(phb->bus);
#endif
/* Add probed PCI devices to the device model */
pci_bus_add_devices(phb->bus);
return 0;
}
static struct of_device_id of_pci_phb_ids[] = {
{ .type = "pci", },
{ .type = "pcix", },
{ .type = "pcie", },
{ .type = "pciex", },
{ .type = "ht", },
{}
};
static struct platform_driver of_pci_phb_driver = {
.probe = of_pci_phb_probe,
.driver = {
.name = "of-pci",
.owner = THIS_MODULE,
.of_match_table = of_pci_phb_ids,
},
};
static __init int of_pci_phb_init(void)
{
return platform_driver_register(&of_pci_phb_driver);
}
device_initcall(of_pci_phb_init);
#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
| gpl-2.0 |
maxwen/android_kernel_oppo_n1 | arch/arm/mach-omap1/dma.c | 4846 | 8656 | /*
* OMAP1/OMAP7xx - specific DMA driver
*
* Copyright (C) 2003 - 2008 Nokia Corporation
* Author: Juha Yrjölä <juha.yrjola@nokia.com>
* DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
* Graphics DMA and LCD DMA graphics transformations
* by Imre Deak <imre.deak@nokia.com>
* OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
* Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
* Converted DMA library into platform driver
* - G, Manjunath Kondaiah <manjugk@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>
#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/irqs.h>
#define OMAP1_DMA_BASE (0xfffed800)
#define OMAP1_LOGICAL_DMA_CH_COUNT 17
#define OMAP1_DMA_STRIDE 0x40
static u32 errata;
static u32 enable_1510_mode;
static u8 dma_stride;
static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
static u16 reg_map[] = {
[GCR] = 0x400,
[GSCR] = 0x404,
[GRST1] = 0x408,
[HW_ID] = 0x442,
[PCH2_ID] = 0x444,
[PCH0_ID] = 0x446,
[PCH1_ID] = 0x448,
[PCHG_ID] = 0x44a,
[PCHD_ID] = 0x44c,
[CAPS_0] = 0x44e,
[CAPS_1] = 0x452,
[CAPS_2] = 0x456,
[CAPS_3] = 0x458,
[CAPS_4] = 0x45a,
[PCH2_SR] = 0x460,
[PCH0_SR] = 0x480,
[PCH1_SR] = 0x482,
[PCHD_SR] = 0x4c0,
/* Common Registers */
[CSDP] = 0x00,
[CCR] = 0x02,
[CICR] = 0x04,
[CSR] = 0x06,
[CEN] = 0x10,
[CFN] = 0x12,
[CSFI] = 0x14,
[CSEI] = 0x16,
[CPC] = 0x18, /* 15xx only */
[CSAC] = 0x18,
[CDAC] = 0x1a,
[CDEI] = 0x1c,
[CDFI] = 0x1e,
[CLNK_CTRL] = 0x28,
/* Channel specific register offsets */
[CSSA] = 0x08,
[CDSA] = 0x0c,
[COLOR] = 0x20,
[CCR2] = 0x24,
[LCH_CTRL] = 0x2a,
};
static struct resource res[] __initdata = {
[0] = {
.start = OMAP1_DMA_BASE,
.end = OMAP1_DMA_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "0",
.start = INT_DMA_CH0_6,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "1",
.start = INT_DMA_CH1_7,
.flags = IORESOURCE_IRQ,
},
[3] = {
.name = "2",
.start = INT_DMA_CH2_8,
.flags = IORESOURCE_IRQ,
},
[4] = {
.name = "3",
.start = INT_DMA_CH3,
.flags = IORESOURCE_IRQ,
},
[5] = {
.name = "4",
.start = INT_DMA_CH4,
.flags = IORESOURCE_IRQ,
},
[6] = {
.name = "5",
.start = INT_DMA_CH5,
.flags = IORESOURCE_IRQ,
},
/* Handled in lcd_dma.c */
[7] = {
.name = "6",
.start = INT_1610_DMA_CH6,
.flags = IORESOURCE_IRQ,
},
/* irq's for omap16xx and omap7xx */
[8] = {
.name = "7",
.start = INT_1610_DMA_CH7,
.flags = IORESOURCE_IRQ,
},
[9] = {
.name = "8",
.start = INT_1610_DMA_CH8,
.flags = IORESOURCE_IRQ,
},
[10] = {
.name = "9",
.start = INT_1610_DMA_CH9,
.flags = IORESOURCE_IRQ,
},
[11] = {
.name = "10",
.start = INT_1610_DMA_CH10,
.flags = IORESOURCE_IRQ,
},
[12] = {
.name = "11",
.start = INT_1610_DMA_CH11,
.flags = IORESOURCE_IRQ,
},
[13] = {
.name = "12",
.start = INT_1610_DMA_CH12,
.flags = IORESOURCE_IRQ,
},
[14] = {
.name = "13",
.start = INT_1610_DMA_CH13,
.flags = IORESOURCE_IRQ,
},
[15] = {
.name = "14",
.start = INT_1610_DMA_CH14,
.flags = IORESOURCE_IRQ,
},
[16] = {
.name = "15",
.start = INT_1610_DMA_CH15,
.flags = IORESOURCE_IRQ,
},
[17] = {
.name = "16",
.start = INT_DMA_LCD,
.flags = IORESOURCE_IRQ,
},
};
static void __iomem *dma_base;
static inline void dma_write(u32 val, int reg, int lch)
{
u8 stride;
u32 offset;
stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
offset = reg_map[reg] + (stride * lch);
__raw_writew(val, dma_base + offset);
if ((reg > CLNK_CTRL && reg < CCEN) ||
(reg > PCHD_ID && reg < CAPS_2)) {
u32 offset2 = reg_map[reg] + 2 + (stride * lch);
__raw_writew(val >> 16, dma_base + offset2);
}
}
static inline u32 dma_read(int reg, int lch)
{
u8 stride;
u32 offset, val;
stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
offset = reg_map[reg] + (stride * lch);
val = __raw_readw(dma_base + offset);
if ((reg > CLNK_CTRL && reg < CCEN) ||
(reg > PCHD_ID && reg < CAPS_2)) {
u16 upper;
u32 offset2 = reg_map[reg] + 2 + (stride * lch);
upper = __raw_readw(dma_base + offset2);
val |= (upper << 16);
}
return val;
}
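/*
 * Note (illustrative): the OMAP1 DMA registers are 16 bits wide, so a
 * 32-bit channel register such as CSSA (offset 0x08) is accessed in two
 * halves: dma_write(addr, CSSA, lch) issues __raw_writew(addr) at
 * 0x08 + 0x40 * lch and __raw_writew(addr >> 16) two bytes above it.
 */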
static void omap1_clear_lch_regs(int lch)
{
int i = dma_common_ch_start;
for (; i <= dma_common_ch_end; i++)
dma_write(0, i, lch);
}
static void omap1_clear_dma(int lch)
{
u32 l;
l = dma_read(CCR, lch);
l &= ~OMAP_DMA_CCR_EN;
dma_write(l, CCR, lch);
/* Clear pending interrupts */
l = dma_read(CSR, lch);
}
static void omap1_show_dma_caps(void)
{
if (enable_1510_mode) {
printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
} else {
u16 w;
printk(KERN_INFO "OMAP DMA hardware version %d\n",
dma_read(HW_ID, 0));
printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
dma_read(CAPS_4, 0));
/* Disable OMAP 3.0/3.1 compatibility mode. */
w = dma_read(GSCR, 0);
w |= 1 << 3;
dma_write(w, GSCR, 0);
}
return;
}
static u32 configure_dma_errata(void)
{
/*
* Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
* read before the DMA controller finished disabling the channel.
*/
if (!cpu_is_omap15xx())
SET_DMA_ERRATA(DMA_ERRATA_3_3);
return errata;
}
static int __init omap1_system_dma_init(void)
{
struct omap_system_dma_plat_info *p;
struct omap_dma_dev_attr *d;
struct platform_device *pdev;
int ret;
pdev = platform_device_alloc("omap_dma_system", 0);
if (!pdev) {
pr_err("%s: Unable to device alloc for dma\n",
__func__);
return -ENOMEM;
}
dma_base = ioremap(res[0].start, resource_size(&res[0]));
if (!dma_base) {
pr_err("%s: Unable to ioremap\n", __func__);
ret = -ENODEV;
goto exit_device_put;
}
ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
if (ret) {
dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
__func__, pdev->name, pdev->id);
goto exit_device_put;
}
p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
if (!p) {
dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
__func__, pdev->name);
ret = -ENOMEM;
goto exit_device_del;
}
d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
if (!d) {
dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n",
__func__, pdev->name);
ret = -ENOMEM;
goto exit_release_p;
}
d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
d->dev_caps |= SRC_PORT;
d->dev_caps |= DST_PORT;
d->dev_caps |= SRC_INDEX;
d->dev_caps |= DST_INDEX;
d->dev_caps |= IS_BURST_ONLY4;
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
d->chan = kzalloc(sizeof(struct omap_dma_lch) *
(d->lch_count), GFP_KERNEL);
if (!d->chan) {
dev_err(&pdev->dev, "%s: Memory allocation failed for d->chan\n",
__func__);
ret = -ENOMEM;
goto exit_release_d;
}
if (cpu_is_omap15xx())
d->chan_count = 9;
else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
if (!(d->dev_caps & ENABLE_1510_MODE))
d->chan_count = 16;
else
d->chan_count = 9;
}
p->dma_attr = d;
p->show_dma_caps = omap1_show_dma_caps;
p->clear_lch_regs = omap1_clear_lch_regs;
p->clear_dma = omap1_clear_dma;
p->dma_write = dma_write;
p->dma_read = dma_read;
p->disable_irq_lch = NULL;
p->errata = configure_dma_errata();
ret = platform_device_add_data(pdev, p, sizeof(*p));
if (ret) {
dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
__func__, pdev->name, pdev->id);
goto exit_release_chan;
}
ret = platform_device_add(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
__func__, pdev->name, pdev->id);
goto exit_release_chan;
}
dma_stride = OMAP1_DMA_STRIDE;
dma_common_ch_start = CPC;
dma_common_ch_end = COLOR;
return ret;
exit_release_chan:
kfree(d->chan);
exit_release_d:
kfree(d);
exit_release_p:
kfree(p);
exit_device_del:
platform_device_del(pdev);
exit_device_put:
platform_device_put(pdev);
return ret;
}
arch_initcall(omap1_system_dma_init);
| gpl-2.0 |
ABIP/android_kernel_samsung_msm7x30-common | drivers/parisc/dino.c | 4846 | 31036 | /*
** DINO manager
**
** (c) Copyright 1999 Red Hat Software
** (c) Copyright 1999 SuSE GmbH
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2006 Helge Deller
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This module provides access to Dino PCI bus (config/IOport spaces)
** and helps manage Dino IRQ lines.
**
** Dino interrupt handling is a bit complicated.
** Dino always writes to the broadcast EIR via irr0 for now.
** (BIG WARNING: using broadcast EIR is a really bad thing for SMP!)
** Only one processor interrupt is used for the 11 IRQ line
** inputs to dino.
**
** The difference between Built-in Dino and Card-Mode
** dino is in chip initialization and pci device initialization.
**
** Linux drivers can only use Card-Mode Dino if the PCI devices' I/O port
** BARs are configured and used by the driver. Programming MMIO addresses
** requires substantial knowledge of the available Host I/O address ranges
** and is currently not supported. Port/Config accessor functions are the
** same. "BIOS" differences are handled within the existing routines.
*/
/* Changes :
** 2001-06-14 : Clement Moyroud (moyroudc@esiee.fr)
** - added support for the integrated RS232.
*/
/*
** TODO: create a virtual address for each Dino HPA.
** GSC code might be able to do this since IODC data tells us
** how many pages are used. PCI subsystem could (must?) do this
** for PCI drivers devices which implement/use MMIO registers.
*/
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h> /* for struct irqaction */
#include <linux/spinlock.h> /* for spinlock_t and prototypes */
#include <asm/pdc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include "gsc.h"
#undef DINO_DEBUG
#ifdef DINO_DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/*
** Config accessor functions only pass in the 8-bit bus number
** and not the 8-bit "PCI Segment" number. Each Dino will be
** assigned a PCI bus number based on "when" it's discovered.
**
** The "secondary" bus number is set to this before calling
** pci_scan_bus(). If any PPB's are present, the scan will
** discover them and update the "secondary" and "subordinate"
** fields in Dino's pci_bus structure.
**
** Changes in the configuration *will* result in a different
** bus number for each dino.
*/
#define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA)
#define is_cujo(id) ((id)->hversion == 0x682)
#define DINO_IAR0 0x004
#define DINO_IODC_ADDR 0x008
#define DINO_IODC_DATA_0 0x008
#define DINO_IODC_DATA_1 0x008
#define DINO_IRR0 0x00C
#define DINO_IAR1 0x010
#define DINO_IRR1 0x014
#define DINO_IMR 0x018
#define DINO_IPR 0x01C
#define DINO_TOC_ADDR 0x020
#define DINO_ICR 0x024
#define DINO_ILR 0x028
#define DINO_IO_COMMAND 0x030
#define DINO_IO_STATUS 0x034
#define DINO_IO_CONTROL 0x038
#define DINO_IO_GSC_ERR_RESP 0x040
#define DINO_IO_ERR_INFO 0x044
#define DINO_IO_PCI_ERR_RESP 0x048
#define DINO_IO_FBB_EN 0x05c
#define DINO_IO_ADDR_EN 0x060
#define DINO_PCI_ADDR 0x064
#define DINO_CONFIG_DATA 0x068
#define DINO_IO_DATA 0x06c
#define DINO_MEM_DATA 0x070 /* Dino 3.x only */
#define DINO_GSC2X_CONFIG 0x7b4
#define DINO_GMASK 0x800
#define DINO_PAMR 0x804
#define DINO_PAPR 0x808
#define DINO_DAMODE 0x80c
#define DINO_PCICMD 0x810
#define DINO_PCISTS 0x814
#define DINO_MLTIM 0x81c
#define DINO_BRDG_FEAT 0x820
#define DINO_PCIROR 0x824
#define DINO_PCIWOR 0x828
#define DINO_TLTIM 0x830
#define DINO_IRQS 11 /* bits 0-10 are architected */
#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
#define DINO_LOCAL_IRQS (DINO_IRQS+1)
#define DINO_MASK_IRQ(x) (1<<(x))
#define PCIINTA 0x001
#define PCIINTB 0x002
#define PCIINTC 0x004
#define PCIINTD 0x008
#define PCIINTE 0x010
#define PCIINTF 0x020
#define GSCEXTINT 0x040
/* #define xxx 0x080 - bit 7 is "default" */
/* #define xxx 0x100 - bit 8 not used */
/* #define xxx 0x200 - bit 9 not used */
#define RS232INT 0x400
struct dino_device
{
struct pci_hba_data hba; /* 'C' inheritance - must be first */
spinlock_t dinosaur_pen;
unsigned long txn_addr; /* EIR addr to generate interrupt */
u32 txn_data; /* EIR data assigned to each dino */
u32 imr; /* IRQ's which are enabled */
int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
#ifdef DINO_DEBUG
unsigned int dino_irr0; /* save most recent IRQ line stat */
#endif
};
/* Looks nice and keeps the compiler happy */
#define DINO_DEV(d) ((struct dino_device *) d)
/*
* Dino Configuration Space Accessor Functions
*/
#define DINO_CFG_TOK(bus,dfn,pos) ((u32) ((bus)<<16 | (dfn)<<8 | (pos)))
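/*
** Worked example (values hypothetical, not from the original source):
** DINO_CFG_TOK(1, PCI_DEVFN(4, 0), 0x10)
** == (1 << 16) | (0x20 << 8) | 0x10
** == 0x00012010
** i.e. bus 1, slot 4 function 0 (devfn 0x20), register 0x10.
*/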
/*
* keep the current highest bus count to assist in allocating busses. This
* tries to keep a global bus count total so that when we discover an
* entirely new bus, it can be given a unique bus number.
*/
static int dino_current_bus = 0;
static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
void __iomem *base_addr = d->hba.base_addr;
unsigned long flags;
DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where,
size);
spin_lock_irqsave(&d->dinosaur_pen, flags);
/* tell HW which CFG address */
__raw_writel(v, base_addr + DINO_PCI_ADDR);
/* generate cfg read cycle */
if (size == 1) {
*val = readb(base_addr + DINO_CONFIG_DATA + (where & 3));
} else if (size == 2) {
*val = readw(base_addr + DINO_CONFIG_DATA + (where & 2));
} else if (size == 4) {
*val = readl(base_addr + DINO_CONFIG_DATA);
}
spin_unlock_irqrestore(&d->dinosaur_pen, flags);
return 0;
}
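/*
** Usage sketch (illustrative only; the function name is hypothetical
** and not part of the original driver): reading a 16-bit vendor ID
** through the accessor above.
*/
#if 0
static void dino_cfg_read_example(struct pci_bus *bus, unsigned int devfn)
{
u32 vendor = 0;
/* PCI_VENDOR_ID is config offset 0, 2 bytes wide */
dino_cfg_read(bus, devfn, PCI_VENDOR_ID, 2, &vendor);
printk(KERN_DEBUG "devfn 0x%02x vendor id 0x%04x\n",
devfn, vendor & 0xffff);
}
#endif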
/*
* Dino address stepping "feature":
* When address stepping, Dino attempts to drive the bus one cycle too soon
* even though the type of cycle (config vs. MMIO) might be different.
* The read of Ven/Prod ID is harmless and avoids Dino's address stepping.
*/
static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
void __iomem *base_addr = d->hba.base_addr;
unsigned long flags;
DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where,
size);
spin_lock_irqsave(&d->dinosaur_pen, flags);
/* avoid address stepping feature */
__raw_writel(v & 0xffffff00, base_addr + DINO_PCI_ADDR);
__raw_readl(base_addr + DINO_CONFIG_DATA);
/* tell HW which CFG address */
__raw_writel(v, base_addr + DINO_PCI_ADDR);
/* generate cfg write cycle */
if (size == 1) {
writeb(val, base_addr + DINO_CONFIG_DATA + (where & 3));
} else if (size == 2) {
writew(val, base_addr + DINO_CONFIG_DATA + (where & 2));
} else if (size == 4) {
writel(val, base_addr + DINO_CONFIG_DATA);
}
spin_unlock_irqrestore(&d->dinosaur_pen, flags);
return 0;
}
static struct pci_ops dino_cfg_ops = {
.read = dino_cfg_read,
.write = dino_cfg_write,
};
/*
* Dino "I/O Port" Space Accessor Functions
*
* Many PCI devices don't require use of I/O port space (eg Tulip,
* NCR720) since they export the same registers to both MMIO and
* I/O port space. Performance is going to stink if drivers use
* I/O port instead of MMIO.
*/
#define DINO_PORT_IN(type, size, mask) \
static u##size dino_in##size (struct pci_hba_data *d, u16 addr) \
{ \
u##size v; \
unsigned long flags; \
spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
/* tell HW which IO Port address */ \
__raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
/* generate I/O PORT read cycle */ \
v = read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \
spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
return v; \
}
DINO_PORT_IN(b, 8, 3)
DINO_PORT_IN(w, 16, 2)
DINO_PORT_IN(l, 32, 0)
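/*
** For reference, an assumed hand-expansion of DINO_PORT_IN(b, 8, 3),
** kept under #if 0 so it never conflicts with the macro instance
** above: the mask picks the byte lane inside the 32-bit DINO_IO_DATA
** window, so an inb of port 0x3f9 reads DINO_IO_DATA + 1.
*/
#if 0
static u8 dino_in8(struct pci_hba_data *d, u16 addr)
{
u8 v;
unsigned long flags;
spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags);
/* tell HW which IO Port address */
__raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR);
/* generate I/O PORT read cycle */
v = readb(d->base_addr + DINO_IO_DATA + (addr & 3));
spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags);
return v;
}
#endif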
#define DINO_PORT_OUT(type, size, mask) \
static void dino_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
unsigned long flags; \
spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
/* tell HW which IO port address */ \
__raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
/* generate I/O PORT write cycle */ \
write##type(val, d->base_addr+DINO_IO_DATA+(addr&mask)); \
spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
}
DINO_PORT_OUT(b, 8, 3)
DINO_PORT_OUT(w, 16, 2)
DINO_PORT_OUT(l, 32, 0)
static struct pci_port_ops dino_port_ops = {
.inb = dino_in8,
.inw = dino_in16,
.inl = dino_in32,
.outb = dino_out8,
.outw = dino_out16,
.outl = dino_out32
};
static void dino_mask_irq(struct irq_data *d)
{
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
static void dino_unmask_irq(struct irq_data *d)
{
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
**
** This does NOT change ILR state!
** See comment below for ILR usage.
*/
__raw_readl(dino_dev->hba.base_addr+DINO_IPR);
/* set the matching bit in the IMR register */
dino_dev->imr |= DINO_MASK_IRQ(local_irq); /* used in dino_isr() */
__raw_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
/* Emulate "Level Triggered" Interrupt
** Basically, a driver is blowing it if the IRQ line is asserted
** while the IRQ is disabled. But tulip.c seems to do that....
** Give 'em a kluge award and a nice round of applause!
**
** The gsc_write will generate an interrupt which invokes dino_isr().
** dino_isr() will read IPR and find nothing. But then catch this
** when it also checks ILR.
*/
tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR);
if (tmp & DINO_MASK_IRQ(local_irq)) {
DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
__func__, tmp);
gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
}
}
static struct irq_chip dino_interrupt_type = {
.name = "GSC-PCI",
.irq_unmask = dino_unmask_irq,
.irq_mask = dino_mask_irq,
};
/*
* Handle a Processor interrupt generated by Dino.
*
* ilr_loop counter is a kluge to prevent a "stuck" IRQ line from
* wedging the CPU. Could be removed or made optional at some point.
*/
static irqreturn_t dino_isr(int irq, void *intr_dev)
{
struct dino_device *dino_dev = intr_dev;
u32 mask;
int ilr_loop = 100;
/* read and acknowledge pending interrupts */
#ifdef DINO_DEBUG
dino_dev->dino_irr0 =
#endif
mask = __raw_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK;
if (mask == 0)
return IRQ_NONE;
ilr_again:
do {
int local_irq = __ffs(mask);
int irq = dino_dev->global_irq[local_irq];
DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
__func__, irq, intr_dev, mask);
generic_handle_irq(irq);
mask &= ~(1 << local_irq);
} while (mask);
/* Support for level triggered IRQ lines.
**
** Dropping this support would make this routine *much* faster.
** But since PCI requires level triggered IRQ lines in order to share
** them, device drivers may assume lines are level triggered (and not
** edge triggered like EISA/ISA can be).
*/
mask = __raw_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr;
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
return IRQ_NONE;
}
return IRQ_HANDLED;
}
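/*
** Sketch (illustrative only; the helper is hypothetical) of the
** mask-walk idiom used in dino_isr() above: __ffs() returns the
** lowest set bit, which indexes the local IRQ, and clearing that bit
** lets the loop dispatch every pending line exactly once.
*/
#if 0
static void mask_walk_example(u32 mask)
{
while (mask) {
int bit = __ffs(mask); /* e.g. mask 0x412 visits bits 1, 4, 10 */
printk(KERN_DEBUG "pending local irq %d\n", bit);
mask &= ~(1 << bit);
}
}
#endif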
static void dino_assign_irq(struct dino_device *dino, int local_irq, int *irqp)
{
int irq = gsc_assign_irq(&dino_interrupt_type, dino);
if (irq == NO_IRQ)
return;
*irqp = irq;
dino->global_irq[local_irq] = irq;
}
static void dino_choose_irq(struct parisc_device *dev, void *ctrl)
{
int irq;
struct dino_device *dino = ctrl;
switch (dev->id.sversion) {
case 0x00084: irq = 8; break; /* PS/2 */
case 0x0008c: irq = 10; break; /* RS232 */
case 0x00096: irq = 8; break; /* PS/2 */
default: return; /* Unknown */
}
dino_assign_irq(dino, irq, &dev->irq);
}
/*
* Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de)
* (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...)
*/
static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev)
{
u8 new_irq = dev->irq - 1;
printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n",
pci_name(dev), dev->irq, new_irq);
dev->irq = new_irq;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
static void __init
dino_bios_init(void)
{
DBG("dino_bios_init\n");
}
/*
* dino_card_setup - Set up the memory space for a Dino in card mode.
* @bus: the bus under this dino
*
* Claim an 8MB chunk of unused IO space and call the generic PCI routines
* to set up the addresses of the devices on this bus.
*/
#define _8MB 0x00800000UL
static void __init
dino_card_setup(struct pci_bus *bus, void __iomem *base_addr)
{
int i;
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
struct resource *res;
char name[128];
int size;
res = &dino_dev->hba.lmmio_space;
res->flags = IORESOURCE_MEM;
size = scnprintf(name, sizeof(name), "Dino LMMIO (%s)",
dev_name(bus->bridge));
res->name = kmalloc(size+1, GFP_KERNEL);
if(res->name)
strcpy((char *)res->name, name);
else
res->name = dino_dev->hba.lmmio_space.name;
if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB,
F_EXTEND(0xf0000000UL) | _8MB,
F_EXTEND(0xffffffffUL) &~ _8MB, _8MB) < 0) {
struct list_head *ln, *tmp_ln;
printk(KERN_ERR "Dino: cannot attach bus %s\n",
dev_name(bus->bridge));
/* kill the bus, we can't do anything with it */
list_for_each_safe(ln, tmp_ln, &bus->devices) {
struct pci_dev *dev = pci_dev_b(ln);
list_del(&dev->bus_list);
}
return;
}
bus->resource[1] = res;
bus->resource[0] = &(dino_dev->hba.io_space);
/* Now tell dino what range it has */
for (i = 1; i < 31; i++) {
if (res->start == F_EXTEND(0xf0000000UL | (i * _8MB)))
break;
}
DBG("DINO GSC WRITE i=%d, start=%lx, dino addr = %p\n",
i, res->start, base_addr + DINO_IO_ADDR_EN);
__raw_writel(1 << i, base_addr + DINO_IO_ADDR_EN);
}
static void __init
dino_card_fixup(struct pci_dev *dev)
{
u32 irq_pin;
/*
** REVISIT: card-mode PCI-PCI expansion chassis do exist.
** Not sure they were ever productized.
** Die here since we'll die later in dino_inb() anyway.
*/
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
panic("Card-Mode Dino: PCI-PCI Bridge not supported\n");
}
/*
** Set Latency Timer to 0xff (not a shared bus)
** Set CACHELINE_SIZE.
*/
dino_cfg_write(dev->bus, dev->devfn,
PCI_CACHE_LINE_SIZE, 2, 0xff00 | L1_CACHE_BYTES/4);
/*
** Program INT_LINE for card-mode devices.
** The cards are hardwired according to this algorithm.
** And it doesn't matter if PPB's are present or not since
** the IRQ lines bypass the PPB.
**
** "-1" converts INTA-D (1-4) to PCIINTA-D (0-3) range.
** The additional "-1" adjusts for skewing the IRQ<->slot.
*/
dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin);
dev->irq = pci_swizzle_interrupt_pin(dev, irq_pin) - 1;
/* Shouldn't really need to do this but it's in case someone tries
** to bypass PCI services and look at the card themselves.
*/
dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 1, dev->irq);
}
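/*
** Worked example (slot number assumed): in current kernels
** pci_swizzle_interrupt_pin() computes
** (((pin - 1) + PCI_SLOT(devfn)) % 4) + 1,
** so a device in slot 2 wired to INTA (pin 1) yields 3; the "-1"
** above then maps that onto the PCIINTA-D (0-3) range, giving
** dev->irq = 2 (PCIINTC).
*/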
/* The alignment constraints for PCI bridges under dino */
#define DINO_BRIDGE_ALIGN 0x100000
static void __init
dino_fixup_bus(struct pci_bus *bus)
{
struct list_head *ln;
struct pci_dev *dev;
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
__func__, bus, bus->secondary,
bus->bridge->platform_data);
/* Firmware doesn't set up card-mode dino, so we have to */
if (is_card_dino(&dino_dev->hba.dev->id)) {
dino_card_setup(bus, dino_dev->hba.base_addr);
} else if (bus->parent) {
int i;
pci_read_bridge_bases(bus);
for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
if((bus->self->resource[i].flags &
(IORESOURCE_IO | IORESOURCE_MEM)) == 0)
continue;
if(bus->self->resource[i].flags & IORESOURCE_MEM) {
/* There's a quirk to alignment of
* bridge memory resources: the start
* is the alignment and start-end is
* the size. However, firmware will
* have assigned start and end, so we
* need to take this into account */
bus->self->resource[i].end = bus->self->resource[i].end - bus->self->resource[i].start + DINO_BRIDGE_ALIGN;
bus->self->resource[i].start = DINO_BRIDGE_ALIGN;
}
DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n",
dev_name(&bus->self->dev), i,
bus->self->resource[i].start,
bus->self->resource[i].end);
WARN_ON(pci_assign_resource(bus->self, i));
DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
dev_name(&bus->self->dev), i,
bus->self->resource[i].start,
bus->self->resource[i].end);
}
}
list_for_each(ln, &bus->devices) {
dev = pci_dev_b(ln);
if (is_card_dino(&dino_dev->hba.dev->id))
dino_card_fixup(dev);
/*
** P2PB's only have 2 BARs, no IRQs.
** I'd like to just ignore them for now.
*/
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
continue;
/* null out the ROM resource if there is one (we don't
* care about an expansion rom on parisc, since it
* usually contains (x86) bios code) */
dev->resource[PCI_ROM_RESOURCE].flags = 0;
if(dev->irq == 255) {
#define DINO_FIX_UNASSIGNED_INTERRUPTS
#ifdef DINO_FIX_UNASSIGNED_INTERRUPTS
/* This code tries to assign an unassigned
* interrupt. Leave it disabled unless you
* *really* know what you're doing since the
* pin<->interrupt line mapping varies by bus
* and machine */
u32 irq_pin;
dino_cfg_read(dev->bus, dev->devfn,
PCI_INTERRUPT_PIN, 1, &irq_pin);
irq_pin = pci_swizzle_interrupt_pin(dev, irq_pin) - 1;
printk(KERN_WARNING "Device %s has undefined IRQ, "
"setting to %d\n", pci_name(dev), irq_pin);
dino_cfg_write(dev->bus, dev->devfn,
PCI_INTERRUPT_LINE, 1, irq_pin);
dino_assign_irq(dino_dev, irq_pin, &dev->irq);
#else
dev->irq = 65535;
printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev));
#endif
} else {
/* Adjust INT_LINE for that bus's region */
dino_assign_irq(dino_dev, dev->irq, &dev->irq);
}
}
}
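/*
** Worked example of the bridge alignment quirk handled above
** (firmware values hypothetical): if firmware assigned the window
** [0xf0a00000, 0xf0afffff], the rewrite produces start = 0x100000
** and end = 0xfffff + 0x100000 = 0x1fffff, i.e. start encodes the
** alignment and end - start the size for pci_assign_resource().
*/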
static struct pci_bios_ops dino_bios_ops = {
.init = dino_bios_init,
.fixup_bus = dino_fixup_bus
};
/*
* Initialise a DINO controller chip
*/
static void __init
dino_card_init(struct dino_device *dino_dev)
{
u32 brdg_feat = 0x00784e05;
unsigned long status;
status = __raw_readl(dino_dev->hba.base_addr+DINO_IO_STATUS);
if (status & 0x0000ff80) {
__raw_writel(0x00000005,
dino_dev->hba.base_addr+DINO_IO_COMMAND);
udelay(1);
}
__raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK);
__raw_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN);
__raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR);
#if 1
/* REVISIT - should be a runtime check (eg if (CPU_IS_PCX_L) ...) */
/*
** PCX-L processors don't support XQL like Dino wants it.
** PCX-L2 ignore XQL signal and it doesn't matter.
*/
brdg_feat &= ~0x4; /* UXQL */
#endif
__raw_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT);
/*
** Don't enable address decoding until we know which I/O range
** currently is available from the host. Only affects MMIO
** and not I/O port space.
*/
__raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN);
__raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE);
__raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR);
__raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR);
__raw_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM);
__raw_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL);
__raw_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM);
/* Disable PAMR before writing PAPR */
__raw_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR);
__raw_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR);
__raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR);
/*
** Dino ERS encourages enabling FBB (0x6f).
** We can't until we know *all* devices below us can support it.
** (Something in device configuration header tells us).
*/
__raw_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD);
/* Somewhere, the PCI spec says give devices 1 second
** to recover from the #RESET being de-asserted.
** Experience shows most devices only need 10ms.
** This short-cut speeds up booting significantly.
*/
mdelay(pci_post_reset_delay);
}
static int __init
dino_bridge_init(struct dino_device *dino_dev, const char *name)
{
unsigned long io_addr;
int result, i, count=0;
struct resource *res, *prevres = NULL;
/*
* Decoding IO_ADDR_EN only works for Built-in Dino
* since PDC has already initialized this.
*/
io_addr = __raw_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN);
if (io_addr == 0) {
printk(KERN_WARNING "%s: No PCI devices enabled.\n", name);
return -ENODEV;
}
res = &dino_dev->hba.lmmio_space;
for (i = 0; i < 32; i++) {
unsigned long start, end;
if((io_addr & (1 << i)) == 0)
continue;
start = F_EXTEND(0xf0000000UL) | (i << 23);
end = start + 8 * 1024 * 1024 - 1;
DBG("DINO RANGE %d is at 0x%lx-0x%lx\n", count,
start, end);
if(prevres && prevres->end + 1 == start) {
prevres->end = end;
} else {
if(count >= DINO_MAX_LMMIO_RESOURCES) {
printk(KERN_ERR "%s is out of resource windows for range %d (0x%lx-0x%lx)\n", name, count, start, end);
break;
}
prevres = res;
res->start = start;
res->end = end;
res->flags = IORESOURCE_MEM;
res->name = kmalloc(64, GFP_KERNEL);
if(res->name)
snprintf((char *)res->name, 64, "%s LMMIO %d",
name, count);
res++;
count++;
}
}
res = &dino_dev->hba.lmmio_space;
for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
if(res[i].flags == 0)
break;
result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
if (result < 0) {
printk(KERN_ERR "%s: failed to claim PCI Bus address "
"space %d (0x%lx-0x%lx)!\n", name, i,
(unsigned long)res[i].start, (unsigned long)res[i].end);
return result;
}
}
return 0;
}
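/*
** Decoding sketch (register value hypothetical): IO_ADDR_EN bit i
** enables the 8MB LMMIO window at F_EXTEND(0xf0000000) + (i << 23).
** A value of 0x00000006 (bits 1 and 2) would yield the adjacent
** ranges 0xf0800000-0xf0ffffff and 0xf1000000-0xf17fffff, which the
** loop above coalesces into the single resource
** 0xf0800000-0xf17fffff.
*/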
static int __init dino_common_init(struct parisc_device *dev,
struct dino_device *dino_dev, const char *name)
{
int status;
u32 eim;
struct gsc_irq gsc_irq;
struct resource *res;
pcibios_register_hba(&dino_dev->hba);
pci_bios = &dino_bios_ops; /* used by pci_scan_bus() */
pci_port = &dino_port_ops;
/*
** Note: SMP systems can make use of IRR1/IAR1 registers
** But it won't buy much performance except in very
** specific applications/configurations. Note Dino
** still only has 11 IRQ input lines - just map some of them
** to a different processor.
*/
dev->irq = gsc_alloc_irq(&gsc_irq);
dino_dev->txn_addr = gsc_irq.txn_addr;
dino_dev->txn_data = gsc_irq.txn_data;
eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
/*
** Dino needs a PA "IRQ" to get a processor's attention.
** arch/parisc/kernel/irq.c returns an EIRR bit.
*/
if (dev->irq < 0) {
printk(KERN_WARNING "%s: gsc_alloc_irq() failed\n", name);
return 1;
}
status = request_irq(dev->irq, dino_isr, 0, name, dino_dev);
if (status) {
printk(KERN_WARNING "%s: request_irq() failed with %d\n",
name, status);
return 1;
}
/* Support the serial port which is sometimes attached on built-in
* Dino / Cujo chips.
*/
gsc_fixup_irqs(dev, dino_dev, dino_choose_irq);
/*
** This enables DINO to generate interrupts when it sees
** any of its inputs *change*. Just asserting an IRQ
** before it's enabled (ie unmasked) isn't good enough.
*/
__raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
/*
** Some platforms don't clear Dino's IRR0 register at boot time.
** Reading will clear it now.
*/
__raw_readl(dino_dev->hba.base_addr+DINO_IRR0);
/* allocate I/O Port resource region */
res = &dino_dev->hba.io_space;
if (!is_cujo(&dev->id)) {
res->name = "Dino I/O Port";
} else {
res->name = "Cujo I/O Port";
}
res->start = HBA_PORT_BASE(dino_dev->hba.hba_num);
res->end = res->start + (HBA_PORT_SPACE_SIZE - 1);
res->flags = IORESOURCE_IO; /* do not mark it busy ! */
if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed "
"0x%lx/%lx (hpa 0x%p)\n",
name, (unsigned long)res->start, (unsigned long)res->end,
dino_dev->hba.base_addr);
return 1;
}
return 0;
}
#define CUJO_RAVEN_ADDR F_EXTEND(0xf1000000UL)
#define CUJO_FIREHAWK_ADDR F_EXTEND(0xf1604000UL)
#define CUJO_RAVEN_BADPAGE 0x01003000UL
#define CUJO_FIREHAWK_BADPAGE 0x01607000UL
static const char *dino_vers[] = {
"2.0",
"2.1",
"3.0",
"3.1"
};
static const char *cujo_vers[] = {
"1.0",
"2.0"
};
void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
/*
** Determine if dino should claim this chip (return 0) or not (return 1).
** If so, initialize the chip appropriately (card-mode vs bridge mode).
** Much of the initialization is common though.
*/
static int __init dino_probe(struct parisc_device *dev)
{
struct dino_device *dino_dev; // Dino specific control struct
const char *version = "unknown";
char *name;
int is_cujo = 0;
LIST_HEAD(resources);
struct pci_bus *bus;
unsigned long hpa = dev->hpa.start;
name = "Dino";
if (is_card_dino(&dev->id)) {
version = "3.x (card mode)";
} else {
if (!is_cujo(&dev->id)) {
if (dev->id.hversion_rev < 4) {
version = dino_vers[dev->id.hversion_rev];
}
} else {
name = "Cujo";
is_cujo = 1;
if (dev->id.hversion_rev < 2) {
version = cujo_vers[dev->id.hversion_rev];
}
}
}
printk("%s version %s found at 0x%lx\n", name, version, hpa);
if (!request_mem_region(hpa, PAGE_SIZE, name)) {
printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
hpa);
return 1;
}
/* Check for bugs */
if (is_cujo && dev->id.hversion_rev == 1) {
#ifdef CONFIG_IOMMU_CCIO
printk(KERN_WARNING "Enabling Cujo 2.0 bug workaround\n");
if (hpa == (unsigned long)CUJO_RAVEN_ADDR) {
ccio_cujo20_fixup(dev, CUJO_RAVEN_BADPAGE);
} else if (hpa == (unsigned long)CUJO_FIREHAWK_ADDR) {
ccio_cujo20_fixup(dev, CUJO_FIREHAWK_BADPAGE);
} else {
printk("Don't recognise Cujo at address 0x%lx, not enabling workaround\n", hpa);
}
#endif
} else if (!is_cujo && !is_card_dino(&dev->id) &&
dev->id.hversion_rev < 3) {
printk(KERN_WARNING
"The GSCtoPCI (Dino hrev %d) bus converter found may exhibit\n"
"data corruption. See Service Note Numbers: A4190A-01, A4191A-01.\n"
"Systems shipped after Aug 20, 1997 will not exhibit this problem.\n"
"Models affected: C180, C160, C160L, B160L, and B132L workstations.\n\n",
dev->id.hversion_rev);
/* REVISIT: why are C200/C240 listed in the README table but not
** "Models affected"? Could be an omission in the original literature.
*/
}
dino_dev = kzalloc(sizeof(struct dino_device), GFP_KERNEL);
if (!dino_dev) {
printk("dino_init_chip - couldn't alloc dino_device\n");
return 1;
}
dino_dev->hba.dev = dev;
dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
if (is_card_dino(&dev->id)) {
dino_card_init(dino_dev);
} else {
dino_bridge_init(dino_dev, name);
}
if (dino_common_init(dev, dino_dev, name))
return 1;
dev->dev.platform_data = dino_dev;
pci_add_resource_offset(&resources, &dino_dev->hba.io_space,
HBA_PORT_BASE(dino_dev->hba.hba_num));
if (dino_dev->hba.lmmio_space.flags)
pci_add_resource_offset(&resources, &dino_dev->hba.lmmio_space,
dino_dev->hba.lmmio_space_offset);
if (dino_dev->hba.elmmio_space.flags)
pci_add_resource_offset(&resources, &dino_dev->hba.elmmio_space,
dino_dev->hba.lmmio_space_offset);
if (dino_dev->hba.gmmio_space.flags)
pci_add_resource(&resources, &dino_dev->hba.gmmio_space);
/*
** The sysdata argument below is passed as NULL and left unused, to
** avoid chicken/egg problems with the configuration accessor
** functions (which walk the parisc device tree instead).
*/
dino_dev->hba.hba_bus = bus = pci_create_root_bus(&dev->dev,
dino_current_bus, &dino_cfg_ops, NULL, &resources);
if (!bus) {
printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
dev_name(&dev->dev), dino_current_bus);
pci_free_resource_list(&resources);
/* increment the bus number in case of duplicates */
dino_current_bus++;
return 0;
}
bus->subordinate = pci_scan_child_bus(bus);
/* This code *depends* on scanning being single threaded
* if it isn't, this global bus number count will fail
*/
dino_current_bus = bus->subordinate + 1;
pci_bus_assign_resources(bus);
pci_bus_add_devices(bus);
return 0;
}
/*
* Normally, we would just test sversion. But the Elroy PCI adapter has
* the same sversion as Dino, so we have to check hversion as well.
* Unfortunately, the J2240 PDC reports the wrong hversion for the first
* Dino, so we have to test for Dino, Cujo and Dino-in-a-J2240.
* For card-mode Dino, most machines report an sversion of 9D. But 715
* and 725 firmware misreport it as 0x08080 for no adequately explained
* reason.
*/
static struct parisc_device_id dino_tbl[] = {
{ HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x004, 0x0009D },/* Card-mode Dino */
{ HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x08080 }, /* XXX */
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x680, 0xa }, /* Bridge-mode Dino */
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x682, 0xa }, /* Bridge-mode Cujo */
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x05d, 0xa }, /* Dino in a J2240 */
{ 0, }
};
static struct parisc_driver dino_driver = {
.name = "dino",
.id_table = dino_tbl,
.probe = dino_probe,
};
/*
* One time initialization to let the world know Dino is here.
* This is the only routine which is NOT static.
* Must be called exactly once before pci_init().
*/
int __init dino_init(void)
{
register_parisc_driver(&dino_driver);
return 0;
}
| gpl-2.0 |
ztemt/z5s_mini_H113_kernel | arch/mips/pci/fixup-cobalt.c | 5358 | 5542 | /*
* Cobalt Qube/Raq PCI support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 2002, 2003 by Ralf Baechle
* Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/pci.h>
#include <asm/io.h>
#include <asm/gt64120.h>
#include <cobalt.h>
#include <irq.h>
/*
* PCI slot numbers
*/
#define COBALT_PCICONF_CPU 0x06
#define COBALT_PCICONF_ETH0 0x07
#define COBALT_PCICONF_RAQSCSI 0x08
#define COBALT_PCICONF_VIA 0x09
#define COBALT_PCICONF_PCISLOT 0x0A
#define COBALT_PCICONF_ETH1 0x0C
/*
* The Cobalt board ID information. The boards have an ID number wired
* into the VIA that is available in the high nibble of register 94.
*/
#define VIA_COBALT_BRD_ID_REG 0x94
#define VIA_COBALT_BRD_REG_to_ID(reg) ((unsigned char)(reg) >> 4)
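/*
 * Example (register value hypothetical): reading 0x54 from VIA config
 * register 0x94 gives VIA_COBALT_BRD_REG_to_ID(0x54) == 5.
 */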
static void qube_raq_galileo_early_fixup(struct pci_dev *dev)
{
if (dev->devfn == PCI_DEVFN(0, 0) &&
(dev->class >> 8) == PCI_CLASS_MEMORY_OTHER) {
dev->class = (PCI_CLASS_BRIDGE_HOST << 8) | (dev->class & 0xff);
printk(KERN_INFO "Galileo: fixed bridge class\n");
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
qube_raq_galileo_early_fixup);
static void qube_raq_via_bmIDE_fixup(struct pci_dev *dev)
{
unsigned short cfgword;
unsigned char lt;
/* Enable Bus Mastering and fast back to back. */
pci_read_config_word(dev, PCI_COMMAND, &cfgword);
cfgword |= (PCI_COMMAND_FAST_BACK | PCI_COMMAND_MASTER);
pci_write_config_word(dev, PCI_COMMAND, cfgword);
/* Enable both ide interfaces. ROM only enables primary one. */
pci_write_config_byte(dev, 0x40, 0xb);
/* Set latency timer to reasonable value. */
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lt);
if (lt < 64)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1,
qube_raq_via_bmIDE_fixup);
static void qube_raq_galileo_fixup(struct pci_dev *dev)
{
if (dev->devfn != PCI_DEVFN(0, 0))
return;
/* Fix PCI latency-timer and cache-line-size values in Galileo
* host bridge.
*/
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
/*
* The code described by the comment below has been removed
* as it causes bus mastering by the Ethernet controllers
* to break under any kind of network load. We always set
* the retry timeouts to their maximum.
*
* --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--
*
* On all machines prior to Q2, we had the STOP line disconnected
* from Galileo to VIA on PCI. The new Galileo does not function
* correctly unless we have it connected.
*
* Therefore we must set the disconnect/retry cycle values to
* something sensible when using the new Galileo.
*/
printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
#if 0
if (dev->revision >= 0x10) {
/* New Galileo, assumes PCI stop line to VIA is connected. */
GT_WRITE(GT_PCI0_TOR_OFS, 0x4020);
} else if (dev->revision == 0x1 || dev->revision == 0x2)
#endif
{
signed int timeo;
/* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */
timeo = GT_READ(GT_PCI0_TOR_OFS);
/* Old Galileo, assumes PCI STOP line to VIA is disconnected. */
GT_WRITE(GT_PCI0_TOR_OFS,
(0xff << 16) | /* retry count */
(0xff << 8) | /* timeout 1 */
0xff); /* timeout 0 */
/* enable PCI retry exceeded interrupt */
GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS));
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
qube_raq_galileo_fixup);
int cobalt_board_id;
static void qube_raq_via_board_id_fixup(struct pci_dev *dev)
{
u8 id;
int retval;
retval = pci_read_config_byte(dev, VIA_COBALT_BRD_ID_REG, &id);
if (retval) {
panic("Cannot read board ID");
return;
}
cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(id);
printk(KERN_INFO "Cobalt board ID: %d\n", cobalt_board_id);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
qube_raq_via_board_id_fixup);
static char irq_tab_qube1[] __initdata = {
[COBALT_PCICONF_CPU] = 0,
[COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
[COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
[COBALT_PCICONF_ETH1] = 0
};
static char irq_tab_cobalt[] __initdata = {
[COBALT_PCICONF_CPU] = 0,
[COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
[COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
[COBALT_PCICONF_ETH1] = ETH1_IRQ
};
static char irq_tab_raq2[] __initdata = {
[COBALT_PCICONF_CPU] = 0,
[COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
[COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
[COBALT_PCICONF_ETH1] = ETH1_IRQ
};
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
return irq_tab_qube1[slot];
if (cobalt_board_id == COBALT_BRD_ID_RAQ2)
return irq_tab_raq2[slot];
return irq_tab_cobalt[slot];
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
return 0;
}
| gpl-2.0 |
jmztaylor/android_kernel_htc_a5dug | arch/x86/platform/olpc/olpc-xo1-rtc.c | 10222 | 1834 | /*
* Support for OLPC XO-1 Real Time Clock (RTC)
*
* Copyright (C) 2011 One Laptop per Child
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/of.h>
#include <asm/msr.h>
#include <asm/olpc.h>
static void rtc_wake_on(struct device *dev)
{
olpc_xo1_pm_wakeup_set(CS5536_PM_RTC);
}
static void rtc_wake_off(struct device *dev)
{
olpc_xo1_pm_wakeup_clear(CS5536_PM_RTC);
}
static struct resource rtc_platform_resource[] = {
[0] = {
.start = RTC_PORT(0),
.end = RTC_PORT(1),
.flags = IORESOURCE_IO,
},
[1] = {
.start = RTC_IRQ,
.end = RTC_IRQ,
.flags = IORESOURCE_IRQ,
}
};
static struct cmos_rtc_board_info rtc_info = {
.rtc_day_alarm = 0,
.rtc_mon_alarm = 0,
.rtc_century = 0,
.wake_on = rtc_wake_on,
.wake_off = rtc_wake_off,
};
static struct platform_device xo1_rtc_device = {
.name = "rtc_cmos",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_platform_resource),
.dev.platform_data = &rtc_info,
.resource = rtc_platform_resource,
};
static int __init xo1_rtc_init(void)
{
int r;
struct device_node *node;
node = of_find_compatible_node(NULL, NULL, "olpc,xo1-rtc");
if (!node)
return 0;
of_node_put(node);
pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n");
rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
r = platform_device_register(&xo1_rtc_device);
if (r)
return r;
device_init_wakeup(&xo1_rtc_device.dev, 1);
return 0;
}
arch_initcall(xo1_rtc_init);
| gpl-2.0 |
cjdoucette/XIA-for-Linux | arch/sh/boards/mach-cayman/panic.c | 13806 | 1031 | /*
* Copyright (C) 2003 Richard Curnow, SuperH UK Limited
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <cpu/registers.h>
/* THIS IS A PHYSICAL ADDRESS */
#define HDSP2534_ADDR (0x04002100)
static void poor_mans_delay(void)
{
int i;
for (i = 0; i < 2500000; i++)
cpu_relax();
}
static void show_value(unsigned long x)
{
int i;
unsigned nibble;
for (i = 0; i < 8; i++) {
nibble = ((x >> (i * 4)) & 0xf);
__raw_writeb(nibble + ((nibble > 9) ? 55 : 48),
HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
}
}
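/*
 * Example (illustrative): the expression above maps nibbles 0-9 to
 * ASCII '0'-'9' (+48) and 0xa-0xf to 'A'-'F' (+55), so 0xb becomes
 * 11 + 55 = 66 == 'B'.
 */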
void
panic_handler(unsigned long panicPC, unsigned long panicSSR,
unsigned long panicEXPEVT)
{
while (1) {
/* This piece of code displays the PC on the LED display */
show_value(panicPC);
poor_mans_delay();
show_value(panicSSR);
poor_mans_delay();
show_value(panicEXPEVT);
poor_mans_delay();
}
}
| gpl-2.0 |
MarvinCorro/linux-cmps107 | net/sched/cls_u32.c | 495 | 24584 | /*
* net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* The filters are packed to hash tables of key nodes
* with a set of 32bit key/mask pairs at every node.
* Nodes reference next level hash tables etc.
*
* This scheme is the best universal classifier I managed to
* invent; it is not super-fast, but it is not slow (provided you
* program it correctly), and general enough. And its relative
* speed grows as the number of rules becomes larger.
*
* It seems that it represents the best middle point between
* speed and manageability both by human and by machine.
*
* It is especially useful for link sharing combined with QoS;
* pure RSVP doesn't need such a general approach and can use
* much simpler (and faster) schemes, sort of cls_rsvp.c.
*
* JHS: We should remove the CONFIG_NET_CLS_IND from here
* eventually when the meta match extension is made available
*
* nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
struct tc_u_knode {
struct tc_u_knode __rcu *next;
u32 handle;
struct tc_u_hnode __rcu *ht_up;
struct tcf_exts exts;
#ifdef CONFIG_NET_CLS_IND
int ifindex;
#endif
u8 fshift;
struct tcf_result res;
struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt __percpu *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
u32 val;
u32 mask;
u32 __percpu *pcpu_success;
#endif
struct tcf_proto *tp;
struct rcu_head rcu;
/* The 'sel' field MUST be the last field in structure to allow for
* tc_u32_keys allocated at end of structure.
*/
struct tc_u32_sel sel;
};
struct tc_u_hnode {
struct tc_u_hnode __rcu *next;
u32 handle;
u32 prio;
struct tc_u_common *tp_c;
int refcnt;
unsigned int divisor;
struct rcu_head rcu;
/* The 'ht' field MUST be the last field in structure to allow for
* more entries allocated at end of structure.
*/
struct tc_u_knode __rcu *ht[1];
};
struct tc_u_common {
struct tc_u_hnode __rcu *hlist;
struct Qdisc *q;
int refcnt;
u32 hgenerator;
struct rcu_head rcu;
};
static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
{
unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h;
}
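/* Worked example (selector values hypothetical): for a selector with
 * ntohl(hmask) == 0x00ff0000, u32_change() below stores fshift = 16,
 * so a key whose host-order value is 0x12345678 folds to
 * (0x12345678 & 0x00ff0000) >> 16 == 0x34; the caller then bounds the
 * result by the hash table's divisor.
 */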
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
struct {
struct tc_u_knode *knode;
unsigned int off;
} stack[TC_U32_MAXDEPTH];
struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
int off2 = 0;
int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
int j;
#endif
int i, r;
next_ht:
n = rcu_dereference_bh(ht->ht[sel]);
next_knode:
if (n) {
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
__this_cpu_inc(n->pf->rcnt);
j = 0;
#endif
#ifdef CONFIG_CLS_U32_MARK
if ((skb->mark & n->mask) != n->val) {
n = rcu_dereference_bh(n->next);
goto next_knode;
} else {
__this_cpu_inc(*n->pcpu_success);
}
#endif
for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, hdata;
if (skb_headroom(skb) + toff > INT_MAX)
goto out;
data = skb_header_pointer(skb, toff, 4, &hdata);
if (!data)
goto out;
if ((*data ^ key->val) & key->mask) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
__this_cpu_inc(n->pf->kcnts[j]);
j++;
#endif
}
ht = rcu_dereference_bh(n->ht_down);
if (!ht) {
check_terminal:
if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_IND
if (!tcf_match_indev(skb, n->ifindex)) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
#endif
#ifdef CONFIG_CLS_U32_PERF
__this_cpu_inc(n->pf->rhit);
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
n = rcu_dereference_bh(n->next);
goto next_knode;
}
return r;
}
n = rcu_dereference_bh(n->next);
goto next_knode;
}
/* PUSH */
if (sdepth >= TC_U32_MAXDEPTH)
goto deadloop;
stack[sdepth].knode = n;
stack[sdepth].off = off;
sdepth++;
ht = rcu_dereference_bh(n->ht_down);
sel = 0;
if (ht->divisor) {
__be32 *data, hdata;
data = skb_header_pointer(skb, off + n->sel.hoff, 4,
&hdata);
if (!data)
goto out;
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift);
}
if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht;
if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, hdata;
data = skb_header_pointer(skb,
off + n->sel.offoff,
2, &hdata);
if (!data)
goto out;
off2 += ntohs(n->sel.offmask & *data) >>
n->sel.offshift;
}
off2 &= ~3;
}
if (n->sel.flags & TC_U32_EAT) {
off += off2;
off2 = 0;
}
if (off < skb->len)
goto next_ht;
}
/* POP */
if (sdepth--) {
n = stack[sdepth].knode;
ht = rcu_dereference_bh(n->ht_up);
off = stack[sdepth].off;
goto check_terminal;
}
out:
return -1;
deadloop:
net_warn_ratelimited("cls_u32: dead loop\n");
return -1;
}
static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next))
if (ht->handle == handle)
break;
return ht;
}
static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
unsigned int sel;
struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle);
if (sel > ht->divisor)
goto out;
for (n = rtnl_dereference(ht->ht[sel]);
n;
n = rtnl_dereference(n->next))
if (n->handle == handle)
break;
out:
return n;
}
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
struct tc_u_hnode *ht;
struct tc_u_common *tp_c = tp->data;
if (TC_U32_HTID(handle) == TC_U32_ROOT)
ht = rtnl_dereference(tp->root);
else
ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
if (!ht)
return 0;
if (TC_U32_KEY(handle) == 0)
return (unsigned long)ht;
return (unsigned long)u32_lookup_key(ht, handle);
}
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
int i = 0x800;
/* hgenerator is only used inside the rtnl lock, so it is safe to
* increment it without read-copy-update semantics
*/
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
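/* Layout sketch (per the TC_U32_* macros in <linux/pkt_cls.h>): a u32
 * handle packs htid:hash:node as 12:8:12 bits, so for generator value
 * 1 the function above returns (0x801 << 20) == 0x80100000, a hash
 * table id with the hash and node fields still zero.
 */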
static int u32_init(struct tcf_proto *tp)
{
struct tc_u_hnode *root_ht;
struct tc_u_common *tp_c;
tp_c = tp->q->u32_node;
root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
if (root_ht == NULL)
return -ENOBUFS;
root_ht->divisor = 0;
root_ht->refcnt++;
root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
root_ht->prio = tp->prio;
if (tp_c == NULL) {
tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;
}
tp_c->q = tp->q;
tp->q->u32_node = tp_c;
}
tp_c->refcnt++;
RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, root_ht);
root_ht->tp_c = tp_c;
rcu_assign_pointer(tp->root, root_ht);
tp->data = tp_c;
return 0;
}
static int u32_destroy_key(struct tcf_proto *tp,
struct tc_u_knode *n,
bool free_pf)
{
tcf_exts_destroy(&n->exts);
if (n->ht_down)
n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
if (free_pf)
free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
if (free_pf)
free_percpu(n->pcpu_success);
#endif
kfree(n);
return 0;
}
/* u32_delete_key_rcu should be called when free'ing a copied
* version of a tc_u_knode obtained from u32_init_knode(). When
* copies are obtained from u32_init_knode() the statistics are
* shared between the old and new copies to allow readers to
* continue to update the statistics during the copy. To support
* this the u32_delete_key_rcu variant does not free the percpu
* statistics.
*/
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
u32_destroy_key(key->tp, key, false);
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
* that free's the entire structure including the statistics
* percpu variables. Only use this if the key is not a copy
* returned by u32_init_knode(). See u32_delete_key_rcu()
* for the variant that should be used with keys returned from
* u32_init_knode()
*/
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
u32_destroy_key(key->tp, key, true);
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
struct tc_u_knode __rcu **kp;
struct tc_u_knode *pkp;
struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
if (ht) {
kp = &ht->ht[TC_U32_HASH(key->handle)];
for (pkp = rtnl_dereference(*kp); pkp;
kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
if (pkp == key) {
RCU_INIT_POINTER(*kp, key->next);
tcf_unbind_filter(tp, &key->res);
call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
return 0;
}
}
}
WARN_ON(1);
return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_knode *n;
unsigned int h;
for (h = 0; h <= ht->divisor; h++) {
while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
RCU_INIT_POINTER(ht->ht[h],
rtnl_dereference(n->next));
tcf_unbind_filter(tp, &n->res);
call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
}
}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode __rcu **hn;
struct tc_u_hnode *phn;
WARN_ON(ht->refcnt);
u32_clear_hnode(tp, ht);
hn = &tp_c->hlist;
for (phn = rtnl_dereference(*hn);
phn;
hn = &phn->next, phn = rtnl_dereference(*hn)) {
if (phn == ht) {
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
}
}
return -ENOENT;
}
static bool ht_empty(struct tc_u_hnode *ht)
{
unsigned int h;
for (h = 0; h <= ht->divisor; h++)
if (rcu_access_pointer(ht->ht[h]))
return false;
return true;
}
static bool u32_destroy(struct tcf_proto *tp, bool force)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
WARN_ON(root_ht == NULL);
if (!force) {
if (root_ht) {
if (root_ht->refcnt > 1)
return false;
if (root_ht->refcnt == 1) {
if (!ht_empty(root_ht))
return false;
}
}
if (tp_c->refcnt > 1)
return false;
if (tp_c->refcnt == 1) {
struct tc_u_hnode *ht;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next))
if (!ht_empty(ht))
return false;
}
}
if (root_ht && --root_ht->refcnt == 0)
u32_destroy_hnode(tp, root_ht);
if (--tp_c->refcnt == 0) {
struct tc_u_hnode *ht;
tp->q->u32_node = NULL;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next)) {
ht->refcnt--;
u32_clear_hnode(tp, ht);
}
while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
RCU_INIT_POINTER(tp_c->hlist, ht->next);
kfree_rcu(ht, rcu);
}
kfree(tp_c);
}
tp->data = NULL;
return true;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
if (ht == NULL)
return 0;
if (TC_U32_KEY(ht->handle))
return u32_delete_key(tp, (struct tc_u_knode *)ht);
if (root_ht == ht)
return -EINVAL;
if (ht->refcnt == 1) {
ht->refcnt--;
u32_destroy_hnode(tp, ht);
} else {
return -EBUSY;
}
return 0;
}
#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
unsigned long i;
unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
GFP_KERNEL);
if (!bitmap)
return handle | 0xFFF;
for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
n;
n = rtnl_dereference(n->next))
set_bit(TC_U32_NODE(n->handle), bitmap);
i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
if (i >= NR_U32_NODE)
i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
kfree(bitmap);
return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
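/* Allocation sketch (occupancy hypothetical): if nodes 0x800 and
 * 0x801 already exist in this bucket, the first bitmap search above
 * returns 0x802; only when 0x800-0xfff are exhausted does the second
 * search fall back to the 0x001-0x7ff range.
 */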
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
[TCA_U32_CLASSID] = { .type = NLA_U32 },
[TCA_U32_HASH] = { .type = NLA_U32 },
[TCA_U32_LINK] = { .type = NLA_U32 },
[TCA_U32_DIVISOR] = { .type = NLA_U32 },
[TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
[TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
[TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
};
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
unsigned long base, struct tc_u_hnode *ht,
struct tc_u_knode *n, struct nlattr **tb,
struct nlattr *est, bool ovr)
{
int err;
struct tcf_exts e;
tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
if (err < 0)
return err;
err = -EINVAL;
if (tb[TCA_U32_LINK]) {
u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
struct tc_u_hnode *ht_down = NULL, *ht_old;
if (TC_U32_KEY(handle))
goto errout;
if (handle) {
ht_down = u32_lookup_ht(ht->tp_c, handle);
if (ht_down == NULL)
goto errout;
ht_down->refcnt++;
}
ht_old = rtnl_dereference(n->ht_down);
rcu_assign_pointer(n->ht_down, ht_down);
if (ht_old)
ht_old->refcnt--;
}
if (tb[TCA_U32_CLASSID]) {
n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
tcf_bind_filter(tp, &n->res, base);
}
#ifdef CONFIG_NET_CLS_IND
if (tb[TCA_U32_INDEV]) {
int ret;
ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
if (ret < 0)
goto errout;
n->ifindex = ret;
}
#endif
tcf_exts_change(tp, &n->exts, &e);
return 0;
errout:
tcf_exts_destroy(&e);
return err;
}
static void u32_replace_knode(struct tcf_proto *tp,
struct tc_u_common *tp_c,
struct tc_u_knode *n)
{
struct tc_u_knode __rcu **ins;
struct tc_u_knode *pins;
struct tc_u_hnode *ht;
if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
ht = rtnl_dereference(tp->root);
else
ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));
ins = &ht->ht[TC_U32_HASH(n->handle)];
/* The node must always exist for it to be replaced; if this is not
* the case then something went very wrong elsewhere.
*/
for (pins = rtnl_dereference(*ins); ;
ins = &pins->next, pins = rtnl_dereference(*ins))
if (pins->handle == n->handle)
break;
RCU_INIT_POINTER(n->next, pins->next);
rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
struct tc_u_knode *n)
{
struct tc_u_knode *new;
struct tc_u32_sel *s = &n->sel;
new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
GFP_KERNEL);
if (!new)
return NULL;
RCU_INIT_POINTER(new->next, n->next);
new->handle = n->handle;
RCU_INIT_POINTER(new->ht_up, n->ht_up);
#ifdef CONFIG_NET_CLS_IND
new->ifindex = n->ifindex;
#endif
new->fshift = n->fshift;
new->res = n->res;
RCU_INIT_POINTER(new->ht_down, n->ht_down);
/* bump reference count as long as we hold pointer to structure */
if (new->ht_down)
new->ht_down->refcnt++;
#ifdef CONFIG_CLS_U32_PERF
/* Statistics may be incremented by readers during update
* so we must keep them intact. When the node is later destroyed
* a special destroy call must be made to not free the pf memory.
*/
new->pf = n->pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
new->val = n->val;
new->mask = n->mask;
/* Similarly success statistics must be moved as pointers */
new->pcpu_success = n->pcpu_success;
#endif
new->tp = tp;
memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE);
return new;
}
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca,
unsigned long *arg, bool ovr)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
struct tc_u32_sel *s;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_U32_MAX + 1];
u32 htid;
int err;
#ifdef CONFIG_CLS_U32_PERF
size_t size;
#endif
if (opt == NULL)
return handle ? -EINVAL : 0;
err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
if (err < 0)
return err;
n = (struct tc_u_knode *)*arg;
if (n) {
struct tc_u_knode *new;
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
new = u32_init_knode(tp, n);
if (!new)
return -ENOMEM;
err = u32_set_parms(net, tp, base,
rtnl_dereference(n->ht_up), new, tb,
tca[TCA_RATE], ovr);
if (err) {
u32_destroy_key(tp, new, false);
return err;
}
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
call_rcu(&n->rcu, u32_delete_key_rcu);
return 0;
}
if (tb[TCA_U32_DIVISOR]) {
unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100)
return -EINVAL;
if (TC_U32_KEY(handle))
return -EINVAL;
if (handle == 0) {
handle = gen_new_htid(tp->data);
if (handle == 0)
return -ENOMEM;
}
ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
ht->tp_c = tp_c;
ht->refcnt = 1;
ht->divisor = divisor;
ht->handle = handle;
ht->prio = tp->prio;
RCU_INIT_POINTER(ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, ht);
*arg = (unsigned long)ht;
return 0;
}
if (tb[TCA_U32_HASH]) {
htid = nla_get_u32(tb[TCA_U32_HASH]);
if (TC_U32_HTID(htid) == TC_U32_ROOT) {
ht = rtnl_dereference(tp->root);
htid = ht->handle;
} else {
ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
if (ht == NULL)
return -EINVAL;
}
} else {
ht = rtnl_dereference(tp->root);
htid = ht->handle;
}
if (ht->divisor < TC_U32_HASH(htid))
return -EINVAL;
if (handle) {
if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
return -EINVAL;
handle = htid | TC_U32_NODE(handle);
} else
handle = gen_new_kid(ht, htid);
if (tb[TCA_U32_SEL] == NULL)
return -EINVAL;
s = nla_data(tb[TCA_U32_SEL]);
n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL)
return -ENOBUFS;
#ifdef CONFIG_CLS_U32_PERF
size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
if (!n->pf) {
kfree(n);
return -ENOBUFS;
}
#endif
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
n->tp = tp;
#ifdef CONFIG_CLS_U32_MARK
n->pcpu_success = alloc_percpu(u32);
if (!n->pcpu_success) {
err = -ENOMEM;
goto errout;
}
if (tb[TCA_U32_MARK]) {
struct tc_u32_mark *mark;
mark = nla_data(tb[TCA_U32_MARK]);
n->val = mark->val;
n->mask = mark->mask;
}
#endif
err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
if (err == 0) {
struct tc_u_knode __rcu **ins;
struct tc_u_knode *pins;
ins = &ht->ht[TC_U32_HASH(handle)];
for (pins = rtnl_dereference(*ins); pins;
ins = &pins->next, pins = rtnl_dereference(*ins))
if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
break;
RCU_INIT_POINTER(n->next, pins);
rcu_assign_pointer(*ins, n);
*arg = (unsigned long)n;
return 0;
}
#ifdef CONFIG_CLS_U32_MARK
free_percpu(n->pcpu_success);
errout:
#endif
#ifdef CONFIG_CLS_U32_PERF
free_percpu(n->pf);
#endif
kfree(n);
return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
unsigned int h;
if (arg->stop)
return;
for (ht = rtnl_dereference(tp_c->hlist);
ht;
ht = rtnl_dereference(ht->next)) {
if (ht->prio != tp->prio)
continue;
if (arg->count >= arg->skip) {
if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
arg->stop = 1;
return;
}
}
arg->count++;
for (h = 0; h <= ht->divisor; h++) {
for (n = rtnl_dereference(ht->ht[h]);
n;
n = rtnl_dereference(n->next)) {
if (arg->count < arg->skip) {
arg->count++;
continue;
}
if (arg->fn(tp, (unsigned long)n, arg) < 0) {
arg->stop = 1;
return;
}
arg->count++;
}
}
}
}
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct tc_u_hnode *ht_up, *ht_down;
struct nlattr *nest;
if (n == NULL)
return skb->len;
t->tcm_handle = n->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) {
struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
u32 divisor = ht->divisor + 1;
if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
goto nla_put_failure;
} else {
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt *gpf;
int cpu;
#endif
if (nla_put(skb, TCA_U32_SEL,
sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
&n->sel))
goto nla_put_failure;
ht_up = rtnl_dereference(n->ht_up);
if (ht_up) {
u32 htid = n->handle & 0xFFFFF000;
if (nla_put_u32(skb, TCA_U32_HASH, htid))
goto nla_put_failure;
}
if (n->res.classid &&
nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
goto nla_put_failure;
ht_down = rtnl_dereference(n->ht_down);
if (ht_down &&
nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
goto nla_put_failure;
#ifdef CONFIG_CLS_U32_MARK
if ((n->val || n->mask)) {
struct tc_u32_mark mark = {.val = n->val,
.mask = n->mask,
.success = 0};
int cpum;
for_each_possible_cpu(cpum) {
__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
mark.success += cnt;
}
if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
goto nla_put_failure;
}
#endif
if (tcf_exts_dump(skb, &n->exts) < 0)
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
if (n->ifindex) {
struct net_device *dev;
dev = __dev_get_by_index(net, n->ifindex);
if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
goto nla_put_failure;
}
#endif
#ifdef CONFIG_CLS_U32_PERF
gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
n->sel.nkeys * sizeof(u64),
GFP_KERNEL);
if (!gpf)
goto nla_put_failure;
for_each_possible_cpu(cpu) {
int i;
struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
gpf->rcnt += pf->rcnt;
gpf->rhit += pf->rhit;
for (i = 0; i < n->sel.nkeys; i++)
gpf->kcnts[i] += pf->kcnts[i];
}
if (nla_put(skb, TCA_U32_PCNT,
sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
gpf)) {
kfree(gpf);
goto nla_put_failure;
}
kfree(gpf);
#endif
}
nla_nest_end(skb, nest);
if (TC_U32_KEY(n->handle))
if (tcf_exts_dump_stats(skb, &n->exts) < 0)
goto nla_put_failure;
return skb->len;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.kind = "u32",
.classify = u32_classify,
.init = u32_init,
.destroy = u32_destroy,
.get = u32_get,
.change = u32_change,
.delete = u32_delete,
.walk = u32_walk,
.dump = u32_dump,
.owner = THIS_MODULE,
};
static int __init init_u32(void)
{
pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
pr_info(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
pr_info(" input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
pr_info(" Actions configured\n");
#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
static void __exit exit_u32(void)
{
unregister_tcf_proto_ops(&cls_u32_ops);
}
module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
| gpl-2.0 |
GrinningFerret/android_kernel_lge_w7 | arch/arm/mach-msm/pil-msa.c | 1263 | 9954 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "pil-msa.h"
/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB 0x010
/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE 0x180
#define MSS_MODEM_HALT_BASE 0x200
#define MSS_NC_HALT_BASE 0x280
/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS 0x1
#define STATUS_XPU_UNLOCKED 0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2
/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE 0x00
#define RMB_PBL_STATUS 0x04
#define RMB_MBA_COMMAND 0x08
#define RMB_MBA_STATUS 0x0C
#define RMB_PMI_META_DATA 0x10
#define RMB_PMI_CODE_START 0x14
#define RMB_PMI_CODE_LENGTH 0x18
#define MAX_VDD_MX_UV 1150000
#define POLL_INTERVAL_US 50
#define CMD_META_DATA_READY 0x1
#define CMD_LOAD_READY 0x2
#define STATUS_META_DATA_AUTH_SUCCESS 0x3
#define STATUS_AUTH_COMPLETE 0x4
/* External BHS */
#define EXTERNAL_BHS_ON BIT(0)
#define EXTERNAL_BHS_STATUS BIT(4)
#define BHS_TIMEOUT_US 50
static int pbl_mba_boot_timeout_ms = 1000;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);
static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);
static int pil_msa_pbl_power_up(struct q6v5_data *drv)
{
int ret = 0;
struct device *dev = drv->desc.dev;
u32 regval;
if (drv->vreg) {
ret = regulator_enable(drv->vreg);
if (ret)
dev_err(dev, "Failed to enable modem regulator.\n");
}
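	/* If an external CX-rail block-head switch (BHS) is present, switch
	 * it on and poll until it reports being up.
	 */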
if (drv->cxrail_bhs) {
regval = readl_relaxed(drv->cxrail_bhs);
regval |= EXTERNAL_BHS_ON;
writel_relaxed(regval, drv->cxrail_bhs);
ret = readl_poll_timeout(drv->cxrail_bhs, regval,
regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
}
return ret;
}
static int pil_msa_pbl_power_down(struct q6v5_data *drv)
{
u32 regval;
if (drv->cxrail_bhs) {
regval = readl_relaxed(drv->cxrail_bhs);
regval &= ~EXTERNAL_BHS_ON;
writel_relaxed(regval, drv->cxrail_bhs);
}
if (drv->vreg)
return regulator_disable(drv->vreg);
return 0;
}
static int pil_msa_pbl_enable_clks(struct q6v5_data *drv)
{
int ret;
ret = clk_prepare_enable(drv->ahb_clk);
if (ret)
goto err_ahb_clk;
ret = clk_prepare_enable(drv->axi_clk);
if (ret)
goto err_axi_clk;
ret = clk_prepare_enable(drv->rom_clk);
if (ret)
goto err_rom_clk;
return 0;
err_rom_clk:
clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
return ret;
}
static void pil_msa_pbl_disable_clks(struct q6v5_data *drv)
{
clk_disable_unprepare(drv->rom_clk);
clk_disable_unprepare(drv->axi_clk);
clk_disable_unprepare(drv->ahb_clk);
}
static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
{
struct device *dev = drv->desc.dev;
int ret;
u32 status;
/* Wait for PBL completion. */
ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
if (ret) {
dev_err(dev, "PBL boot timed out\n");
return ret;
}
if (status != STATUS_PBL_SUCCESS) {
dev_err(dev, "PBL returned unexpected status %d\n", status);
return -EINVAL;
}
/* Wait for MBA completion. */
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
if (ret) {
dev_err(dev, "MBA boot timed out\n");
return ret;
}
if (status != STATUS_XPU_UNLOCKED &&
status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
dev_err(dev, "MBA returned unexpected status %d\n", status);
return -EINVAL;
}
return 0;
}
static int pil_msa_pbl_shutdown(struct pil_desc *pil)
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);
writel_relaxed(1, drv->restart_reg);
if (drv->is_booted) {
pil_msa_pbl_disable_clks(drv);
pil_msa_pbl_power_down(drv);
drv->is_booted = false;
}
return 0;
}
static int pil_msa_pbl_reset(struct pil_desc *pil)
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
phys_addr_t start_addr = pil_get_entry_addr(pil);
int ret;
/*
* Bring subsystem out of reset and enable required
* regulators and clocks.
*/
ret = pil_msa_pbl_power_up(drv);
if (ret)
goto err_power;
/* Deassert reset to subsystem and wait for propagation */
writel_relaxed(0, drv->restart_reg);
mb();
udelay(2);
ret = pil_msa_pbl_enable_clks(drv);
if (ret)
goto err_clks;
/* Program Image Address */
if (drv->self_auth) {
writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
/* Ensure write to RMB base occurs before reset is released. */
mb();
} else {
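		/* No self-authentication: program the Q6 boot vector with the
		 * entry address directly (the register presumably takes the
		 * address in this shifted form).
		 */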
writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
drv->reg_base + QDSP6SS_RST_EVB);
}
ret = pil_q6v5_reset(pil);
if (ret)
goto err_q6v5_reset;
/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
if (drv->self_auth) {
ret = pil_msa_wait_for_mba_ready(drv);
if (ret)
goto err_q6v5_reset;
}
drv->is_booted = true;
return 0;
err_q6v5_reset:
pil_msa_pbl_disable_clks(drv);
err_clks:
writel_relaxed(1, drv->restart_reg);
pil_msa_pbl_power_down(drv);
err_power:
return ret;
}
static int pil_msa_pbl_make_proxy_votes(struct pil_desc *pil)
{
int ret;
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
ret = regulator_set_voltage(drv->vreg_mx, VDD_MSS_UV, MAX_VDD_MX_UV);
if (ret) {
dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
return ret;
}
ret = regulator_enable(drv->vreg_mx);
if (ret) {
dev_err(pil->dev, "Failed to enable vreg_mx\n");
regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
return ret;
}
ret = pil_q6v5_make_proxy_votes(pil);
if (ret) {
regulator_disable(drv->vreg_mx);
regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
}
return ret;
}
static void pil_msa_pbl_remove_proxy_votes(struct pil_desc *pil)
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
pil_q6v5_remove_proxy_votes(pil);
regulator_disable(drv->vreg_mx);
regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
}
struct pil_reset_ops pil_msa_pbl_ops = {
.proxy_vote = pil_msa_pbl_make_proxy_votes,
.proxy_unvote = pil_msa_pbl_remove_proxy_votes,
.auth_and_reset = pil_msa_pbl_reset,
.shutdown = pil_msa_pbl_shutdown,
};
static int pil_msa_mba_init_image(struct pil_desc *pil,
const u8 *metadata, size_t size)
{
struct mba_data *drv = container_of(pil, struct mba_data, desc);
void *mdata_virt;
dma_addr_t mdata_phys;
s32 status;
int ret;
/* Make metadata physically contiguous and 4K aligned. */
mdata_virt = dma_alloc_coherent(pil->dev, size, &mdata_phys,
GFP_KERNEL);
if (!mdata_virt) {
dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
return -ENOMEM;
}
memcpy(mdata_virt, metadata, size);
/* wmb() ensures copy completes prior to starting authentication. */
wmb();
/* Initialize length counter to 0 */
writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
/* Pass address of meta-data to the MBA and perform authentication */
writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
if (ret) {
dev_err(pil->dev, "MBA authentication of headers timed out\n");
} else if (status < 0) {
dev_err(pil->dev, "MBA returned error %d for headers\n",
status);
ret = -EINVAL;
}
dma_free_coherent(pil->dev, size, mdata_virt, mdata_phys);
return ret;
}
static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
size_t size)
{
struct mba_data *drv = container_of(pil, struct mba_data, desc);
s32 status;
u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
/* Begin image authentication */
if (img_length == 0) {
writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
}
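	/* The MBA presumably authenticates segments incrementally as the
	 * code length counter below grows.
	 */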
/* Increment length counter */
img_length += size;
writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
if (status < 0) {
dev_err(pil->dev, "MBA returned error %d\n", status);
return -EINVAL;
}
return 0;
}
static int pil_msa_mba_auth(struct pil_desc *pil)
{
struct mba_data *drv = container_of(pil, struct mba_data, desc);
int ret;
s32 status;
/* Wait for all segments to be authenticated or an error to occur */
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status == STATUS_AUTH_COMPLETE || status < 0,
50, modem_auth_timeout_ms * 1000);
if (ret) {
dev_err(pil->dev, "MBA authentication of image timed out\n");
} else if (status < 0) {
dev_err(pil->dev, "MBA returned error %d for image\n", status);
ret = -EINVAL;
}
return ret;
}
struct pil_reset_ops pil_msa_mba_ops = {
.init_image = pil_msa_mba_init_image,
.verify_blob = pil_msa_mba_verify_blob,
.auth_and_reset = pil_msa_mba_auth,
};
| gpl-2.0 |
broodplank/samsung-kernel-jfltexx | drivers/tty/serial/ifx6x60.c | 1519 | 37312 | /****************************************************************************
*
* Driver for the IFX 6x60 spi modem.
*
* Copyright (C) 2008 Option International
* Copyright (C) 2008 Filip Aben <f.aben@option.com>
* Denis Joseph Barrow <d.barow@option.com>
* Jan Dumon <j.dumon@option.com>
*
* Copyright (C) 2009, 2010 Intel Corp
* Russ Gorby <russ.gorby@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA
*
* Driver modified by Intel from Option gtm501l_spi.c
*
* Notes
* o The driver currently assumes a single device only. If you need to
* change this then look for saved_ifx_dev and add a device lookup
* o The driver is intended to be big-endian safe but has never been
* tested that way (no suitable hardware). There are a couple of FIXME
* notes by areas that may need addressing
* o Some of the GPIO naming/setup assumptions may need revisiting if
* you need to use this driver for another platform.
*
*****************************************************************************/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/kfifo.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/serial.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/rfkill.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/dmapool.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/spi/ifx_modem.h>
#include <linux/delay.h>
#include "ifx6x60.h"
#define IFX_SPI_MORE_MASK 0x10
#define IFX_SPI_MORE_BIT 12 /* bit position in u16 */
#define IFX_SPI_CTS_BIT 13 /* bit position in u16 */
#define IFX_SPI_MODE SPI_MODE_1
#define IFX_SPI_TTY_ID 0
#define IFX_SPI_TIMEOUT_SEC 2
#define IFX_SPI_HEADER_0 (-1)
#define IFX_SPI_HEADER_F (-2)
/* forward reference */
static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev);
/* local variables */
static int spi_bpw = 16; /* 8, 16 or 32 bit word length */
static struct tty_driver *tty_drv;
static struct ifx_spi_device *saved_ifx_dev;
static struct lock_class_key ifx_spi_key;
/* GPIO/GPE settings */
/**
* mrdy_set_high - set MRDY GPIO
* @ifx: device we are controlling
*
*/
static inline void mrdy_set_high(struct ifx_spi_device *ifx)
{
gpio_set_value(ifx->gpio.mrdy, 1);
}
/**
* mrdy_set_low - clear MRDY GPIO
* @ifx: device we are controlling
*
*/
static inline void mrdy_set_low(struct ifx_spi_device *ifx)
{
gpio_set_value(ifx->gpio.mrdy, 0);
}
/**
* ifx_spi_power_state_set
* @ifx_dev: our SPI device
* @val: bits to set
*
* Set bit in power status and signal power system if status becomes non-0
*/
static void
ifx_spi_power_state_set(struct ifx_spi_device *ifx_dev, unsigned char val)
{
unsigned long flags;
spin_lock_irqsave(&ifx_dev->power_lock, flags);
/*
* if power status is already non-0, just update, else
* tell power system
*/
if (!ifx_dev->power_status)
pm_runtime_get(&ifx_dev->spi_dev->dev);
ifx_dev->power_status |= val;
spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
}
/**
* ifx_spi_power_state_clear - clear power bit
* @ifx_dev: our SPI device
* @val: bits to clear
*
* clear bit in power status and signal power system if status becomes 0
*/
static void
ifx_spi_power_state_clear(struct ifx_spi_device *ifx_dev, unsigned char val)
{
unsigned long flags;
spin_lock_irqsave(&ifx_dev->power_lock, flags);
if (ifx_dev->power_status) {
ifx_dev->power_status &= ~val;
if (!ifx_dev->power_status)
pm_runtime_put(&ifx_dev->spi_dev->dev);
}
spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
}
/**
* swap_buf
* @buf: our buffer
 * @len: number of bytes (not words) in the buffer
* @end: end of buffer
*
* Swap the contents of a buffer into big endian format
*/
static inline void swap_buf(u16 *buf, int len, void *end)
{
int n;
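	/* Round the byte count up to a whole number of 16-bit words. */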
len = ((len + 1) >> 1);
if ((void *)&buf[len] > end) {
pr_err("swap_buf: swap exceeds boundary (%p > %p)!",
&buf[len], end);
return;
}
for (n = 0; n < len; n++) {
*buf = cpu_to_be16(*buf);
buf++;
}
}
/**
* mrdy_assert - assert MRDY line
* @ifx_dev: our SPI device
*
* Assert mrdy and set timer to wait for SRDY interrupt, if SRDY is low
* now.
*
* FIXME: Can SRDY even go high as we are running this code ?
*/
static void mrdy_assert(struct ifx_spi_device *ifx_dev)
{
int val = gpio_get_value(ifx_dev->gpio.srdy);
if (!val) {
if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING,
&ifx_dev->flags)) {
ifx_dev->spi_timer.expires =
jiffies + IFX_SPI_TIMEOUT_SEC*HZ;
add_timer(&ifx_dev->spi_timer);
}
}
ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_DATA_PENDING);
mrdy_set_high(ifx_dev);
}
/**
 * ifx_spi_ttyhangup - hang up an IFX device
* @ifx_dev: our SPI device
*
* Hang up the tty attached to the IFX device if one is currently
* open. If not take no action
*/
static void ifx_spi_ttyhangup(struct ifx_spi_device *ifx_dev)
{
struct tty_port *pport = &ifx_dev->tty_port;
struct tty_struct *tty = tty_port_tty_get(pport);
if (tty) {
tty_hangup(tty);
tty_kref_put(tty);
}
}
/**
* ifx_spi_timeout - SPI timeout
* @arg: our SPI device
*
* The SPI has timed out: hang up the tty. Users will then see a hangup
* and error events.
*/
static void ifx_spi_timeout(unsigned long arg)
{
struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
ifx_spi_ttyhangup(ifx_dev);
mrdy_set_low(ifx_dev);
clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
}
/* char/tty operations */
/**
* ifx_spi_tiocmget - get modem lines
* @tty: our tty device
* @filp: file handle issuing the request
*
* Map the signal state into Linux modem flags and report the value
* in Linux terms
*/
static int ifx_spi_tiocmget(struct tty_struct *tty)
{
unsigned int value;
struct ifx_spi_device *ifx_dev = tty->driver_data;
value =
(test_bit(IFX_SPI_RTS, &ifx_dev->signal_state) ? TIOCM_RTS : 0) |
(test_bit(IFX_SPI_DTR, &ifx_dev->signal_state) ? TIOCM_DTR : 0) |
(test_bit(IFX_SPI_CTS, &ifx_dev->signal_state) ? TIOCM_CTS : 0) |
(test_bit(IFX_SPI_DSR, &ifx_dev->signal_state) ? TIOCM_DSR : 0) |
(test_bit(IFX_SPI_DCD, &ifx_dev->signal_state) ? TIOCM_CAR : 0) |
(test_bit(IFX_SPI_RI, &ifx_dev->signal_state) ? TIOCM_RNG : 0);
return value;
}
/**
* ifx_spi_tiocmset - set modem bits
* @tty: the tty structure
* @set: bits to set
* @clear: bits to clear
*
* The IFX6x60 only supports DTR and RTS. Set them accordingly
* and flag that an update to the modem is needed.
*
 * FIXME: do we need to kick the transfers when we do this?
*/
static int ifx_spi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct ifx_spi_device *ifx_dev = tty->driver_data;
if (set & TIOCM_RTS)
set_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
if (set & TIOCM_DTR)
set_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
if (clear & TIOCM_RTS)
clear_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
if (clear & TIOCM_DTR)
clear_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
set_bit(IFX_SPI_UPDATE, &ifx_dev->signal_state);
return 0;
}
/**
* ifx_spi_open - called on tty open
* @tty: our tty device
* @filp: file handle being associated with the tty
*
* Open the tty interface. We let the tty_port layer do all the work
* for us.
*
* FIXME: Remove single device assumption and saved_ifx_dev
*/
static int ifx_spi_open(struct tty_struct *tty, struct file *filp)
{
return tty_port_open(&saved_ifx_dev->tty_port, tty, filp);
}
/**
* ifx_spi_close - called when our tty closes
* @tty: the tty being closed
* @filp: the file handle being closed
*
* Perform the close of the tty. We use the tty_port layer to do all
* our hard work.
*/
static void ifx_spi_close(struct tty_struct *tty, struct file *filp)
{
struct ifx_spi_device *ifx_dev = tty->driver_data;
tty_port_close(&ifx_dev->tty_port, tty, filp);
/* FIXME: should we do an ifx_spi_reset here ? */
}
/**
 * ifx_spi_decode_spi_header - decode received header
* @buffer: the received data
* @length: decoded length
* @more: decoded more flag
* @received_cts: status of cts we received
*
 * Note how received_cts is handled: if the header is all F it is left
 * as it was; if the header is all 0 it is set to 0; otherwise it is
 * taken from the incoming header.
*
* FIXME: endianness
*/
static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length,
unsigned char *more, unsigned char *received_cts)
{
u16 h1;
u16 h2;
u16 *in_buffer = (u16 *)buffer;
h1 = *in_buffer;
h2 = *(in_buffer+1);
if (h1 == 0 && h2 == 0) {
*received_cts = 0;
return IFX_SPI_HEADER_0;
} else if (h1 == 0xffff && h2 == 0xffff) {
/* spi_slave_cts remains as it was */
return IFX_SPI_HEADER_F;
}
*length = h1 & 0xfff; /* upper bits of byte are flags */
*more = (buffer[1] >> IFX_SPI_MORE_BIT) & 1;
*received_cts = (buffer[3] >> IFX_SPI_CTS_BIT) & 1;
return 0;
}
/**
 * ifx_spi_setup_spi_header - set header fields
* @txbuffer: pointer to start of SPI buffer
* @tx_count: bytes
* @more: indicate if more to follow
*
* Format up an SPI header for a transfer
*
* FIXME: endianness?
*/
static void ifx_spi_setup_spi_header(unsigned char *txbuffer, int tx_count,
unsigned char more)
{
*(u16 *)(txbuffer) = tx_count;
*(u16 *)(txbuffer+2) = IFX_SPI_PAYLOAD_SIZE;
txbuffer[1] |= (more << IFX_SPI_MORE_BIT) & IFX_SPI_MORE_MASK;
}
/**
* ifx_spi_wakeup_serial - SPI space made
* @port_data: our SPI device
*
* We have emptied the FIFO enough that we want to get more data
* queued into it. Poke the line discipline via tty_wakeup so that
* it will feed us more bits
*/
static void ifx_spi_wakeup_serial(struct ifx_spi_device *ifx_dev)
{
struct tty_struct *tty;
tty = tty_port_tty_get(&ifx_dev->tty_port);
if (!tty)
return;
tty_wakeup(tty);
tty_kref_put(tty);
}
/**
* ifx_spi_prepare_tx_buffer - prepare transmit frame
* @ifx_dev: our SPI device
*
 * The transmit buffer needs a header and various other bits of
* information followed by as much data as we can pull from the FIFO
* and transfer. This function formats up a suitable buffer in the
* ifx_dev->tx_buffer
*
* FIXME: performance - should we wake the tty when the queue is half
* empty ?
*/
static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
{
int temp_count;
int queue_length;
int tx_count;
unsigned char *tx_buffer;
tx_buffer = ifx_dev->tx_buffer;
memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
/* make room for required SPI header */
tx_buffer += IFX_SPI_HEADER_OVERHEAD;
tx_count = IFX_SPI_HEADER_OVERHEAD;
/* clear to signal no more data if this turns out to be the
* last buffer sent in a sequence */
ifx_dev->spi_more = 0;
/* if modem cts is set, just send empty buffer */
if (!ifx_dev->spi_slave_cts) {
/* see if there's tx data */
queue_length = kfifo_len(&ifx_dev->tx_fifo);
if (queue_length != 0) {
/* data to mux -- see if there's room for it */
temp_count = min(queue_length, IFX_SPI_PAYLOAD_SIZE);
temp_count = kfifo_out_locked(&ifx_dev->tx_fifo,
tx_buffer, temp_count,
&ifx_dev->fifo_lock);
/* update buffer pointer and data count in message */
tx_buffer += temp_count;
tx_count += temp_count;
if (temp_count == queue_length)
/* poke port to get more data */
ifx_spi_wakeup_serial(ifx_dev);
else /* more data in port, use next SPI message */
ifx_dev->spi_more = 1;
}
}
/* have data and info for header -- set up SPI header in buffer */
/* spi header needs payload size, not entire buffer size */
ifx_spi_setup_spi_header(ifx_dev->tx_buffer,
tx_count-IFX_SPI_HEADER_OVERHEAD,
ifx_dev->spi_more);
/* swap actual data in the buffer */
swap_buf((u16 *)(ifx_dev->tx_buffer), tx_count,
&ifx_dev->tx_buffer[IFX_SPI_TRANSFER_SIZE]);
return tx_count;
}
/**
* ifx_spi_write - line discipline write
* @tty: our tty device
* @buf: pointer to buffer to write (kernel space)
* @count: size of buffer
*
 * Write the characters we have been given into the FIFO. If the device
 * is not active then activate it; when the SRDY line is asserted back,
 * this will commence I/O.
*/
static int ifx_spi_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct ifx_spi_device *ifx_dev = tty->driver_data;
unsigned char *tmp_buf = (unsigned char *)buf;
int tx_count = kfifo_in_locked(&ifx_dev->tx_fifo, tmp_buf, count,
&ifx_dev->fifo_lock);
mrdy_assert(ifx_dev);
return tx_count;
}
/**
 * ifx_spi_write_room - line discipline helper
* @tty: our tty device
*
* Report how much data we can accept before we drop bytes. As we use
* a simple FIFO this is nice and easy.
*/
static int ifx_spi_write_room(struct tty_struct *tty)
{
struct ifx_spi_device *ifx_dev = tty->driver_data;
return IFX_SPI_FIFO_SIZE - kfifo_len(&ifx_dev->tx_fifo);
}
/**
* ifx_spi_chars_in_buffer - line discipline helper
* @tty: our tty device
*
* Report how many characters we have buffered. In our case this is the
* number of bytes sitting in our transmit FIFO.
*/
static int ifx_spi_chars_in_buffer(struct tty_struct *tty)
{
struct ifx_spi_device *ifx_dev = tty->driver_data;
return kfifo_len(&ifx_dev->tx_fifo);
}
/**
 * ifx_spi_hangup
 * @tty: the tty being hung up
*
* tty port hang up. Called when tty_hangup processing is invoked either
* by loss of carrier, or by software (eg vhangup). Serialized against
* activate/shutdown by the tty layer.
*/
static void ifx_spi_hangup(struct tty_struct *tty)
{
struct ifx_spi_device *ifx_dev = tty->driver_data;
tty_port_hangup(&ifx_dev->tty_port);
}
/**
* ifx_port_activate
 * @port: our tty port
 * @tty: the tty being attached to the port
*
* tty port activate method - called for first open. Serialized
* with hangup and shutdown by the tty layer.
*/
static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
{
struct ifx_spi_device *ifx_dev =
container_of(port, struct ifx_spi_device, tty_port);
/* clear any old data; can't do this in 'close' */
kfifo_reset(&ifx_dev->tx_fifo);
/* put port data into this tty */
tty->driver_data = ifx_dev;
/* allows flip string push from int context */
tty->low_latency = 1;
return 0;
}
/**
* ifx_port_shutdown
* @port: our tty port
*
* tty port shutdown method - called for last port close. Serialized
* with hangup and activate by the tty layer.
*/
static void ifx_port_shutdown(struct tty_port *port)
{
struct ifx_spi_device *ifx_dev =
container_of(port, struct ifx_spi_device, tty_port);
mrdy_set_low(ifx_dev);
del_timer(&ifx_dev->spi_timer);
clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
tasklet_kill(&ifx_dev->io_work_tasklet);
}
static const struct tty_port_operations ifx_tty_port_ops = {
.activate = ifx_port_activate,
.shutdown = ifx_port_shutdown,
};
static const struct tty_operations ifx_spi_serial_ops = {
.open = ifx_spi_open,
.close = ifx_spi_close,
.write = ifx_spi_write,
.hangup = ifx_spi_hangup,
.write_room = ifx_spi_write_room,
.chars_in_buffer = ifx_spi_chars_in_buffer,
.tiocmget = ifx_spi_tiocmget,
.tiocmset = ifx_spi_tiocmset,
};
/**
 * ifx_spi_insert_flip_string - queue received data
 * @ifx_dev: our SPI device
 * @chars: buffer we have received
 * @size: number of chars received
 *
 * Queue bytes to the tty assuming the tty side is currently open. If
 * not, discard the data.
*/
static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev,
unsigned char *chars, size_t size)
{
struct tty_struct *tty = tty_port_tty_get(&ifx_dev->tty_port);
if (!tty)
return;
tty_insert_flip_string(tty, chars, size);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
/**
* ifx_spi_complete - SPI transfer completed
* @ctx: our SPI device
*
* An SPI transfer has completed. Process any received data and kick off
* any further transmits we can commence.
*/
static void ifx_spi_complete(void *ctx)
{
struct ifx_spi_device *ifx_dev = ctx;
struct tty_struct *tty;
struct tty_ldisc *ldisc = NULL;
int length;
int actual_length;
unsigned char more;
unsigned char cts;
int local_write_pending = 0;
int queue_length;
int srdy;
int decode_result;
mrdy_set_low(ifx_dev);
if (!ifx_dev->spi_msg.status) {
/* check header validity, get comm flags */
swap_buf((u16 *)ifx_dev->rx_buffer, IFX_SPI_HEADER_OVERHEAD,
&ifx_dev->rx_buffer[IFX_SPI_HEADER_OVERHEAD]);
decode_result = ifx_spi_decode_spi_header(ifx_dev->rx_buffer,
&length, &more, &cts);
if (decode_result == IFX_SPI_HEADER_0) {
dev_dbg(&ifx_dev->spi_dev->dev,
"ignore input: invalid header 0");
ifx_dev->spi_slave_cts = 0;
goto complete_exit;
} else if (decode_result == IFX_SPI_HEADER_F) {
dev_dbg(&ifx_dev->spi_dev->dev,
"ignore input: invalid header F");
goto complete_exit;
}
ifx_dev->spi_slave_cts = cts;
actual_length = min((unsigned int)length,
ifx_dev->spi_msg.actual_length);
swap_buf((u16 *)(ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD),
actual_length,
&ifx_dev->rx_buffer[IFX_SPI_TRANSFER_SIZE]);
ifx_spi_insert_flip_string(
ifx_dev,
ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD,
(size_t)actual_length);
} else {
dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d",
ifx_dev->spi_msg.status);
}
complete_exit:
if (ifx_dev->write_pending) {
ifx_dev->write_pending = 0;
local_write_pending = 1;
}
clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags));
queue_length = kfifo_len(&ifx_dev->tx_fifo);
srdy = gpio_get_value(ifx_dev->gpio.srdy);
if (!srdy)
ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY);
/* schedule output if there is more to do */
if (test_and_clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags))
tasklet_schedule(&ifx_dev->io_work_tasklet);
else {
if (more || ifx_dev->spi_more || queue_length > 0 ||
local_write_pending) {
if (ifx_dev->spi_slave_cts) {
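			/* Modem has flow-controlled us; only re-assert MRDY
			 * when it signalled more incoming data.
			 */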
if (more)
mrdy_assert(ifx_dev);
} else
mrdy_assert(ifx_dev);
} else {
/*
* poke line discipline driver if any for more data
* may or may not get more data to write
* for now, say not busy
*/
ifx_spi_power_state_clear(ifx_dev,
IFX_SPI_POWER_DATA_PENDING);
tty = tty_port_tty_get(&ifx_dev->tty_port);
if (tty) {
ldisc = tty_ldisc_ref(tty);
if (ldisc) {
ldisc->ops->write_wakeup(tty);
tty_ldisc_deref(ldisc);
}
tty_kref_put(tty);
}
}
}
}
/**
 * ifx_spi_io - I/O tasklet
* @data: our SPI device
*
* Queue data for transmission if possible and then kick off the
* transfer.
*/
static void ifx_spi_io(unsigned long data)
{
int retval;
struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *) data;
if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) {
if (ifx_dev->gpio.unack_srdy_int_nb > 0)
ifx_dev->gpio.unack_srdy_int_nb--;
ifx_spi_prepare_tx_buffer(ifx_dev);
spi_message_init(&ifx_dev->spi_msg);
INIT_LIST_HEAD(&ifx_dev->spi_msg.queue);
ifx_dev->spi_msg.context = ifx_dev;
ifx_dev->spi_msg.complete = ifx_spi_complete;
/* set up our spi transfer */
/* note len is BYTES, not transfers */
ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE;
ifx_dev->spi_xfer.cs_change = 0;
ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
/* ifx_dev->spi_xfer.speed_hz = 390625; */
ifx_dev->spi_xfer.bits_per_word = spi_bpw;
ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
/*
* setup dma pointers
*/
if (ifx_dev->use_dma) {
ifx_dev->spi_msg.is_dma_mapped = 1;
ifx_dev->tx_dma = ifx_dev->tx_bus;
ifx_dev->rx_dma = ifx_dev->rx_bus;
ifx_dev->spi_xfer.tx_dma = ifx_dev->tx_dma;
ifx_dev->spi_xfer.rx_dma = ifx_dev->rx_dma;
} else {
ifx_dev->spi_msg.is_dma_mapped = 0;
ifx_dev->tx_dma = (dma_addr_t)0;
ifx_dev->rx_dma = (dma_addr_t)0;
ifx_dev->spi_xfer.tx_dma = (dma_addr_t)0;
ifx_dev->spi_xfer.rx_dma = (dma_addr_t)0;
}
spi_message_add_tail(&ifx_dev->spi_xfer, &ifx_dev->spi_msg);
/* Assert MRDY. This may have already been done by the write
* routine.
*/
mrdy_assert(ifx_dev);
retval = spi_async(ifx_dev->spi_dev, &ifx_dev->spi_msg);
if (retval) {
clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS,
&ifx_dev->flags);
tasklet_schedule(&ifx_dev->io_work_tasklet);
return;
}
} else
ifx_dev->write_pending = 1;
}
/**
* ifx_spi_free_port - free up the tty side
* @ifx_dev: IFX device going away
*
* Unregister and free up a port when the device goes away
*/
static void ifx_spi_free_port(struct ifx_spi_device *ifx_dev)
{
if (ifx_dev->tty_dev)
tty_unregister_device(tty_drv, ifx_dev->minor);
kfifo_free(&ifx_dev->tx_fifo);
}
/**
* ifx_spi_create_port - create a new port
* @ifx_dev: our spi device
*
* Allocate and initialise the tty port that goes with this interface
* and add it to the tty layer so that it can be opened.
*/
static int ifx_spi_create_port(struct ifx_spi_device *ifx_dev)
{
int ret = 0;
struct tty_port *pport = &ifx_dev->tty_port;
spin_lock_init(&ifx_dev->fifo_lock);
lockdep_set_class_and_subclass(&ifx_dev->fifo_lock,
&ifx_spi_key, 0);
if (kfifo_alloc(&ifx_dev->tx_fifo, IFX_SPI_FIFO_SIZE, GFP_KERNEL)) {
ret = -ENOMEM;
goto error_ret;
}
tty_port_init(pport);
pport->ops = &ifx_tty_port_ops;
ifx_dev->minor = IFX_SPI_TTY_ID;
ifx_dev->tty_dev = tty_register_device(tty_drv, ifx_dev->minor,
&ifx_dev->spi_dev->dev);
if (IS_ERR(ifx_dev->tty_dev)) {
dev_dbg(&ifx_dev->spi_dev->dev,
"%s: registering tty device failed", __func__);
ret = PTR_ERR(ifx_dev->tty_dev);
goto error_ret;
}
return 0;
error_ret:
ifx_spi_free_port(ifx_dev);
return ret;
}
/**
* ifx_spi_handle_srdy - handle SRDY
* @ifx_dev: device asserting SRDY
*
* Check our device state and see what we need to kick off when SRDY
* is asserted. This usually means killing the timer and firing off the
* I/O processing.
*/
static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev)
{
if (test_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) {
del_timer_sync(&ifx_dev->spi_timer);
clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
}
ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_SRDY);
if (!test_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags))
tasklet_schedule(&ifx_dev->io_work_tasklet);
else
set_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags);
}
/**
* ifx_spi_srdy_interrupt - SRDY asserted
* @irq: our IRQ number
* @dev: our ifx device
*
* The modem asserted SRDY. Handle the srdy event
*/
static irqreturn_t ifx_spi_srdy_interrupt(int irq, void *dev)
{
struct ifx_spi_device *ifx_dev = dev;
ifx_dev->gpio.unack_srdy_int_nb++;
ifx_spi_handle_srdy(ifx_dev);
return IRQ_HANDLED;
}
/**
* ifx_spi_reset_interrupt - Modem has changed reset state
* @irq: interrupt number
* @dev: our device pointer
*
* The modem has either entered or left reset state. Check the GPIO
* line to see which.
*
* FIXME: review locking on MR_INPROGRESS versus
* parallel unsolicited reset/solicited reset
*/
static irqreturn_t ifx_spi_reset_interrupt(int irq, void *dev)
{
struct ifx_spi_device *ifx_dev = dev;
int val = gpio_get_value(ifx_dev->gpio.reset_out);
int solreset = test_bit(MR_START, &ifx_dev->mdm_reset_state);
if (val == 0) {
/* entered reset */
set_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
if (!solreset) {
/* unsolicited reset */
ifx_spi_ttyhangup(ifx_dev);
}
} else {
/* exited reset */
clear_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
if (solreset) {
set_bit(MR_COMPLETE, &ifx_dev->mdm_reset_state);
wake_up(&ifx_dev->mdm_reset_wait);
}
}
return IRQ_HANDLED;
}
/**
* ifx_spi_free_device - free device
* @ifx_dev: device to free
*
* Free the IFX device
*/
static void ifx_spi_free_device(struct ifx_spi_device *ifx_dev)
{
ifx_spi_free_port(ifx_dev);
dma_free_coherent(&ifx_dev->spi_dev->dev,
IFX_SPI_TRANSFER_SIZE,
ifx_dev->tx_buffer,
ifx_dev->tx_bus);
dma_free_coherent(&ifx_dev->spi_dev->dev,
IFX_SPI_TRANSFER_SIZE,
ifx_dev->rx_buffer,
ifx_dev->rx_bus);
}
/**
* ifx_spi_reset - reset modem
* @ifx_dev: modem to reset
*
* Perform a reset on the modem
*/
static int ifx_spi_reset(struct ifx_spi_device *ifx_dev)
{
int ret;
/*
* set up modem power, reset
*
* delays are required on some platforms for the modem
* to reset properly
*/
set_bit(MR_START, &ifx_dev->mdm_reset_state);
gpio_set_value(ifx_dev->gpio.po, 0);
gpio_set_value(ifx_dev->gpio.reset, 0);
msleep(25);
gpio_set_value(ifx_dev->gpio.reset, 1);
msleep(1);
gpio_set_value(ifx_dev->gpio.po, 1);
msleep(1);
gpio_set_value(ifx_dev->gpio.po, 0);
ret = wait_event_timeout(ifx_dev->mdm_reset_wait,
test_bit(MR_COMPLETE,
&ifx_dev->mdm_reset_state),
IFX_RESET_TIMEOUT);
if (!ret)
dev_warn(&ifx_dev->spi_dev->dev, "Modem reset timeout: (state:%lx)",
ifx_dev->mdm_reset_state);
ifx_dev->mdm_reset_state = 0;
return ret;
}
/**
* ifx_spi_spi_probe - probe callback
* @spi: our possible matching SPI device
*
* Probe for a 6x60 modem on SPI bus. Perform any needed device and
* GPIO setup.
*
* FIXME:
* - Support for multiple devices
* - Split out MID specific GPIO handling eventually
*/
static int ifx_spi_spi_probe(struct spi_device *spi)
{
int ret;
int srdy;
struct ifx_modem_platform_data *pl_data;
struct ifx_spi_device *ifx_dev;
if (saved_ifx_dev) {
dev_dbg(&spi->dev, "ignoring subsequent detection");
return -ENODEV;
}
pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data;
if (!pl_data) {
dev_err(&spi->dev, "missing platform data!");
return -ENODEV;
}
/* initialize structure to hold our device variables */
ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL);
if (!ifx_dev) {
dev_err(&spi->dev, "spi device allocation failed");
return -ENOMEM;
}
saved_ifx_dev = ifx_dev;
ifx_dev->spi_dev = spi;
clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
spin_lock_init(&ifx_dev->write_lock);
spin_lock_init(&ifx_dev->power_lock);
ifx_dev->power_status = 0;
init_timer(&ifx_dev->spi_timer);
ifx_dev->spi_timer.function = ifx_spi_timeout;
ifx_dev->spi_timer.data = (unsigned long)ifx_dev;
ifx_dev->modem = pl_data->modem_type;
ifx_dev->use_dma = pl_data->use_dma;
ifx_dev->max_hz = pl_data->max_hz;
/* initialize spi mode, etc */
spi->max_speed_hz = ifx_dev->max_hz;
spi->mode = IFX_SPI_MODE | (SPI_LOOP & spi->mode);
spi->bits_per_word = spi_bpw;
ret = spi_setup(spi);
if (ret) {
dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
return -ENODEV;
}
/* ensure SPI protocol flags are initialized to enable transfer */
ifx_dev->spi_more = 0;
ifx_dev->spi_slave_cts = 0;
	/* initialize transfer and DMA buffers */
ifx_dev->tx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent,
IFX_SPI_TRANSFER_SIZE,
&ifx_dev->tx_bus,
GFP_KERNEL);
if (!ifx_dev->tx_buffer) {
dev_err(&spi->dev, "DMA-TX buffer allocation failed");
ret = -ENOMEM;
goto error_ret;
}
ifx_dev->rx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent,
IFX_SPI_TRANSFER_SIZE,
&ifx_dev->rx_bus,
GFP_KERNEL);
if (!ifx_dev->rx_buffer) {
dev_err(&spi->dev, "DMA-RX buffer allocation failed");
ret = -ENOMEM;
goto error_ret;
}
/* initialize waitq for modem reset */
init_waitqueue_head(&ifx_dev->mdm_reset_wait);
spi_set_drvdata(spi, ifx_dev);
tasklet_init(&ifx_dev->io_work_tasklet, ifx_spi_io,
(unsigned long)ifx_dev);
set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
/* create our tty port */
ret = ifx_spi_create_port(ifx_dev);
if (ret != 0) {
dev_err(&spi->dev, "create default tty port failed");
goto error_ret;
}
ifx_dev->gpio.reset = pl_data->rst_pmu;
ifx_dev->gpio.po = pl_data->pwr_on;
ifx_dev->gpio.mrdy = pl_data->mrdy;
ifx_dev->gpio.srdy = pl_data->srdy;
ifx_dev->gpio.reset_out = pl_data->rst_out;
dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d",
ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy,
ifx_dev->gpio.srdy, ifx_dev->gpio.reset_out);
/* Configure gpios */
ret = gpio_request(ifx_dev->gpio.reset, "ifxModem");
if (ret < 0) {
dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET)",
ifx_dev->gpio.reset);
goto error_ret;
}
ret += gpio_direction_output(ifx_dev->gpio.reset, 0);
ret += gpio_export(ifx_dev->gpio.reset, 1);
if (ret) {
dev_err(&spi->dev, "Unable to configure GPIO%d (RESET)",
ifx_dev->gpio.reset);
ret = -EBUSY;
goto error_ret2;
}
ret = gpio_request(ifx_dev->gpio.po, "ifxModem");
ret += gpio_direction_output(ifx_dev->gpio.po, 0);
ret += gpio_export(ifx_dev->gpio.po, 1);
if (ret) {
dev_err(&spi->dev, "Unable to configure GPIO%d (ON)",
ifx_dev->gpio.po);
ret = -EBUSY;
goto error_ret3;
}
ret = gpio_request(ifx_dev->gpio.mrdy, "ifxModem");
if (ret < 0) {
dev_err(&spi->dev, "Unable to allocate GPIO%d (MRDY)",
ifx_dev->gpio.mrdy);
goto error_ret3;
}
ret += gpio_export(ifx_dev->gpio.mrdy, 1);
ret += gpio_direction_output(ifx_dev->gpio.mrdy, 0);
if (ret) {
dev_err(&spi->dev, "Unable to configure GPIO%d (MRDY)",
ifx_dev->gpio.mrdy);
ret = -EBUSY;
goto error_ret4;
}
ret = gpio_request(ifx_dev->gpio.srdy, "ifxModem");
if (ret < 0) {
dev_err(&spi->dev, "Unable to allocate GPIO%d (SRDY)",
ifx_dev->gpio.srdy);
ret = -EBUSY;
goto error_ret4;
}
ret += gpio_export(ifx_dev->gpio.srdy, 1);
ret += gpio_direction_input(ifx_dev->gpio.srdy);
if (ret) {
dev_err(&spi->dev, "Unable to configure GPIO%d (SRDY)",
ifx_dev->gpio.srdy);
ret = -EBUSY;
goto error_ret5;
}
ret = gpio_request(ifx_dev->gpio.reset_out, "ifxModem");
if (ret < 0) {
dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET_OUT)",
ifx_dev->gpio.reset_out);
goto error_ret5;
}
ret += gpio_export(ifx_dev->gpio.reset_out, 1);
ret += gpio_direction_input(ifx_dev->gpio.reset_out);
if (ret) {
dev_err(&spi->dev, "Unable to configure GPIO%d (RESET_OUT)",
ifx_dev->gpio.reset_out);
ret = -EBUSY;
goto error_ret6;
}
ret = request_irq(gpio_to_irq(ifx_dev->gpio.reset_out),
ifx_spi_reset_interrupt,
IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME,
(void *)ifx_dev);
if (ret) {
dev_err(&spi->dev, "Unable to get irq %x\n",
gpio_to_irq(ifx_dev->gpio.reset_out));
goto error_ret6;
}
ret = ifx_spi_reset(ifx_dev);
ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy),
ifx_spi_srdy_interrupt,
IRQF_TRIGGER_RISING, DRVNAME,
(void *)ifx_dev);
if (ret) {
dev_err(&spi->dev, "Unable to get irq %x",
gpio_to_irq(ifx_dev->gpio.srdy));
goto error_ret7;
}
/* set pm runtime power state and register with power system */
pm_runtime_set_active(&spi->dev);
pm_runtime_enable(&spi->dev);
/* handle case that modem is already signaling SRDY */
	/* no outgoing tty open at this point; this just satisfies the
* modem's read and should reset communication properly
*/
srdy = gpio_get_value(ifx_dev->gpio.srdy);
if (srdy) {
mrdy_assert(ifx_dev);
ifx_spi_handle_srdy(ifx_dev);
} else
mrdy_set_low(ifx_dev);
return 0;
error_ret7:
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
error_ret6:
gpio_free(ifx_dev->gpio.srdy);
error_ret5:
gpio_free(ifx_dev->gpio.mrdy);
error_ret4:
gpio_free(ifx_dev->gpio.reset);
error_ret3:
gpio_free(ifx_dev->gpio.po);
error_ret2:
gpio_free(ifx_dev->gpio.reset_out);
error_ret:
ifx_spi_free_device(ifx_dev);
saved_ifx_dev = NULL;
return ret;
}
/**
* ifx_spi_spi_remove - SPI device was removed
* @spi: SPI device
*
* FIXME: We should be shutting the device down here not in
* the module unload path.
*/
static int ifx_spi_spi_remove(struct spi_device *spi)
{
struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
/* stop activity */
tasklet_kill(&ifx_dev->io_work_tasklet);
/* free irq */
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), (void *)ifx_dev);
gpio_free(ifx_dev->gpio.srdy);
gpio_free(ifx_dev->gpio.mrdy);
gpio_free(ifx_dev->gpio.reset);
gpio_free(ifx_dev->gpio.po);
gpio_free(ifx_dev->gpio.reset_out);
/* free allocations */
ifx_spi_free_device(ifx_dev);
saved_ifx_dev = NULL;
return 0;
}
/**
* ifx_spi_spi_shutdown - called on SPI shutdown
* @spi: SPI device
*
* No action needs to be taken here
*/
static void ifx_spi_spi_shutdown(struct spi_device *spi)
{
}
/*
* various suspends and resumes have nothing to do
* no hardware to save state for
*/
/**
* ifx_spi_spi_suspend - suspend SPI on system suspend
 * @spi: device being suspended
 * @msg: power management message
*
* Suspend the SPI side. No action needed on Intel MID platforms, may
* need extending for other systems.
*/
static int ifx_spi_spi_suspend(struct spi_device *spi, pm_message_t msg)
{
return 0;
}
/**
* ifx_spi_spi_resume - resume SPI side on system resume
 * @spi: device being resumed
*
 * Resume the SPI side. No action needed on Intel MID platforms, may
* need extending for other systems.
*/
static int ifx_spi_spi_resume(struct spi_device *spi)
{
return 0;
}
/**
* ifx_spi_pm_suspend - suspend modem on system suspend
* @dev: device being suspended
*
* Suspend the modem. No action needed on Intel MID platforms, may
* need extending for other systems.
*/
static int ifx_spi_pm_suspend(struct device *dev)
{
return 0;
}
/**
* ifx_spi_pm_resume - resume modem on system resume
 * @dev: device being resumed
*
* Allow the modem to resume. No action needed.
*
* FIXME: do we need to reset anything here ?
*/
static int ifx_spi_pm_resume(struct device *dev)
{
return 0;
}
/**
 * ifx_spi_pm_runtime_resume - resume modem
 * @dev: device being resumed
*
* Allow the modem to resume. No action needed.
*/
static int ifx_spi_pm_runtime_resume(struct device *dev)
{
return 0;
}
/**
* ifx_spi_pm_runtime_suspend - suspend modem
* @dev: device being suspended
*
 * Allow the modem to suspend and thus let the suspend continue up the
 * device tree.
*/
static int ifx_spi_pm_runtime_suspend(struct device *dev)
{
return 0;
}
/**
* ifx_spi_pm_runtime_idle - check if modem idle
* @dev: our device
*
* Check conditions and queue runtime suspend if idle.
*/
static int ifx_spi_pm_runtime_idle(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
if (!ifx_dev->power_status)
pm_runtime_suspend(dev);
return 0;
}
static const struct dev_pm_ops ifx_spi_pm = {
.resume = ifx_spi_pm_resume,
.suspend = ifx_spi_pm_suspend,
.runtime_resume = ifx_spi_pm_runtime_resume,
.runtime_suspend = ifx_spi_pm_runtime_suspend,
.runtime_idle = ifx_spi_pm_runtime_idle
};
static const struct spi_device_id ifx_id_table[] = {
{"ifx6160", 0},
{"ifx6260", 0},
{ }
};
MODULE_DEVICE_TABLE(spi, ifx_id_table);
/* spi operations */
static const struct spi_driver ifx_spi_driver = {
.driver = {
.name = DRVNAME,
.pm = &ifx_spi_pm,
.owner = THIS_MODULE},
.probe = ifx_spi_spi_probe,
.shutdown = ifx_spi_spi_shutdown,
.remove = __devexit_p(ifx_spi_spi_remove),
.suspend = ifx_spi_spi_suspend,
.resume = ifx_spi_spi_resume,
.id_table = ifx_id_table
};
/**
* ifx_spi_exit - module exit
*
* Unload the module.
*/
static void __exit ifx_spi_exit(void)
{
/* unregister */
tty_unregister_driver(tty_drv);
spi_unregister_driver((void *)&ifx_spi_driver);
}
/**
* ifx_spi_init - module entry point
*
* Initialise the SPI and tty interfaces for the IFX SPI driver
 * We need to initialize the upper-edge SPI driver after the tty
 * driver because otherwise the SPI probe can race with tty setup
*/
static int __init ifx_spi_init(void)
{
int result;
tty_drv = alloc_tty_driver(1);
if (!tty_drv) {
pr_err("%s: alloc_tty_driver failed", DRVNAME);
return -ENOMEM;
}
tty_drv->driver_name = DRVNAME;
tty_drv->name = TTYNAME;
tty_drv->minor_start = IFX_SPI_TTY_ID;
tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
tty_drv->subtype = SERIAL_TYPE_NORMAL;
tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
tty_drv->init_termios = tty_std_termios;
tty_set_operations(tty_drv, &ifx_spi_serial_ops);
result = tty_register_driver(tty_drv);
if (result) {
pr_err("%s: tty_register_driver failed(%d)",
DRVNAME, result);
put_tty_driver(tty_drv);
return result;
}
result = spi_register_driver((void *)&ifx_spi_driver);
if (result) {
pr_err("%s: spi_register_driver failed(%d)",
DRVNAME, result);
tty_unregister_driver(tty_drv);
}
return result;
}
module_init(ifx_spi_init);
module_exit(ifx_spi_exit);
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("IFX6x60 spi driver");
MODULE_LICENSE("GPL");
MODULE_INFO(Version, "0.1-IFX6x60");
| gpl-2.0 |
Blechd0se/kernel-moto-g | drivers/s390/kvm/kvm_virtio.c | 1519 | 11880 | /*
* kvm_virtio.c - virtio for kvm on s390
*
* Copyright IBM Corp. 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*/
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/virtio_console.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/export.h>
#include <linux/pfn.h>
#include <asm/io.h>
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>
#include <asm/setup.h>
#include <asm/irq.h>
#define VIRTIO_SUBCODE_64 0x0D00
/*
 * The pointer to our page of device descriptions.
*/
static void *kvm_devices;
static struct work_struct hotplug_work;
struct kvm_device {
struct virtio_device vdev;
struct kvm_device_desc *desc;
};
#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
/*
* memory layout:
* - kvm_device_descriptor
* struct kvm_device_desc
* - configuration
* struct kvm_vqconfig
* - feature bits
* - config space
*/
static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
{
return (struct kvm_vqconfig *)(desc + 1);
}
static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
{
return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
}
static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
{
return kvm_vq_features(desc) + desc->feature_len * 2;
}
/*
* The total size of the config page used by this device (incl. desc)
*/
static unsigned desc_size(const struct kvm_device_desc *desc)
{
return sizeof(*desc)
+ desc->num_vq * sizeof(struct kvm_vqconfig)
+ desc->feature_len * 2
+ desc->config_len;
}
/* This gets the device's feature bits. */
static u32 kvm_get_features(struct virtio_device *vdev)
{
unsigned int i;
u32 features = 0;
struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
u8 *in_features = kvm_vq_features(desc);
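	/* Fold the byte-wise feature bitmap from config space into a
	 * 32-bit feature word.
	 */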
for (i = 0; i < min(desc->feature_len * 8, 32); i++)
if (in_features[i / 8] & (1 << (i % 8)))
features |= (1 << i);
return features;
}
static void kvm_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
/* Second half of bitmap is features we accept. */
u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
memset(out_features, 0, desc->feature_len);
bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++) {
if (test_bit(i, vdev->features))
out_features[i / 8] |= (1 << (i % 8));
}
}
/*
* Reading and writing elements in config space
*/
static void kvm_get(struct virtio_device *vdev, unsigned int offset,
void *buf, unsigned len)
{
struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
BUG_ON(offset + len > desc->config_len);
memcpy(buf, kvm_vq_configspace(desc) + offset, len);
}
static void kvm_set(struct virtio_device *vdev, unsigned int offset,
const void *buf, unsigned len)
{
struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
BUG_ON(offset + len > desc->config_len);
memcpy(kvm_vq_configspace(desc) + offset, buf, len);
}
/*
* The operations to get and set the status word just access
* the status field of the device descriptor. set_status will also
* make a hypercall to the host, to tell about status changes
*/
static u8 kvm_get_status(struct virtio_device *vdev)
{
return to_kvmdev(vdev)->desc->status;
}
static void kvm_set_status(struct virtio_device *vdev, u8 status)
{
BUG_ON(!status);
to_kvmdev(vdev)->desc->status = status;
kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
(unsigned long) to_kvmdev(vdev)->desc);
}
/*
* To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
* descriptor address. The Host will zero the status and all the
* features.
*/
static void kvm_reset(struct virtio_device *vdev)
{
kvm_hypercall1(KVM_S390_VIRTIO_RESET,
(unsigned long) to_kvmdev(vdev)->desc);
}
/*
* When the virtio_ring code wants to notify the Host, it calls us here and we
* make a hypercall. We hand the address of the virtqueue so the Host
* knows which virtqueue we're talking about.
*/
static void kvm_notify(struct virtqueue *vq)
{
struct kvm_vqconfig *config = vq->priv;
kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
}
/*
* This routine finds the first virtqueue described in the configuration of
* this device and sets it up.
*/
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
struct kvm_device *kdev = to_kvmdev(vdev);
struct kvm_vqconfig *config;
struct virtqueue *vq;
int err;
if (index >= kdev->desc->num_vq)
return ERR_PTR(-ENOENT);
config = kvm_vq_config(kdev->desc)+index;
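	/* Map the host-provided ring memory before handing it to vring. */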
err = vmem_add_mapping(config->address,
vring_size(config->num,
KVM_S390_VIRTIO_RING_ALIGN));
if (err)
goto out;
vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
vdev, true, (void *) config->address,
kvm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
}
/*
* register a callback token
	 * The host will send this back via the external interrupt parameter
*/
config->token = (u64) vq;
vq->priv = config;
return vq;
unmap:
vmem_remove_mapping(config->address,
vring_size(config->num,
KVM_S390_VIRTIO_RING_ALIGN));
out:
return ERR_PTR(err);
}
static void kvm_del_vq(struct virtqueue *vq)
{
struct kvm_vqconfig *config = vq->priv;
vring_del_virtqueue(vq);
vmem_remove_mapping(config->address,
vring_size(config->num,
KVM_S390_VIRTIO_RING_ALIGN));
}
static void kvm_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
kvm_del_vq(vq);
}
static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[])
{
struct kvm_device *kdev = to_kvmdev(vdev);
int i;
/* We must have this many virtqueues. */
if (nvqs > kdev->desc->num_vq)
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
if (IS_ERR(vqs[i]))
goto error;
}
return 0;
error:
kvm_del_vqs(vdev);
return PTR_ERR(vqs[i]);
}
static const char *kvm_bus_name(struct virtio_device *vdev)
{
return "";
}
/*
* The config ops structure as defined by virtio config
*/
static struct virtio_config_ops kvm_vq_configspace_ops = {
.get_features = kvm_get_features,
.finalize_features = kvm_finalize_features,
.get = kvm_get,
.set = kvm_set,
.get_status = kvm_get_status,
.set_status = kvm_set_status,
.reset = kvm_reset,
.find_vqs = kvm_find_vqs,
.del_vqs = kvm_del_vqs,
.bus_name = kvm_bus_name,
};
/*
* The root device for the kvm virtio devices.
* This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
*/
static struct device *kvm_root;
/*
 * Adds a new device and registers it with virtio.
 * Appropriate drivers are loaded by the device model.
*/
static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
{
struct kvm_device *kdev;
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev) {
printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
offset, d->type);
return;
}
kdev->vdev.dev.parent = kvm_root;
kdev->vdev.id.device = d->type;
kdev->vdev.config = &kvm_vq_configspace_ops;
kdev->desc = d;
if (register_virtio_device(&kdev->vdev) != 0) {
printk(KERN_ERR "Failed to register kvm device %u type %u\n",
offset, d->type);
kfree(kdev);
}
}
/*
* scan_devices() simply iterates through the device page.
* The type 0 is reserved to mean "end of devices".
*/
static void scan_devices(void)
{
unsigned int i;
struct kvm_device_desc *d;
for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
d = kvm_devices + i;
if (d->type == 0)
break;
add_kvm_device(d, i);
}
}
/*
* match for a kvm device with a specific desc pointer
*/
static int match_desc(struct device *dev, void *data)
{
struct virtio_device *vdev = dev_to_virtio(dev);
struct kvm_device *kdev = to_kvmdev(vdev);
return kdev->desc == data;
}
/*
 * hotplug_devices() scans the device page for changes.
*/
static void hotplug_devices(struct work_struct *dummy)
{
unsigned int i;
struct kvm_device_desc *d;
struct device *dev;
for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
d = kvm_devices + i;
/* end of list */
if (d->type == 0)
break;
/* device already exists */
dev = device_find_child(kvm_root, d, match_desc);
if (dev) {
/* XXX check for hotplug remove */
put_device(dev);
continue;
}
/* new device */
printk(KERN_INFO "Adding new virtio device %p\n", d);
add_kvm_device(d, i);
}
}
/*
* we emulate the request_irq behaviour on top of s390 extints
*/
static void kvm_extint_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct virtqueue *vq;
u32 param;
if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
/* The LSB might be overloaded, we have to mask it */
vq = (struct virtqueue *)(param64 & ~1UL);
/* We use ext_params to decide what this interrupt means */
param = param32 & VIRTIO_PARAM_MASK;
switch (param) {
case VIRTIO_PARAM_CONFIG_CHANGED:
{
struct virtio_driver *drv;
drv = container_of(vq->vdev->dev.driver,
struct virtio_driver, driver);
if (drv->config_changed)
drv->config_changed(vq->vdev);
break;
}
case VIRTIO_PARAM_DEV_ADD:
schedule_work(&hotplug_work);
break;
case VIRTIO_PARAM_VRING_INTERRUPT:
default:
vring_interrupt(0, vq);
break;
}
}
/*
* For s390-virtio, we expect a page above main storage containing
* the virtio configuration. Try to actually load from this area
* in order to figure out if the host provides this page.
*/
static int __init test_devices_support(unsigned long addr)
{
int ret = -EIO;
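	/* lura loads via the real address; if the host does not back this
	 * page the fault is caught by the exception table and ret stays
	 * -EIO, otherwise xgr clears it to 0.
	 */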
asm volatile(
"0: lura 0,%1\n"
"1: xgr %0,%0\n"
"2:\n"
EX_TABLE(0b,2b)
EX_TABLE(1b,2b)
: "+d" (ret)
: "a" (addr)
: "0", "cc");
return ret;
}
/*
 * Init function for virtio.
 * Devices are in a single page above the top of "normal" memory.
*/
static int __init kvm_devices_init(void)
{
int rc;
if (!MACHINE_IS_KVM)
return -ENODEV;
if (test_devices_support(real_memory_size) < 0)
return -ENODEV;
rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
if (rc)
return rc;
kvm_devices = (void *) real_memory_size;
kvm_root = root_device_register("kvm_s390");
if (IS_ERR(kvm_root)) {
rc = PTR_ERR(kvm_root);
printk(KERN_ERR "Could not register kvm_s390 root device");
vmem_remove_mapping(real_memory_size, PAGE_SIZE);
return rc;
}
INIT_WORK(&hotplug_work, hotplug_devices);
service_subclass_irq_register();
register_external_interrupt(0x2603, kvm_extint_handler);
scan_devices();
return 0;
}
/* code for early console output with virtio_console */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
char scratch[17];
unsigned int len = count;
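	/* Clamp to the scratch buffer (16 bytes plus NUL) so the hypercall
	 * sees a bounded, NUL-terminated string.
	 */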
if (len > sizeof(scratch) - 1)
len = sizeof(scratch) - 1;
scratch[len] = '\0';
memcpy(scratch, buf, len);
kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
return len;
}
static int __init s390_virtio_console_init(void)
{
if (!MACHINE_IS_KVM)
return -ENODEV;
return virtio_cons_early_init(early_put_chars);
}
console_initcall(s390_virtio_console_init);
/*
* We do this after core stuff, but before the drivers.
*/
postcore_initcall(kvm_devices_init);
| gpl-2.0 |
maikelwever/android_kernel_htc_msm8660-caf | drivers/pcmcia/pxa2xx_sharpsl.c | 1775 | 7860 | /*
* Sharp SL-C7xx Series PCMCIA routines
*
* Copyright (c) 2004-2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches and pxa2xx_mainstone.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/scoop.h>
#include "soc_common.h"
#define NO_KEEP_VS 0x0001
#define SCOOP_DEV platform_scoop_config->devs
static void sharpsl_pcmcia_init_reset(struct soc_pcmcia_socket *skt)
{
struct scoop_pcmcia_dev *scoopdev = &SCOOP_DEV[skt->nr];
reset_scoop(scoopdev->dev);
/* Shared power controls need to be handled carefully */
if (platform_scoop_config->power_ctrl)
platform_scoop_config->power_ctrl(scoopdev->dev, 0x0000, skt->nr);
else
write_scoop_reg(scoopdev->dev, SCOOP_CPR, 0x0000);
scoopdev->keep_vs = NO_KEEP_VS;
scoopdev->keep_rd = 0;
}
static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret;
if (platform_scoop_config->pcmcia_init)
platform_scoop_config->pcmcia_init();
/* Register interrupts */
if (SCOOP_DEV[skt->nr].cd_irq >= 0) {
struct pcmcia_irqs cd_irq;
cd_irq.sock = skt->nr;
cd_irq.irq = SCOOP_DEV[skt->nr].cd_irq;
cd_irq.str = SCOOP_DEV[skt->nr].cd_irq_str;
ret = soc_pcmcia_request_irqs(skt, &cd_irq, 1);
if (ret) {
printk(KERN_ERR "Request for Compact Flash IRQ failed\n");
return ret;
}
}
skt->socket.pci_irq = SCOOP_DEV[skt->nr].irq;
return 0;
}
static void sharpsl_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
if (SCOOP_DEV[skt->nr].cd_irq >= 0) {
struct pcmcia_irqs cd_irq;
cd_irq.sock = skt->nr;
cd_irq.irq = SCOOP_DEV[skt->nr].cd_irq;
cd_irq.str = SCOOP_DEV[skt->nr].cd_irq_str;
soc_pcmcia_free_irqs(skt, &cd_irq, 1);
}
}
static void sharpsl_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
unsigned short cpr, csr;
struct device *scoop = SCOOP_DEV[skt->nr].dev;
cpr = read_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_CPR);
write_scoop_reg(scoop, SCOOP_IRM, 0x00FF);
write_scoop_reg(scoop, SCOOP_ISR, 0x0000);
write_scoop_reg(scoop, SCOOP_IRM, 0x0000);
csr = read_scoop_reg(scoop, SCOOP_CSR);
if (csr & 0x0004) {
/* card eject */
write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
SCOOP_DEV[skt->nr].keep_vs = NO_KEEP_VS;
}
else if (!(SCOOP_DEV[skt->nr].keep_vs & NO_KEEP_VS)) {
/* keep vs1,vs2 */
write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
csr |= SCOOP_DEV[skt->nr].keep_vs;
}
else if (cpr & 0x0003) {
/* power on */
write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
SCOOP_DEV[skt->nr].keep_vs = (csr & 0x00C0);
}
else {
/* card detect */
if ((machine_is_spitz() || machine_is_borzoi()) && skt->nr == 1) {
write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
} else {
write_scoop_reg(scoop, SCOOP_CDR, 0x0002);
}
}
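/*
 * Editorial note: the SCOOP CSR decode below treats card detect and the
 * VS1/VS2 voltage-sense bits as active low, while ready, BVD1/BVD2 and
 * write-protect are active high.
 */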
state->detect = (csr & 0x0004) ? 0 : 1;
state->ready = (csr & 0x0002) ? 1 : 0;
state->bvd1 = (csr & 0x0010) ? 1 : 0;
state->bvd2 = (csr & 0x0020) ? 1 : 0;
state->wrprot = (csr & 0x0008) ? 1 : 0;
state->vs_3v = (csr & 0x0040) ? 0 : 1;
state->vs_Xv = (csr & 0x0080) ? 0 : 1;
if ((cpr & 0x0080) && ((cpr & 0x8040) != 0x8040)) {
printk(KERN_ERR "sharpsl_pcmcia_socket_state(): CPR=%04X, Low voltage!\n", cpr);
}
}
static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
unsigned long flags;
struct device *scoop = SCOOP_DEV[skt->nr].dev;
unsigned short cpr, ncpr, ccr, nccr, mcr, nmcr, imr, nimr;
switch (state->Vcc) {
case 0: break;
case 33: break;
case 50: break;
default:
printk(KERN_ERR "sharpsl_pcmcia_configure_socket(): bad Vcc %u\n", state->Vcc);
return -1;
}
if ((state->Vpp!=state->Vcc) && (state->Vpp!=0)) {
printk(KERN_ERR "CF slot cannot support Vpp %u\n", state->Vpp);
return -1;
}
local_irq_save(flags);
nmcr = (mcr = read_scoop_reg(scoop, SCOOP_MCR)) & ~0x0010;
ncpr = (cpr = read_scoop_reg(scoop, SCOOP_CPR)) & ~0x0083;
nccr = (ccr = read_scoop_reg(scoop, SCOOP_CCR)) & ~0x0080;
nimr = (imr = read_scoop_reg(scoop, SCOOP_IMR)) & ~0x003E;
if ((machine_is_spitz() || machine_is_borzoi() || machine_is_akita()) && skt->nr == 0) {
ncpr |= (state->Vcc == 33) ? 0x0002 :
(state->Vcc == 50) ? 0x0002 : 0;
} else {
ncpr |= (state->Vcc == 33) ? 0x0001 :
(state->Vcc == 50) ? 0x0002 : 0;
}
nmcr |= (state->flags&SS_IOCARD) ? 0x0010 : 0;
ncpr |= (state->flags&SS_OUTPUT_ENA) ? 0x0080 : 0;
nccr |= (state->flags&SS_RESET)? 0x0080: 0;
nimr |= ((skt->status&SS_DETECT) ? 0x0004 : 0)|
((skt->status&SS_READY) ? 0x0002 : 0)|
((skt->status&SS_BATDEAD)? 0x0010 : 0)|
((skt->status&SS_BATWARN)? 0x0020 : 0)|
((skt->status&SS_STSCHG) ? 0x0010 : 0)|
((skt->status&SS_WRPROT) ? 0x0008 : 0);
if (!(ncpr & 0x0003)) {
SCOOP_DEV[skt->nr].keep_rd = 0;
} else if (!SCOOP_DEV[skt->nr].keep_rd) {
if (nccr & 0x0080)
SCOOP_DEV[skt->nr].keep_rd = 1;
else
nccr |= 0x0080;
}
if (mcr != nmcr)
write_scoop_reg(scoop, SCOOP_MCR, nmcr);
if (cpr != ncpr) {
if (platform_scoop_config->power_ctrl)
platform_scoop_config->power_ctrl(scoop, ncpr , skt->nr);
else
write_scoop_reg(scoop, SCOOP_CPR, ncpr);
}
if (ccr != nccr)
write_scoop_reg(scoop, SCOOP_CCR, nccr);
if (imr != nimr)
write_scoop_reg(scoop, SCOOP_IMR, nimr);
local_irq_restore(flags);
return 0;
}
static void sharpsl_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
{
sharpsl_pcmcia_init_reset(skt);
/* Enable interrupt */
write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_IMR, 0x00C0);
write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_MCR, 0x0101);
SCOOP_DEV[skt->nr].keep_vs = NO_KEEP_VS;
}
static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
sharpsl_pcmcia_init_reset(skt);
}
static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = sharpsl_pcmcia_hw_init,
.hw_shutdown = sharpsl_pcmcia_hw_shutdown,
.socket_state = sharpsl_pcmcia_socket_state,
.configure_socket = sharpsl_pcmcia_configure_socket,
.socket_init = sharpsl_pcmcia_socket_init,
.socket_suspend = sharpsl_pcmcia_socket_suspend,
.first = 0,
.nr = 0,
};
#ifdef CONFIG_SA1100_COLLIE
#include "sa11xx_base.h"
int __devinit pcmcia_collie_init(struct device *dev)
{
int ret = -ENODEV;
if (machine_is_collie())
ret = sa11xx_drv_pcmcia_probe(dev, &sharpsl_pcmcia_ops, 0, 1);
return ret;
}
#else
static struct platform_device *sharpsl_pcmcia_device;
static int __init sharpsl_pcmcia_init(void)
{
int ret;
if (!platform_scoop_config)
return -ENODEV;
sharpsl_pcmcia_ops.nr = platform_scoop_config->num_devs;
sharpsl_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!sharpsl_pcmcia_device)
return -ENOMEM;
ret = platform_device_add_data(sharpsl_pcmcia_device,
&sharpsl_pcmcia_ops, sizeof(sharpsl_pcmcia_ops));
if (ret == 0) {
sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev;
ret = platform_device_add(sharpsl_pcmcia_device);
}
if (ret)
platform_device_put(sharpsl_pcmcia_device);
return ret;
}
static void __exit sharpsl_pcmcia_exit(void)
{
platform_device_unregister(sharpsl_pcmcia_device);
}
fs_initcall(sharpsl_pcmcia_init);
module_exit(sharpsl_pcmcia_exit);
#endif
MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
| gpl-2.0 |
RealVNC/android-kernel-msm | arch/arm/mach-imx/devices/platform-mxc_nand.c | 2799 | 2511 | /*
* Copyright (C) 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include <asm/sizes.h>
#include "../hardware.h"
#include "devices-common.h"
#define imx_mxc_nand_data_entry_single(soc, _devid, _size) \
{ \
.devid = _devid, \
.iobase = soc ## _NFC_BASE_ADDR, \
.iosize = _size, \
.irq = soc ## _INT_NFC \
}
#define imx_mxc_nandv3_data_entry_single(soc, _devid, _size) \
{ \
.devid = _devid, \
.id = -1, \
.iobase = soc ## _NFC_BASE_ADDR, \
.iosize = _size, \
.axibase = soc ## _NFC_AXI_BASE_ADDR, \
.irq = soc ## _INT_NFC \
}
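/*
 * Editorial sketch: the token pasting in the macros above is mechanical;
 * for example the MX25 entry below expands to roughly
 *
 *	{ .devid = "imx25-nand", .iobase = MX25_NFC_BASE_ADDR,
 *	  .iosize = SZ_8K, .irq = MX25_INT_NFC }
 *
 * with the base address and interrupt number coming from the SoC headers.
 */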
#ifdef CONFIG_SOC_IMX21
const struct imx_mxc_nand_data imx21_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX21, "imx21-nand", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */
#ifdef CONFIG_SOC_IMX25
const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX25, "imx25-nand", SZ_8K);
#endif /* ifdef CONFIG_SOC_IMX25 */
#ifdef CONFIG_SOC_IMX27
const struct imx_mxc_nand_data imx27_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX27, "imx27-nand", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX27 */
#ifdef CONFIG_SOC_IMX31
const struct imx_mxc_nand_data imx31_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX31, "imx27-nand", SZ_4K);
#endif
#ifdef CONFIG_SOC_IMX35
const struct imx_mxc_nand_data imx35_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX35, "imx25-nand", SZ_8K);
#endif
#ifdef CONFIG_SOC_IMX51
const struct imx_mxc_nand_data imx51_mxc_nand_data __initconst =
imx_mxc_nandv3_data_entry_single(MX51, "imx51-nand", SZ_16K);
#endif
struct platform_device *__init imx_add_mxc_nand(
const struct imx_mxc_nand_data *data,
const struct mxc_nand_platform_data *pdata)
{
/* AXI has to come first, that's how the mxc_nand driver expects it */
struct resource res[] = {
{
.start = data->iobase,
.end = data->iobase + data->iosize - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
.end = data->irq,
.flags = IORESOURCE_IRQ,
}, {
.start = data->axibase,
.end = data->axibase + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
};
return imx_add_platform_device(data->devid, data->id,
res, ARRAY_SIZE(res) - !data->axibase,
pdata, sizeof(*pdata));
}
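/*
 * Editorial note on the resource count above: "!data->axibase" evaluates
 * to 1 when no AXI window is configured, so ARRAY_SIZE(res) - !data->axibase
 * drops the trailing AXI resource for the older controllers and keeps all
 * three entries for the i.MX51-style (v3) controller.
 */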
| gpl-2.0 |
coolshou/htc_k2u_kernel-3.4.10 | drivers/bcma/main.c | 3311 | 9131 | /*
* Broadcom specific AMBA
* Bus subsystem
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/module.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.id);
}
static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%02X\n", core->id.rev);
}
static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%X\n", core->id.class);
}
static struct device_attribute bcma_device_attrs[] = {
__ATTR_RO(manuf),
__ATTR_RO(id),
__ATTR_RO(rev),
__ATTR_RO(class),
__ATTR_NULL,
};
static struct bus_type bcma_bus_type = {
.name = "bcma",
.match = bcma_bus_match,
.probe = bcma_device_probe,
.remove = bcma_device_remove,
.uevent = bcma_device_uevent,
.dev_attrs = bcma_device_attrs,
};
struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
{
struct bcma_device *core;
list_for_each_entry(core, &bus->cores, list) {
if (core->id.id == coreid)
return core;
}
return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core);
static void bcma_release_core_dev(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
if (core->io_addr)
iounmap(core->io_addr);
if (core->io_wrap)
iounmap(core->io_wrap);
kfree(core);
}
static int bcma_register_cores(struct bcma_bus *bus)
{
struct bcma_device *core;
int err, dev_id = 0;
list_for_each_entry(core, &bus->cores, list) {
/* We handle these cores ourselves */
switch (core->id.id) {
case BCMA_CORE_CHIPCOMMON:
case BCMA_CORE_PCI:
case BCMA_CORE_PCIE:
case BCMA_CORE_MIPS_74K:
continue;
}
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
core->dev.parent = &bus->host_pci->dev;
core->dma_dev = &bus->host_pci->dev;
core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_SOC:
core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
break;
case BCMA_HOSTTYPE_SDIO:
break;
}
err = device_register(&core->dev);
if (err) {
pr_err("Could not register dev for core 0x%03X\n",
core->id.id);
continue;
}
core->dev_registered = true;
dev_id++;
}
return 0;
}
static void bcma_unregister_cores(struct bcma_bus *bus)
{
struct bcma_device *core;
list_for_each_entry(core, &bus->cores, list) {
if (core->dev_registered)
device_unregister(&core->dev);
}
}
int __devinit bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
mutex_lock(&bcma_buses_mutex);
bus->num = bcma_bus_next_num++;
mutex_unlock(&bcma_buses_mutex);
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
pr_err("Failed to scan: %d\n", err);
return -1;
}
/* Init CC core */
core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
if (core) {
bus->drv_cc.core = core;
bcma_core_chipcommon_init(&bus->drv_cc);
}
/* Init MIPS core */
core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
if (core) {
bus->drv_mips.core = core;
bcma_core_mips_init(&bus->drv_mips);
}
/* Init PCIE core */
core = bcma_find_core(bus, BCMA_CORE_PCIE);
if (core) {
bus->drv_pci.core = core;
bcma_core_pci_init(&bus->drv_pci);
}
/* Try to get SPROM */
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
pr_err("No SPROM available\n");
} else if (err)
pr_err("Failed to get SPROM: %d\n", err);
/* Register found cores */
bcma_register_cores(bus);
pr_info("Bus registered\n");
return 0;
}
void bcma_bus_unregister(struct bcma_bus *bus)
{
bcma_unregister_cores(bus);
}
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips)
{
int err;
struct bcma_device *core;
struct bcma_device_id match;
bcma_init_bus(bus);
match.manuf = BCMA_MANUF_BCM;
match.id = BCMA_CORE_CHIPCOMMON;
match.class = BCMA_CL_SIM;
match.rev = BCMA_ANY_REV;
/* Scan for chip common core */
err = bcma_bus_scan_early(bus, &match, core_cc);
if (err) {
pr_err("Failed to scan for common core: %d\n", err);
return -1;
}
match.manuf = BCMA_MANUF_MIPS;
match.id = BCMA_CORE_MIPS_74K;
match.class = BCMA_CL_SIM;
match.rev = BCMA_ANY_REV;
/* Scan for mips core */
err = bcma_bus_scan_early(bus, &match, core_mips);
if (err) {
pr_err("Failed to scan for mips core: %d\n", err);
return -1;
}
/* Init CC core */
core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
if (core) {
bus->drv_cc.core = core;
bcma_core_chipcommon_init(&bus->drv_cc);
}
/* Init MIPS core */
core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
if (core) {
bus->drv_mips.core = core;
bcma_core_mips_init(&bus->drv_mips);
}
pr_info("Early bus registered\n");
return 0;
}
#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
struct bcma_device *core;
list_for_each_entry(core, &bus->cores, list) {
struct device_driver *drv = core->dev.driver;
if (drv) {
struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
if (adrv->suspend)
adrv->suspend(core);
}
}
return 0;
}
int bcma_bus_resume(struct bcma_bus *bus)
{
struct bcma_device *core;
/* Init CC core */
core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
if (core) {
bus->drv_cc.setup_done = false;
bcma_core_chipcommon_init(&bus->drv_cc);
}
list_for_each_entry(core, &bus->cores, list) {
struct device_driver *drv = core->dev.driver;
if (drv) {
struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
if (adrv->resume)
adrv->resume(core);
}
}
return 0;
}
#endif
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
drv->drv.bus = &bcma_bus_type;
drv->drv.owner = owner;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);
void bcma_driver_unregister(struct bcma_driver *drv)
{
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
const struct bcma_device_id *cid = &core->id;
const struct bcma_device_id *did;
for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
(did->id == cid->id || did->id == BCMA_ANY_ID) &&
(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
(did->class == cid->class || did->class == BCMA_ANY_CLASS))
return 1;
}
return 0;
}
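/*
 * Editorial sketch: a match table that bcma_bus_match() above would
 * accept. example_bcma_ids is a hypothetical name and core id 0x812 is
 * just an example value; BCMA_ANY_REV and BCMA_ANY_CLASS are the
 * wildcards the loop tests for, and the all-zero entry terminates it
 * (the loop stops once manuf, id and rev are all zero).
 */
static const struct bcma_device_id example_bcma_ids[] = {
	{ .manuf = BCMA_MANUF_BCM, .id = 0x812,
	  .rev = BCMA_ANY_REV, .class = BCMA_ANY_CLASS },
	{ 0 },
};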
static int bcma_device_probe(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
drv);
int err = 0;
if (adrv->probe)
err = adrv->probe(core);
return err;
}
static int bcma_device_remove(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
drv);
if (adrv->remove)
adrv->remove(core);
return 0;
}
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return add_uevent_var(env,
"MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
core->id.manuf, core->id.id,
core->id.rev, core->id.class);
}
static int __init bcma_modinit(void)
{
int err;
err = bus_register(&bcma_bus_type);
if (err)
return err;
#ifdef CONFIG_BCMA_HOST_PCI
err = bcma_host_pci_init();
if (err) {
pr_err("PCI host initialization failed\n");
err = 0;
}
#endif
return err;
}
fs_initcall(bcma_modinit);
static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
bcma_host_pci_exit();
#endif
bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)
| gpl-2.0 |
squirrel20/linux-4.8.15 | arch/metag/mm/l2cache.c | 4335 | 3794 | #include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/l2cache.h>
#include <asm/metag_isa.h>
/* If non-0, then initialise the L2 cache */
static int l2cache_init = 1;
/* If non-0, then initialise the L2 cache prefetch */
static int l2cache_init_pf = 1;
int l2c_pfenable;
static volatile u32 l2c_testdata[16] __initdata __aligned(64);
static int __init parse_l2cache(char *p)
{
char *cp = p;
if (get_option(&cp, &l2cache_init) != 1) {
pr_err("Bad l2cache parameter (%s)\n", p);
return 1;
}
return 0;
}
early_param("l2cache", parse_l2cache);
static int __init parse_l2cache_pf(char *p)
{
char *cp = p;
if (get_option(&cp, &l2cache_init_pf) != 1) {
pr_err("Bad l2cache_pf parameter (%s)\n", p);
return 1;
}
return 0;
}
early_param("l2cache_pf", parse_l2cache_pf);
static int __init meta_l2c_setup(void)
{
/*
* If the L2 cache isn't even present, don't do anything, but say so in
* the log.
*/
if (!meta_l2c_is_present()) {
pr_info("L2 Cache: Not present\n");
return 0;
}
/*
* Check whether the line size is recognised.
*/
if (!meta_l2c_linesize()) {
pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
meta_l2c_config());
}
/*
* Initialise state.
*/
l2c_pfenable = _meta_l2c_pf_is_enabled();
/*
* Enable the L2 cache and print to log whether it was already enabled
* by the bootloader.
*/
if (l2cache_init) {
pr_info("L2 Cache: Enabling... ");
if (meta_l2c_enable())
pr_cont("already enabled\n");
else
pr_cont("done\n");
} else {
pr_info("L2 Cache: Not enabling\n");
}
/*
* Enable L2 cache prefetch.
*/
if (l2cache_init_pf) {
pr_info("L2 Cache: Enabling prefetch... ");
if (meta_l2c_pf_enable(1))
pr_cont("already enabled\n");
else
pr_cont("done\n");
} else {
pr_info("L2 Cache: Not enabling prefetch\n");
}
return 0;
}
core_initcall(meta_l2c_setup);
int meta_l2c_disable(void)
{
unsigned long flags;
int en;
if (!meta_l2c_is_present())
return 1;
/*
* Prevent other threads from writing during the writeback; otherwise the
* writes will get "lost" when the L2 is disabled.
*/
__global_lock2(flags);
en = meta_l2c_is_enabled();
if (likely(en)) {
_meta_l2c_pf_enable(0);
wr_fence();
_meta_l2c_purge();
_meta_l2c_enable(0);
}
__global_unlock2(flags);
return !en;
}
int meta_l2c_enable(void)
{
unsigned long flags;
int en;
if (!meta_l2c_is_present())
return 0;
/*
* Init (clearing the L2) can happen while the L2 is disabled, so other
* threads are safe to continue executing, however we must not init the
* cache if it's already enabled (dirty lines would be discarded), so
* this operation should still be atomic with other threads.
*/
__global_lock1(flags);
en = meta_l2c_is_enabled();
if (likely(!en)) {
_meta_l2c_init();
_meta_l2c_enable(1);
_meta_l2c_pf_enable(l2c_pfenable);
}
__global_unlock1(flags);
return en;
}
int meta_l2c_pf_enable(int pfenable)
{
unsigned long flags;
int en = l2c_pfenable;
if (!meta_l2c_is_present())
return 0;
/*
* We read-modify-write the enable register, so this operation must be
* atomic with respect to other threads.
*/
__global_lock1(flags);
en = l2c_pfenable;
l2c_pfenable = pfenable;
if (meta_l2c_is_enabled())
_meta_l2c_pf_enable(pfenable);
__global_unlock1(flags);
return en;
}
int meta_l2c_flush(void)
{
unsigned long flags;
int en;
/*
* Prevent other threads from writing during the writeback. This also
* involves read-modify-writes.
*/
__global_lock2(flags);
en = meta_l2c_is_enabled();
if (likely(en)) {
_meta_l2c_pf_enable(0);
wr_fence();
_meta_l2c_purge();
_meta_l2c_enable(0);
_meta_l2c_init();
_meta_l2c_enable(1);
_meta_l2c_pf_enable(l2c_pfenable);
}
__global_unlock2(flags);
return !en;
}
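/*
 * Editorial note: meta_l2c_flush() above implements flush as a full
 * writeback-and-reinitialise cycle -- prefetch off, write fence, purge
 * (writing back dirty lines), disable, re-init (clearing the cache),
 * then re-enable with the saved prefetch setting -- all under the global
 * lock so no other thread can write while lines are in flight.
 */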
| gpl-2.0 |
aloksinha2001/rk3x_kernel_3.0.36 | drivers/pcmcia/sa1100_jornada720.c | 4335 | 2483 | /*
* drivers/pcmcia/sa1100_jornada720.c
*
* Jornada720 PCMCIA specific routines
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
#include <asm/mach-types.h>
#include "sa1111_generic.h"
/* Does SOCKET1_3V actually do anything? */
#define SOCKET0_POWER GPIO_GPIO0
#define SOCKET0_3V GPIO_GPIO2
#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3)
#define SOCKET1_3V GPIO_GPIO3
static int
jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
struct sa1111_pcmcia_socket *s = to_skt(skt);
unsigned int pa_dwr_mask, pa_dwr_set;
int ret;
printk(KERN_INFO "%s(): config socket %d vcc %d vpp %d\n", __func__,
skt->nr, state->Vcc, state->Vpp);
switch (skt->nr) {
case 0:
pa_dwr_mask = SOCKET0_POWER | SOCKET0_3V;
switch (state->Vcc) {
default:
case 0:
pa_dwr_set = 0;
break;
case 33:
pa_dwr_set = SOCKET0_POWER | SOCKET0_3V;
break;
case 50:
pa_dwr_set = SOCKET0_POWER;
break;
}
break;
case 1:
pa_dwr_mask = SOCKET1_POWER;
switch (state->Vcc) {
default:
case 0:
pa_dwr_set = 0;
break;
case 33:
pa_dwr_set = SOCKET1_POWER;
break;
case 50:
pa_dwr_set = SOCKET1_POWER;
break;
}
break;
default:
return -1;
}
if (state->Vpp != state->Vcc && state->Vpp != 0) {
printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
__func__, state->Vpp);
return -EPERM;
}
ret = sa1111_pcmcia_configure_socket(skt, state);
if (ret == 0) {
unsigned long flags;
local_irq_save(flags);
sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
local_irq_restore(flags);
}
return ret;
}
static struct pcmcia_low_level jornada720_pcmcia_ops = {
.owner = THIS_MODULE,
.configure_socket = jornada720_pcmcia_configure_socket,
.socket_init = sa1111_pcmcia_socket_init,
.first = 0,
.nr = 2,
};
int __devinit pcmcia_jornada720_init(struct device *dev)
{
int ret = -ENODEV;
if (machine_is_jornada720()) {
unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
GRER |= 0x00000002;
/* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
sa1111_set_io_dir(dev, pin, 0, 0);
sa1111_set_io(dev, pin, 0);
sa1111_set_sleep_io(dev, pin, 0);
sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops,
sa11xx_drv_pcmcia_add_one);
}
return ret;
}
| gpl-2.0 |
Altaf-Mahdi/android_kernel_msm | arch/arm/mach-omap1/mux.c | 4847 | 20876 | /*
* linux/arch/arm/mach-omap1/mux.c
*
* OMAP1 pin multiplexing configurations
*
* Copyright (C) 2003 - 2008 Nokia Corporation
*
* Written by Tony Lindgren
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <mach/hardware.h>
#include <plat/mux.h>
#ifdef CONFIG_OMAP_MUX
static struct omap_mux_cfg arch_mux_cfg;
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
static struct pin_config __initdata_or_module omap7xx_pins[] = {
MUX_CFG_7XX("E2_7XX_KBR0", 12, 21, 0, 20, 1, 0)
MUX_CFG_7XX("J7_7XX_KBR1", 12, 25, 0, 24, 1, 0)
MUX_CFG_7XX("E1_7XX_KBR2", 12, 29, 0, 28, 1, 0)
MUX_CFG_7XX("F3_7XX_KBR3", 13, 1, 0, 0, 1, 0)
MUX_CFG_7XX("D2_7XX_KBR4", 13, 5, 0, 4, 1, 0)
MUX_CFG_7XX("C2_7XX_KBC0", 13, 9, 0, 8, 1, 0)
MUX_CFG_7XX("D3_7XX_KBC1", 13, 13, 0, 12, 1, 0)
MUX_CFG_7XX("E4_7XX_KBC2", 13, 17, 0, 16, 1, 0)
MUX_CFG_7XX("F4_7XX_KBC3", 13, 21, 0, 20, 1, 0)
MUX_CFG_7XX("E3_7XX_KBC4", 13, 25, 0, 24, 1, 0)
MUX_CFG_7XX("AA17_7XX_USB_DM", 2, 21, 0, 20, 0, 0)
MUX_CFG_7XX("W16_7XX_USB_PU_EN", 2, 25, 0, 24, 0, 0)
MUX_CFG_7XX("W17_7XX_USB_VBUSI", 2, 29, 6, 28, 1, 0)
MUX_CFG_7XX("W18_7XX_USB_DMCK_OUT",3, 3, 1, 2, 0, 0)
MUX_CFG_7XX("W19_7XX_USB_DCRST", 3, 7, 1, 6, 0, 0)
/* MMC Pins */
MUX_CFG_7XX("MMC_7XX_CMD", 2, 9, 0, 8, 1, 0)
MUX_CFG_7XX("MMC_7XX_CLK", 2, 13, 0, 12, 1, 0)
MUX_CFG_7XX("MMC_7XX_DAT0", 2, 17, 0, 16, 1, 0)
/* I2C interface */
MUX_CFG_7XX("I2C_7XX_SCL", 5, 1, 0, 0, 1, 0)
MUX_CFG_7XX("I2C_7XX_SDA", 5, 5, 0, 0, 1, 0)
/* SPI pins */
MUX_CFG_7XX("SPI_7XX_1", 6, 5, 4, 4, 1, 0)
MUX_CFG_7XX("SPI_7XX_2", 6, 9, 4, 8, 1, 0)
MUX_CFG_7XX("SPI_7XX_3", 6, 13, 4, 12, 1, 0)
MUX_CFG_7XX("SPI_7XX_4", 6, 17, 4, 16, 1, 0)
MUX_CFG_7XX("SPI_7XX_5", 8, 25, 0, 24, 0, 0)
MUX_CFG_7XX("SPI_7XX_6", 9, 5, 0, 4, 0, 0)
/* UART pins */
MUX_CFG_7XX("UART_7XX_1", 3, 21, 0, 20, 0, 0)
MUX_CFG_7XX("UART_7XX_2", 8, 1, 6, 0, 0, 0)
};
#define OMAP7XX_PINS_SZ ARRAY_SIZE(omap7xx_pins)
#else
#define omap7xx_pins NULL
#define OMAP7XX_PINS_SZ 0
#endif /* CONFIG_ARCH_OMAP730 || CONFIG_ARCH_OMAP850 */
#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
static struct pin_config __initdata_or_module omap1xxx_pins[] = {
/*
* description           mux  mode   mux  pull pull  pull  pu_pd  pu  dbg
*                       reg  offset mode reg  bit   ena   reg
*/
MUX_CFG("UART1_TX", 9, 21, 1, 2, 3, 0, NA, 0, 0)
MUX_CFG("UART1_RTS", 9, 12, 1, 2, 0, 0, NA, 0, 0)
/* UART2 (COM_UART_GATING), conflicts with USB2 */
MUX_CFG("UART2_TX", C, 27, 1, 3, 3, 0, NA, 0, 0)
MUX_CFG("UART2_RX", C, 18, 0, 3, 1, 1, NA, 0, 0)
MUX_CFG("UART2_CTS", C, 21, 0, 3, 1, 1, NA, 0, 0)
MUX_CFG("UART2_RTS", C, 24, 1, 3, 2, 0, NA, 0, 0)
/* UART3 (GIGA_UART_GATING) */
MUX_CFG("UART3_TX", 6, 0, 1, 0, 30, 0, NA, 0, 0)
MUX_CFG("UART3_RX", 6, 3, 0, 0, 31, 1, NA, 0, 0)
MUX_CFG("UART3_CTS", 5, 12, 2, 0, 24, 0, NA, 0, 0)
MUX_CFG("UART3_RTS", 5, 15, 2, 0, 25, 0, NA, 0, 0)
MUX_CFG("UART3_CLKREQ", 9, 27, 0, 2, 5, 0, NA, 0, 0)
MUX_CFG("UART3_BCLK", A, 0, 0, 2, 6, 0, NA, 0, 0)
MUX_CFG("Y15_1610_UART3_RTS", A, 0, 1, 2, 6, 0, NA, 0, 0)
/* PWT & PWL, conflicts with UART3 */
MUX_CFG("PWT", 6, 0, 2, 0, 30, 0, NA, 0, 0)
MUX_CFG("PWL", 6, 3, 1, 0, 31, 1, NA, 0, 0)
/* USB internal master generic */
MUX_CFG("R18_USB_VBUS", 7, 9, 2, 1, 11, 0, NA, 0, 1)
MUX_CFG("R18_1510_USB_GPIO0", 7, 9, 0, 1, 11, 1, NA, 0, 1)
/* works around erratum: W4_USB_PUEN and W4_USB_PUDIS are switched! */
MUX_CFG("W4_USB_PUEN", D, 3, 3, 3, 5, 1, NA, 0, 1)
MUX_CFG("W4_USB_CLKO", D, 3, 1, 3, 5, 0, NA, 0, 1)
MUX_CFG("W4_USB_HIGHZ", D, 3, 4, 3, 5, 0, 3, 0, 1)
MUX_CFG("W4_GPIO58", D, 3, 7, 3, 5, 0, 3, 0, 1)
/* USB1 master */
MUX_CFG("USB1_SUSP", 8, 27, 2, 1, 27, 0, NA, 0, 1)
MUX_CFG("USB1_SE0", 9, 0, 2, 1, 28, 0, NA, 0, 1)
MUX_CFG("W13_1610_USB1_SE0", 9, 0, 4, 1, 28, 0, NA, 0, 1)
MUX_CFG("USB1_TXEN", 9, 3, 2, 1, 29, 0, NA, 0, 1)
MUX_CFG("USB1_TXD", 9, 24, 1, 2, 4, 0, NA, 0, 1)
MUX_CFG("USB1_VP", A, 3, 1, 2, 7, 0, NA, 0, 1)
MUX_CFG("USB1_VM", A, 6, 1, 2, 8, 0, NA, 0, 1)
MUX_CFG("USB1_RCV", A, 9, 1, 2, 9, 0, NA, 0, 1)
MUX_CFG("USB1_SPEED", A, 12, 2, 2, 10, 0, NA, 0, 1)
MUX_CFG("R13_1610_USB1_SPEED", A, 12, 5, 2, 10, 0, NA, 0, 1)
MUX_CFG("R13_1710_USB1_SEO", A, 12, 5, 2, 10, 0, NA, 0, 1)
/* USB2 master */
MUX_CFG("USB2_SUSP", B, 3, 1, 2, 17, 0, NA, 0, 1)
MUX_CFG("USB2_VP", B, 6, 1, 2, 18, 0, NA, 0, 1)
MUX_CFG("USB2_TXEN", B, 9, 1, 2, 19, 0, NA, 0, 1)
MUX_CFG("USB2_VM", C, 18, 1, 3, 0, 0, NA, 0, 1)
MUX_CFG("USB2_RCV", C, 21, 1, 3, 1, 0, NA, 0, 1)
MUX_CFG("USB2_SE0", C, 24, 2, 3, 2, 0, NA, 0, 1)
MUX_CFG("USB2_TXD", C, 27, 2, 3, 3, 0, NA, 0, 1)
/* OMAP-1510 GPIO */
MUX_CFG("R18_1510_GPIO0", 7, 9, 0, 1, 11, 1, 0, 0, 1)
MUX_CFG("R19_1510_GPIO1", 7, 6, 0, 1, 10, 1, 0, 0, 1)
MUX_CFG("M14_1510_GPIO2", 7, 3, 0, 1, 9, 1, 0, 0, 1)
/* OMAP1610 GPIO */
MUX_CFG("P18_1610_GPIO3", 7, 0, 0, 1, 8, 0, NA, 0, 1)
MUX_CFG("Y15_1610_GPIO17", A, 0, 7, 2, 6, 0, NA, 0, 1)
/* OMAP-1710 GPIO */
MUX_CFG("R18_1710_GPIO0", 7, 9, 0, 1, 11, 1, 1, 1, 1)
MUX_CFG("V2_1710_GPIO10", F, 27, 1, 4, 3, 1, 4, 1, 1)
MUX_CFG("N21_1710_GPIO14", 6, 9, 0, 1, 1, 1, 1, 1, 1)
MUX_CFG("W15_1710_GPIO40", 9, 27, 7, 2, 5, 1, 2, 1, 1)
/* MPUIO */
MUX_CFG("MPUIO2", 7, 18, 0, 1, 14, 1, NA, 0, 1)
MUX_CFG("N15_1610_MPUIO2", 7, 18, 0, 1, 14, 1, 1, 0, 1)
MUX_CFG("MPUIO4", 7, 15, 0, 1, 13, 1, NA, 0, 1)
MUX_CFG("MPUIO5", 7, 12, 0, 1, 12, 1, NA, 0, 1)
MUX_CFG("T20_1610_MPUIO5", 7, 12, 0, 1, 12, 0, 3, 0, 1)
MUX_CFG("W11_1610_MPUIO6", 10, 15, 2, 3, 8, 0, 3, 0, 1)
MUX_CFG("V10_1610_MPUIO7", A, 24, 2, 2, 14, 0, 2, 0, 1)
MUX_CFG("W11_1610_MPUIO9", 10, 15, 1, 3, 8, 0, 3, 0, 1)
MUX_CFG("V10_1610_MPUIO10", A, 24, 1, 2, 14, 0, 2, 0, 1)
MUX_CFG("W10_1610_MPUIO11", A, 18, 2, 2, 11, 0, 2, 0, 1)
MUX_CFG("E20_1610_MPUIO13", 3, 21, 1, 0, 7, 0, 0, 0, 1)
MUX_CFG("U20_1610_MPUIO14", 9, 6, 6, 0, 30, 0, 0, 0, 1)
MUX_CFG("E19_1610_MPUIO15", 3, 18, 1, 0, 6, 0, 0, 0, 1)
/* MCBSP2 */
MUX_CFG("MCBSP2_CLKR", C, 6, 0, 2, 27, 1, NA, 0, 1)
MUX_CFG("MCBSP2_CLKX", C, 9, 0, 2, 29, 1, NA, 0, 1)
MUX_CFG("MCBSP2_DR", C, 0, 0, 2, 26, 1, NA, 0, 1)
MUX_CFG("MCBSP2_DX", C, 15, 0, 2, 31, 1, NA, 0, 1)
MUX_CFG("MCBSP2_FSR", C, 12, 0, 2, 30, 1, NA, 0, 1)
MUX_CFG("MCBSP2_FSX", C, 3, 0, 2, 27, 1, NA, 0, 1)
/* MCBSP3 NOTE: Mode must be 1 for clock */
MUX_CFG("MCBSP3_CLKX", 9, 3, 1, 1, 29, 0, NA, 0, 1)
/* Misc ballouts */
MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1)
MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0)
/* OMAP-1610 MMC2 */
MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1)
MUX_CFG("V8_1610_MMC2_DAT1", B, 27, 6, 2, 25, 1, 2, 1, 1)
MUX_CFG("W15_1610_MMC2_DAT2", 9, 12, 6, 2, 5, 1, 2, 1, 1)
MUX_CFG("R10_1610_MMC2_DAT3", B, 18, 6, 2, 22, 1, 2, 1, 1)
MUX_CFG("Y10_1610_MMC2_CLK", B, 3, 6, 2, 17, 0, 2, 0, 1)
MUX_CFG("Y8_1610_MMC2_CMD", B, 24, 6, 2, 24, 1, 2, 1, 1)
MUX_CFG("V9_1610_MMC2_CMDDIR", B, 12, 6, 2, 20, 0, 2, 1, 1)
MUX_CFG("V5_1610_MMC2_DATDIR0", B, 15, 6, 2, 21, 0, 2, 1, 1)
MUX_CFG("W19_1610_MMC2_DATDIR1", 8, 15, 6, 1, 23, 0, 1, 1, 1)
MUX_CFG("R18_1610_MMC2_CLKIN", 7, 9, 6, 1, 11, 0, 1, 11, 1)
/* OMAP-1610 External Trace Interface */
MUX_CFG("M19_1610_ETM_PSTAT0", 5, 27, 1, 0, 29, 0, 0, 0, 1)
MUX_CFG("L15_1610_ETM_PSTAT1", 5, 24, 1, 0, 28, 0, 0, 0, 1)
MUX_CFG("L18_1610_ETM_PSTAT2", 5, 21, 1, 0, 27, 0, 0, 0, 1)
MUX_CFG("L19_1610_ETM_D0", 5, 18, 1, 0, 26, 0, 0, 0, 1)
MUX_CFG("J19_1610_ETM_D6", 5, 0, 1, 0, 20, 0, 0, 0, 1)
MUX_CFG("J18_1610_ETM_D7", 5, 27, 1, 0, 19, 0, 0, 0, 1)
/* OMAP16XX GPIO */
MUX_CFG("P20_1610_GPIO4", 6, 27, 0, 1, 7, 0, 1, 1, 1)
MUX_CFG("V9_1610_GPIO7", B, 12, 1, 2, 20, 0, 2, 1, 1)
MUX_CFG("W8_1610_GPIO9", B, 21, 0, 2, 23, 0, 2, 1, 1)
MUX_CFG("N20_1610_GPIO11", 6, 18, 0, 1, 4, 0, 1, 1, 1)
MUX_CFG("N19_1610_GPIO13", 6, 12, 0, 1, 2, 0, 1, 1, 1)
MUX_CFG("P10_1610_GPIO22", C, 0, 7, 2, 26, 0, 2, 1, 1)
MUX_CFG("V5_1610_GPIO24", B, 15, 7, 2, 21, 0, 2, 1, 1)
MUX_CFG("AA20_1610_GPIO_41", 9, 9, 7, 1, 31, 0, 1, 1, 1)
MUX_CFG("W19_1610_GPIO48", 8, 15, 7, 1, 23, 1, 1, 0, 1)
MUX_CFG("M7_1610_GPIO62", 10, 0, 0, 4, 24, 0, 4, 0, 1)
MUX_CFG("V14_16XX_GPIO37", 9, 18, 7, 2, 2, 0, 2, 2, 0)
MUX_CFG("R9_16XX_GPIO18", C, 18, 7, 3, 0, 0, 3, 0, 0)
MUX_CFG("L14_16XX_GPIO49", 6, 3, 7, 0, 31, 0, 0, 31, 0)
/* OMAP-1610 uWire */
MUX_CFG("V19_1610_UWIRE_SCLK", 8, 6, 0, 1, 20, 0, 1, 1, 1)
MUX_CFG("U18_1610_UWIRE_SDI", 8, 0, 0, 1, 18, 0, 1, 1, 1)
MUX_CFG("W21_1610_UWIRE_SDO", 8, 3, 0, 1, 19, 0, 1, 1, 1)
MUX_CFG("N14_1610_UWIRE_CS0", 8, 9, 1, 1, 21, 0, 1, 1, 1)
MUX_CFG("P15_1610_UWIRE_CS3", 8, 12, 1, 1, 22, 0, 1, 1, 1)
MUX_CFG("N15_1610_UWIRE_CS1", 7, 18, 2, 1, 14, 0, NA, 0, 1)
/* OMAP-1610 SPI */
MUX_CFG("U19_1610_SPIF_SCK", 7, 21, 6, 1, 15, 0, 1, 1, 1)
MUX_CFG("U18_1610_SPIF_DIN", 8, 0, 6, 1, 18, 1, 1, 0, 1)
MUX_CFG("P20_1610_SPIF_DIN", 6, 27, 4, 1, 7, 1, 1, 0, 1)
MUX_CFG("W21_1610_SPIF_DOUT", 8, 3, 6, 1, 19, 0, 1, 0, 1)
MUX_CFG("R18_1610_SPIF_DOUT", 7, 9, 3, 1, 11, 0, 1, 0, 1)
MUX_CFG("N14_1610_SPIF_CS0", 8, 9, 6, 1, 21, 0, 1, 1, 1)
MUX_CFG("N15_1610_SPIF_CS1", 7, 18, 6, 1, 14, 0, 1, 1, 1)
MUX_CFG("T19_1610_SPIF_CS2", 7, 15, 4, 1, 13, 0, 1, 1, 1)
MUX_CFG("P15_1610_SPIF_CS3", 8, 12, 3, 1, 22, 0, 1, 1, 1)
/* OMAP-1610 Flash */
MUX_CFG("L3_1610_FLASH_CS2B_OE",10, 6, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("M8_1610_FLASH_CS2B_WE",10, 3, 1, NA, 0, 0, NA, 0, 1)
/* First MMC interface, same on 1510, 1610 and 1710 */
MUX_CFG("MMC_CMD", A, 27, 0, 2, 15, 1, 2, 1, 1)
MUX_CFG("MMC_DAT1", A, 24, 0, 2, 14, 1, 2, 1, 1)
MUX_CFG("MMC_DAT2", A, 18, 0, 2, 12, 1, 2, 1, 1)
MUX_CFG("MMC_DAT0", B, 0, 0, 2, 16, 1, 2, 1, 1)
MUX_CFG("MMC_CLK", A, 21, 0, NA, 0, 0, NA, 0, 1)
MUX_CFG("MMC_DAT3", 10, 15, 0, 3, 8, 1, 3, 1, 1)
MUX_CFG("M15_1710_MMC_CLKI", 6, 21, 2, 0, 0, 0, NA, 0, 1)
MUX_CFG("P19_1710_MMC_CMDDIR", 6, 24, 6, 0, 0, 0, NA, 0, 1)
MUX_CFG("P20_1710_MMC_DATDIR0", 6, 27, 5, 0, 0, 0, NA, 0, 1)
/* OMAP-1610 USB0 alternate configuration */
MUX_CFG("W9_USB0_TXEN", B, 9, 5, 2, 19, 0, 2, 0, 1)
MUX_CFG("AA9_USB0_VP", B, 6, 5, 2, 18, 0, 2, 0, 1)
MUX_CFG("Y5_USB0_RCV", C, 21, 5, 3, 1, 0, 1, 0, 1)
MUX_CFG("R9_USB0_VM", C, 18, 5, 3, 0, 0, 3, 0, 1)
MUX_CFG("V6_USB0_TXD", C, 27, 5, 3, 3, 0, 3, 0, 1)
MUX_CFG("W5_USB0_SE0", C, 24, 5, 3, 2, 0, 3, 0, 1)
MUX_CFG("V9_USB0_SPEED", B, 12, 5, 2, 20, 0, 2, 0, 1)
MUX_CFG("Y10_USB0_SUSP", B, 3, 5, 2, 17, 0, 2, 0, 1)
/* USB2 interface */
MUX_CFG("W9_USB2_TXEN", B, 9, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("AA9_USB2_VP", B, 6, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("Y5_USB2_RCV", C, 21, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("R9_USB2_VM", C, 18, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("V6_USB2_TXD", C, 27, 2, NA, 0, 0, NA, 0, 1)
MUX_CFG("W5_USB2_SE0", C, 24, 2, NA, 0, 0, NA, 0, 1)
/* 16XX UART */
MUX_CFG("R13_1610_UART1_TX", A, 12, 6, 2, 10, 0, 2, 10, 1)
MUX_CFG("V14_16XX_UART1_RX", 9, 18, 0, 2, 2, 0, 2, 2, 1)
MUX_CFG("R14_1610_UART1_CTS", 9, 15, 0, 2, 1, 0, 2, 1, 1)
MUX_CFG("AA15_1610_UART1_RTS", 9, 12, 1, 2, 0, 0, 2, 0, 1)
MUX_CFG("R9_16XX_UART2_RX", C, 18, 0, 3, 0, 0, 3, 0, 1)
MUX_CFG("L14_16XX_UART3_RX", 6, 3, 0, 0, 31, 0, 0, 31, 1)
/* I2C interface */
MUX_CFG("I2C_SCL", 7, 24, 0, NA, 0, 0, NA, 0, 0)
MUX_CFG("I2C_SDA", 7, 27, 0, NA, 0, 0, NA, 0, 0)
/* Keypad */
MUX_CFG("F18_1610_KBC0", 3, 15, 0, 0, 5, 1, 0, 0, 0)
MUX_CFG("D20_1610_KBC1", 3, 12, 0, 0, 4, 1, 0, 0, 0)
MUX_CFG("D19_1610_KBC2", 3, 9, 0, 0, 3, 1, 0, 0, 0)
MUX_CFG("E18_1610_KBC3", 3, 6, 0, 0, 2, 1, 0, 0, 0)
MUX_CFG("C21_1610_KBC4", 3, 3, 0, 0, 1, 1, 0, 0, 0)
MUX_CFG("G18_1610_KBR0", 4, 0, 0, 0, 10, 1, 0, 1, 0)
MUX_CFG("F19_1610_KBR1", 3, 27, 0, 0, 9, 1, 0, 1, 0)
MUX_CFG("H14_1610_KBR2", 3, 24, 0, 0, 8, 1, 0, 1, 0)
MUX_CFG("E20_1610_KBR3", 3, 21, 0, 0, 7, 1, 0, 1, 0)
MUX_CFG("E19_1610_KBR4", 3, 18, 0, 0, 6, 1, 0, 1, 0)
MUX_CFG("N19_1610_KBR5", 6, 12, 1, 1, 2, 1, 1, 1, 0)
/* Power management */
MUX_CFG("T20_1610_LOW_PWR", 7, 12, 1, NA, 0, 0, NA, 0, 0)
/* MCLK Settings */
MUX_CFG("V5_1710_MCLK_ON", B, 15, 0, NA, 0, 0, NA, 0, 0)
MUX_CFG("V5_1710_MCLK_OFF", B, 15, 6, NA, 0, 0, NA, 0, 0)
MUX_CFG("R10_1610_MCLK_ON", B, 18, 0, NA, 22, 0, NA, 1, 0)
MUX_CFG("R10_1610_MCLK_OFF", B, 18, 6, 2, 22, 1, 2, 1, 1)
/* CompactFlash controller, conflicts with MMC1 */
MUX_CFG("P11_1610_CF_CD2", A, 27, 3, 2, 15, 1, 2, 1, 1)
MUX_CFG("R11_1610_CF_IOIS16", B, 0, 3, 2, 16, 1, 2, 1, 1)
MUX_CFG("V10_1610_CF_IREQ", A, 24, 3, 2, 14, 0, 2, 0, 1)
MUX_CFG("W10_1610_CF_RESET", A, 18, 3, 2, 12, 1, 2, 1, 1)
MUX_CFG("W11_1610_CF_CD1", 10, 15, 3, 3, 8, 1, 3, 1, 1)
/* parallel camera */
MUX_CFG("J15_1610_CAM_LCLK", 4, 24, 0, 0, 18, 1, 0, 0, 0)
MUX_CFG("J18_1610_CAM_D7", 4, 27, 0, 0, 19, 1, 0, 0, 0)
MUX_CFG("J19_1610_CAM_D6", 5, 0, 0, 0, 20, 1, 0, 0, 0)
MUX_CFG("J14_1610_CAM_D5", 5, 3, 0, 0, 21, 1, 0, 0, 0)
MUX_CFG("K18_1610_CAM_D4", 5, 6, 0, 0, 22, 1, 0, 0, 0)
MUX_CFG("K19_1610_CAM_D3", 5, 9, 0, 0, 23, 1, 0, 0, 0)
MUX_CFG("K15_1610_CAM_D2", 5, 12, 0, 0, 24, 1, 0, 0, 0)
MUX_CFG("K14_1610_CAM_D1", 5, 15, 0, 0, 25, 1, 0, 0, 0)
MUX_CFG("L19_1610_CAM_D0", 5, 18, 0, 0, 26, 1, 0, 0, 0)
MUX_CFG("L18_1610_CAM_VS", 5, 21, 0, 0, 27, 1, 0, 0, 0)
MUX_CFG("L15_1610_CAM_HS", 5, 24, 0, 0, 28, 1, 0, 0, 0)
MUX_CFG("M19_1610_CAM_RSTZ", 5, 27, 0, 0, 29, 0, 0, 0, 0)
MUX_CFG("Y15_1610_CAM_OUTCLK", A, 0, 6, 2, 6, 0, 2, 0, 0)
/* serial camera */
MUX_CFG("H19_1610_CAM_EXCLK", 4, 21, 0, 0, 17, 0, 0, 0, 0)
/* REVISIT: the 5912 spec says CCP_* can't pull up or pull down ... ? */
MUX_CFG("Y12_1610_CCP_CLKP", 8, 18, 6, 1, 24, 1, 1, 0, 0)
MUX_CFG("W13_1610_CCP_CLKM", 9, 0, 6, 1, 28, 1, 1, 0, 0)
MUX_CFG("W14_1610_CCP_DATAP", 9, 24, 6, 2, 4, 1, 2, 0, 0)
MUX_CFG("Y14_1610_CCP_DATAM", 9, 21, 6, 2, 3, 1, 2, 0, 0)
};
#define OMAP1XXX_PINS_SZ ARRAY_SIZE(omap1xxx_pins)
#else
#define omap1xxx_pins NULL
#define OMAP1XXX_PINS_SZ 0
#endif /* CONFIG_ARCH_OMAP15XX || CONFIG_ARCH_OMAP16XX */
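/*
 * Editorial sketch: board files consume these tables through the generic
 * mux API declared in <plat/mux.h>, looked up by the enum matching the
 * name strings above, e.g.:
 *
 *	omap_cfg_reg(UART1_TX);
 *	omap_cfg_reg(W4_USB_PUEN);
 */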
static int __init_or_module omap1_cfg_reg(const struct pin_config *cfg)
{
static DEFINE_SPINLOCK(mux_spin_lock);
unsigned long flags;
unsigned int reg_orig = 0, reg = 0, pu_pd_orig = 0, pu_pd = 0,
pull_orig = 0, pull = 0;
unsigned int mask, warn = 0;
/* Check the mux register in question */
if (cfg->mux_reg) {
unsigned tmp1, tmp2;
spin_lock_irqsave(&mux_spin_lock, flags);
reg_orig = omap_readl(cfg->mux_reg);
/* The mux registers always seem to be 3 bits long */
mask = (0x7 << cfg->mask_offset);
tmp1 = reg_orig & mask;
reg = reg_orig & ~mask;
tmp2 = (cfg->mask << cfg->mask_offset);
reg |= tmp2;
if (tmp1 != tmp2)
warn = 1;
omap_writel(reg, cfg->mux_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
/* Check for pull up or pull down selection on 1610 */
if (!cpu_is_omap15xx()) {
if (cfg->pu_pd_reg && cfg->pull_val) {
spin_lock_irqsave(&mux_spin_lock, flags);
pu_pd_orig = omap_readl(cfg->pu_pd_reg);
mask = 1 << cfg->pull_bit;
if (cfg->pu_pd_val) {
if (!(pu_pd_orig & mask))
warn = 1;
/* Use pull up */
pu_pd = pu_pd_orig | mask;
} else {
if (pu_pd_orig & mask)
warn = 1;
/* Use pull down */
pu_pd = pu_pd_orig & ~mask;
}
omap_writel(pu_pd, cfg->pu_pd_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
}
/* Check for an associated pull down register */
if (cfg->pull_reg) {
spin_lock_irqsave(&mux_spin_lock, flags);
pull_orig = omap_readl(cfg->pull_reg);
mask = 1 << cfg->pull_bit;
if (cfg->pull_val) {
if (pull_orig & mask)
warn = 1;
/* Low bit = pull enabled */
pull = pull_orig & ~mask;
} else {
if (!(pull_orig & mask))
warn = 1;
/* High bit = pull disabled */
pull = pull_orig | mask;
}
omap_writel(pull, cfg->pull_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
if (warn) {
#ifdef CONFIG_OMAP_MUX_WARNINGS
printk(KERN_WARNING "MUX: initialized %s\n", cfg->name);
#endif
}
#ifdef CONFIG_OMAP_MUX_DEBUG
if (cfg->debug || warn) {
printk("MUX: Setting register %s\n", cfg->name);
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->mux_reg_name, cfg->mux_reg, reg_orig, reg);
if (!cpu_is_omap15xx()) {
if (cfg->pu_pd_reg && cfg->pull_val) {
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->pu_pd_name, cfg->pu_pd_reg,
pu_pd_orig, pu_pd);
}
}
if (cfg->pull_reg)
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->pull_name, cfg->pull_reg, pull_orig, pull);
}
#endif
#ifdef CONFIG_OMAP_MUX_WARNINGS
return warn ? -ETXTBSY : 0;
#else
return 0;
#endif
}
int __init omap1_mux_init(void)
{
if (cpu_is_omap7xx()) {
arch_mux_cfg.pins = omap7xx_pins;
arch_mux_cfg.size = OMAP7XX_PINS_SZ;
arch_mux_cfg.cfg_reg = omap1_cfg_reg;
}
if (cpu_is_omap15xx() || cpu_is_omap16xx()) {
arch_mux_cfg.pins = omap1xxx_pins;
arch_mux_cfg.size = OMAP1XXX_PINS_SZ;
arch_mux_cfg.cfg_reg = omap1_cfg_reg;
}
return omap_mux_register(&arch_mux_cfg);
}
#endif
| gpl-2.0 |