| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
EZchip/linux | net/ipv6/sit.c | 43 | 45304 | /*
* IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
* Roger Venning <r.venning@telstra.com>: 6to4 support
* Nate Thompson <nate@thebog.net>: 6to4 support
* Fred Templin <fred.l.templin@boeing.com>: isatap support
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
This version of net/ipv6/sit.c is a clone of net/ipv4/ip_gre.c.
For comments, look at net/ipv4/ip_gre.c --ANK
*/
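/* HASH() folds a 32-bit tunnel endpoint address into one of HASH_SIZE (16)
* buckets by XORing its two low-order nibbles together; cheap, and adequate
* for the handful of tunnels a host typically configures.
*/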
#define HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static int ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);
static void ipip6_dev_free(struct net_device *dev);
static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
__be32 *v4dst);
static struct rtnl_link_ops sit_link_ops __read_mostly;
static int sit_net_id __read_mostly;
struct sit_net {
struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
struct ip_tunnel __rcu *tunnels_wc[1];
struct ip_tunnel __rcu **tunnels[4];
struct net_device *fb_tunnel_dev;
};
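/* The four hash tables are selected by a 2-bit "prio" built from which
* endpoints a tunnel has configured: bit 1 = remote set, bit 0 = local set.
* sit_init_net() wires tunnels[0..3] to wc, l, r and r_l in that order, so
* __ipip6_bucket() can pick the right table with tunnels[prio][h].
*/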
/*
* Must be invoked with rcu_read_lock
*/
static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
struct net_device *dev, __be32 remote, __be32 local)
{
unsigned int h0 = HASH(remote);
unsigned int h1 = HASH(local);
struct ip_tunnel *t;
struct sit_net *sitn = net_generic(net, sit_net_id);
for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
(!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
(t->dev->flags & IFF_UP))
return t;
}
for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
if (remote == t->parms.iph.daddr &&
(!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
(t->dev->flags & IFF_UP))
return t;
}
for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
if (local == t->parms.iph.saddr &&
(!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
(t->dev->flags & IFF_UP))
return t;
}
t = rcu_dereference(sitn->tunnels_wc[0]);
if (t && (t->dev->flags & IFF_UP))
return t;
return NULL;
}
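/* Note the lookup order above: most specific match first (remote + local),
* then remote-only, then local-only, and finally the single wildcard slot
* used by the fallback device.
*/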
static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
unsigned int h = 0;
int prio = 0;
if (remote) {
prio |= 2;
h ^= HASH(remote);
}
if (local) {
prio |= 1;
h ^= HASH(local);
}
return &sitn->tunnels[prio][h];
}
static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel *t)
{
return __ipip6_bucket(sitn, &t->parms);
}
static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp;
struct ip_tunnel *iter;
for (tp = ipip6_bucket(sitn, t);
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
rcu_assign_pointer(*tp, t->next);
break;
}
}
}
static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
}
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
{
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel *t = netdev_priv(dev);
if (t->dev == sitn->fb_tunnel_dev) {
ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
t->ip6rd.relay_prefix = 0;
t->ip6rd.prefixlen = 16;
t->ip6rd.relay_prefixlen = 0;
} else {
struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
}
#endif
}
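/* The fallback device defaults to the 6to4 prefix 2002::/16 (RFC 3056),
* i.e. plain 6to4 behaviour; every other tunnel starts with a copy of the
* fallback's 6rd parameters until netlink or SIOCCHG6RD changes them.
*/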
static int ipip6_tunnel_create(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
int err;
memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
if ((__force u16)t->parms.i_flags & SIT_ISATAP)
dev->priv_flags |= IFF_ISATAP;
dev->rtnl_link_ops = &sit_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
ipip6_tunnel_clone_6rd(dev, sitn);
dev_hold(dev);
ipip6_tunnel_link(sitn, t);
return 0;
out:
return err;
}
static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
struct ip_tunnel *t, *nt;
struct ip_tunnel __rcu **tp;
struct net_device *dev;
char name[IFNAMSIZ];
struct sit_net *sitn = net_generic(net, sit_net_id);
for (tp = __ipip6_bucket(sitn, parms);
(t = rtnl_dereference(*tp)) != NULL;
tp = &t->next) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
parms->link == t->parms.link) {
if (create)
return NULL;
else
return t;
}
}
if (!create)
goto failed;
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
strcpy(name, "sit%d");
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
if (!dev)
return NULL;
dev_net_set(dev, net);
nt = netdev_priv(dev);
nt->parms = *parms;
if (ipip6_tunnel_create(dev) < 0)
goto failed_free;
return nt;
failed_free:
ipip6_dev_free(dev);
failed:
return NULL;
}
#define for_each_prl_rcu(start) \
for (prl = rcu_dereference(start); \
prl; \
prl = rcu_dereference(prl->next))
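/* for_each_prl_rcu() walks the ISATAP potential router list (PRL); callers
* must hold rcu_read_lock(), as isatap_chksrc() and ipip6_tunnel_get_prl()
* do.
*/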
static struct ip_tunnel_prl_entry *
__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
{
struct ip_tunnel_prl_entry *prl;
for_each_prl_rcu(t->prl)
if (prl->addr == addr)
break;
return prl;
}
static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
struct ip_tunnel_prl __user *a)
{
struct ip_tunnel_prl kprl, *kp;
struct ip_tunnel_prl_entry *prl;
unsigned int cmax, c = 0, ca, len;
int ret = 0;
if (copy_from_user(&kprl, a, sizeof(kprl)))
return -EFAULT;
cmax = kprl.datalen / sizeof(kprl);
if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
cmax = 1;
/* For simple GET or for root users,
* we try harder to allocate.
*/
kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
NULL;
rcu_read_lock();
ca = t->prl_count < cmax ? t->prl_count : cmax;
if (!kp) {
/* We don't try hard to allocate much memory for
* non-root users.
* For root users, retry allocating enough memory for
* the answer.
*/
kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
if (!kp) {
ret = -ENOMEM;
goto out;
}
}
c = 0;
for_each_prl_rcu(t->prl) {
if (c >= cmax)
break;
if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
continue;
kp[c].addr = prl->addr;
kp[c].flags = prl->flags;
c++;
if (kprl.addr != htonl(INADDR_ANY))
break;
}
rcu_read_unlock();
len = sizeof(*kp) * c;
ret = 0;
if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
ret = -EFAULT;
kfree(kp);
return ret;
out:
/* don't let the copy-out path clobber the -ENOMEM set above */
rcu_read_unlock();
kfree(kp);
return ret;
}
static int
ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
{
struct ip_tunnel_prl_entry *p;
int err = 0;
if (a->addr == htonl(INADDR_ANY))
return -EINVAL;
ASSERT_RTNL();
for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
if (p->addr == a->addr) {
if (chg) {
p->flags = a->flags;
goto out;
}
err = -EEXIST;
goto out;
}
}
if (chg) {
err = -ENXIO;
goto out;
}
p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
if (!p) {
err = -ENOBUFS;
goto out;
}
p->next = t->prl;
p->addr = a->addr;
p->flags = a->flags;
t->prl_count++;
rcu_assign_pointer(t->prl, p);
out:
return err;
}
static void prl_list_destroy_rcu(struct rcu_head *head)
{
struct ip_tunnel_prl_entry *p, *n;
p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
do {
n = rcu_dereference_protected(p->next, 1);
kfree(p);
p = n;
} while (p);
}
static int
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
{
struct ip_tunnel_prl_entry *x;
struct ip_tunnel_prl_entry __rcu **p;
int err = 0;
ASSERT_RTNL();
if (a && a->addr != htonl(INADDR_ANY)) {
for (p = &t->prl;
(x = rtnl_dereference(*p)) != NULL;
p = &x->next) {
if (x->addr == a->addr) {
*p = x->next;
kfree_rcu(x, rcu_head);
t->prl_count--;
goto out;
}
}
err = -ENXIO;
} else {
x = rtnl_dereference(t->prl);
if (x) {
t->prl_count = 0;
call_rcu(&x->rcu_head, prl_list_destroy_rcu);
t->prl = NULL;
}
}
out:
return err;
}
static int
isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
{
struct ip_tunnel_prl_entry *p;
int ok = 1;
rcu_read_lock();
p = __ipip6_tunnel_locate_prl(t, iph->saddr);
if (p) {
if (p->flags & PRL_DEFAULT)
skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
else
skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
} else {
const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
if (ipv6_addr_is_isatap(addr6) &&
(addr6->s6_addr32[3] == iph->saddr) &&
ipv6_chk_prefix(addr6, t->dev))
skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
else
ok = 0;
}
rcu_read_unlock();
return ok;
}
static void ipip6_tunnel_uninit(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct sit_net *sitn = net_generic(tunnel->net, sit_net_id);
if (dev == sitn->fb_tunnel_dev) {
RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
} else {
ipip6_tunnel_unlink(sitn, tunnel);
ipip6_tunnel_del_prl(tunnel, NULL);
}
dst_cache_reset(&tunnel->dst_cache);
dev_put(dev);
}
static int ipip6_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
unsigned int data_len = 0;
struct ip_tunnel *t;
int err;
switch (type) {
default:
case ICMP_PARAMETERPROB:
return 0;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
/* Impossible event. */
return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
I believe they are just ether pollution. --ANK
*/
break;
}
break;
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
return 0;
data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
break;
case ICMP_REDIRECT:
break;
}
err = -ENOENT;
t = ipip6_tunnel_lookup(dev_net(skb->dev),
skb->dev,
iph->daddr,
iph->saddr);
if (!t)
goto out;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
t->parms.link, 0, iph->protocol, 0);
err = 0;
goto out;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
iph->protocol, 0);
err = 0;
goto out;
}
err = 0;
if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
goto out;
if (t->parms.iph.daddr == 0)
goto out;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
out:
return err;
}
static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
const struct in6_addr *v6addr)
{
__be32 v4embed = 0;
if (check_6rd(tunnel, v6addr, &v4embed) && v4addr != v4embed)
return true;
return false;
}
/* Checks if an address matches an address on the tunnel interface.
* Used to detect the NAT of proto 41 packets and let them pass spoofing test.
* Long story:
* This function is called after we considered the packet as spoofed
* in is_spoofed_6rd.
* We may have a router that is doing NAT for proto 41 packets
* for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb
* will be translated to n.n.n.n/PREFIX:bbbb:bbbb, and the is_spoofed_6rd()
* check will return true, dropping the packet.
* But we can still check whether it is spoofed against the IPv6
* addresses associated with the interface.
*/
static bool only_dnatted(const struct ip_tunnel *tunnel,
const struct in6_addr *v6dst)
{
int prefix_len;
#ifdef CONFIG_IPV6_SIT_6RD
prefix_len = tunnel->ip6rd.prefixlen + 32
- tunnel->ip6rd.relay_prefixlen;
#else
prefix_len = 48;
#endif
return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
}
/* Returns true if a packet is spoofed */
static bool packet_is_spoofed(struct sk_buff *skb,
const struct iphdr *iph,
struct ip_tunnel *tunnel)
{
const struct ipv6hdr *ipv6h;
if (tunnel->dev->priv_flags & IFF_ISATAP) {
if (!isatap_chksrc(skb, iph, tunnel))
return true;
return false;
}
if (tunnel->dev->flags & IFF_POINTOPOINT)
return false;
ipv6h = ipv6_hdr(skb);
if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
&iph->saddr, &ipv6h->saddr,
&iph->daddr, &ipv6h->daddr);
return true;
}
if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
return false;
if (only_dnatted(tunnel, &ipv6h->daddr))
return false;
net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
&iph->saddr, &ipv6h->saddr,
&iph->daddr, &ipv6h->daddr);
return true;
}
static int ipip6_rcv(struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct ip_tunnel *tunnel;
int err;
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel) {
struct pcpu_sw_netstats *tstats;
if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
tunnel->parms.iph.protocol != 0)
goto out;
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
IPCB(skb)->flags = 0;
skb->dev = tunnel->dev;
if (packet_is_spoofed(skb, iph, tunnel)) {
tunnel->dev->stats.rx_errors++;
goto out;
}
if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
!net_eq(tunnel->net, dev_net(tunnel->dev))))
goto out;
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&iph->saddr, iph->tos);
if (err > 1) {
++tunnel->dev->stats.rx_frame_errors;
++tunnel->dev->stats.rx_errors;
goto out;
}
}
tstats = this_cpu_ptr(tunnel->dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
return 0;
}
/* no tunnel matched, let upstream know, ipsec may handle it */
return 1;
out:
kfree_skb(skb);
return 0;
}
static const struct tnl_ptk_info ipip_tpi = {
/* no tunnel info required for ipip. */
.proto = htons(ETH_P_IP),
};
#if IS_ENABLED(CONFIG_MPLS)
static const struct tnl_ptk_info mplsip_tpi = {
/* no tunnel info required for mplsip. */
.proto = htons(ETH_P_MPLS_UC),
};
#endif
static int sit_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
const struct iphdr *iph;
struct ip_tunnel *tunnel;
iph = ip_hdr(skb);
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel) {
const struct tnl_ptk_info *tpi;
if (tunnel->parms.iph.protocol != ipproto &&
tunnel->parms.iph.protocol != 0)
goto drop;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
#if IS_ENABLED(CONFIG_MPLS)
if (ipproto == IPPROTO_MPLS)
tpi = &mplsip_tpi;
else
#endif
tpi = &ipip_tpi;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
}
return 1;
drop:
kfree_skb(skb);
return 0;
}
static int ipip_rcv(struct sk_buff *skb)
{
return sit_tunnel_rcv(skb, IPPROTO_IPIP);
}
#if IS_ENABLED(CONFIG_MPLS)
static int mplsip_rcv(struct sk_buff *skb)
{
return sit_tunnel_rcv(skb, IPPROTO_MPLS);
}
#endif
/*
* If the IPv6 address comes from 6rd / 6to4 (RFC 3056) addr space this function
* stores the embedded IPv4 address in v4dst and returns true.
*/
static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
__be32 *v4dst)
{
#ifdef CONFIG_IPV6_SIT_6RD
if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
tunnel->ip6rd.prefixlen)) {
unsigned int pbw0, pbi0;
int pbi1;
u32 d;
pbw0 = tunnel->ip6rd.prefixlen >> 5;
pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
tunnel->ip6rd.relay_prefixlen;
pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
if (pbi1 > 0)
d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
(32 - pbi1);
*v4dst = tunnel->ip6rd.relay_prefix | htonl(d);
return true;
}
#else
if (v6dst->s6_addr16[0] == htons(0x2002)) {
/* 6to4 v6 addr has a 16-bit prefix, a 32-bit v4 addr, a 16-bit SLA, ... */
memcpy(v4dst, &v6dst->s6_addr16[1], 4);
return true;
}
#endif
return false;
}
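/* Worked example for the 6to4 branch: a destination of 2002:0102:0304::1
* carries 0x01020304 in bits 16..47, so *v4dst becomes 1.2.3.4 and the
* packet can be tunnelled to that IPv4 host directly.
*/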
static inline __be32 try_6rd(struct ip_tunnel *tunnel,
const struct in6_addr *v6dst)
{
__be32 dst = 0;
check_6rd(tunnel, v6dst, &dst);
return dst;
}
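/* ipip6_tunnel_xmit() below resolves the IPv4 destination in this order:
* the tunnel's fixed daddr if set, then an address embedded in the IPv6
* destination (ISATAP on ISATAP interfaces, otherwise 6rd/6to4 via
* try_6rd()), and finally an IPv4-compatible ::a.b.c.d next hop.
*/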
/*
* This function assumes it is being called from dev_queue_xmit()
* and that skb is filled properly by that function.
*/
static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tiph = &tunnel->parms.iph;
const struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
struct net_device *tdev; /* Device to other host */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst = tiph->daddr;
struct flowi4 fl4;
int mtu;
const struct in6_addr *addr6;
int addr_type;
u8 ttl;
u8 protocol = IPPROTO_IPV6;
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
if (tos == 1)
tos = ipv6_get_dsfield(iph6);
/* ISATAP (RFC4214) - must come before 6to4 */
if (dev->priv_flags & IFF_ISATAP) {
struct neighbour *neigh = NULL;
bool do_tx_error = false;
if (skb_dst(skb))
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (!neigh) {
net_dbg_ratelimited("nexthop == NULL\n");
goto tx_error;
}
addr6 = (const struct in6_addr *)&neigh->primary_key;
addr_type = ipv6_addr_type(addr6);
if ((addr_type & IPV6_ADDR_UNICAST) &&
ipv6_addr_is_isatap(addr6))
dst = addr6->s6_addr32[3];
else
do_tx_error = true;
neigh_release(neigh);
if (do_tx_error)
goto tx_error;
}
if (!dst)
dst = try_6rd(tunnel, &iph6->daddr);
if (!dst) {
struct neighbour *neigh = NULL;
bool do_tx_error = false;
if (skb_dst(skb))
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (!neigh) {
net_dbg_ratelimited("nexthop == NULL\n");
goto tx_error;
}
addr6 = (const struct in6_addr *)&neigh->primary_key;
addr_type = ipv6_addr_type(addr6);
if (addr_type == IPV6_ADDR_ANY) {
addr6 = &ipv6_hdr(skb)->daddr;
addr_type = ipv6_addr_type(addr6);
}
if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
dst = addr6->s6_addr32[3];
else
do_tx_error = true;
neigh_release(neigh);
if (do_tx_error)
goto tx_error;
}
rt = ip_route_output_ports(tunnel->net, &fl4, NULL,
dst, tiph->saddr,
0, 0,
IPPROTO_IPV6, RT_TOS(tos),
tunnel->parms.link);
if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
if (rt->rt_type != RTN_UNICAST) {
ip_rt_put(rt);
dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
tdev = rt->dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
dev->stats.collisions++;
goto tx_error;
}
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4)) {
ip_rt_put(rt);
goto tx_error;
}
if (df) {
mtu = dst_mtu(&rt->dst) - t_hlen;
if (mtu < 68) {
dev->stats.collisions++;
ip_rt_put(rt);
goto tx_error;
}
if (mtu < IPV6_MIN_MTU) {
mtu = IPV6_MIN_MTU;
df = 0;
}
if (tunnel->parms.iph.daddr && skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
}
}
if (tunnel->err_count > 0) {
if (time_before(jiffies,
tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
dst_link_failure(skb);
} else
tunnel->err_count = 0;
}
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
max_headroom = LL_RESERVED_SPACE(tdev) + t_hlen;
if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
dev_kfree_skb(skb);
skb = new_skb;
iph6 = ipv6_hdr(skb);
}
ttl = tiph->ttl;
if (ttl == 0)
ttl = iph6->hop_limit;
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) {
ip_rt_put(rt);
goto tx_error;
}
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
struct net_device *dev, u8 ipproto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tiph = &tunnel->parms.iph;
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
goto tx_error;
skb_set_inner_ipproto(skb, ipproto);
ip_tunnel_xmit(skb, dev, tiph, ipproto);
return NETDEV_TX_OK;
tx_error:
kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
break;
case htons(ETH_P_IPV6):
ipip6_tunnel_xmit(skb, dev);
break;
#if IS_ENABLED(CONFIG_MPLS)
case htons(ETH_P_MPLS_UC):
sit_tunnel_xmit__(skb, dev, IPPROTO_MPLS);
break;
#endif
default:
goto tx_err;
}
return NETDEV_TX_OK;
tx_err:
dev->stats.tx_errors++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
struct net_device *tdev = NULL;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
struct flowi4 fl4;
tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
if (iph->daddr) {
struct rtable *rt = ip_route_output_ports(tunnel->net, &fl4,
NULL,
iph->daddr, iph->saddr,
0, 0,
IPPROTO_IPV6,
RT_TOS(iph->tos),
tunnel->parms.link);
if (!IS_ERR(rt)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
}
dev->flags |= IFF_POINTOPOINT;
}
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
dev->mtu = tdev->mtu - t_hlen;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
}
static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
{
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
ipip6_tunnel_unlink(sitn, t);
synchronize_net();
t->parms.iph.saddr = p->iph.saddr;
t->parms.iph.daddr = p->iph.daddr;
memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
memcpy(t->dev->broadcast, &p->iph.daddr, 4);
ipip6_tunnel_link(sitn, t);
t->parms.iph.ttl = p->iph.ttl;
t->parms.iph.tos = p->iph.tos;
if (t->parms.link != p->link) {
t->parms.link = p->link;
ipip6_tunnel_bind_dev(t->dev);
}
dst_cache_reset(&t->dst_cache);
netdev_state_change(t->dev);
}
#ifdef CONFIG_IPV6_SIT_6RD
static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
struct ip_tunnel_6rd *ip6rd)
{
struct in6_addr prefix;
__be32 relay_prefix;
if (ip6rd->relay_prefixlen > 32 ||
ip6rd->prefixlen + (32 - ip6rd->relay_prefixlen) > 64)
return -EINVAL;
ipv6_addr_prefix(&prefix, &ip6rd->prefix, ip6rd->prefixlen);
if (!ipv6_addr_equal(&prefix, &ip6rd->prefix))
return -EINVAL;
if (ip6rd->relay_prefixlen)
relay_prefix = ip6rd->relay_prefix &
htonl(0xffffffffUL <<
(32 - ip6rd->relay_prefixlen));
else
relay_prefix = 0;
if (relay_prefix != ip6rd->relay_prefix)
return -EINVAL;
t->ip6rd.prefix = prefix;
t->ip6rd.relay_prefix = relay_prefix;
t->ip6rd.prefixlen = ip6rd->prefixlen;
t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
dst_cache_reset(&t->dst_cache);
netdev_state_change(t->dev);
return 0;
}
#endif
bool ipip6_valid_ip_proto(u8 ipproto)
{
return ipproto == IPPROTO_IPV6 ||
ipproto == IPPROTO_IPIP ||
#if IS_ENABLED(CONFIG_MPLS)
ipproto == IPPROTO_MPLS ||
#endif
ipproto == 0;
}
static int
ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip_tunnel_parm p;
struct ip_tunnel_prl prl;
struct ip_tunnel *t = netdev_priv(dev);
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd ip6rd;
#endif
switch (cmd) {
case SIOCGETTUNNEL:
#ifdef CONFIG_IPV6_SIT_6RD
case SIOCGET6RD:
#endif
if (dev == sitn->fb_tunnel_dev) {
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
err = -EFAULT;
break;
}
t = ipip6_tunnel_locate(net, &p, 0);
if (!t)
t = netdev_priv(dev);
}
err = -EFAULT;
if (cmd == SIOCGETTUNNEL) {
memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
sizeof(p)))
goto done;
#ifdef CONFIG_IPV6_SIT_6RD
} else {
ip6rd.prefix = t->ip6rd.prefix;
ip6rd.relay_prefix = t->ip6rd.relay_prefix;
ip6rd.prefixlen = t->ip6rd.prefixlen;
ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
sizeof(ip6rd)))
goto done;
#endif
}
err = 0;
break;
case SIOCADDTUNNEL:
case SIOCCHGTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -EINVAL;
if (!ipip6_valid_ip_proto(p.iph.protocol))
goto done;
if (p.iph.version != 4 ||
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= htons(IP_DF);
t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t) {
if (t->dev != dev) {
err = -EEXIST;
break;
}
} else {
if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
(!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
err = -EINVAL;
break;
}
t = netdev_priv(dev);
}
ipip6_tunnel_update(t, &p);
}
if (t) {
err = 0;
if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
err = -EFAULT;
} else
err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
break;
case SIOCDELTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
if (dev == sitn->fb_tunnel_dev) {
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -ENOENT;
t = ipip6_tunnel_locate(net, &p, 0);
if (!t)
goto done;
err = -EPERM;
if (t == netdev_priv(sitn->fb_tunnel_dev))
goto done;
dev = t->dev;
}
unregister_netdevice(dev);
err = 0;
break;
case SIOCGETPRL:
err = -EINVAL;
if (dev == sitn->fb_tunnel_dev)
goto done;
err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
break;
case SIOCADDPRL:
case SIOCDELPRL:
case SIOCCHGPRL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
err = -EINVAL;
if (dev == sitn->fb_tunnel_dev)
goto done;
err = -EFAULT;
if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
goto done;
switch (cmd) {
case SIOCDELPRL:
err = ipip6_tunnel_del_prl(t, &prl);
break;
case SIOCADDPRL:
case SIOCCHGPRL:
err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
break;
}
dst_cache_reset(&t->dst_cache);
netdev_state_change(dev);
break;
#ifdef CONFIG_IPV6_SIT_6RD
case SIOCADD6RD:
case SIOCCHG6RD:
case SIOCDEL6RD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto done;
err = -EFAULT;
if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
sizeof(ip6rd)))
goto done;
if (cmd != SIOCDEL6RD) {
err = ipip6_tunnel_update_6rd(t, &ip6rd);
if (err < 0)
goto done;
} else
ipip6_tunnel_clone_6rd(dev, sitn);
err = 0;
break;
#endif
default:
err = -EINVAL;
}
done:
return err;
}
static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - t_hlen)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
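/* In the MTU check above, 0xFFF8 is the largest 8-byte-aligned value below
* the 64 KiB IPv4 total-length limit, so new_mtu plus the tunnel headers
* always fits in one IPv4 packet; IPV6_MIN_MTU (1280) is the floor that
* IPv6 requires of any link.
*/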
static const struct net_device_ops ipip6_netdev_ops = {
.ndo_init = ipip6_tunnel_init,
.ndo_uninit = ipip6_tunnel_uninit,
.ndo_start_xmit = sit_tunnel_xmit,
.ndo_do_ioctl = ipip6_tunnel_ioctl,
.ndo_change_mtu = ipip6_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
};
static void ipip6_dev_free(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
dst_cache_destroy(&tunnel->dst_cache);
free_percpu(dev->tstats);
free_netdev(dev);
}
#define SIT_FEATURES (NETIF_F_SG | \
NETIF_F_FRAGLIST | \
NETIF_F_HIGHDMA | \
NETIF_F_GSO_SOFTWARE | \
NETIF_F_HW_CSUM)
static void ipip6_tunnel_setup(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->netdev_ops = &ipip6_netdev_ops;
dev->destructor = ipip6_dev_free;
dev->type = ARPHRD_SIT;
dev->hard_header_len = LL_MAX_HEADER + t_hlen;
dev->mtu = ETH_DATA_LEN - t_hlen;
dev->flags = IFF_NOARP;
netif_keep_dst(dev);
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
dev->features |= SIT_FEATURES;
dev->hw_features |= SIT_FEATURES;
}
static int ipip6_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int err;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
ipip6_tunnel_bind_dev(dev);
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (err) {
free_percpu(dev->tstats);
return err;
}
return 0;
}
static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
iph->version = 4;
iph->protocol = IPPROTO_IPV6;
iph->ihl = 5;
iph->ttl = 64;
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
}
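/* The per-netns fallback device ("sit0") has no fixed endpoints; it sits
* in the wildcard slot (tunnels_wc[0]) and therefore receives any proto-41
* packet that no explicitly configured tunnel matched.
*/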
static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
{
u8 proto;
if (!data || !data[IFLA_IPTUN_PROTO])
return 0;
proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
if (!ipip6_valid_ip_proto(proto))
return -EINVAL;
return 0;
}
static void ipip6_netlink_parms(struct nlattr *data[],
struct ip_tunnel_parm *parms)
{
memset(parms, 0, sizeof(*parms));
parms->iph.version = 4;
parms->iph.protocol = IPPROTO_IPV6;
parms->iph.ihl = 5;
parms->iph.ttl = 64;
if (!data)
return;
if (data[IFLA_IPTUN_LINK])
parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
if (data[IFLA_IPTUN_LOCAL])
parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
if (data[IFLA_IPTUN_REMOTE])
parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
if (data[IFLA_IPTUN_TTL]) {
parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
if (parms->iph.ttl)
parms->iph.frag_off = htons(IP_DF);
}
if (data[IFLA_IPTUN_TOS])
parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
if (data[IFLA_IPTUN_FLAGS])
parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
if (data[IFLA_IPTUN_PROTO])
parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
}
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipip6_netlink_encap_parms(struct nlattr *data[],
struct ip_tunnel_encap *ipencap)
{
bool ret = false;
memset(ipencap, 0, sizeof(*ipencap));
if (!data)
return ret;
if (data[IFLA_IPTUN_ENCAP_TYPE]) {
ret = true;
ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
}
if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
ret = true;
ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
}
if (data[IFLA_IPTUN_ENCAP_SPORT]) {
ret = true;
ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
}
if (data[IFLA_IPTUN_ENCAP_DPORT]) {
ret = true;
ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
}
return ret;
}
#ifdef CONFIG_IPV6_SIT_6RD
/* This function returns true when 6RD attributes are present in the nl msg */
static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
struct ip_tunnel_6rd *ip6rd)
{
bool ret = false;
memset(ip6rd, 0, sizeof(*ip6rd));
if (!data)
return ret;
if (data[IFLA_IPTUN_6RD_PREFIX]) {
ret = true;
ip6rd->prefix = nla_get_in6_addr(data[IFLA_IPTUN_6RD_PREFIX]);
}
if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) {
ret = true;
ip6rd->relay_prefix =
nla_get_be32(data[IFLA_IPTUN_6RD_RELAY_PREFIX]);
}
if (data[IFLA_IPTUN_6RD_PREFIXLEN]) {
ret = true;
ip6rd->prefixlen = nla_get_u16(data[IFLA_IPTUN_6RD_PREFIXLEN]);
}
if (data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]) {
ret = true;
ip6rd->relay_prefixlen =
nla_get_u16(data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]);
}
return ret;
}
#endif
static int ipip6_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net *net = dev_net(dev);
struct ip_tunnel *nt;
struct ip_tunnel_encap ipencap;
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd ip6rd;
#endif
int err;
nt = netdev_priv(dev);
if (ipip6_netlink_encap_parms(data, &ipencap)) {
err = ip_tunnel_encap_setup(nt, &ipencap);
if (err < 0)
return err;
}
ipip6_netlink_parms(data, &nt->parms);
if (ipip6_tunnel_locate(net, &nt->parms, 0))
return -EEXIST;
err = ipip6_tunnel_create(dev);
if (err < 0)
return err;
#ifdef CONFIG_IPV6_SIT_6RD
if (ipip6_netlink_6rd_parms(data, &ip6rd))
err = ipip6_tunnel_update_6rd(nt, &ip6rd);
#endif
return err;
}
static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd ip6rd;
#endif
int err;
if (dev == sitn->fb_tunnel_dev)
return -EINVAL;
if (ipip6_netlink_encap_parms(data, &ipencap)) {
err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
}
ipip6_netlink_parms(data, &p);
if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
(!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
return -EINVAL;
t = ipip6_tunnel_locate(net, &p, 0);
if (t) {
if (t->dev != dev)
return -EEXIST;
} else
t = netdev_priv(dev);
ipip6_tunnel_update(t, &p);
#ifdef CONFIG_IPV6_SIT_6RD
if (ipip6_netlink_6rd_parms(data, &ip6rd))
return ipip6_tunnel_update_6rd(t, &ip6rd);
#endif
return 0;
}
static size_t ipip6_get_size(const struct net_device *dev)
{
return
/* IFLA_IPTUN_LINK */
nla_total_size(4) +
/* IFLA_IPTUN_LOCAL */
nla_total_size(4) +
/* IFLA_IPTUN_REMOTE */
nla_total_size(4) +
/* IFLA_IPTUN_TTL */
nla_total_size(1) +
/* IFLA_IPTUN_TOS */
nla_total_size(1) +
/* IFLA_IPTUN_PMTUDISC */
nla_total_size(1) +
/* IFLA_IPTUN_FLAGS */
nla_total_size(2) +
/* IFLA_IPTUN_PROTO */
nla_total_size(1) +
#ifdef CONFIG_IPV6_SIT_6RD
/* IFLA_IPTUN_6RD_PREFIX */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_IPTUN_6RD_RELAY_PREFIX */
nla_total_size(4) +
/* IFLA_IPTUN_6RD_PREFIXLEN */
nla_total_size(2) +
/* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */
nla_total_size(2) +
#endif
/* IFLA_IPTUN_ENCAP_TYPE */
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_FLAGS */
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_SPORT */
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_DPORT */
nla_total_size(2) +
0;
}
static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
!!(parm->iph.frag_off & htons(IP_DF))) ||
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags))
goto nla_put_failure;
#ifdef CONFIG_IPV6_SIT_6RD
if (nla_put_in6_addr(skb, IFLA_IPTUN_6RD_PREFIX,
&tunnel->ip6rd.prefix) ||
nla_put_in_addr(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
tunnel->ip6rd.relay_prefix) ||
nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN,
tunnel->ip6rd.prefixlen) ||
nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
tunnel->ip6rd.relay_prefixlen))
goto nla_put_failure;
#endif
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
tunnel->encap.type) ||
nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
tunnel->encap.sport) ||
nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
tunnel->encap.dport) ||
nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
tunnel->encap.flags))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
[IFLA_IPTUN_REMOTE] = { .type = NLA_U32 },
[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
[IFLA_IPTUN_TOS] = { .type = NLA_U8 },
[IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
[IFLA_IPTUN_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
#ifdef CONFIG_IPV6_SIT_6RD
[IFLA_IPTUN_6RD_PREFIX] = { .len = sizeof(struct in6_addr) },
[IFLA_IPTUN_6RD_RELAY_PREFIX] = { .type = NLA_U32 },
[IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 },
[IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 },
#endif
[IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
};
static void ipip6_dellink(struct net_device *dev, struct list_head *head)
{
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
if (dev != sitn->fb_tunnel_dev)
unregister_netdevice_queue(dev, head);
}
static struct rtnl_link_ops sit_link_ops __read_mostly = {
.kind = "sit",
.maxtype = IFLA_IPTUN_MAX,
.policy = ipip6_policy,
.priv_size = sizeof(struct ip_tunnel),
.setup = ipip6_tunnel_setup,
.validate = ipip6_validate,
.newlink = ipip6_newlink,
.changelink = ipip6_changelink,
.get_size = ipip6_get_size,
.fill_info = ipip6_fill_info,
.dellink = ipip6_dellink,
.get_link_net = ip_tunnel_get_link_net,
};
static struct xfrm_tunnel sit_handler __read_mostly = {
.handler = ipip6_rcv,
.err_handler = ipip6_err,
.priority = 1,
};
static struct xfrm_tunnel ipip_handler __read_mostly = {
.handler = ipip_rcv,
.err_handler = ipip6_err,
.priority = 2,
};
#if IS_ENABLED(CONFIG_MPLS)
static struct xfrm_tunnel mplsip_handler __read_mostly = {
.handler = mplsip_rcv,
.err_handler = ipip6_err,
.priority = 2,
};
#endif
static void __net_exit sit_destroy_tunnels(struct net *net,
struct list_head *head)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
struct net_device *dev, *aux;
int prio;
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &sit_link_ops)
unregister_netdevice_queue(dev, head);
for (prio = 1; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
struct ip_tunnel *t;
t = rtnl_dereference(sitn->tunnels[prio][h]);
while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev,
head);
t = rtnl_dereference(t->next);
}
}
}
}
static int __net_init sit_init_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
struct ip_tunnel *t;
int err;
sitn->tunnels[0] = sitn->tunnels_wc;
sitn->tunnels[1] = sitn->tunnels_l;
sitn->tunnels[2] = sitn->tunnels_r;
sitn->tunnels[3] = sitn->tunnels_r_l;
sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
if (!sitn->fb_tunnel_dev) {
err = -ENOMEM;
goto err_alloc_dev;
}
dev_net_set(sitn->fb_tunnel_dev, net);
sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
/* FB netdevice is special: we have one, and only one per netns.
* Allowing it to be moved to another netns is clearly unsafe.
*/
sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
err = register_netdev(sitn->fb_tunnel_dev);
if (err)
goto err_reg_dev;
ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
t = netdev_priv(sitn->fb_tunnel_dev);
strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
ipip6_dev_free(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
static void __net_exit sit_exit_net(struct net *net)
{
LIST_HEAD(list);
rtnl_lock();
sit_destroy_tunnels(net, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
.exit = sit_exit_net,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
};
static void __exit sit_cleanup(void)
{
rtnl_link_unregister(&sit_link_ops);
xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
#if IS_ENABLED(CONFIG_MPLS)
xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
#endif
unregister_pernet_device(&sit_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
static int __init sit_init(void)
{
int err;
pr_info("IPv6, IPv4 and MPLS over IPv4 tunneling driver\n");
err = register_pernet_device(&sit_net_ops);
if (err < 0)
return err;
err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
if (err < 0) {
pr_info("%s: can't register ip6ip4\n", __func__);
goto xfrm_tunnel_failed;
}
err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
if (err < 0) {
pr_info("%s: can't register ip4ip4\n", __func__);
goto xfrm_tunnel4_failed;
}
#if IS_ENABLED(CONFIG_MPLS)
err = xfrm4_tunnel_register(&mplsip_handler, AF_MPLS);
if (err < 0) {
pr_info("%s: can't register mplsip\n", __func__);
goto xfrm_tunnel_mpls_failed;
}
#endif
err = rtnl_link_register(&sit_link_ops);
if (err < 0)
goto rtnl_link_failed;
out:
return err;
rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mpls_failed:
#endif
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel4_failed:
xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
xfrm_tunnel_failed:
unregister_pernet_device(&sit_net_ops);
goto out;
}
module_init(sit_init);
module_exit(sit_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("sit");
MODULE_ALIAS_NETDEV("sit0");
| gpl-2.0 |
tmatsuya/milkymist-linux | drivers/net/irda/au1k_ir.c | 43 | 20314 | /*
* Alchemy Semi Au1000 IrDA driver
*
* Copyright 2001 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/au1000.h>
#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
#include <asm/pb1000.h>
#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
#include <asm/db1x00.h>
#else
#error au1k_ir: unsupported board
#endif
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "au1000_ircc.h"
static int au1k_irda_net_init(struct net_device *);
static int au1k_irda_start(struct net_device *);
static int au1k_irda_stop(struct net_device *dev);
static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
static int au1k_irda_rx(struct net_device *);
static void au1k_irda_interrupt(int, void *);
static void au1k_tx_timeout(struct net_device *);
static struct net_device_stats *au1k_irda_stats(struct net_device *);
static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
static int au1k_irda_set_speed(struct net_device *dev, int speed);
static void *dma_alloc(size_t, dma_addr_t *);
static void dma_free(void *, size_t);
static int qos_mtt_bits = 0x07; /* 1 ms or more */
static struct net_device *ir_devs[NUM_IR_IFF];
static char version[] __devinitdata =
"au1k_ircc:1.2 ppopov@mvista.com\n";
#define RUN_AT(x) (jiffies + (x))
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
static BCSR * const bcsr = (BCSR *)0xAE000000;
#endif
static DEFINE_SPINLOCK(ir_lock);
/*
* IrDA peripheral bug. You have to read the register
* twice to get the right value.
*/
u32 read_ir_reg(u32 addr)
{
readl(addr);
return readl(addr);
}
/*
* Buffer allocation/deallocation routines. The buffer descriptor returned
* has the virtual and dma address of a buffer suitable for
* both receive and transmit operations.
*/
static db_dest_t *GetFreeDB(struct au1k_private *aup)
{
db_dest_t *pDB;
pDB = aup->pDBfree;
if (pDB) {
aup->pDBfree = pDB->pnext;
}
return pDB;
}
static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
{
db_dest_t *pDBfree = aup->pDBfree;
if (pDBfree)
pDBfree->pnext = pDB;
aup->pDBfree = pDB;
}
/*
DMA memory allocation, derived from pci_alloc_consistent.
However, the Au1000 data cache is coherent (when programmed
so), therefore we return a KSEG0 address, not KSEG1.
*/
static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC | GFP_DMA;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_bus(ret);
ret = (void *)KSEG0ADDR(ret);
}
return ret;
}
static void dma_free(void *vaddr, size_t size)
{
vaddr = (void *)KSEG0ADDR(vaddr);
free_pages((unsigned long) vaddr, get_order(size));
}
static void
setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
int i;
for (i=0; i<NUM_IR_DESC; i++) {
aup->rx_ring[i] = (volatile ring_dest_t *)
(rx_base + sizeof(ring_dest_t)*i);
}
for (i=0; i<NUM_IR_DESC; i++) {
aup->tx_ring[i] = (volatile ring_dest_t *)
(tx_base + sizeof(ring_dest_t)*i);
}
}
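/* Both descriptor rings live in one physically contiguous, 1 KiB-aligned
* allocation: au1k_irda_net_init() passes tx_base = rx_base + 512, so the
* hardware sees the tx ring immediately after the rx ring.
*/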
static int au1k_irda_init(void)
{
static unsigned version_printed = 0;
struct au1k_private *aup;
struct net_device *dev;
int err;
if (version_printed++ == 0) printk(version);
dev = alloc_irdadev(sizeof(struct au1k_private));
if (!dev)
return -ENOMEM;
dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
err = au1k_irda_net_init(dev);
if (err)
goto out;
err = register_netdev(dev);
if (err)
goto out1;
ir_devs[0] = dev;
printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
return 0;
out1:
aup = netdev_priv(dev);
dma_free((void *)aup->db[0].vaddr,
MAX_BUF_SIZE * 2*NUM_IR_DESC);
dma_free((void *)aup->rx_ring[0],
2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
kfree(aup->rx_buff.head);
out:
free_netdev(dev);
return err;
}
static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
io->head = kmalloc(size, GFP_KERNEL);
if (io->head != NULL) {
io->truesize = size;
io->in_frame = FALSE;
io->state = OUTSIDE_FRAME;
io->data = io->head;
}
return io->head ? 0 : -ENOMEM;
}
static int au1k_irda_net_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
int i, retval = 0, err;
db_dest_t *pDB, *pDBfree;
dma_addr_t temp;
err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
if (err)
goto out1;
dev->open = au1k_irda_start;
dev->hard_start_xmit = au1k_irda_hard_xmit;
dev->stop = au1k_irda_stop;
dev->get_stats = au1k_irda_stats;
dev->do_ioctl = au1k_irda_ioctl;
dev->tx_timeout = au1k_tx_timeout;
irda_init_max_qos_capabilies(&aup->qos);
/* The only value we must override is the baudrate */
aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
IR_115200|IR_576000 |(IR_4000000 << 8);
aup->qos.min_turn_time.bits = qos_mtt_bits;
irda_qos_bits_to_value(&aup->qos);
retval = -ENOMEM;
/* Tx ring follows rx ring + 512 bytes */
/* we need a 1k aligned buffer */
aup->rx_ring[0] = (ring_dest_t *)
dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
if (!aup->rx_ring[0])
goto out2;
/* allocate the data buffers */
aup->db[0].vaddr =
(void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
if (!aup->db[0].vaddr)
goto out3;
setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
pDBfree = NULL;
pDB = aup->db;
for (i=0; i<(2*NUM_IR_DESC); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
pDB->vaddr =
(u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
pDB++;
}
aup->pDBfree = pDBfree;
/* attach a data buffer to each descriptor */
for (i=0; i<NUM_IR_DESC; i++) {
pDB = GetFreeDB(aup);
if (!pDB) goto out;
aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
aup->rx_db_inuse[i] = pDB;
}
for (i=0; i<NUM_IR_DESC; i++) {
pDB = GetFreeDB(aup);
if (!pDB) goto out;
aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
aup->tx_ring[i]->count_0 = 0;
aup->tx_ring[i]->count_1 = 0;
aup->tx_ring[i]->flags = 0;
aup->tx_db_inuse[i] = pDB;
}
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
/* power on */
bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
au_sync();
#endif
return 0;
out:
/* GetFreeDB() ran out of buffers; release the data buffer block too. */
dma_free((void *)aup->db[0].vaddr,
MAX_BUF_SIZE * 2*NUM_IR_DESC);
out3:
dma_free((void *)aup->rx_ring[0],
2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
out2:
kfree(aup->rx_buff.head);
out1:
printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
return retval;
}
static int au1k_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
int i;
u32 control;
u32 ring_address;
/* bring the device out of reset */
control = 0xe; /* coherent, clock enable, one half system clock */
#ifndef CONFIG_CPU_LITTLE_ENDIAN
control |= 1;
#endif
aup->tx_head = 0;
aup->tx_tail = 0;
aup->rx_head = 0;
for (i=0; i<NUM_IR_DESC; i++) {
aup->rx_ring[i]->flags = AU_OWN;
}
writel(control, IR_INTERFACE_CONFIG);
au_sync_delay(10);
writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
au_sync_delay(1);
writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);
ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);
writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);
writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
writel(0, IR_RING_ADDR_CMPR);
au1k_irda_set_speed(dev, 9600);
return 0;
}
static int au1k_irda_start(struct net_device *dev)
{
int retval;
char hwname[32];
struct au1k_private *aup = netdev_priv(dev);
if ((retval = au1k_init(dev))) {
printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
return retval;
}
if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt,
0, dev->name, dev))) {
printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, dev->irq);
return retval;
}
if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt,
0, dev->name, dev))) {
free_irq(AU1000_IRDA_TX_INT, dev);
printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, dev->irq);
return retval;
}
/* Give self a hardware name */
sprintf(hwname, "Au1000 SIR/FIR");
aup->irlap = irlap_open(dev, &aup->qos, hwname);
netif_start_queue(dev);
writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
aup->timer.expires = RUN_AT((3*HZ));
aup->timer.data = (unsigned long)dev;
return 0;
}
static int au1k_irda_stop(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
/* disable interrupts */
writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
writel(0, IR_CONFIG_1);
writel(0, IR_INTERFACE_CONFIG); /* disable clock */
au_sync();
if (aup->irlap) {
irlap_close(aup->irlap);
aup->irlap = NULL;
}
netif_stop_queue(dev);
del_timer(&aup->timer);
/* disable the interrupt */
free_irq(AU1000_IRDA_TX_INT, dev);
free_irq(AU1000_IRDA_RX_INT, dev);
return 0;
}
static void __exit au1k_irda_exit(void)
{
struct net_device *dev = ir_devs[0];
struct au1k_private *aup = netdev_priv(dev);
unregister_netdev(dev);
dma_free((void *)aup->db[0].vaddr,
MAX_BUF_SIZE * 2*NUM_IR_DESC);
dma_free((void *)aup->rx_ring[0],
2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
kfree(aup->rx_buff.head);
free_netdev(dev);
}
static inline void
update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
struct au1k_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &aup->stats;
ps->tx_packets++;
ps->tx_bytes += pkt_len;
if (status & IR_TX_ERROR) {
ps->tx_errors++;
ps->tx_aborted_errors++;
}
}
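/* Descriptor ownership handshake: the driver sets AU_OWN to hand a
* descriptor to the DMA engine, and the engine clears it on completion.
* au1k_tx_ack() reclaims tx descriptors whose AU_OWN bit is clear,
* accounting them and waking the queue if it had been stopped.
*/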
static void au1k_tx_ack(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
volatile ring_dest_t *ptxd;
ptxd = aup->tx_ring[aup->tx_tail];
while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
update_tx_stats(dev, ptxd->flags,
ptxd->count_1<<8 | ptxd->count_0);
ptxd->count_0 = 0;
ptxd->count_1 = 0;
au_sync();
aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
ptxd = aup->tx_ring[aup->tx_tail];
if (aup->tx_full) {
aup->tx_full = 0;
netif_wake_queue(dev);
}
}
if (aup->tx_tail == aup->tx_head) {
if (aup->newspeed) {
au1k_irda_set_speed(dev, aup->newspeed);
aup->newspeed = 0;
}
else {
writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
IR_CONFIG_1);
au_sync();
writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
IR_CONFIG_1);
writel(0, IR_RING_PROMPT);
au_sync();
}
}
}
/*
* Au1000 transmit routine.
*/
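/* A zero-length skb with a pending speed change is the IrDA stack's way of
* requesting a baud-rate switch; it is applied once the tx ring is empty,
* either immediately below or later from au1k_tx_ack().
*/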
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
int speed = irda_get_next_speed(skb);
volatile ring_dest_t *ptxd;
u32 len;
u32 flags;
db_dest_t *pDB;
if (speed != aup->speed && speed != -1) {
aup->newspeed = speed;
}
if ((skb->len == 0) && (aup->newspeed)) {
if (aup->tx_tail == aup->tx_head) {
au1k_irda_set_speed(dev, speed);
aup->newspeed = 0;
}
dev_kfree_skb(skb);
return 0;
}
ptxd = aup->tx_ring[aup->tx_head];
flags = ptxd->flags;
if (flags & AU_OWN) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
return 1;
}
else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
return 1;
}
pDB = aup->tx_db_inuse[aup->tx_head];
#if 0
if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
printk("tx warning: rx byte cnt %x\n",
read_ir_reg(IR_RX_BYTE_CNT));
}
#endif
if (aup->speed == 4000000) {
/* FIR */
skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
ptxd->count_0 = skb->len & 0xff;
ptxd->count_1 = (skb->len >> 8) & 0xff;
}
else {
/* SIR */
len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
ptxd->count_0 = len & 0xff;
ptxd->count_1 = (len >> 8) & 0xff;
ptxd->flags |= IR_DIS_CRC;
au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
}
ptxd->flags |= AU_OWN;
au_sync();
writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
writel(0, IR_RING_PROMPT);
au_sync();
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
dev->trans_start = jiffies;
return 0;
}
static inline void
update_rx_stats(struct net_device *dev, u32 status, u32 count)
{
struct au1k_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &aup->stats;
ps->rx_packets++;
if (status & IR_RX_ERROR) {
ps->rx_errors++;
if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
ps->rx_missed_errors++;
if (status & IR_MAX_LEN)
ps->rx_length_errors++;
if (status & IR_CRC_ERROR)
ps->rx_crc_errors++;
}
else
ps->rx_bytes += count;
}
/*
* Au1000 receive routine.
*/
static int au1k_irda_rx(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
struct sk_buff *skb;
volatile ring_dest_t *prxd;
u32 flags, count;
db_dest_t *pDB;
prxd = aup->rx_ring[aup->rx_head];
flags = prxd->flags;
while (!(flags & AU_OWN)) {
pDB = aup->rx_db_inuse[aup->rx_head];
count = prxd->count_1<<8 | prxd->count_0;
if (!(flags & IR_RX_ERROR)) {
/* good frame */
update_rx_stats(dev, flags, count);
skb = alloc_skb(count + 1, GFP_ATOMIC);
if (skb == NULL) {
/* Drop the frame but still recycle the descriptor
* below; a bare "continue" here would spin on the
* same descriptor forever. */
aup->stats.rx_dropped++;
} else {
skb_reserve(skb, 1);
if (aup->speed == 4000000)
skb_put(skb, count);
else
skb_put(skb, count - 2);
skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
skb->dev = dev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
netif_rx(skb);
prxd->count_0 = 0;
prxd->count_1 = 0;
}
}
prxd->flags |= AU_OWN;
aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
writel(0, IR_RING_PROMPT);
au_sync();
/* next descriptor */
prxd = aup->rx_ring[aup->rx_head];
flags = prxd->flags;
dev->last_rx = jiffies;
}
return 0;
}
void au1k_irda_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
if (dev == NULL) {
printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
return;
}
writel(0, IR_INT_CLEAR); /* ack irda interrupts */
au1k_irda_rx(dev);
au1k_tx_ack(dev);
}
/*
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter is probably hung; reset it.
*/
static void au1k_tx_timeout(struct net_device *dev)
{
u32 speed;
struct au1k_private *aup = netdev_priv(dev);
printk(KERN_ERR "%s: tx timeout\n", dev->name);
speed = aup->speed;
aup->speed = 0;
au1k_irda_set_speed(dev, speed);
aup->tx_full = 0;
netif_wake_queue(dev);
}
/*
* Set the IrDA communications speed.
*/
static int
au1k_irda_set_speed(struct net_device *dev, int speed)
{
unsigned long flags;
struct au1k_private *aup = netdev_priv(dev);
u32 control;
int ret = 0, timeout = 10, i;
volatile ring_dest_t *ptxd;
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
unsigned long irda_resets;
#endif
if (speed == aup->speed)
return ret;
spin_lock_irqsave(&ir_lock, flags);
/* disable PHY first */
writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
/* disable RX/TX */
writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
IR_CONFIG_1);
au_sync_delay(1);
while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
mdelay(1);
if (!timeout--) {
printk(KERN_ERR "%s: rx/tx disable timeout\n",
dev->name);
break;
}
}
/* disable DMA */
writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
au_sync_delay(1);
/*
* After we disable tx/rx, the index pointers
* go back to zero.
*/
aup->tx_head = aup->tx_tail = aup->rx_head = 0;
for (i=0; i<NUM_IR_DESC; i++) {
ptxd = aup->tx_ring[i];
ptxd->flags = 0;
ptxd->count_0 = 0;
ptxd->count_1 = 0;
}
for (i=0; i<NUM_IR_DESC; i++) {
ptxd = aup->rx_ring[i];
ptxd->count_0 = 0;
ptxd->count_1 = 0;
ptxd->flags = AU_OWN;
}
if (speed == 4000000) {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
bcsr->resets |= BCSR_RESETS_FIR_SEL;
#else /* Pb1000 and Pb1100 */
writel(1<<13, CPLD_AUX1);
#endif
}
else {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
bcsr->resets &= ~BCSR_RESETS_FIR_SEL;
#else /* Pb1000 and Pb1100 */
writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
#endif
}
switch (speed) {
case 9600:
writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
writel(IR_SIR_MODE, IR_CONFIG_1);
break;
case 19200:
writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
writel(IR_SIR_MODE, IR_CONFIG_1);
break;
case 38400:
writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
writel(IR_SIR_MODE, IR_CONFIG_1);
break;
case 57600:
writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
writel(IR_SIR_MODE, IR_CONFIG_1);
break;
case 115200:
writel(12<<5, IR_WRITE_PHY_CONFIG);
writel(IR_SIR_MODE, IR_CONFIG_1);
break;
case 4000000:
writel(0xF, IR_WRITE_PHY_CONFIG);
writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
break;
default:
printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
ret = -EINVAL;
break;
}
aup->speed = speed;
writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
au_sync();
control = read_ir_reg(IR_ENABLE);
writel(0, IR_RING_PROMPT);
au_sync();
if (control & (1<<14)) {
printk(KERN_ERR "%s: configuration error\n", dev->name);
}
else {
if (control & (1<<11))
printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
if (control & (1<<12))
printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
if (control & (1<<13))
printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
if (control & (1<<10))
printk(KERN_DEBUG "%s TX enabled\n", dev->name);
if (control & (1<<9))
printk(KERN_DEBUG "%s RX enabled\n", dev->name);
}
spin_unlock_irqrestore(&ir_lock, flags);
return ret;
}
static int
au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
struct if_irda_req *rq = (struct if_irda_req *)ifreq;
struct au1k_private *aup = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (cmd) {
case SIOCSBANDWIDTH:
if (capable(CAP_NET_ADMIN)) {
/*
* We are unable to set the speed if the
* device is not running.
*/
if (aup->open)
ret = au1k_irda_set_speed(dev,
rq->ifr_baudrate);
else {
printk(KERN_ERR "%s ioctl: !netif_running\n",
dev->name);
ret = 0;
}
}
break;
case SIOCSMEDIABUSY:
ret = -EPERM;
if (capable(CAP_NET_ADMIN)) {
irda_device_set_media_busy(dev, TRUE);
ret = 0;
}
break;
case SIOCGRECEIVING:
rq->ifr_receiving = 0;
break;
default:
break;
}
return ret;
}
static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
return &aup->stats;
}
MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
module_init(au1k_irda_init);
module_exit(au1k_irda_exit);
| gpl-2.0 |
ericli1989/ali_kernel | net/sunrpc/auth_gss/gss_krb5_crypto.c | 43 | 25005 | /*
* linux/net/sunrpc/gss_krb5_crypto.c
*
* Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
* Bruce Fields <bfields@umich.edu>
*/
/*
* Copyright (C) 1998 by the FundsXpress, INC.
*
* All rights reserved.
*
* Export of this software from the United States of America may require
* a specific license from the United States Government. It is the
* responsibility of any person or organization contemplating export to
* obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of FundsXpress. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. FundsXpress makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
u32
krb5_encrypt(
struct crypto_blkcipher *tfm,
void * iv,
void * in,
void * out,
int length)
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
sg_init_one(sg, out, length);
ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
dprintk("RPC: krb5_encrypt returns %d\n", ret);
return ret;
}
u32
krb5_decrypt(
struct crypto_blkcipher *tfm,
void * iv,
void * in,
void * out,
int length)
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
sg_init_one(sg, out, length);
ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
dprintk("RPC: gss_k5decrypt returns %d\n",ret);
return ret;
}
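/*
 * Editor sketch (illustrative, compiled out): the helpers above operate on
 * contiguous buffers whose length is a multiple of the cipher block size,
 * copying in -> out and then transforming out in place. The tfm argument
 * and the 16-byte payload below are hypothetical.
 */
#if 0
static void example_krb5_roundtrip(struct crypto_blkcipher *tfm)
{
u8 clear[16] = "fifteen bytes..";
u8 cipher[16], back[16];
/* a NULL iv means "use an all-zero IV" in both helpers */
krb5_encrypt(tfm, NULL, clear, cipher, sizeof(clear));
krb5_decrypt(tfm, NULL, cipher, back, sizeof(cipher));
/* back[] now matches clear[] */
}
#endif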
static int
checksummer(struct scatterlist *sg, void *data)
{
struct hash_desc *desc = data;
return crypto_hash_update(desc, sg, sg->length);
}
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
unsigned int ms_usage;
switch (usage) {
case KG_USAGE_SIGN:
ms_usage = 15;
break;
case KG_USAGE_SEAL:
ms_usage = 13;
break;
default:
return -EINVAL;
}
salt[0] = (ms_usage >> 0) & 0xff;
salt[1] = (ms_usage >> 8) & 0xff;
salt[2] = (ms_usage >> 16) & 0xff;
salt[3] = (ms_usage >> 24) & 0xff;
return 0;
}
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
u8 rc4salt[4];
struct crypto_hash *md5;
struct crypto_hash *hmac_md5;
if (cksumkey == NULL)
return GSS_S_FAILURE;
if (cksumout->len < kctx->gk5e->cksumlength) {
dprintk("%s: checksum buffer length, %u, too small for %s\n",
__func__, cksumout->len, kctx->gk5e->name);
return GSS_S_FAILURE;
}
if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
dprintk("%s: invalid usage value %u\n", __func__, usage);
return GSS_S_FAILURE;
}
md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(md5))
return GSS_S_FAILURE;
hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac_md5)) {
crypto_free_hash(md5);
return GSS_S_FAILURE;
}
desc.tfm = md5;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err)
goto out;
sg_init_one(sg, rc4salt, 4);
err = crypto_hash_update(&desc, sg, 4);
if (err)
goto out;
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
desc.tfm = hmac_md5;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err)
goto out;
err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
checksumdata);
if (err)
goto out;
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
cksumout->len = kctx->gk5e->cksumlength;
out:
crypto_free_hash(md5);
crypto_free_hash(hmac_md5);
return err ? GSS_S_FAILURE : 0;
}
/*
* checksum the plaintext data and hdrlen bytes of the token header
* The checksum is performed over the first 8 bytes of the
* gss token header and then over the data body
*/
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
unsigned int checksumlen;
if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
return make_checksum_hmac_md5(kctx, header, hdrlen,
body, body_offset,
cksumkey, usage, cksumout);
if (cksumout->len < kctx->gk5e->cksumlength) {
dprintk("%s: checksum buffer length, %u, too small for %s\n",
__func__, cksumout->len, kctx->gk5e->name);
return GSS_S_FAILURE;
}
desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
checksumlen = crypto_hash_digestsize(desc.tfm);
if (cksumkey != NULL) {
err = crypto_hash_setkey(desc.tfm, cksumkey,
kctx->gk5e->keylength);
if (err)
goto out;
}
err = crypto_hash_init(&desc);
if (err)
goto out;
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
switch (kctx->gk5e->ctype) {
case CKSUMTYPE_RSA_MD5:
err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
checksumdata, checksumlen);
if (err)
goto out;
memcpy(cksumout->data,
checksumdata + checksumlen - kctx->gk5e->cksumlength,
kctx->gk5e->cksumlength);
break;
case CKSUMTYPE_HMAC_SHA1_DES3:
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
break;
default:
BUG();
break;
}
cksumout->len = kctx->gk5e->cksumlength;
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
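/*
 * Editor sketch (illustrative, compiled out): the call shape for computing
 * a v1 MIC with make_checksum(). The token header pointer and checksum key
 * are hypothetical placeholders; only the argument order and the 8-byte
 * header length mirror this file.
 */
#if 0
static u32 example_mic_v1(struct krb5_ctx *kctx, char *tok_hdr,
struct xdr_buf *text, u8 *cksumkey)
{
u8 data[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj mic = { .len = sizeof(data), .data = data };
/* hash the first 8 bytes of the token header, then the whole body */
return make_checksum(kctx, tok_hdr, 8, text, 0,
cksumkey, KG_USAGE_SIGN, &mic);
}
#endif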
/*
* checksum the plaintext data and hdrlen bytes of the token header
* Per rfc4121, sec. 4.2.4, the checksum is performed over the data
* body then over the first 16 octets of the MIC token
* Inclusion of the header data in the calculation of the
* checksum is optional.
*/
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
unsigned int checksumlen;
if (kctx->gk5e->keyed_cksum == 0) {
dprintk("%s: expected keyed hash for %s\n",
__func__, kctx->gk5e->name);
return GSS_S_FAILURE;
}
if (cksumkey == NULL) {
dprintk("%s: no key supplied for %s\n",
__func__, kctx->gk5e->name);
return GSS_S_FAILURE;
}
desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
checksumlen = crypto_hash_digestsize(desc.tfm);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
err = crypto_hash_init(&desc);
if (err)
goto out;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
if (header != NULL) {
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
}
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
cksumout->len = kctx->gk5e->cksumlength;
switch (kctx->gk5e->ctype) {
case CKSUMTYPE_HMAC_SHA1_96_AES128:
case CKSUMTYPE_HMAC_SHA1_96_AES256:
/* note that this truncates the hash */
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
break;
default:
BUG();
break;
}
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
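/*
 * Editor sketch (illustrative, compiled out): unlike make_checksum(), the
 * v2 variant hashes the body first and the (optional) 16-octet token
 * header last, then truncates the keyed digest to gk5e->cksumlength.
 * Names below are placeholders.
 */
#if 0
static u32 example_mic_v2(struct krb5_ctx *kctx, char *mic_hdr,
struct xdr_buf *text, u8 *key)
{
u8 data[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj mic = { .len = sizeof(data), .data = data };
return make_checksum_v2(kctx, mic_hdr, GSS_KRB5_TOK_HDR_LEN,
text, 0, key, KG_USAGE_SIGN, &mic);
}
#endif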
struct encryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
int pos;
struct xdr_buf *outbuf;
struct page **pages;
struct scatterlist infrags[4];
struct scatterlist outfrags[4];
int fragno;
int fraglen;
};
static int
encryptor(struct scatterlist *sg, void *data)
{
struct encryptor_desc *desc = data;
struct xdr_buf *outbuf = desc->outbuf;
struct page *in_page;
int thislen = desc->fraglen + sg->length;
int fraglen, ret;
int page_pos;
/* Worst case is 4 fragments: head, end of page 1, start
* of page 2, tail. Anything more is a bug. */
BUG_ON(desc->fragno > 3);
page_pos = desc->pos - outbuf->head[0].iov_len;
if (page_pos >= 0 && page_pos < outbuf->page_len) {
/* pages are not in place: */
int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
in_page = desc->pages[i];
} else {
in_page = sg_page(sg);
}
sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
sg->offset);
sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
sg->offset);
desc->fragno++;
desc->fraglen += sg->length;
desc->pos += sg->length;
fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
return 0;
sg_mark_end(&desc->infrags[desc->fragno - 1]);
sg_mark_end(&desc->outfrags[desc->fragno - 1]);
ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
desc->infrags, thislen);
if (ret)
return ret;
sg_init_table(desc->infrags, 4);
sg_init_table(desc->outfrags, 4);
if (fraglen) {
sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
sg->offset + sg->length - fraglen);
desc->infrags[0] = desc->outfrags[0];
sg_assign_page(&desc->infrags[0], in_page);
desc->fragno = 1;
desc->fraglen = fraglen;
} else {
desc->fragno = 0;
desc->fraglen = 0;
}
return 0;
}
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
int offset, struct page **pages)
{
int ret;
struct encryptor_desc desc;
BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
memset(desc.iv, 0, sizeof(desc.iv));
desc.desc.tfm = tfm;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.pos = offset;
desc.outbuf = buf;
desc.pages = pages;
desc.fragno = 0;
desc.fraglen = 0;
sg_init_table(desc.infrags, 4);
sg_init_table(desc.outfrags, 4);
ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
return ret;
}
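/*
 * Editor sketch (illustrative, compiled out): gss_encrypt_xdr_buf()
 * encrypts [offset, buf->len) in place and BUGs if that region is not
 * block-aligned, so a caller must pad first. The wrapper and its names
 * are hypothetical.
 */
#if 0
static int example_encrypt_payload(struct crypto_blkcipher *tfm,
struct xdr_buf *buf, int offset, struct page **clear_pages)
{
int blocksize = crypto_blkcipher_blocksize(tfm);
if ((buf->len - offset) % blocksize != 0)
return -EINVAL; /* pad before calling */
return gss_encrypt_xdr_buf(tfm, buf, offset, clear_pages);
}
#endif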
struct decryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
struct scatterlist frags[4];
int fragno;
int fraglen;
};
static int
decryptor(struct scatterlist *sg, void *data)
{
struct decryptor_desc *desc = data;
int thislen = desc->fraglen + sg->length;
int fraglen, ret;
/* Worst case is 4 fragments: head, end of page 1, start
* of page 2, tail. Anything more is a bug. */
BUG_ON(desc->fragno > 3);
sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
sg->offset);
desc->fragno++;
desc->fraglen += sg->length;
fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
return 0;
sg_mark_end(&desc->frags[desc->fragno - 1]);
ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
desc->frags, thislen);
if (ret)
return ret;
sg_init_table(desc->frags, 4);
if (fraglen) {
sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
sg->offset + sg->length - fraglen);
desc->fragno = 1;
desc->fraglen = fraglen;
} else {
desc->fragno = 0;
desc->fraglen = 0;
}
return 0;
}
int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
int offset)
{
struct decryptor_desc desc;
/* XXXJBF: */
BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
memset(desc.iv, 0, sizeof(desc.iv));
desc.desc.tfm = tfm;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.fragno = 0;
desc.fraglen = 0;
sg_init_table(desc.frags, 4);
return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
/*
* This function makes the assumption that it was ultimately called
* from gss_wrap().
*
* The client auth_gss code moves any existing tail data into a
* separate page before calling gss_wrap.
* The server svcauth_gss code ensures that both the head and the
* tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
*
* Even with that guarantee, this function may be called more than
* once in the processing of gss_wrap(). The best we can do is
* verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
* largest expected shift will fit within RPC_MAX_AUTH_SIZE.
* At run-time we can verify that a single invocation of this
* function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
*/
int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
u8 *p;
if (shiftlen == 0)
return 0;
BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
p = buf->head[0].iov_base + base;
memmove(p + shiftlen, p, buf->head[0].iov_len - base);
buf->head[0].iov_len += shiftlen;
buf->len += shiftlen;
return 0;
}
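/*
 * Editor sketch (illustrative, compiled out): how gss_krb5_aes_encrypt()
 * below uses xdr_extend_head() -- open a conflen-byte hole just past the
 * token header, then fill it with a random confounder. 'offset' is a
 * hypothetical token-start offset.
 */
#if 0
static void example_insert_confounder(struct xdr_buf *buf, u32 offset,
struct krb5_ctx *kctx)
{
u32 conf_off = offset + GSS_KRB5_TOK_HDR_LEN;
if (xdr_extend_head(buf, conf_off, kctx->gk5e->conflen) == 0)
gss_krb5_make_confounder(buf->head[0].iov_base + conf_off,
kctx->gk5e->conflen);
}
#endif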
static u32
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
u32 offset, u8 *iv, struct page **pages, int encrypt)
{
u32 ret;
struct scatterlist sg[1];
struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
u8 data[crypto_blkcipher_blocksize(cipher) * 2];
struct page **save_pages;
u32 len = buf->len - offset;
BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
/*
* For encryption, we want to read from the cleartext
* page cache pages, and write the encrypted data to
* the supplied xdr_buf pages.
*/
save_pages = buf->pages;
if (encrypt)
buf->pages = pages;
ret = read_bytes_from_xdr_buf(buf, offset, data, len);
buf->pages = save_pages;
if (ret)
goto out;
sg_init_one(sg, data, len);
if (encrypt)
ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
else
ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
if (ret)
goto out;
ret = write_bytes_to_xdr_buf(buf, offset, data, len);
out:
return ret;
}
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, int ec, struct page **pages)
{
u32 err;
struct xdr_netobj hmac;
u8 *cksumkey;
u8 *ecptr;
struct crypto_blkcipher *cipher, *aux_cipher;
int blocksize;
struct page **save_pages;
int nblocks, nbytes;
struct encryptor_desc desc;
u32 cbcbytes;
unsigned int usage;
if (kctx->initiate) {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
cksumkey = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
} else {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
cksumkey = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
}
blocksize = crypto_blkcipher_blocksize(cipher);
/* hide the gss token header and insert the confounder */
offset += GSS_KRB5_TOK_HDR_LEN;
if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
return GSS_S_FAILURE;
gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
offset -= GSS_KRB5_TOK_HDR_LEN;
if (buf->tail[0].iov_base != NULL) {
ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
} else {
buf->tail[0].iov_base = buf->head[0].iov_base
+ buf->head[0].iov_len;
buf->tail[0].iov_len = 0;
ecptr = buf->tail[0].iov_base;
}
memset(ecptr, 'X', ec);
buf->tail[0].iov_len += ec;
buf->len += ec;
/* copy plaintext gss token header after filler (if any) */
memcpy(ecptr + ec, buf->head[0].iov_base + offset,
GSS_KRB5_TOK_HDR_LEN);
buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
buf->len += GSS_KRB5_TOK_HDR_LEN;
/* Do the HMAC */
hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
/*
* When we are called, pages points to the real page cache
* data -- which we can't go and encrypt! buf->pages points
* to scratch pages which we are going to send off to the
* client/server. Swap in the plaintext pages to calculate
* the hmac.
*/
save_pages = buf->pages;
buf->pages = pages;
err = make_checksum_v2(kctx, NULL, 0, buf,
offset + GSS_KRB5_TOK_HDR_LEN,
cksumkey, usage, &hmac);
buf->pages = save_pages;
if (err)
return GSS_S_FAILURE;
nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
nblocks = (nbytes + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
desc.fragno = 0;
desc.fraglen = 0;
desc.pages = pages;
desc.outbuf = buf;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.desc.tfm = aux_cipher;
sg_init_table(desc.infrags, 4);
sg_init_table(desc.outfrags, 4);
err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
cbcbytes, encryptor, &desc);
if (err)
goto out_err;
}
/* Make sure IV carries forward from any CBC results. */
err = gss_krb5_cts_crypt(cipher, buf,
offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
desc.iv, pages, 1);
if (err) {
err = GSS_S_FAILURE;
goto out_err;
}
/* Now update buf to account for HMAC */
buf->tail[0].iov_len += kctx->gk5e->cksumlength;
buf->len += kctx->gk5e->cksumlength;
out_err:
if (err)
err = GSS_S_FAILURE;
return err;
}
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
u32 *headskip, u32 *tailskip)
{
struct xdr_buf subbuf;
u32 ret = 0;
u8 *cksum_key;
struct crypto_blkcipher *cipher, *aux_cipher;
struct xdr_netobj our_hmac_obj;
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
int nblocks, blocksize, cbcbytes;
struct decryptor_desc desc;
unsigned int usage;
if (kctx->initiate) {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
cksum_key = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
} else {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
cksum_key = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
}
blocksize = crypto_blkcipher_blocksize(cipher);
/* create a segment skipping the header and leaving out the checksum */
xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
(buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
kctx->gk5e->cksumlength));
nblocks = (subbuf.len + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
desc.fragno = 0;
desc.fraglen = 0;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.desc.tfm = aux_cipher;
sg_init_table(desc.frags, 4);
ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
if (ret)
goto out_err;
}
/* Make sure IV carries forward from any CBC results. */
ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
if (ret)
goto out_err;
/* Calculate our hmac over the plaintext data */
our_hmac_obj.len = sizeof(our_hmac);
our_hmac_obj.data = our_hmac;
ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
cksum_key, usage, &our_hmac_obj);
if (ret)
goto out_err;
/* Get the packet's hmac value */
ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
pkt_hmac, kctx->gk5e->cksumlength);
if (ret)
goto out_err;
if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
ret = GSS_S_BAD_SIG;
goto out_err;
}
*headskip = kctx->gk5e->conflen;
*tailskip = kctx->gk5e->cksumlength;
out_err:
if (ret && ret != GSS_S_BAD_SIG)
ret = GSS_S_FAILURE;
return ret;
}
/*
* Compute Kseq given the initial session key and the checksum.
* Set the key of the given cipher.
*/
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
unsigned char *cksum)
{
struct crypto_hash *hmac;
struct hash_desc desc;
struct scatterlist sg[1];
u8 Kseq[GSS_KRB5_MAX_KEYLEN];
u32 zeroconstant = 0;
int err;
dprintk("%s: entered\n", __func__);
hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac)) {
dprintk("%s: error %ld, allocating hash '%s'\n",
__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
return PTR_ERR(hmac);
}
desc.tfm = hmac;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto out_err;
/* Compute intermediate Kseq from session key */
err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
if (err)
goto out_err;
sg_init_table(sg, 1);
sg_set_buf(sg, &zeroconstant, 4);
err = crypto_hash_digest(&desc, sg, 4, Kseq);
if (err)
goto out_err;
/* Compute final Kseq from the checksum and intermediate Kseq */
err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
if (err)
goto out_err;
sg_set_buf(sg, cksum, 8);
err = crypto_hash_digest(&desc, sg, 8, Kseq);
if (err)
goto out_err;
err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
if (err)
goto out_err;
err = 0;
out_err:
crypto_free_hash(hmac);
dprintk("%s: returning %d\n", __func__, err);
return err;
}
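/*
 * Editor note (derivation summary): the two HMAC passes above implement
 *
 *	Kseq = HMAC-MD5(HMAC-MD5(Ksess, 0x00000000), checksum[0..7])
 *
 * i.e. an intermediate key is derived from the session key over a 4-byte
 * zero constant, and the final Kseq from that intermediate key over the
 * first 8 bytes of the token checksum.
 */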
/*
* Compute Kcrypt given the initial session key and the plaintext seqnum.
* Set the key of cipher kctx->enc.
*/
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
s32 seqnum)
{
struct crypto_hash *hmac;
struct hash_desc desc;
struct scatterlist sg[1];
u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
u8 zeroconstant[4] = {0};
u8 seqnumarray[4];
int err, i;
dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac)) {
dprintk("%s: error %ld, allocating hash '%s'\n",
__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
return PTR_ERR(hmac);
}
desc.tfm = hmac;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto out_err;
/* Compute intermediate Kcrypt from session key */
for (i = 0; i < kctx->gk5e->keylength; i++)
Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
if (err)
goto out_err;
sg_init_table(sg, 1);
sg_set_buf(sg, zeroconstant, 4);
err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
if (err)
goto out_err;
/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
if (err)
goto out_err;
seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
sg_set_buf(sg, seqnumarray, 4);
err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
if (err)
goto out_err;
err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
if (err)
goto out_err;
err = 0;
out_err:
crypto_free_hash(hmac);
dprintk("%s: returning %d\n", __func__, err);
return err;
}
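/*
 * Editor note (derivation summary): the steps above implement
 *
 *	Kcrypt = HMAC-MD5(HMAC-MD5(Ksess ^ 0xf0f0..., 0x00000000),
 *			  big_endian32(seqnum))
 *
 * where the 0xf0 XOR is applied to every byte of the session key and the
 * sequence number is fed in as four big-endian bytes.
 */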
| gpl-2.0 |
d3trax/asuswrt-merlin | release/src/router/gdb/readline/kill.c | 43 | 15729 | /* kill.c -- kill ring management. */
/* Copyright (C) 1994 Free Software Foundation, Inc.
This file is part of the GNU Readline Library, a library for
reading lines of text with interactive input and history editing.
The GNU Readline Library is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2, or
(at your option) any later version.
The GNU Readline Library is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The GNU General Public License is often shipped with GNU software, and
is generally kept in a file called COPYING or LICENSE. If you do not
have a copy of the license, write to the Free Software Foundation,
59 Temple Place, Suite 330, Boston, MA 02111 USA. */
#define READLINE_LIBRARY
#if defined (HAVE_CONFIG_H)
# include <config.h>
#endif
#include <sys/types.h>
#if defined (HAVE_UNISTD_H)
# include <unistd.h> /* for _POSIX_VERSION */
#endif /* HAVE_UNISTD_H */
#if defined (HAVE_STDLIB_H)
# include <stdlib.h>
#else
# include "ansi_stdlib.h"
#endif /* HAVE_STDLIB_H */
#include <stdio.h>
/* System-specific feature definitions and include files. */
#include "rldefs.h"
/* Some standard library routines. */
#include "readline.h"
#include "history.h"
#include "rlprivate.h"
#include "xmalloc.h"
/* **************************************************************** */
/* */
/* Killing Mechanism */
/* */
/* **************************************************************** */
/* What we assume for a max number of kills. */
#define DEFAULT_MAX_KILLS 10
/* The real variable to look at to find out when to flush kills. */
static int rl_max_kills = DEFAULT_MAX_KILLS;
/* Where to store killed text. */
static char **rl_kill_ring = (char **)NULL;
/* Where we are in the kill ring. */
static int rl_kill_index;
/* How many slots we have in the kill ring. */
static int rl_kill_ring_length;
static int _rl_copy_to_kill_ring PARAMS((char *, int));
static int region_kill_internal PARAMS((int));
static int _rl_copy_word_as_kill PARAMS((int, int));
static int rl_yank_nth_arg_internal PARAMS((int, int, int));
/* How to say that you only want to save a certain amount
of kill material. */
int
rl_set_retained_kills (num)
int num;
{
return 0;
}
/* Add TEXT to the kill ring, allocating a new kill ring slot as necessary.
This uses TEXT directly, so the caller must not free it. If APPEND is
non-zero, and the last command was a kill, the text is appended to the
current kill ring slot, otherwise prepended. */
static int
_rl_copy_to_kill_ring (text, append)
char *text;
int append;
{
char *old, *new;
int slot;
/* First, find the slot to work with. */
if (_rl_last_command_was_kill == 0)
{
/* Get a new slot. */
if (rl_kill_ring == 0)
{
/* If we don't have any defined, then make one. */
rl_kill_ring = (char **)
xmalloc (((rl_kill_ring_length = 1) + 1) * sizeof (char *));
rl_kill_ring[slot = 0] = (char *)NULL;
}
else
{
/* We have to add a new slot on the end, unless we have
exceeded the max limit for remembering kills. */
slot = rl_kill_ring_length;
if (slot == rl_max_kills)
{
register int i;
free (rl_kill_ring[0]);
for (i = 0; i < slot; i++)
rl_kill_ring[i] = rl_kill_ring[i + 1];
}
else
{
slot = rl_kill_ring_length += 1;
rl_kill_ring = (char **)xrealloc (rl_kill_ring, slot * sizeof (char *));
}
rl_kill_ring[--slot] = (char *)NULL;
}
}
else
slot = rl_kill_ring_length - 1;
/* If the last command was a kill, prepend or append. */
if (_rl_last_command_was_kill && rl_editing_mode != vi_mode)
{
old = rl_kill_ring[slot];
new = (char *)xmalloc (1 + strlen (old) + strlen (text));
if (append)
{
strcpy (new, old);
strcat (new, text);
}
else
{
strcpy (new, text);
strcat (new, old);
}
free (old);
free (text);
rl_kill_ring[slot] = new;
}
else
rl_kill_ring[slot] = text;
rl_kill_index = slot;
return 0;
}
/* The way to kill something. This appends or prepends to the last
kill, if the last command was a kill command. if FROM is less
than TO, then the text is appended, otherwise prepended. If the
last command was not a kill command, then a new slot is made for
this kill. */
int
rl_kill_text (from, to)
int from, to;
{
char *text;
/* Is there anything to kill? */
if (from == to)
{
_rl_last_command_was_kill++;
return 0;
}
text = rl_copy_text (from, to);
/* Delete the copied text from the line. */
rl_delete_text (from, to);
_rl_copy_to_kill_ring (text, from < to);
_rl_last_command_was_kill++;
return 0;
}
/* Now REMEMBER! In order to do prepending or appending correctly, kill
commands always make rl_point's original position be the FROM argument,
and rl_point's extent be the TO argument. */
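/* Editor sketch (illustrative, compiled out): the FROM/TO convention in
   practice -- killing forward of point gives FROM < TO, so consecutive
   kills append; killing backward gives FROM > TO, so they prepend. */
#if 0
static void
example_kill_forward_word (void)
{
  int from = rl_point;

  rl_forward_word (1, 0);
  /* FROM is the original point, TO is where point ended up */
  rl_kill_text (from, rl_point);
}
#endif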
/* **************************************************************** */
/* */
/* Killing Commands */
/* */
/* **************************************************************** */
/* Delete the word at point, saving the text in the kill ring. */
int
rl_kill_word (count, key)
int count, key;
{
int orig_point;
if (count < 0)
return (rl_backward_kill_word (-count, key));
else
{
orig_point = rl_point;
rl_forward_word (count, key);
if (rl_point != orig_point)
rl_kill_text (orig_point, rl_point);
rl_point = orig_point;
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
return 0;
}
/* Rubout the word before point, placing it on the kill ring. */
int
rl_backward_kill_word (count, ignore)
int count, ignore;
{
int orig_point;
if (count < 0)
return (rl_kill_word (-count, ignore));
else
{
orig_point = rl_point;
rl_backward_word (count, ignore);
if (rl_point != orig_point)
rl_kill_text (orig_point, rl_point);
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
return 0;
}
/* Kill from here to the end of the line. If DIRECTION is negative, kill
back to the line start instead. */
int
rl_kill_line (direction, ignore)
int direction, ignore;
{
int orig_point;
if (direction < 0)
return (rl_backward_kill_line (1, ignore));
else
{
orig_point = rl_point;
rl_end_of_line (1, ignore);
if (orig_point != rl_point)
rl_kill_text (orig_point, rl_point);
rl_point = orig_point;
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
return 0;
}
/* Kill backwards to the start of the line. If DIRECTION is negative, kill
forwards to the line end instead. */
int
rl_backward_kill_line (direction, ignore)
int direction, ignore;
{
int orig_point;
if (direction < 0)
return (rl_kill_line (1, ignore));
else
{
if (!rl_point)
rl_ding ();
else
{
orig_point = rl_point;
rl_beg_of_line (1, ignore);
if (rl_point != orig_point)
rl_kill_text (orig_point, rl_point);
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
}
return 0;
}
/* Kill the whole line, no matter where point is. */
int
rl_kill_full_line (count, ignore)
int count, ignore;
{
rl_begin_undo_group ();
rl_point = 0;
rl_kill_text (rl_point, rl_end);
rl_mark = 0;
rl_end_undo_group ();
return 0;
}
/* The next two functions mimic unix line editing behaviour, except they
save the deleted text on the kill ring. This is safer than not saving
it, and since we have a ring, nobody should get screwed. */
/* This does what C-w does in Unix. We can't prevent people from
using behaviour that they expect. */
int
rl_unix_word_rubout (count, key)
int count, key;
{
int orig_point;
if (rl_point == 0)
rl_ding ();
else
{
orig_point = rl_point;
if (count <= 0)
count = 1;
while (count--)
{
while (rl_point && whitespace (rl_line_buffer[rl_point - 1]))
rl_point--;
while (rl_point && (whitespace (rl_line_buffer[rl_point - 1]) == 0))
rl_point--;
}
rl_kill_text (orig_point, rl_point);
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
return 0;
}
/* This deletes one filename component in a Unix pathname. That is, it
deletes backward to directory separator (`/') or whitespace. */
int
rl_unix_filename_rubout (count, key)
int count, key;
{
int orig_point, c;
if (rl_point == 0)
rl_ding ();
else
{
orig_point = rl_point;
if (count <= 0)
count = 1;
while (count--)
{
c = rl_line_buffer[rl_point - 1];
while (rl_point && (whitespace (c) || c == '/'))
{
rl_point--;
c = rl_line_buffer[rl_point - 1];
}
while (rl_point && (whitespace (c) == 0) && c != '/')
{
rl_point--;
c = rl_line_buffer[rl_point - 1];
}
}
rl_kill_text (orig_point, rl_point);
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
return 0;
}
/* Here is C-u doing what Unix does. You don't *have* to use these
key-bindings. We have a choice of killing the entire line, or
killing from where we are to the start of the line. We choose the
latter, because if you are a Unix weenie, then you haven't backspaced
into the line at all, and if you aren't, then you know what you are
doing. */
int
rl_unix_line_discard (count, key)
int count, key;
{
if (rl_point == 0)
rl_ding ();
else
{
rl_kill_text (rl_point, 0);
rl_point = 0;
if (rl_editing_mode == emacs_mode)
rl_mark = rl_point;
}
return 0;
}
/* Copy the text in the `region' to the kill ring. If DELETE is non-zero,
delete the text from the line as well. */
static int
region_kill_internal (delete)
int delete;
{
char *text;
if (rl_mark != rl_point)
{
text = rl_copy_text (rl_point, rl_mark);
if (delete)
rl_delete_text (rl_point, rl_mark);
_rl_copy_to_kill_ring (text, rl_point < rl_mark);
}
_rl_last_command_was_kill++;
return 0;
}
/* Copy the text in the region to the kill ring. */
int
rl_copy_region_to_kill (count, ignore)
int count, ignore;
{
return (region_kill_internal (0));
}
/* Kill the text between the point and mark. */
int
rl_kill_region (count, ignore)
int count, ignore;
{
int r, npoint;
npoint = (rl_point < rl_mark) ? rl_point : rl_mark;
r = region_kill_internal (1);
_rl_fix_point (1);
rl_point = npoint;
return r;
}
/* Copy COUNT words to the kill ring. DIR says which direction we look
to find the words. */
static int
_rl_copy_word_as_kill (count, dir)
int count, dir;
{
int om, op, r;
om = rl_mark;
op = rl_point;
if (dir > 0)
rl_forward_word (count, 0);
else
rl_backward_word (count, 0);
rl_mark = rl_point;
if (dir > 0)
rl_backward_word (count, 0);
else
rl_forward_word (count, 0);
r = region_kill_internal (0);
rl_mark = om;
rl_point = op;
return r;
}
int
rl_copy_forward_word (count, key)
int count, key;
{
if (count < 0)
return (rl_copy_backward_word (-count, key));
return (_rl_copy_word_as_kill (count, 1));
}
int
rl_copy_backward_word (count, key)
int count, key;
{
if (count < 0)
return (rl_copy_forward_word (-count, key));
return (_rl_copy_word_as_kill (count, -1));
}
/* Yank back the last killed text. This ignores arguments. */
int
rl_yank (count, ignore)
int count, ignore;
{
if (rl_kill_ring == 0)
{
_rl_abort_internal ();
return -1;
}
_rl_set_mark_at_pos (rl_point);
rl_insert_text (rl_kill_ring[rl_kill_index]);
return 0;
}
/* If the last command was yank, or yank_pop, and the text just
before point is identical to the current kill item, then
delete that text from the line, rotate the index down, and
yank back some other text. */
int
rl_yank_pop (count, key)
int count, key;
{
int l, n;
if (((rl_last_func != rl_yank_pop) && (rl_last_func != rl_yank)) ||
!rl_kill_ring)
{
_rl_abort_internal ();
return -1;
}
l = strlen (rl_kill_ring[rl_kill_index]);
n = rl_point - l;
if (n >= 0 && STREQN (rl_line_buffer + n, rl_kill_ring[rl_kill_index], l))
{
rl_delete_text (n, rl_point);
rl_point = n;
rl_kill_index--;
if (rl_kill_index < 0)
rl_kill_index = rl_kill_ring_length - 1;
rl_yank (1, 0);
return 0;
}
else
{
_rl_abort_internal ();
return -1;
}
}
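/* Editor note (illustrative): after `C-y' (rl_yank) inserts the current
   kill at point, an immediate `M-y' (rl_yank_pop) deletes that text,
   steps rl_kill_index back with wrap-around, and yanks the previous
   entry in its place; any intervening command aborts the rotation. */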
/* Yank the COUNTh argument from the previous history line, skipping
HISTORY_SKIP lines before looking for the `previous line'. */
static int
rl_yank_nth_arg_internal (count, ignore, history_skip)
int count, ignore, history_skip;
{
register HIST_ENTRY *entry;
char *arg;
int i, pos;
pos = where_history ();
if (history_skip)
{
for (i = 0; i < history_skip; i++)
entry = previous_history ();
}
entry = previous_history ();
history_set_pos (pos);
if (entry == 0)
{
rl_ding ();
return -1;
}
arg = history_arg_extract (count, count, entry->line);
if (!arg || !*arg)
{
rl_ding ();
return -1;
}
rl_begin_undo_group ();
_rl_set_mark_at_pos (rl_point);
#if defined (VI_MODE)
/* Vi mode always inserts a space before yanking the argument, and it
inserts it right *after* rl_point. */
if (rl_editing_mode == vi_mode)
{
rl_vi_append_mode (1, ignore);
rl_insert_text (" ");
}
#endif /* VI_MODE */
rl_insert_text (arg);
free (arg);
rl_end_undo_group ();
return 0;
}
/* Yank the COUNTth argument from the previous history line. */
int
rl_yank_nth_arg (count, ignore)
int count, ignore;
{
return (rl_yank_nth_arg_internal (count, ignore, 0));
}
/* Yank the last argument from the previous history line. This `knows'
how rl_yank_nth_arg treats a count of `$'. With an argument, this
behaves the same as rl_yank_nth_arg. */
int
rl_yank_last_arg (count, key)
int count, key;
{
static int history_skip = 0;
static int explicit_arg_p = 0;
static int count_passed = 1;
static int direction = 1;
static int undo_needed = 0;
int retval;
if (rl_last_func != rl_yank_last_arg)
{
history_skip = 0;
explicit_arg_p = rl_explicit_arg;
count_passed = count;
direction = 1;
}
else
{
if (undo_needed)
rl_do_undo ();
if (count < 1)
direction = -direction;
history_skip += direction;
if (history_skip < 0)
history_skip = 0;
}
if (explicit_arg_p)
retval = rl_yank_nth_arg_internal (count_passed, key, history_skip);
else
retval = rl_yank_nth_arg_internal ('$', key, history_skip);
undo_needed = retval == 0;
return retval;
}
/* A special paste command for users of Cygnus's cygwin32. */
#if defined (__CYGWIN__)
#include <windows.h>
int
rl_paste_from_clipboard (count, key)
int count, key;
{
char *data, *ptr;
int len;
if (OpenClipboard (NULL) == 0)
return (0);
data = (char *)GetClipboardData (CF_TEXT);
if (data)
{
ptr = strchr (data, '\r');
if (ptr)
{
len = ptr - data;
ptr = (char *)xmalloc (len + 1);
ptr[len] = '\0';
strncpy (ptr, data, len);
}
else
ptr = data;
_rl_set_mark_at_pos (rl_point);
rl_insert_text (ptr);
if (ptr != data)
free (ptr);
CloseClipboard ();
}
return (0);
}
#endif /* __CYGWIN__ */
| gpl-2.0 |
adam-lee/linux | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c | 43 | 18241 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#include "socklnd.h"
int
ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
{
int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1,
&conn->ksnc_ipaddr,
&conn->ksnc_port);
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT (!conn->ksnc_closing);
if (rc != 0) {
CERROR ("Error %d getting sock peer IP\n", rc);
return rc;
}
rc = libcfs_sock_getaddr(conn->ksnc_sock, 0,
&conn->ksnc_myipaddr, NULL);
if (rc != 0) {
CERROR ("Error %d getting sock local IP\n", rc);
return rc;
}
return 0;
}
int
ksocknal_lib_zc_capable(ksock_conn_t *conn)
{
int caps = conn->ksnc_sock->sk->sk_route_caps;
if (conn->ksnc_proto == &ksocknal_protocol_v1x)
return 0;
/* ZC if the socket supports scatter/gather and doesn't need software
* checksums */
return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0);
}
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
struct socket *sock = conn->ksnc_sock;
int nob;
int rc;
if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
tx->tx_nob == tx->tx_resid && /* first sending */
tx->tx_msg.ksm_csum == 0) /* not checksummed */
ksocknal_lib_csum_tx(tx);
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
{
#if SOCKNAL_SINGLE_FRAG_TX
struct iovec scratch;
struct iovec *scratchiov = &scratch;
unsigned int niov = 1;
#else
struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_niov;
#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
for (nob = i = 0; i < niov; i++) {
scratchiov[i] = tx->tx_iov[i];
nob += scratchiov[i].iov_len;
}
if (!list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
}
return rc;
}
int
ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
struct socket *sock = conn->ksnc_sock;
lnet_kiov_t *kiov = tx->tx_kiov;
int rc;
int nob;
/* Not NOOP message */
LASSERT (tx->tx_lnetmsg != NULL);
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
struct page *page = kiov->kiov_page;
int offset = kiov->kiov_offset;
int fragsize = kiov->kiov_len;
int msgflg = MSG_DONTWAIT;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
page, offset, kiov->kiov_len);
if (!list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
msgflg |= MSG_MORE;
if (sk->sk_prot->sendpage != NULL) {
rc = sk->sk_prot->sendpage(sk, page,
offset, fragsize, msgflg);
} else {
rc = cfs_tcp_sendpage(sk, page, offset, fragsize,
msgflg);
}
} else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct iovec scratch;
struct iovec *scratchiov = &scratch;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_nkiov;
#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
for (nob = i = 0; i < niov; i++) {
scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
if (!list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
for (i = 0; i < niov; i++)
kunmap(kiov[i].kiov_page);
}
return rc;
}
void
ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
int opt = 1;
mm_segment_t oldmm = get_fs();
struct socket *sock = conn->ksnc_sock;
/* Remind the socket to ACK eagerly. If I don't, the socket might
* think I'm about to send something it could piggy-back the ACK
* on, introducing delay in completing zero-copy sends in my
* peer. */
set_fs(KERNEL_DS);
sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
(char *)&opt, sizeof (opt));
set_fs(oldmm);
}
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
struct iovec scratch;
struct iovec *scratchiov = &scratch;
unsigned int niov = 1;
#else
struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = conn->ksnc_rx_niov;
#endif
struct iovec *iov = conn->ksnc_rx_iov;
struct msghdr msg = {
.msg_flags = 0
};
int nob;
int i;
int rc;
int fragnob;
int sum;
__u32 saved_csum;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
LASSERT (niov > 0);
for (nob = i = 0; i < niov; i++) {
scratchiov[i] = iov[i];
nob += scratchiov[i].iov_len;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
rc = kernel_recvmsg(conn->ksnc_sock, &msg,
(struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);
saved_csum = 0;
if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
saved_csum = conn->ksnc_msg.ksm_csum;
conn->ksnc_msg.ksm_csum = 0;
}
if (saved_csum != 0) {
/* accumulate checksum */
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT (i < niov);
fragnob = iov[i].iov_len;
if (fragnob > sum)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
iov[i].iov_base, fragnob);
}
conn->ksnc_msg.ksm_csum = saved_csum;
}
return rc;
}
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
if (addr == NULL)
return;
vunmap(addr);
}
static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
struct iovec *iov, struct page **pages)
{
void *addr;
int nob;
int i;
if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
return NULL;
LASSERT (niov <= LNET_MAX_IOV);
if (niov < 2 ||
niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
return NULL;
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset != 0 && i > 0) ||
(kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
nob += kiov[i].kiov_len;
}
addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
if (addr == NULL)
return NULL;
iov->iov_base = addr + kiov[0].kiov_offset;
iov->iov_len = nob;
return addr;
}
int
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct iovec scratch;
struct iovec *scratchiov = &scratch;
struct page **pages = NULL;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
unsigned int niov = conn->ksnc_rx_nkiov;
#endif
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = {
.msg_flags = 0
};
int nob;
int i;
int rc;
void *base;
void *addr;
int sum;
int fragnob;
int n;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
if (addr != NULL) {
nob = scratchiov[0].iov_len;
n = 1;
} else {
for (nob = i = 0; i < niov; i++) {
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
}
n = niov;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
rc = kernel_recvmsg(conn->ksnc_sock, &msg,
(struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
if (conn->ksnc_msg.ksm_csum != 0) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT (i < niov);
/* Dang! have to kmap again because I have nowhere to stash the
* mapped address. But by doing it while the page is still
* mapped, the kernel just bumps the map count and returns me
* the address it stashed. */
base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
fragnob = kiov[i].kiov_len;
if (fragnob > sum)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
base, fragnob);
kunmap(kiov[i].kiov_page);
}
}
if (addr != NULL) {
ksocknal_lib_kiov_vunmap(addr);
} else {
for (i = 0; i < niov; i++)
kunmap(kiov[i].kiov_page);
}
return (rc);
}
void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
int i;
__u32 csum;
void *base;
LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
LASSERT(tx->tx_conn != NULL);
LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
tx->tx_msg.ksm_csum = 0;
csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
tx->tx_iov[0].iov_len);
if (tx->tx_kiov != NULL) {
for (i = 0; i < tx->tx_nkiov; i++) {
base = kmap(tx->tx_kiov[i].kiov_page) +
tx->tx_kiov[i].kiov_offset;
csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
kunmap(tx->tx_kiov[i].kiov_page);
}
} else {
for (i = 1; i < tx->tx_niov; i++)
csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
tx->tx_iov[i].iov_len);
}
if (*ksocknal_tunables.ksnd_inject_csum_error) {
csum++;
*ksocknal_tunables.ksnd_inject_csum_error = 0;
}
tx->tx_msg.ksm_csum = csum;
}
int
ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
mm_segment_t oldmm = get_fs ();
struct socket *sock = conn->ksnc_sock;
int len;
int rc;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
LASSERT (conn->ksnc_closing);
*txmem = *rxmem = *nagle = 0;
return (-ESHUTDOWN);
}
rc = libcfs_sock_getbuf(sock, txmem, rxmem);
if (rc == 0) {
len = sizeof(*nagle);
set_fs(KERNEL_DS);
rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
(char *)nagle, &len);
set_fs(oldmm);
}
ksocknal_connsock_decref(conn);
if (rc == 0)
*nagle = !*nagle;
else
*txmem = *rxmem = *nagle = 0;
return (rc);
}
int
ksocknal_lib_setup_sock (struct socket *sock)
{
mm_segment_t oldmm = get_fs ();
int rc;
int option;
int keep_idle;
int keep_intvl;
int keep_count;
int do_keepalive;
struct linger linger;
sock->sk->sk_allocation = GFP_NOFS;
/* Ensure this socket aborts active sends immediately when we close
* it. */
linger.l_onoff = 0;
linger.l_linger = 0;
set_fs (KERNEL_DS);
rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
(char *)&linger, sizeof (linger));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set SO_LINGER: %d\n", rc);
return (rc);
}
option = -1;
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
(char *)&option, sizeof (option));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set SO_LINGER2: %d\n", rc);
return (rc);
}
if (!*ksocknal_tunables.ksnd_nagle) {
option = 1;
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
(char *)&option, sizeof (option));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't disable nagle: %d\n", rc);
return (rc);
}
}
rc = libcfs_sock_setbuf(sock,
*ksocknal_tunables.ksnd_tx_buffer_size,
*ksocknal_tunables.ksnd_rx_buffer_size);
if (rc != 0) {
CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n",
*ksocknal_tunables.ksnd_tx_buffer_size,
*ksocknal_tunables.ksnd_rx_buffer_size, rc);
return (rc);
}
/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
/* snapshot tunables */
keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
keep_count = *ksocknal_tunables.ksnd_keepalive_count;
keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;
do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
option = (do_keepalive ? 1 : 0);
set_fs (KERNEL_DS);
rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
(char *)&option, sizeof (option));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
return (rc);
}
if (!do_keepalive)
return (0);
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
(char *)&keep_idle, sizeof (keep_idle));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
return (rc);
}
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
(char *)&keep_intvl, sizeof (keep_intvl));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
return (rc);
}
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
(char *)&keep_count, sizeof (keep_count));
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);
return (rc);
}
return (0);
}
void
ksocknal_lib_push_conn (ksock_conn_t *conn)
{
struct sock *sk;
struct tcp_sock *tp;
int nonagle;
int val = 1;
int rc;
mm_segment_t oldmm;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) /* being shut down */
return;
sk = conn->ksnc_sock->sk;
tp = tcp_sk(sk);
lock_sock (sk);
nonagle = tp->nonagle;
tp->nonagle = 1;
release_sock (sk);
oldmm = get_fs ();
set_fs (KERNEL_DS);
rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof (val));
LASSERT (rc == 0);
set_fs (oldmm);
lock_sock (sk);
tp->nonagle = nonagle;
release_sock (sk);
ksocknal_connsock_decref(conn);
}
extern void ksocknal_read_callback (ksock_conn_t *conn);
extern void ksocknal_write_callback (ksock_conn_t *conn);
/*
* socket call back in Linux
*/
static void
ksocknal_data_ready (struct sock *sk)
{
ksock_conn_t *conn;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
sk->sk_data_ready (sk);
} else
ksocknal_read_callback(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
}
static void
ksocknal_write_space (struct sock *sk)
{
ksock_conn_t *conn;
int wspace;
int min_wspace;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
wspace = SOCKNAL_WSPACE(sk);
min_wspace = SOCKNAL_MIN_WSPACE(sk);
CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
sk, wspace, min_wspace, conn,
(conn == NULL) ? "" : (conn->ksnc_tx_ready ?
" ready" : " blocked"),
(conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
" scheduled" : " idle"),
(conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
" empty" : " queued"));
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
LASSERT (sk->sk_write_space != &ksocknal_write_space);
sk->sk_write_space (sk);
read_unlock(&ksocknal_data.ksnd_global_lock);
return;
}
if (wspace >= min_wspace) { /* got enough space */
ksocknal_write_callback(conn);
/* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
* ENOMEM check in ksocknal_transmit is race-free (think about
* it). */
clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
}
void
ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
sock->sk->sk_write_space = ksocknal_write_space;
return;
}
void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
/* Remove conn's network callbacks.
* NB I _have_ to restore the callback, rather than storing a noop,
* since the socket could survive past this module being unloaded!! */
sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
sock->sk->sk_write_space = conn->ksnc_saved_write_space;
/* A callback could be in progress already; they hold a read lock
* on ksnd_global_lock (to serialise with me) and NOOP if
* sk_user_data is NULL. */
sock->sk->sk_user_data = NULL;
return;
}
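/*
 * Illustrative sketch (not part of the original driver): the intended
 * pairing of the three helpers above over a connection's lifetime. The
 * real call sites and locking live elsewhere in the socklnd code; this
 * example function is hypothetical.
 */
static inline void
example_callback_lifecycle(struct socket *sock, ksock_conn_t *conn)
{
	ksocknal_lib_save_callback(sock, conn);	 /* stash kernel defaults */
	ksocknal_lib_set_callback(sock, conn);	 /* route events to ksocknal */

	/* ... connection is live; data_ready/write_space fire here ... */

	ksocknal_lib_reset_callback(sock, conn); /* restore before conn dies */
}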
int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
int rc = 0;
ksock_sched_t *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
!conn->ksnc_tx_ready) {
/* SOCK_NOSPACE is set when the socket fills
* and cleared in the write_space callback
* (which also sets ksnc_tx_ready). If
* SOCK_NOSPACE and ksnc_tx_ready are BOTH
* zero, I didn't fill the socket and
* write_space won't reschedule me, so I
* return -ENOMEM to get my caller to retry
* after a timeout */
rc = -ENOMEM;
}
spin_unlock_bh(&sched->kss_lock);
return rc;
}
| gpl-2.0 |
speef/linux | drivers/usb/otg/otg.c | 43 | 11515 | /*
* otg.c -- USB OTG utility code
*
* Copyright (C) 2004 Texas Instruments
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/usb/otg.h>
static LIST_HEAD(phy_list);
static LIST_HEAD(phy_bind_list);
static DEFINE_SPINLOCK(phy_lock);
static struct usb_phy *__usb_find_phy(struct list_head *list,
enum usb_phy_type type)
{
struct usb_phy *phy = NULL;
list_for_each_entry(phy, list, head) {
if (phy->type != type)
continue;
return phy;
}
return ERR_PTR(-ENODEV);
}
static struct usb_phy *__usb_find_phy_dev(struct device *dev,
struct list_head *list, u8 index)
{
struct usb_phy_bind *phy_bind = NULL;
list_for_each_entry(phy_bind, list, list) {
if (!(strcmp(phy_bind->dev_name, dev_name(dev))) &&
phy_bind->index == index) {
if (phy_bind->phy)
return phy_bind->phy;
else
return ERR_PTR(-EPROBE_DEFER);
}
}
return ERR_PTR(-ENODEV);
}
static struct usb_phy *__of_usb_find_phy(struct device_node *node)
{
struct usb_phy *phy;
list_for_each_entry(phy, &phy_list, head) {
if (node != phy->dev->of_node)
continue;
return phy;
}
return ERR_PTR(-ENODEV);
}
static void devm_usb_phy_release(struct device *dev, void *res)
{
struct usb_phy *phy = *(struct usb_phy **)res;
usb_put_phy(phy);
}
static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
{
return res == match_data;
}
/**
* devm_usb_get_phy - find the USB PHY
* @dev: device that requests this phy
* @type: the type of the phy the controller requires
*
* Gets the phy using usb_get_phy(), and associates a device with it using
* devres. On driver detach, release function is invoked on the devres data,
* then, devres data is freed.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *devm_usb_get_phy(struct device *dev, enum usb_phy_type type)
{
struct usb_phy **ptr, *phy;
ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
phy = usb_get_phy(type);
if (!IS_ERR(phy)) {
*ptr = phy;
devres_add(dev, ptr);
} else
devres_free(ptr);
return phy;
}
EXPORT_SYMBOL(devm_usb_get_phy);
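/*
 * Illustrative usage (not part of this file): a controller probe()
 * taking a PHY reference through devres, so no explicit put is needed
 * on the error or remove paths. The driver and function names are
 * hypothetical. Note devm_usb_get_phy() returns NULL if the devres
 * allocation fails and an ERR_PTR() if the lookup fails, hence the
 * IS_ERR_OR_NULL() check.
 */
static int example_controller_probe(struct platform_device *pdev)
{
	struct usb_phy *phy;

	phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(phy))
		return phy ? PTR_ERR(phy) : -ENOMEM;

	/* ... use phy; devres drops the reference on driver detach ... */
	return 0;
}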
/**
* usb_get_phy - find the USB PHY
* @type: the type of the phy the controller requires
*
* Returns the phy driver, after getting a refcount to it; or
* -ENODEV if there is no such phy. The caller is responsible for
* calling usb_put_phy() to release that count.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *usb_get_phy(enum usb_phy_type type)
{
struct usb_phy *phy = NULL;
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
pr_err("unable to find transceiver of type %s\n",
usb_phy_type_string(type));
goto err0;
}
get_device(phy->dev);
err0:
spin_unlock_irqrestore(&phy_lock, flags);
return phy;
}
EXPORT_SYMBOL(usb_get_phy);
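/*
 * Illustrative get/put pairing (not part of this file): with the plain
 * usb_get_phy() the caller owns the reference and module pin until it
 * calls usb_put_phy(). The function name is hypothetical.
 */
static int example_use_phy(void)
{
	struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* ... talk to the transceiver ... */

	usb_put_phy(phy);
	return 0;
}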
/**
* devm_usb_get_phy_by_phandle - find the USB PHY by phandle
* @dev: device that requests this phy
* @phandle: name of the property holding the phy phandle value
* @index: the index of the phy
*
* Returns the phy driver associated with the given phandle value,
* after getting a refcount to it, -ENODEV if there is no such phy or
* -EPROBE_DEFER if there is a phandle to the phy, but the device is
* not yet loaded. While at that, it also associates the device with
* the phy using devres. On driver detach, release function is invoked
* on the devres data, then, devres data is freed.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
const char *phandle, u8 index)
{
struct usb_phy *phy = ERR_PTR(-ENOMEM), **ptr;
unsigned long flags;
struct device_node *node;
if (!dev->of_node) {
dev_dbg(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
node = of_parse_phandle(dev->of_node, phandle, index);
if (!node) {
dev_dbg(dev, "failed to get %s phandle in %s node\n", phandle,
dev->of_node->full_name);
return ERR_PTR(-ENODEV);
}
ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr) {
dev_dbg(dev, "failed to allocate memory for devres\n");
goto err0;
}
spin_lock_irqsave(&phy_lock, flags);
phy = __of_usb_find_phy(node);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
phy = ERR_PTR(-EPROBE_DEFER);
devres_free(ptr);
goto err1;
}
*ptr = phy;
devres_add(dev, ptr);
get_device(phy->dev);
err1:
spin_unlock_irqrestore(&phy_lock, flags);
err0:
of_node_put(node);
return phy;
}
EXPORT_SYMBOL(devm_usb_get_phy_by_phandle);
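/*
 * Illustrative device-tree usage (not part of this file). Assuming a
 * controller node carrying a hypothetical "usb-phy" phandle property:
 *
 *	usb@4a030000 {
 *		usb-phy = <&phy1>;
 *	};
 *
 * the controller driver defers probing until the PHY driver has bound:
 */
static int example_of_probe(struct platform_device *pdev)
{
	struct usb_phy *phy;

	phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* often -EPROBE_DEFER */

	return 0;
}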
/**
* usb_get_phy_dev - find the USB PHY
* @dev: device that requests this phy
* @index: the index of the phy
*
* Returns the phy driver, after getting a refcount to it; or
* -ENODEV if there is no such phy. The caller is responsible for
* calling usb_put_phy() to release that count.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
{
struct usb_phy *phy = NULL;
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
pr_err("unable to find transceiver\n");
goto err0;
}
get_device(phy->dev);
err0:
spin_unlock_irqrestore(&phy_lock, flags);
return phy;
}
EXPORT_SYMBOL(usb_get_phy_dev);
/**
* devm_usb_get_phy_dev - find the USB PHY using device ptr and index
* @dev: device that requests this phy
* @index: the index of the phy
*
* Gets the phy using usb_get_phy_dev(), and associates a device with it using
* devres. On driver detach, release function is invoked on the devres data,
* then, devres data is freed.
*
* For use by USB host and peripheral drivers.
*/
struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index)
{
struct usb_phy **ptr, *phy;
ptr = devres_alloc(devm_usb_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
phy = usb_get_phy_dev(dev, index);
if (!IS_ERR(phy)) {
*ptr = phy;
devres_add(dev, ptr);
} else
devres_free(ptr);
return phy;
}
EXPORT_SYMBOL(devm_usb_get_phy_dev);
/**
* devm_usb_put_phy - release the USB PHY
* @dev: device that wants to release this phy
* @phy: the phy returned by devm_usb_get_phy()
*
* destroys the devres associated with this phy and invokes usb_put_phy
* to release the phy.
*
* For use by USB host and peripheral drivers.
*/
void devm_usb_put_phy(struct device *dev, struct usb_phy *phy)
{
int r;
r = devres_destroy(dev, devm_usb_phy_release, devm_usb_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL(devm_usb_put_phy);
/**
* usb_put_phy - release the USB PHY
* @x: the phy returned by usb_get_phy()
*
* Releases a refcount the caller received from usb_get_phy().
*
* For use by USB host and peripheral drivers.
*/
void usb_put_phy(struct usb_phy *x)
{
if (x) {
struct module *owner = x->dev->driver->owner;
put_device(x->dev);
module_put(owner);
}
}
EXPORT_SYMBOL(usb_put_phy);
/**
* usb_add_phy - declare the USB PHY
* @x: the USB phy to be used; or NULL
* @type: the type of this PHY
*
* This call is exclusively for use by phy drivers, which
* coordinate the activities of drivers for host and peripheral
* controllers, and in some cases for VBUS current regulation.
*/
int usb_add_phy(struct usb_phy *x, enum usb_phy_type type)
{
int ret = 0;
unsigned long flags;
struct usb_phy *phy;
if (x->type != USB_PHY_TYPE_UNDEFINED) {
dev_err(x->dev, "not accepting initialized PHY %s\n", x->label);
return -EINVAL;
}
spin_lock_irqsave(&phy_lock, flags);
list_for_each_entry(phy, &phy_list, head) {
if (phy->type == type) {
ret = -EBUSY;
dev_err(x->dev, "transceiver type %s already exists\n",
usb_phy_type_string(type));
goto out;
}
}
x->type = type;
list_add_tail(&x->head, &phy_list);
out:
spin_unlock_irqrestore(&phy_lock, flags);
return ret;
}
EXPORT_SYMBOL(usb_add_phy);
/**
* usb_add_phy_dev - declare the USB PHY
* @x: the USB phy to be used; or NULL
*
* This call is exclusively for use by phy drivers, which
* coordinate the activities of drivers for host and peripheral
* controllers, and in some cases for VBUS current regulation.
*/
int usb_add_phy_dev(struct usb_phy *x)
{
struct usb_phy_bind *phy_bind;
unsigned long flags;
if (!x->dev) {
dev_err(x->dev, "no device provided for PHY\n");
return -EINVAL;
}
spin_lock_irqsave(&phy_lock, flags);
list_for_each_entry(phy_bind, &phy_bind_list, list)
if (!(strcmp(phy_bind->phy_dev_name, dev_name(x->dev))))
phy_bind->phy = x;
list_add_tail(&x->head, &phy_list);
spin_unlock_irqrestore(&phy_lock, flags);
return 0;
}
EXPORT_SYMBOL(usb_add_phy_dev);
/**
* usb_remove_phy - remove the OTG PHY
* @x: the USB OTG PHY to be removed;
*
* This reverts the effects of usb_add_phy
*/
void usb_remove_phy(struct usb_phy *x)
{
unsigned long flags;
struct usb_phy_bind *phy_bind;
spin_lock_irqsave(&phy_lock, flags);
if (x) {
list_for_each_entry(phy_bind, &phy_bind_list, list)
if (phy_bind->phy == x)
phy_bind->phy = NULL;
list_del(&x->head);
}
spin_unlock_irqrestore(&phy_lock, flags);
}
EXPORT_SYMBOL(usb_remove_phy);
/**
* usb_bind_phy - bind the phy and the controller that uses the phy
* @dev_name: the device name of the device that will bind to the phy
* @index: index to specify the port number
* @phy_dev_name: the device name of the phy
*
* Fills the phy_bind structure with the dev_name and phy_dev_name. This will
* be used when the phy driver registers the phy and when the controller
* requests this phy.
*
* To be used by platform specific initialization code.
*/
int __init usb_bind_phy(const char *dev_name, u8 index,
const char *phy_dev_name)
{
struct usb_phy_bind *phy_bind;
unsigned long flags;
phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
if (!phy_bind) {
pr_err("phy_bind(): No memory for phy_bind");
return -ENOMEM;
}
phy_bind->dev_name = dev_name;
phy_bind->phy_dev_name = phy_dev_name;
phy_bind->index = index;
spin_lock_irqsave(&phy_lock, flags);
list_add_tail(&phy_bind->list, &phy_bind_list);
spin_unlock_irqrestore(&phy_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(usb_bind_phy);
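/*
 * Illustrative board-file usage (not part of this file): bind port 0 of
 * a controller named "musb-hdrc.0" to the PHY device "twl4030_usb"
 * before either driver probes. Both device names are hypothetical.
 */
static int __init example_board_bind_phy(void)
{
	return usb_bind_phy("musb-hdrc.0", 0, "twl4030_usb");
}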
const char *otg_state_string(enum usb_otg_state state)
{
switch (state) {
case OTG_STATE_A_IDLE:
return "a_idle";
case OTG_STATE_A_WAIT_VRISE:
return "a_wait_vrise";
case OTG_STATE_A_WAIT_BCON:
return "a_wait_bcon";
case OTG_STATE_A_HOST:
return "a_host";
case OTG_STATE_A_SUSPEND:
return "a_suspend";
case OTG_STATE_A_PERIPHERAL:
return "a_peripheral";
case OTG_STATE_A_WAIT_VFALL:
return "a_wait_vfall";
case OTG_STATE_A_VBUS_ERR:
return "a_vbus_err";
case OTG_STATE_B_IDLE:
return "b_idle";
case OTG_STATE_B_SRP_INIT:
return "b_srp_init";
case OTG_STATE_B_PERIPHERAL:
return "b_peripheral";
case OTG_STATE_B_WAIT_ACON:
return "b_wait_acon";
case OTG_STATE_B_HOST:
return "b_host";
default:
return "UNDEFINED";
}
}
EXPORT_SYMBOL(otg_state_string);
| gpl-2.0 |
lordeko/Alucard-Kernel-jfltexx | arch/arm/kernel/perf_event_v7.c | 299 | 37375 | /*
* ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
*
* ARMv7 support: Jean Pihet <jpihet@mvista.com>
* 2010 (c) MontaVista Software, LLC.
*
* Copied from ARMv6 code, with the low level code inspired
* by the ARMv7 Oprofile code.
*
* Cortex-A8 has up to 4 configurable performance counters and
* a single cycle counter.
* Cortex-A9 has up to 31 configurable performance counters and
* a single cycle counter.
*
* All counters can be enabled/disabled and IRQ masked separately. The cycle
* counter and all 4 performance counters together can be reset separately.
*/
#ifdef CONFIG_CPU_V7
static struct arm_pmu armv7pmu;
/*
* Common ARMv7 event types
*
* Note: An implementation may not be able to count all of these events
* but the encodings are considered to be `reserved' in the case that
* they are not available.
*/
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
ARMV7_PERFCTR_ITLB_REFILL = 0x02,
ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_MEM_READ = 0x06,
ARMV7_PERFCTR_MEM_WRITE = 0x07,
ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
/*
* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
* It counts:
* - all (taken) branch instructions,
* - instructions that explicitly write the PC,
* - exception generating instructions.
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
ARMV7_PERFCTR_MEM_ACCESS = 0x13,
ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
ARMV7_PERFCTR_BUS_ACCESS = 0x19,
ARMV7_PERFCTR_MEM_ERROR = 0x1A,
ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};
/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};
/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};
/*
* Cortex-A8 HW events mapping
*
* The hardware events that we support. We do support cache operations but
* we have Harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
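/*
 * Illustrative decomposition (not code from this file): for a
 * PERF_TYPE_HW_CACHE event, attr.config packs (cache, op, result) one
 * byte each, and the common map_cpu_event() helper uses those bytes as
 * the three indices into tables like the one above. Bounds checks
 * against the PERF_COUNT_HW_CACHE_*_MAX limits are omitted here; the
 * helper name below is hypothetical.
 */
static inline unsigned example_a8_cache_map_lookup(u64 config)
{
	unsigned cache  = (config >>  0) & 0xff; /* e.g. C(L1D)         */
	unsigned op     = (config >>  8) & 0xff; /* e.g. C(OP_READ)     */
	unsigned result = (config >> 16) & 0xff; /* e.g. C(RESULT_MISS) */

	return armv7_a8_perf_cache_map[cache][op][result];
}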
/*
* Cortex-A9 HW events mapping
*/
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
static unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Cortex-A5 HW events mapping
*/
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
/*
* The prefetch counters don't differentiate between the I
* side and the D side.
*/
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Cortex-A15 HW events mapping
*/
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
/*
* Not all performance counters differentiate between read
* and write accesses/misses so we're not always strictly
* correct, but it's the best we can do. Writes and reads get
* combined in these cases.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Cortex-A7 HW events mapping
*/
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Perf Events' indices
*/
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0 1
#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
#define ARMV7_MAX_COUNTERS 32
#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
/*
* ARMv7 low level PMNC access
*/
/*
* Perf Event to low level counters mapping
*/
#define ARMV7_IDX_TO_COUNTER(x) \
(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
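/*
 * Worked example of the mapping above (illustrative): perf index 0 is
 * the cycle counter and event counters start at index 1, so perf index
 * N selects hardware event counter N - 1:
 *
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)     == 0
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0 + 3) == 3
 */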
/*
* Per-CPU PMNC: config reg
*/
#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK 0x1f
#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
/*
* FLAG: counters overflow flag status reg
*/
#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
/*
* PMXEVTYPER: Event selection reg
*/
#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
* Event filters for PMUv2
*/
#define ARMV7_EXCLUDE_PL1 (1 << 31)
#define ARMV7_EXCLUDE_USER (1 << 30)
#define ARMV7_INCLUDE_HYP (1 << 27)
static inline u32 armv7_pmnc_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
return val;
}
static inline void armv7_pmnc_write(u32 val)
{
val &= ARMV7_PMNC_MASK;
isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_valid(int idx)
{
return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
}
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
int ret = 0;
u32 counter;
if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u checking wrong counter %d overflow status\n",
smp_processor_id(), idx);
} else {
counter = ARMV7_IDX_TO_COUNTER(idx);
ret = pmnc & BIT(counter);
}
return ret;
}
static inline int armv7_pmnc_select_counter(int idx)
{
u32 counter;
if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u selecting wrong PMNC counter %d\n",
smp_processor_id(), idx);
return -EINVAL;
}
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
u32 value = 0;
if (!armv7_pmnc_counter_valid(idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
else if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
if (!armv7_pmnc_counter_valid(idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
else if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
if (armv7_pmnc_select_counter(idx) == idx) {
val &= ARMV7_EVTYPE_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
}
static inline int armv7_pmnc_enable_counter(int idx)
{
u32 counter;
if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u enabling wrong PMNC counter %d\n",
smp_processor_id(), idx);
return -EINVAL;
}
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
return idx;
}
static inline int armv7_pmnc_disable_counter(int idx)
{
u32 counter;
if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u disabling wrong PMNC counter %d\n",
smp_processor_id(), idx);
return -EINVAL;
}
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
return idx;
}
static inline int armv7_pmnc_enable_intens(int idx)
{
u32 counter;
if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
return -EINVAL;
}
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
return idx;
}
static inline int armv7_pmnc_disable_intens(int idx)
{
u32 counter;
if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
return -EINVAL;
}
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
isb();
return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
u32 val;
/* Read */
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
u32 val;
unsigned int cnt;
printk(KERN_INFO "PMNC registers dump:\n");
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
printk(KERN_INFO "PMNC =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
printk(KERN_INFO "CNTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
printk(KERN_INFO "INTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
printk(KERN_INFO "FLAGS =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
printk(KERN_INFO "SELECT=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
ARMV7_IDX_TO_COUNTER(cnt), val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
ARMV7_IDX_TO_COUNTER(cnt), val);
}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx, int cpu)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
unsigned long long prev_count = local64_read(&hwc->prev_count);
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
*/
if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
* Enable interrupt for this counter
*/
armv7_pmnc_enable_intens(idx);
/* Restore prev val */
armv7pmu_write_counter(idx, prev_count & 0xffffffff);
/*
* Enable counter
*/
armv7_pmnc_enable_counter(idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/*
* Disable counter and interrupt
*/
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
* Disable interrupt for this counter
*/
armv7_pmnc_disable_intens(idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
u32 pmnc;
struct perf_sample_data data;
struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmnc = armv7_pmnc_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv7_pmnc_has_overflowed(pmnc))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
/* Ignore if we don't have an event. */
if (!event)
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_stop(void)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx;
unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
/* Always place a cycle-count event into the cycle counter. */
if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV7_IDX_CYCLE_COUNTER;
}
/*
* For anything other than a cycle counter, try and use
* the events counters
*/
for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
/*
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
*/
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
if (attr->exclude_idle)
return -EPERM;
if (attr->exclude_user)
config_base |= ARMV7_EXCLUDE_USER;
if (attr->exclude_kernel)
config_base |= ARMV7_EXCLUDE_PL1;
if (!attr->exclude_hv)
config_base |= ARMV7_INCLUDE_HYP;
/*
* Install the filter into config_base as this is used to
* construct the event type.
*/
event->config_base = config_base;
return 0;
}
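/*
 * Illustrative usage (not code from this file): counting user-space
 * activity only on a PMUv2 part. With exclude_kernel set, the filter
 * installs ARMV7_EXCLUDE_PL1 into config_base, which
 * armv7pmu_enable_event() later writes out via
 * armv7_pmnc_write_evtsel(). The wrapper below is hypothetical.
 */
static int example_user_only_filter(struct hw_perf_event *hwc)
{
	struct perf_event_attr attr = {
		.exclude_kernel	= 1,	/* drop PL1 counts */
		.exclude_hv	= 1,	/* don't add ARMV7_INCLUDE_HYP */
	};

	return armv7pmu_set_event_filter(hwc, &attr);
}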
static void armv7pmu_reset(void *info)
{
u32 idx, nb_cnt = cpu_pmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
armv7pmu_disable_event(NULL, idx);
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv7_a8_perf_map,
&armv7_a8_perf_cache_map, 0xFF);
}
static int armv7_a9_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv7_a9_perf_map,
&armv7_a9_perf_cache_map, 0xFF);
}
static int armv7_a5_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv7_a5_perf_map,
&armv7_a5_perf_cache_map, 0xFF);
}
static int armv7_a15_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv7_a15_perf_map,
&armv7_a15_perf_cache_map, 0xFF);
}
static int armv7_a7_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv7_a7_perf_map,
&armv7_a7_perf_cache_map, 0xFF);
}
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
.disable = armv7pmu_disable_event,
.read_counter = armv7pmu_read_counter,
.write_counter = armv7pmu_write_counter,
.get_event_idx = armv7pmu_get_event_idx,
.start = armv7pmu_start,
.stop = armv7pmu_stop,
.reset = armv7pmu_reset,
.max_period = (1LLU << 32) - 1,
};
static u32 __init armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
/* Add the CPU cycles counter and return */
return nb_cnt + 1;
}
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.map_event = armv7_a8_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.map_event = armv7_a9_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA5;
armv7pmu.name = "ARMv7 Cortex-A5";
armv7pmu.map_event = armv7_a5_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA15;
armv7pmu.name = "ARMv7 Cortex-A15";
armv7pmu.map_event = armv7_a15_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
armv7pmu.set_event_filter = armv7pmu_set_event_filter;
return &armv7pmu;
}
static struct arm_pmu *__init armv7_a7_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA7;
armv7pmu.name = "ARMv7 Cortex-A7";
armv7pmu.map_event = armv7_a7_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
armv7pmu.set_event_filter = armv7pmu_set_event_filter;
return &armv7pmu;
}
#else
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
return NULL;
}
static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
return NULL;
}
static struct arm_pmu *__init armv7_a7_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V7 */
| gpl-2.0 |
jekkos/android_kernel_htc_msm8960 | drivers/staging/prima_jb3.2/CORE/MAC/src/pe/sch/schApi.c | 811 | 16006 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
*
* Airgo Networks, Inc proprietary. All rights reserved.
* This file schApi.c contains functions related to the API exposed
* by scheduler module
*
* Author: Sandesh Goel
* Date: 02/25/02
* History:-
* Date Modified by Modification Information
* --------------------------------------------------------------------
*
*/
#include "palTypes.h"
#include "aniGlobal.h"
#include "wniCfgSta.h"
#include "sirMacProtDef.h"
#include "sirMacPropExts.h"
#include "sirCommon.h"
#include "cfgApi.h"
#include "pmmApi.h"
#include "limApi.h"
#include "schApi.h"
#include "schDebug.h"
#include "schSysParams.h"
#include "limTrace.h"
#include "limTypes.h"
#include "wlan_qct_wda.h"
//--------------------------------------------------------------------
//
// Static Variables
//
//-------------------------------------------------------------------
static tANI_U8 gSchProbeRspTemplate[SCH_MAX_PROBE_RESP_SIZE];
static tANI_U8 gSchBeaconFrameBegin[SCH_MAX_BEACON_SIZE];
static tANI_U8 gSchBeaconFrameEnd[SCH_MAX_BEACON_SIZE];
// --------------------------------------------------------------------
/**
* schGetCFPCount
*
* FUNCTION:
* Function used by other Sirius modules to read CFPcount
*
* LOGIC:
*
* ASSUMPTIONS:
*
* NOTE:
*
* @param None
* @return None
*/
tANI_U8
schGetCFPCount(tpAniSirGlobal pMac)
{
return pMac->sch.schObject.gSchCFPCount;
}
// --------------------------------------------------------------------
/**
* schGetCFPDurRemaining
*
* FUNCTION:
* Function used by other Sirius modules to read CFPDuration remaining
*
* LOGIC:
*
* ASSUMPTIONS:
*
* NOTE:
*
* @param None
* @return None
*/
tANI_U16
schGetCFPDurRemaining(tpAniSirGlobal pMac)
{
return pMac->sch.schObject.gSchCFPDurRemaining;
}
// --------------------------------------------------------------------
/**
* schInitialize
*
* FUNCTION:
* Initialize
*
* LOGIC:
*
* ASSUMPTIONS:
*
* NOTE:
*
* @param None
* @return None
*/
void
schInitialize(tpAniSirGlobal pMac)
{
pmmInitialize(pMac);
}
// --------------------------------------------------------------------
/**
* schInitGlobals
*
* FUNCTION:
* Initialize globals
*
* LOGIC:
*
* ASSUMPTIONS:
*
* NOTE:
*
* @param None
* @return None
*/
void
schInitGlobals(tpAniSirGlobal pMac)
{
pMac->sch.gSchHcfEnabled = false;
pMac->sch.gSchScanRequested = false;
pMac->sch.gSchScanReqRcvd = false;
pMac->sch.gSchGenBeacon = 1;
pMac->sch.gSchBeaconsSent = 0;
pMac->sch.gSchBeaconsWritten = 0;
pMac->sch.gSchBcnParseErrorCnt = 0;
pMac->sch.gSchBcnIgnored = 0;
pMac->sch.gSchBBXportRcvCnt = 0;
pMac->sch.gSchUnknownRcvCnt = 0;
pMac->sch.gSchBcnRcvCnt = 0;
pMac->sch.gSchRRRcvCnt = 0;
pMac->sch.qosNullCnt = 0;
pMac->sch.numData = 0;
pMac->sch.numPoll = 0;
pMac->sch.numCorrupt = 0;
pMac->sch.numBogusInt = 0;
pMac->sch.numTxAct0 = 0;
pMac->sch.rrTimeout = SCH_RR_TIMEOUT;
pMac->sch.pollPeriod = SCH_POLL_PERIOD;
pMac->sch.keepAlive = 0;
pMac->sch.multipleSched = 1;
pMac->sch.maxPollTimeouts = 20;
pMac->sch.checkCfbFlagStuck = 0;
pMac->sch.schObject.gSchProbeRspTemplate = gSchProbeRspTemplate;
pMac->sch.schObject.gSchBeaconFrameBegin = gSchBeaconFrameBegin;
pMac->sch.schObject.gSchBeaconFrameEnd = gSchBeaconFrameEnd;
}
// --------------------------------------------------------------------
/**
* schPostMessage
*
* FUNCTION:
* Post the beacon message to the scheduler message queue
*
* LOGIC:
*
* ASSUMPTIONS:
*
* NOTE:
*
* @param pMsg pointer to message
* @return None
*/
tSirRetStatus
schPostMessage(tpAniSirGlobal pMac, tpSirMsgQ pMsg)
{
schProcessMessage(pMac, pMsg);
return eSIR_SUCCESS;
}
// ---------------------------------------------------------------------------
/**
* schSendStartScanRsp
*
* FUNCTION:
*
* LOGIC:
*
* ASSUMPTIONS:
*
* NOTE:
*
* @param None
* @return None
*/
void
schSendStartScanRsp(tpAniSirGlobal pMac)
{
tSirMsgQ msgQ;
tANI_U32 retCode;
PELOG1(schLog(pMac, LOG1, FL("Sending LIM message to go into scan"));)
msgQ.type = SIR_SCH_START_SCAN_RSP;
if ((retCode = limPostMsgApi(pMac, &msgQ)) != eSIR_SUCCESS)
schLog(pMac, LOGE,
FL("Posting START_SCAN_RSP to LIM failed, reason=%X"), retCode);
}
/**
* schSendBeaconReq
*
* FUNCTION:
*
* LOGIC:
* 1) SCH received SIR_SCH_BEACON_GEN_IND
* 2) SCH updates TIM IE and other beacon related IE's
* 3) SCH sends WDA_SEND_BEACON_REQ to HAL. HAL then copies the beacon
* template to memory
*
* ASSUMPTIONS:
* Memory allocation is reqd to send this message and SCH allocates memory.
* The assumption is that HAL will "free" this memory.
*
* NOTE:
*
* @param pMac global
*
* @param beaconPayload
*
* @param size - Length of the beacon
*
* @return eHalStatus
*/
tSirRetStatus schSendBeaconReq( tpAniSirGlobal pMac, tANI_U8 *beaconPayload, tANI_U16 size, tpPESession psessionEntry)
{
tSirMsgQ msgQ;
tpSendbeaconParams beaconParams = NULL;
tSirRetStatus retCode;
schLog( pMac, LOG2,
FL( "Indicating HAL to copy the beacon template [%d bytes] to memory" ),
size );
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd,
(void **) &beaconParams,
sizeof( tSendbeaconParams )))
return eSIR_FAILURE;
msgQ.type = WDA_SEND_BEACON_REQ;
// No Dialog Token reqd, as a response is not solicited
msgQ.reserved = 0;
// Fill in tSendbeaconParams members
/* Knock off all pMac global addresses */
// limGetBssid( pMac, beaconParams->bssId);
palCopyMemory(pMac, beaconParams->bssId, psessionEntry->bssId, sizeof(psessionEntry->bssId));
beaconParams->timIeOffset = pMac->sch.schObject.gSchBeaconOffsetBegin;
/* p2pIeOffset should be atleast greater than timIeOffset */
if ((pMac->sch.schObject.p2pIeOffset != 0) &&
(pMac->sch.schObject.p2pIeOffset <
pMac->sch.schObject.gSchBeaconOffsetBegin))
{
schLog(pMac, LOGE,FL("Invalid p2pIeOffset:[%d]"),
pMac->sch.schObject.p2pIeOffset);
VOS_ASSERT( 0 );
return eSIR_FAILURE;
}
beaconParams->p2pIeOffset = pMac->sch.schObject.p2pIeOffset;
#ifdef WLAN_SOFTAP_FW_BEACON_TX_PRNT_LOG
schLog(pMac, LOGE,FL("TimIeOffset:[%d]"),beaconParams->TimIeOffset );
#endif
beaconParams->beacon = beaconPayload;
beaconParams->beaconLength = (tANI_U32) size;
msgQ.bodyptr = beaconParams;
msgQ.bodyval = 0;
// Keep a copy of recent beacon frame sent
// free previous copy of the beacon
if (psessionEntry->beacon )
{
palFreeMemory(pMac->hHdd, psessionEntry->beacon);
}
psessionEntry->bcnLen = 0;
psessionEntry->beacon = NULL;
if ( eHAL_STATUS_SUCCESS == palAllocateMemory( pMac->hHdd,(void **) &psessionEntry->beacon, size))
{
palCopyMemory(pMac->hHdd, psessionEntry->beacon, beaconPayload, size);
psessionEntry->bcnLen = size;
}
MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ )))
{
schLog( pMac, LOGE,
FL("Posting SEND_BEACON_REQ to HAL failed, reason=%X"),
retCode );
} else
{
schLog( pMac, LOG2,
FL("Successfully posted WDA_SEND_BEACON_REQ to HAL"));
if( (psessionEntry->limSystemRole == eLIM_AP_ROLE )
&& (psessionEntry->proxyProbeRspEn)
&& (pMac->sch.schObject.fBeaconChanged))
{
if(eSIR_SUCCESS != (retCode = limSendProbeRspTemplateToHal(pMac,psessionEntry,
&psessionEntry->DefProbeRspIeBitmap[0])))
{
/* check whether we have to free any memory */
schLog(pMac, LOGE, FL("FAILED to send probe response template with retCode %d"), retCode);
}
}
}
return retCode;
}
tANI_U32 limSendProbeRspTemplateToHal(tpAniSirGlobal pMac, tpPESession psessionEntry,
tANI_U32 *IeBitmap)
{
tSirMsgQ msgQ;
tANI_U8 *pFrame2Hal = pMac->sch.schObject.gSchProbeRspTemplate;
tpSendProbeRespParams pprobeRespParams=NULL;
tANI_U32 retCode = eSIR_FAILURE;
tANI_U32 nPayload,nBytes,nStatus;
tpSirMacMgmtHdr pMacHdr;
tANI_U32 addnIEPresent;
tANI_U32 addnIELen=0;
tSirRetStatus nSirStatus;
tANI_U8 *addIE = NULL;
nStatus = dot11fGetPackedProbeResponseSize( pMac, &psessionEntry->probeRespFrame, &nPayload );
if ( DOT11F_FAILED( nStatus ) )
{
schLog( pMac, LOGE, FL("Failed to calculate the packed size f"
"or a Probe Response (0x%08x)."),
nStatus );
// We'll fall back on the worst case scenario:
nPayload = sizeof( tDot11fProbeResponse );
}
else if ( DOT11F_WARNED( nStatus ) )
{
schLog( pMac, LOGE, FL("There were warnings while calculating"
"the packed size for a Probe Response "
"(0x%08x)."), nStatus );
}
nBytes = nPayload + sizeof( tSirMacMgmtHdr );
//Check if probe response IE is present or not
if (wlan_cfgGetInt(pMac, WNI_CFG_PROBE_RSP_ADDNIE_FLAG, &addnIEPresent) != eSIR_SUCCESS)
{
schLog(pMac, LOGE, FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_FLAG"));
return retCode;
}
if (addnIEPresent)
{
//Probe rsp IE available
if ( (palAllocateMemory(pMac->hHdd, (void**)&addIE,
WNI_CFG_PROBE_RSP_ADDNIE_DATA1_LEN )) != eHAL_STATUS_SUCCESS)
{
schLog(pMac, LOGE,
FL("Unable to allocate memory for the additional probe response IEs"));
return retCode;
}
if (wlan_cfgGetStrLen(pMac, WNI_CFG_PROBE_RSP_ADDNIE_DATA1,
&addnIELen) != eSIR_SUCCESS)
{
schLog(pMac, LOGE,
FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA1 length"));
palFreeMemory(pMac->hHdd, addIE);
return retCode;
}
if (addnIELen <= WNI_CFG_PROBE_RSP_ADDNIE_DATA1_LEN && addnIELen &&
(nBytes + addnIELen) <= SIR_MAX_PACKET_SIZE)
{
if ( eSIR_SUCCESS != wlan_cfgGetStr(pMac,
WNI_CFG_PROBE_RSP_ADDNIE_DATA1, &addIE[0],
&addnIELen) )
{
schLog(pMac, LOGE,
FL("Unable to get WNI_CFG_PROBE_RSP_ADDNIE_DATA1 String"));
palFreeMemory(pMac->hHdd, addIE);
return retCode;
}
}
}
if (addnIEPresent)
{
if ((nBytes + addnIELen) <= SIR_MAX_PACKET_SIZE )
nBytes += addnIELen;
else
addnIEPresent = false; //Don't include the IE.
}
// Paranoia:
palZeroMemory( pMac->hHdd, pFrame2Hal, nBytes );
// Next, we fill out the buffer descriptor:
nSirStatus = limPopulateMacHeader( pMac, pFrame2Hal, SIR_MAC_MGMT_FRAME,
SIR_MAC_MGMT_PROBE_RSP, psessionEntry->selfMacAddr,psessionEntry->selfMacAddr);
if ( eSIR_SUCCESS != nSirStatus )
{
schLog( pMac, LOGE, FL("Failed to populate the buffer descrip"
"tor for a Probe Response (%d)."),
nSirStatus );
palFreeMemory(pMac->hHdd, addIE);
return retCode;
}
pMacHdr = ( tpSirMacMgmtHdr ) pFrame2Hal;
sirCopyMacAddr(pMacHdr->bssId,psessionEntry->bssId);
// That done, pack the Probe Response:
nStatus = dot11fPackProbeResponse( pMac, &psessionEntry->probeRespFrame, pFrame2Hal + sizeof(tSirMacMgmtHdr),
nPayload, &nPayload );
if ( DOT11F_FAILED( nStatus ) )
{
schLog( pMac, LOGE, FL("Failed to pack a Probe Response (0x%08x)."),
nStatus );
palFreeMemory(pMac->hHdd, addIE);
return retCode; // allocated!
}
else if ( DOT11F_WARNED( nStatus ) )
{
schLog( pMac, LOGE, FL("There were warnings while packing a P"
"robe Response (0x%08x).") );
}
if (addnIEPresent)
{
if (palCopyMemory ( pMac->hHdd, &pFrame2Hal[nBytes - addnIELen],
&addIE[0], addnIELen) != eHAL_STATUS_SUCCESS)
{
schLog( pMac, LOGE,
FL("Additional Probe Rsp IE request failed while Appending "));
palFreeMemory(pMac->hHdd, addIE);
return retCode;
}
}
/* free the allocated Memory */
palFreeMemory(pMac->hHdd, addIE);
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd,
(void **) &pprobeRespParams,
sizeof( tSendProbeRespParams )))
{
schLog( pMac, LOGE, FL("limSendProbeRspTemplateToHal: HAL probe response params malloc failed for bytes %d"), nBytes );
}
else
{
/*
PELOGE(sirDumpBuf(pMac, SIR_LIM_MODULE_ID, LOGE,
pFrame2Hal,
nBytes);)
*/
sirCopyMacAddr( pprobeRespParams->bssId , psessionEntry->bssId);
pprobeRespParams->pProbeRespTemplate = pFrame2Hal;
pprobeRespParams->probeRespTemplateLen = nBytes;
palCopyMemory(pMac->hHdd, pprobeRespParams->ucProxyProbeReqValidIEBmap, IeBitmap,
(sizeof(tANI_U32) * 8));
msgQ.type = WDA_UPDATE_PROBE_RSP_TEMPLATE_IND;
msgQ.reserved = 0;
msgQ.bodyptr = pprobeRespParams;
msgQ.bodyval = 0;
if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ )))
{
/* free the allocated Memory */
schLog( pMac,LOGE, FL("limSendProbeRspTemplateToHal: FAIL bytes %d retcode[%X]"), nBytes , retCode );
palFreeMemory(pMac->hHdd,pprobeRespParams);
}
else
{
schLog( pMac,LOG1, FL("limSendProbeRspTemplateToHal: Probe response template msg posted to HAL of bytes %d"),nBytes );
}
}
return retCode;
}
| gpl-2.0 |
roalex/sgs3-kernel | drivers/spi/spi_fsl_spi.c | 1579 | 28621 | /*
* Freescale SPI controller driver.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
#include <asm/qe.h>
#include "spi_fsl_lib.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif
/* SPI Controller registers */
struct fsl_spi_reg {
u8 res1[0x20];
__be32 mode;
__be32 event;
__be32 mask;
__be32 command;
__be32 transmit;
__be32 receive;
};
/* SPI Controller mode register definitions */
#define SPMODE_LOOP (1 << 30)
#define SPMODE_CI_INACTIVEHIGH (1 << 29)
#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
#define SPMODE_DIV16 (1 << 27)
#define SPMODE_REV (1 << 26)
#define SPMODE_MS (1 << 25)
#define SPMODE_ENABLE (1 << 24)
#define SPMODE_LEN(x) ((x) << 20)
#define SPMODE_PM(x) ((x) << 16)
#define SPMODE_OP (1 << 14)
#define SPMODE_CG(x) ((x) << 7)
/*
* Default for SPI Mode:
* SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
*/
#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
/* SPIE register values */
#define SPIE_NE 0x00000200 /* Not empty */
#define SPIE_NF 0x00000100 /* Not full */
/* SPIM register values */
#define SPIM_NE 0x00000200 /* Not empty */
#define SPIM_NF 0x00000100 /* Not full */
#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
/* SPCOM register values */
#define SPCOM_STR (1 << 23) /* Start transmit */
#define SPI_PRAM_SIZE 0x100
#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
static void fsl_spi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
struct fsl_spi_reg *reg_base = mspi->reg_base;
__be32 __iomem *mode = &reg_base->mode;
unsigned long flags;
if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
return;
/* Turn off IRQs locally to minimize time that SPI is disabled. */
local_irq_save(flags);
/* Turn off SPI unit prior to changing mode */
mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
/* When in CPM mode, we need to reinit tx and rx. */
if (mspi->flags & SPI_CPM_MODE) {
if (mspi->flags & SPI_QE) {
qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
} else {
cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
if (mspi->flags & SPI_CPM1) {
out_be16(&mspi->pram->rbptr,
in_be16(&mspi->pram->rbase));
out_be16(&mspi->pram->tbptr,
in_be16(&mspi->pram->tbase));
}
}
}
mpc8xxx_spi_write_reg(mode, cs->hw_mode);
local_irq_restore(flags);
}
static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
pdata->cs_control(spi, !pol);
}
if (value == BITBANG_CS_ACTIVE) {
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
fsl_spi_change_mode(spi);
if (pdata->cs_control)
pdata->cs_control(spi, pol);
}
}
static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
struct mpc8xxx_spi *mpc8xxx_spi,
int bits_per_word)
{
cs->rx_shift = 0;
cs->tx_shift = 0;
if (bits_per_word <= 8) {
cs->get_rx = mpc8xxx_spi_rx_buf_u8;
cs->get_tx = mpc8xxx_spi_tx_buf_u8;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
cs->rx_shift = 16;
cs->tx_shift = 24;
}
} else if (bits_per_word <= 16) {
cs->get_rx = mpc8xxx_spi_rx_buf_u16;
cs->get_tx = mpc8xxx_spi_tx_buf_u16;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
cs->rx_shift = 16;
cs->tx_shift = 16;
}
} else if (bits_per_word <= 32) {
cs->get_rx = mpc8xxx_spi_rx_buf_u32;
cs->get_tx = mpc8xxx_spi_tx_buf_u32;
} else
return -EINVAL;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
spi->mode & SPI_LSB_FIRST) {
cs->tx_shift = 0;
if (bits_per_word <= 8)
cs->rx_shift = 8;
else
cs->rx_shift = 0;
}
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
return bits_per_word;
}
static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
int bits_per_word)
{
/* QE uses little endian for words > 8 bits,
 * so transform all words > 8 bits into 8-bit words.
 * Unfortunately that doesn't work for LSB-first transfers,
 * so reject those for now. */
/* Note: 32-bit words, LSB-first, do work iff
 * tfcr/rfcr is set to CPMFCR_GBL */
if (spi->mode & SPI_LSB_FIRST &&
bits_per_word > 8)
return -EINVAL;
if (bits_per_word > 8)
return 8; /* pretend it's 8 bits */
return bits_per_word;
}
static int fsl_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi;
int bits_per_word = 0;
u8 pm;
u32 hz = 0;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
mpc8xxx_spi = spi_master_get_devdata(spi->master);
if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
}
/* spi_transfer level calls that work per-word */
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
/* Make sure it's a bit width we support [4..16, 32] */
if ((bits_per_word < 4)
|| ((bits_per_word > 16) && (bits_per_word != 32)))
return -EINVAL;
if (!hz)
hz = spi->max_speed_hz;
if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
mpc8xxx_spi,
bits_per_word);
else if (mpc8xxx_spi->flags & SPI_QE)
bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
bits_per_word);
if (bits_per_word < 0)
return bits_per_word;
if (bits_per_word == 32)
bits_per_word = 0;
else
bits_per_word = bits_per_word - 1;
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
| SPMODE_PM(0xF));
cs->hw_mode |= SPMODE_LEN(bits_per_word);
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= SPMODE_DIV16;
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
"Will use %d Hz instead.\n", dev_name(&spi->dev),
hz, mpc8xxx_spi->spibrg / 1024);
if (pm > 16)
pm = 16;
} else {
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
}
if (pm)
pm--;
cs->hw_mode |= SPMODE_PM(pm);
fsl_spi_change_mode(spi);
return 0;
}
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
unsigned int xfer_ofs;
struct fsl_spi_reg *reg_base = mspi->reg_base;
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
if (mspi->rx_dma == mspi->dma_dummy_rx)
out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
else
out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
out_be16(&rx_bd->cbd_datlen, 0);
out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
if (mspi->tx_dma == mspi->dma_dummy_tx)
out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
else
out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
out_be16(&tx_bd->cbd_datlen, xfer_len);
out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
BD_SC_LAST);
/* start transfer */
mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR);
}
static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, bool is_dma_mapped)
{
struct device *dev = mspi->dev;
struct fsl_spi_reg *reg_base = mspi->reg_base;
if (is_dma_mapped) {
mspi->map_tx_dma = 0;
mspi->map_rx_dma = 0;
} else {
mspi->map_tx_dma = 1;
mspi->map_rx_dma = 1;
}
if (!t->tx_buf) {
mspi->tx_dma = mspi->dma_dummy_tx;
mspi->map_tx_dma = 0;
}
if (!t->rx_buf) {
mspi->rx_dma = mspi->dma_dummy_rx;
mspi->map_rx_dma = 0;
}
if (mspi->map_tx_dma) {
void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, mspi->tx_dma)) {
dev_err(dev, "unable to map tx dma\n");
return -ENOMEM;
}
} else if (t->tx_buf) {
mspi->tx_dma = t->tx_dma;
}
if (mspi->map_rx_dma) {
mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mspi->rx_dma)) {
dev_err(dev, "unable to map rx dma\n");
goto err_rx_dma;
}
} else if (t->rx_buf) {
mspi->rx_dma = t->rx_dma;
}
/* enable rx ints */
mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB);
mspi->xfer_in_progress = t;
mspi->count = t->len;
/* start CPM transfers */
fsl_spi_cpm_bufs_start(mspi);
return 0;
err_rx_dma:
if (mspi->map_tx_dma)
dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
return -ENOMEM;
}
static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct spi_transfer *t = mspi->xfer_in_progress;
if (mspi->map_tx_dma)
dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
if (mspi->map_rx_dma)
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
mspi->xfer_in_progress = NULL;
}
static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len)
{
u32 word;
struct fsl_spi_reg *reg_base = mspi->reg_base;
mspi->count = len;
/* enable rx ints */
mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE);
/* transmit word */
word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(®_base->transmit, word);
return 0;
}
static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
bool is_dma_mapped)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_reg *reg_base;
unsigned int len = t->len;
u8 bits_per_word;
int ret;
reg_base = mpc8xxx_spi->reg_base;
bits_per_word = spi->bits_per_word;
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
if (bits_per_word > 8) {
/* invalid length? */
if (len & 1)
return -EINVAL;
len /= 2;
}
if (bits_per_word > 16) {
/* invalid length? */
if (len & 1)
return -EINVAL;
len /= 2;
}
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
INIT_COMPLETION(mpc8xxx_spi->done);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
else
ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
if (ret)
return ret;
wait_for_completion(&mpc8xxx_spi->done);
/* disable rx ints */
mpc8xxx_spi_write_reg(®_base->mask, 0);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
fsl_spi_cpm_bufs_complete(mpc8xxx_spi);
return mpc8xxx_spi->count;
}
static void fsl_spi_do_one_msg(struct spi_message *m)
{
struct spi_device *spi = m->spi;
struct spi_transfer *t;
unsigned int cs_change;
const int nsecs = 50;
int status;
cs_change = 1;
status = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->bits_per_word || t->speed_hz) {
/* Don't allow changes if CS is active */
status = -EINVAL;
if (cs_change)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
break;
}
if (cs_change) {
fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
ndelay(nsecs);
}
cs_change = t->cs_change;
if (t->len)
status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
if (status) {
status = -EMSGSIZE;
break;
}
m->actual_length += t->len;
if (t->delay_usecs)
udelay(t->delay_usecs);
if (cs_change) {
ndelay(nsecs);
fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(nsecs);
}
}
m->status = status;
m->complete(m->context);
if (status || !cs_change) {
ndelay(nsecs);
fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
}
fsl_spi_setup_transfer(spi, NULL);
}
static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg *reg_base;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
if (!spi->max_speed_hz)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
reg_base = mpc8xxx_spi->reg_base;
hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode);
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
| SPMODE_REV | SPMODE_LOOP);
if (spi->mode & SPI_CPHA)
cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
if (spi->mode & SPI_CPOL)
cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
if (!(spi->mode & SPI_LSB_FIRST))
cs->hw_mode |= SPMODE_REV;
if (spi->mode & SPI_LOOP)
cs->hw_mode |= SPMODE_LOOP;
retval = fsl_spi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
return retval;
}
return 0;
}
static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
u16 len;
struct fsl_spi_reg *reg_base = mspi->reg_base;
dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
len = in_be16(&mspi->rx_bd->cbd_datlen);
if (len > mspi->count) {
WARN_ON(1);
len = mspi->count;
}
/* Clear the events */
mpc8xxx_spi_write_reg(®_base->event, events);
mspi->count -= len;
if (mspi->count)
fsl_spi_cpm_bufs_start(mspi);
else
complete(&mspi->done);
}
static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
struct fsl_spi_reg *reg_base = mspi->reg_base;
/* We need to handle RX first */
if (events & SPIE_NE) {
u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
if (mspi->rx)
mspi->get_rx(rx_data, mspi);
}
if ((events & SPIE_NF) == 0)
/* spin until TX is done */
while (((events =
mpc8xxx_spi_read_reg(®_base->event)) &
SPIE_NF) == 0)
cpu_relax();
/* Clear the events */
mpc8xxx_spi_write_reg(®_base->event, events);
mspi->count -= 1;
if (mspi->count) {
u32 word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(®_base->transmit, word);
} else {
complete(&mspi->done);
}
}
static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
{
struct mpc8xxx_spi *mspi = context_data;
irqreturn_t ret = IRQ_NONE;
u32 events;
struct fsl_spi_reg *reg_base = mspi->reg_base;
/* Get interrupt events(tx/rx) */
events = mpc8xxx_spi_read_reg(&reg_base->event);
if (events)
ret = IRQ_HANDLED;
dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
if (mspi->flags & SPI_CPM_MODE)
fsl_spi_cpm_irq(mspi, events);
else
fsl_spi_cpu_irq(mspi, events);
return ret;
}
static void *fsl_spi_alloc_dummy_rx(void)
{
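/* fsl_dummy_rx is shared between all controller instances; the refcount tracks the number of users. */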
mutex_lock(&fsl_dummy_rx_lock);
if (!fsl_dummy_rx)
fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
if (fsl_dummy_rx)
fsl_dummy_rx_refcnt++;
mutex_unlock(&fsl_dummy_rx_lock);
return fsl_dummy_rx;
}
static void fsl_spi_free_dummy_rx(void)
{
mutex_lock(&fsl_dummy_rx_lock);
switch (fsl_dummy_rx_refcnt) {
case 0:
WARN_ON(1);
break;
case 1:
kfree(fsl_dummy_rx);
fsl_dummy_rx = NULL;
/* fall through */
default:
fsl_dummy_rx_refcnt--;
break;
}
mutex_unlock(&fsl_dummy_rx_lock);
}
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
unsigned long spi_base_ofs;
unsigned long pram_ofs = -ENOMEM;
/* Can't use of_address_to_resource(), QE muram isn't at 0. */
iprop = of_get_property(np, "reg", &size);
/* QE with a fixed pram location? */
if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
/* QE but with a dynamic pram location? */
if (mspi->flags & SPI_QE) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
return pram_ofs;
}
/* CPM1 and CPM2 pram must be at a fixed addr. */
if (!iprop || size != sizeof(*iprop) * 4)
return -ENOMEM;
spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
if (IS_ERR_VALUE(spi_base_ofs))
return -ENOMEM;
if (mspi->flags & SPI_CPM2) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
if (!IS_ERR_VALUE(pram_ofs)) {
u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
out_be16(spi_base, pram_ofs);
}
} else {
struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
u16 rpbase = in_be16(&pram->rpbase);
/* Microcode relocation patch applied? */
if (rpbase)
pram_ofs = rpbase;
else
return spi_base_ofs;
}
cpm_muram_free(spi_base_ofs);
return pram_ofs;
}
static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
unsigned long pram_ofs;
unsigned long bds_ofs;
if (!(mspi->flags & SPI_CPM_MODE))
return 0;
if (!fsl_spi_alloc_dummy_rx())
return -ENOMEM;
if (mspi->flags & SPI_QE) {
iprop = of_get_property(np, "cell-index", &size);
if (iprop && size == sizeof(*iprop))
mspi->subblock = *iprop;
switch (mspi->subblock) {
default:
dev_warn(dev, "cell-index unspecified, assuming SPI1");
/* fall through */
case 0:
mspi->subblock = QE_CR_SUBBLOCK_SPI1;
break;
case 1:
mspi->subblock = QE_CR_SUBBLOCK_SPI2;
break;
}
}
pram_ofs = fsl_spi_cpm_get_pram(mspi);
if (IS_ERR_VALUE(pram_ofs)) {
dev_err(dev, "can't allocate spi parameter ram\n");
goto err_pram;
}
bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
sizeof(*mspi->rx_bd), 8);
if (IS_ERR_VALUE(bds_ofs)) {
dev_err(dev, "can't allocate bds\n");
goto err_bds;
}
mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
dev_err(dev, "unable to map dummy tx buffer\n");
goto err_dummy_tx;
}
mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
dev_err(dev, "unable to map dummy rx buffer\n");
goto err_dummy_rx;
}
mspi->pram = cpm_muram_addr(pram_ofs);
mspi->tx_bd = cpm_muram_addr(bds_ofs);
mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
/* Initialize parameter ram. */
out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
out_be16(&mspi->pram->mrblr, SPI_MRBLR);
out_be32(&mspi->pram->rstate, 0);
out_be32(&mspi->pram->rdp, 0);
out_be16(&mspi->pram->rbptr, 0);
out_be16(&mspi->pram->rbc, 0);
out_be32(&mspi->pram->rxtmp, 0);
out_be32(&mspi->pram->tstate, 0);
out_be32(&mspi->pram->tdp, 0);
out_be16(&mspi->pram->tbptr, 0);
out_be16(&mspi->pram->tbc, 0);
out_be32(&mspi->pram->txtmp, 0);
return 0;
err_dummy_rx:
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
cpm_muram_free(bds_ofs);
err_bds:
cpm_muram_free(pram_ofs);
err_pram:
fsl_spi_free_dummy_rx();
return -ENOMEM;
}
static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
cpm_muram_free(cpm_muram_offset(mspi->pram));
fsl_spi_free_dummy_rx();
}
static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
{
iounmap(mspi->reg_base);
fsl_spi_cpm_free(mspi);
}
static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg *reg_base;
u32 regval;
int ret = 0;
master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
if (master == NULL) {
ret = -ENOMEM;
goto err;
}
dev_set_drvdata(dev, master);
ret = mpc8xxx_spi_probe(dev, mem, irq);
if (ret)
goto err_probe;
master->setup = fsl_spi_setup;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_spi_remove;
ret = fsl_spi_cpm_init(mpc8xxx_spi);
if (ret)
goto err_cpm_init;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
mpc8xxx_spi->rx_shift = 16;
mpc8xxx_spi->tx_shift = 24;
}
mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->reg_base == NULL) {
ret = -ENOMEM;
goto err_ioremap;
}
/* Register for SPI Interrupt */
ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
0, "fsl_spi", mpc8xxx_spi);
if (ret != 0)
goto free_irq;
reg_base = mpc8xxx_spi->reg_base;
/* SPI controller initializations */
mpc8xxx_spi_write_reg(®_base->mode, 0);
mpc8xxx_spi_write_reg(®_base->mask, 0);
mpc8xxx_spi_write_reg(®_base->command, 0);
mpc8xxx_spi_write_reg(®_base->event, 0xffffffff);
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
regval |= SPMODE_OP;
mpc8xxx_spi_write_reg(®_base->mode, regval);
ret = spi_register_master(master);
if (ret < 0)
goto unreg_master;
dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return master;
unreg_master:
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
free_irq:
iounmap(mpc8xxx_spi->reg_base);
err_ioremap:
fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
err_probe:
spi_master_put(master);
err:
return ERR_PTR(ret);
}
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
struct device *dev = spi->dev.parent;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
bool alow = pinfo->alow_flags[cs];
gpio_set_value(gpio, on ^ alow);
}
static int of_fsl_spi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
unsigned int ngpios;
int i = 0;
int ret;
ngpios = of_gpio_count(np);
if (!ngpios) {
/*
* SPI w/o chip-select line. One SPI device is still permitted
* though.
*/
pdata->max_chipselect = 1;
return 0;
}
pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
if (!pinfo->gpios)
return -ENOMEM;
memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
GFP_KERNEL);
if (!pinfo->alow_flags) {
ret = -ENOMEM;
goto err_alloc_flags;
}
for (; i < ngpios; i++) {
int gpio;
enum of_gpio_flags flags;
gpio = of_get_gpio_flags(np, i, &flags);
if (!gpio_is_valid(gpio)) {
dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
ret = gpio;
goto err_loop;
}
ret = gpio_request(gpio, dev_name(dev));
if (ret) {
dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
goto err_loop;
}
pinfo->gpios[i] = gpio;
pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;
ret = gpio_direction_output(pinfo->gpios[i],
pinfo->alow_flags[i]);
if (ret) {
dev_err(dev, "can't set output direction for gpio "
"#%d: %d\n", i, ret);
goto err_loop;
}
}
pdata->max_chipselect = ngpios;
pdata->cs_control = fsl_spi_cs_control;
return 0;
err_loop:
while (i >= 0) {
if (gpio_is_valid(pinfo->gpios[i]))
gpio_free(pinfo->gpios[i]);
i--;
}
kfree(pinfo->alow_flags);
pinfo->alow_flags = NULL;
err_alloc_flags:
kfree(pinfo->gpios);
pinfo->gpios = NULL;
return ret;
}
static int of_fsl_spi_free_chipselects(struct device *dev)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
int i;
if (!pinfo->gpios)
return 0;
for (i = 0; i < pdata->max_chipselect; i++) {
if (gpio_is_valid(pinfo->gpios[i]))
gpio_free(pinfo->gpios[i]);
}
kfree(pinfo->gpios);
kfree(pinfo->alow_flags);
return 0;
}
static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
struct resource irq;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
return ret;
ret = of_fsl_spi_get_chipselects(dev);
if (ret)
goto err;
ret = of_address_to_resource(np, 0, &mem);
if (ret)
goto err;
ret = of_irq_to_resource(np, 0, &irq);
if (!ret) {
ret = -EINVAL;
goto err;
}
master = fsl_spi_probe(dev, &mem, irq.start);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
}
return 0;
err:
of_fsl_spi_free_chipselects(dev);
return ret;
}
static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
{
int ret;
ret = mpc8xxx_spi_remove(&ofdev->dev);
if (ret)
return ret;
of_fsl_spi_free_chipselects(&ofdev->dev);
return 0;
}
static const struct of_device_id of_fsl_spi_match[] = {
{ .compatible = "fsl,spi" },
{}
};
MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
static struct platform_driver of_fsl_spi_driver = {
.driver = {
.name = "fsl_spi",
.owner = THIS_MODULE,
.of_match_table = of_fsl_spi_match,
},
.probe = of_fsl_spi_probe,
.remove = __devexit_p(of_fsl_spi_remove),
};
#ifdef CONFIG_MPC832x_RDB
/*
* XXX XXX XXX
* This is "legacy" platform driver, was used by the MPC8323E-RDB boards
* only. The driver should go away soon, since newer MPC8323E-RDB's device
* tree can work with OpenFirmware driver. But for now we support old trees
* as well.
*/
static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
struct spi_master *master;
if (!pdev->dev.platform_data)
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -EINVAL;
master = fsl_spi_probe(&pdev->dev, mem, irq);
if (IS_ERR(master))
return PTR_ERR(master);
return 0;
}
static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
return mpc8xxx_spi_remove(&pdev->dev);
}
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
.remove = __devexit_p(plat_mpc8xxx_spi_remove),
.driver = {
.name = "mpc8xxx_spi",
.owner = THIS_MODULE,
},
};
static bool legacy_driver_failed;
static void __init legacy_driver_register(void)
{
legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
}
static void __exit legacy_driver_unregister(void)
{
if (legacy_driver_failed)
return;
platform_driver_unregister(&mpc8xxx_spi_driver);
}
#else
static void __init legacy_driver_register(void) {}
static void __exit legacy_driver_unregister(void) {}
#endif /* CONFIG_MPC832x_RDB */
static int __init fsl_spi_init(void)
{
legacy_driver_register();
return platform_driver_register(&of_fsl_spi_driver);
}
module_init(fsl_spi_init);
static void __exit fsl_spi_exit(void)
{
platform_driver_unregister(&of_fsl_spi_driver);
legacy_driver_unregister();
}
module_exit(fsl_spi_exit);
MODULE_AUTHOR("Kumar Gala");
MODULE_DESCRIPTION("Simple Freescale SPI Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
grancier/linux-3.10.33-chromeos | drivers/watchdog/s3c2410_wdt.c | 1835 | 12441 | /* linux/drivers/char/watchdog/s3c2410_wdt.c
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 Watchdog Timer Support
*
* Based on, softdog.c by Alan Cox,
* (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <mach/map.h>
#undef S3C_VA_WATCHDOG
#define S3C_VA_WATCHDOG (0)
#include <plat/regs-watchdog.h>
#define CONFIG_S3C2410_WATCHDOG_ATBOOT (0)
#define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15)
static bool nowayout = WATCHDOG_NOWAYOUT;
static int tmr_margin;
static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT;
static int soft_noboot;
static int debug;
module_param(tmr_margin, int, 0);
module_param(tmr_atboot, int, 0);
module_param(nowayout, bool, 0);
module_param(soft_noboot, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. (default="
__MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")");
MODULE_PARM_DESC(tmr_atboot,
"Watchdog is started at boot time if set to 1, default="
__MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT));
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
"0 to reboot (default 0)");
MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
static struct device *wdt_dev; /* platform device attached to */
static struct resource *wdt_mem;
static struct resource *wdt_irq;
static struct clk *wdt_clock;
static void __iomem *wdt_base;
static unsigned int wdt_count;
static DEFINE_SPINLOCK(wdt_lock);
/* watchdog control routines */
#define DBG(fmt, ...) \
do { \
if (debug) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
/* functions */
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
spin_lock(&wdt_lock);
writel(wdt_count, wdt_base + S3C2410_WTCNT);
spin_unlock(&wdt_lock);
return 0;
}
static void __s3c2410wdt_stop(void)
{
unsigned long wtcon;
wtcon = readl(wdt_base + S3C2410_WTCON);
wtcon &= ~(S3C2410_WTCON_ENABLE | S3C2410_WTCON_RSTEN);
writel(wtcon, wdt_base + S3C2410_WTCON);
}
static int s3c2410wdt_stop(struct watchdog_device *wdd)
{
spin_lock(&wdt_lock);
__s3c2410wdt_stop();
spin_unlock(&wdt_lock);
return 0;
}
static int s3c2410wdt_start(struct watchdog_device *wdd)
{
unsigned long wtcon;
spin_lock(&wdt_lock);
__s3c2410wdt_stop();
wtcon = readl(wdt_base + S3C2410_WTCON);
wtcon |= S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV128;
if (soft_noboot) {
wtcon |= S3C2410_WTCON_INTEN;
wtcon &= ~S3C2410_WTCON_RSTEN;
} else {
wtcon &= ~S3C2410_WTCON_INTEN;
wtcon |= S3C2410_WTCON_RSTEN;
}
DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n",
__func__, wdt_count, wtcon);
writel(wdt_count, wdt_base + S3C2410_WTDAT);
writel(wdt_count, wdt_base + S3C2410_WTCNT);
writel(wtcon, wdt_base + S3C2410_WTCON);
spin_unlock(&wdt_lock);
return 0;
}
static inline int s3c2410wdt_is_running(void)
{
return readl(wdt_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
}
static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeout)
{
unsigned long freq = clk_get_rate(wdt_clock);
unsigned int count;
unsigned int divisor = 1;
unsigned long wtcon;
if (timeout < 1)
return -EINVAL;
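/* The watchdog always runs from PCLK / 128 (the driver programs S3C2410_WTCON_DIV128). */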
freq /= 128;
count = timeout * freq;
DBG("%s: count=%d, timeout=%d, freq=%lu\n",
__func__, count, timeout, freq);
/* If the count is bigger than the watchdog register,
   work out what divisor we need and whether we can
   actually reach this value.
 */
if (count >= 0x10000) {
for (divisor = 1; divisor <= 0x100; divisor++) {
if ((count / divisor) < 0x10000)
break;
}
if ((count / divisor) >= 0x10000) {
dev_err(wdt_dev, "timeout %d too big\n", timeout);
return -EINVAL;
}
}
DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
__func__, timeout, divisor, count, count/divisor);
count /= divisor;
wdt_count = count;
/* update the pre-scaler */
wtcon = readl(wdt_base + S3C2410_WTCON);
wtcon &= ~S3C2410_WTCON_PRESCALE_MASK;
wtcon |= S3C2410_WTCON_PRESCALE(divisor-1);
writel(count, wdt_base + S3C2410_WTDAT);
writel(wtcon, wdt_base + S3C2410_WTCON);
wdd->timeout = (count * divisor) / freq;
return 0;
}
#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
static const struct watchdog_info s3c2410_wdt_ident = {
.options = OPTIONS,
.firmware_version = 0,
.identity = "S3C2410 Watchdog",
};
static struct watchdog_ops s3c2410wdt_ops = {
.owner = THIS_MODULE,
.start = s3c2410wdt_start,
.stop = s3c2410wdt_stop,
.ping = s3c2410wdt_keepalive,
.set_timeout = s3c2410wdt_set_heartbeat,
};
static struct watchdog_device s3c2410_wdd = {
.info = &s3c2410_wdt_ident,
.ops = &s3c2410wdt_ops,
.timeout = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME,
};
/* interrupt handler code */
static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
{
dev_info(wdt_dev, "watchdog timer expired (irq)\n");
s3c2410wdt_keepalive(&s3c2410_wdd);
return IRQ_HANDLED;
}
#ifdef CONFIG_CPU_FREQ
static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
int ret;
if (!s3c2410wdt_is_running())
goto done;
if (val == CPUFREQ_PRECHANGE) {
/* To ensure that over the change we don't cause the
 * watchdog to trigger, we perform a keep-alive if
 * the watchdog is running.
 */
s3c2410wdt_keepalive(&s3c2410_wdd);
} else if (val == CPUFREQ_POSTCHANGE) {
s3c2410wdt_stop(&s3c2410_wdd);
ret = s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout);
if (ret >= 0)
s3c2410wdt_start(&s3c2410_wdd);
else
goto err;
}
done:
return 0;
err:
dev_err(wdt_dev, "cannot set new value for timeout %d\n",
s3c2410_wdd.timeout);
return ret;
}
static struct notifier_block s3c2410wdt_cpufreq_transition_nb = {
.notifier_call = s3c2410wdt_cpufreq_transition,
};
static inline int s3c2410wdt_cpufreq_register(void)
{
return cpufreq_register_notifier(&s3c2410wdt_cpufreq_transition_nb,
CPUFREQ_TRANSITION_NOTIFIER);
}
static inline void s3c2410wdt_cpufreq_deregister(void)
{
cpufreq_unregister_notifier(&s3c2410wdt_cpufreq_transition_nb,
CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int s3c2410wdt_cpufreq_register(void)
{
return 0;
}
static inline void s3c2410wdt_cpufreq_deregister(void)
{
}
#endif
static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev;
unsigned int wtcon;
int started = 0;
int ret;
DBG("%s: probe=%p\n", __func__, pdev);
dev = &pdev->dev;
wdt_dev = &pdev->dev;
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (wdt_mem == NULL) {
dev_err(dev, "no memory resource specified\n");
return -ENOENT;
}
wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (wdt_irq == NULL) {
dev_err(dev, "no irq resource specified\n");
ret = -ENOENT;
goto err;
}
/* get the memory region for the watchdog timer */
wdt_base = devm_ioremap_resource(dev, wdt_mem);
if (IS_ERR(wdt_base)) {
ret = PTR_ERR(wdt_base);
goto err;
}
DBG("probe: mapped wdt_base=%p\n", wdt_base);
wdt_clock = devm_clk_get(dev, "watchdog");
if (IS_ERR(wdt_clock)) {
dev_err(dev, "failed to find watchdog clock source\n");
ret = PTR_ERR(wdt_clock);
goto err;
}
clk_prepare_enable(wdt_clock);
ret = s3c2410wdt_cpufreq_register();
if (ret < 0) {
pr_err("failed to register cpufreq\n");
goto err_clk;
}
/* see if we can actually set the requested timer margin, and if
* not, try the default value */
watchdog_init_timeout(&s3c2410_wdd, tmr_margin, &pdev->dev);
if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout)) {
started = s3c2410wdt_set_heartbeat(&s3c2410_wdd,
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
if (started == 0)
dev_info(dev,
"tmr_margin value out of range, default %d used\n",
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
else
dev_info(dev, "default timer value is out of range, "
"cannot start\n");
}
ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0,
pdev->name, pdev);
if (ret != 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_cpufreq;
}
watchdog_set_nowayout(&s3c2410_wdd, nowayout);
ret = watchdog_register_device(&s3c2410_wdd);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
goto err_cpufreq;
}
if (tmr_atboot && started == 0) {
dev_info(dev, "starting watchdog timer\n");
s3c2410wdt_start(&s3c2410_wdd);
} else if (!tmr_atboot) {
/* if we're not enabling the watchdog, then ensure it is
* disabled if it has been left running from the bootloader
* or other source */
s3c2410wdt_stop(&s3c2410_wdd);
}
/* print out a statement of readiness */
wtcon = readl(wdt_base + S3C2410_WTCON);
dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
(wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
(wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
(wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
return 0;
err_cpufreq:
s3c2410wdt_cpufreq_deregister();
err_clk:
clk_disable_unprepare(wdt_clock);
wdt_clock = NULL;
err:
wdt_irq = NULL;
wdt_mem = NULL;
return ret;
}
static int s3c2410wdt_remove(struct platform_device *dev)
{
watchdog_unregister_device(&s3c2410_wdd);
s3c2410wdt_cpufreq_deregister();
clk_disable_unprepare(wdt_clock);
wdt_clock = NULL;
wdt_irq = NULL;
wdt_mem = NULL;
return 0;
}
static void s3c2410wdt_shutdown(struct platform_device *dev)
{
s3c2410wdt_stop(&s3c2410_wdd);
}
#ifdef CONFIG_PM
static unsigned long wtcon_save;
static unsigned long wtdat_save;
static int s3c2410wdt_suspend(struct platform_device *dev, pm_message_t state)
{
/* Save watchdog state, and turn it off. */
wtcon_save = readl(wdt_base + S3C2410_WTCON);
wtdat_save = readl(wdt_base + S3C2410_WTDAT);
/* Note that WTCNT doesn't need to be saved. */
s3c2410wdt_stop(&s3c2410_wdd);
return 0;
}
static int s3c2410wdt_resume(struct platform_device *dev)
{
/* Restore watchdog state. */
writel(wtdat_save, wdt_base + S3C2410_WTDAT);
writel(wtdat_save, wdt_base + S3C2410_WTCNT); /* Reset count */
writel(wtcon_save, wdt_base + S3C2410_WTCON);
pr_info("watchdog %sabled\n",
(wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
return 0;
}
#else
#define s3c2410wdt_suspend NULL
#define s3c2410wdt_resume NULL
#endif /* CONFIG_PM */
#ifdef CONFIG_OF
static const struct of_device_id s3c2410_wdt_match[] = {
{ .compatible = "samsung,s3c2410-wdt" },
{},
};
MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
#endif
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
.remove = s3c2410wdt_remove,
.shutdown = s3c2410wdt_shutdown,
.suspend = s3c2410wdt_suspend,
.resume = s3c2410wdt_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-wdt",
.of_match_table = of_match_ptr(s3c2410_wdt_match),
},
};
module_platform_driver(s3c2410wdt_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:s3c2410-wdt");
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_zeroflteskt | drivers/net/wireless/rtlwifi/rtl8192ce/dm.c | 3371 | 3632 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "../rtl8192c/fw_common.h"
void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undec_sm_pwdb;
if (!rtlpriv->dm.dynamic_txpower_enable)
return;
if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
undec_sm_pwdb);
}
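/* Map the smoothed PWDB onto a TX power level; the -3 and -5 margins below the thresholds give some hysteresis between levels. */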
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
| gpl-2.0 |
androidarmv6/android_hardware_atheros_ath6kl-compat | drivers/net/wireless/rtlwifi/rtl8192ce/dm.c | 3371 | 3632 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "../rtl8192c/fw_common.h"
void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undec_sm_pwdb;
if (!rtlpriv->dm.dynamic_txpower_enable)
return;
if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
| gpl-2.0 |
AOSP-YU/platform_kernel_cyanogen_msm8916 | drivers/net/wireless/ath/ath5k/desc.c | 4907 | 22515 | /*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/******************************\
Hardware Descriptor Functions
\******************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Hardware descriptor functions
*
* Here we handle the processing of the low-level hw descriptors
* that hw reads and writes via DMA for each TX and RX attempt (that means
* we can also have descriptors for failed TX/RX tries). We have two kind of
* descriptors for RX and TX, control descriptors tell the hw how to send or
* receive a packet where to read/write it from/to etc and status descriptors
* that contain information about how the packet was sent or received (errors
* included).
*
* Descriptor format is not exactly the same for each MAC chip version so we
* have function pointers on &struct ath5k_hw we initialize at runtime based on
* the chip used.
*/
/************************\
* TX Control descriptors *
\************************/
/**
* ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put on duration field on the header of RTS/CTS
*
* Internal function to initialize a 2-Word TX control descriptor
* found on AR5210 and AR5211 MACs chips.
*
* Returns 0 on success or -EINVAL on false input
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
unsigned int frame_len;
tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
/*
* Validate input
* - Zero retries don't make sense.
* - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
/* Clear descriptor */
memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
/* Setup control descriptor */
/* Verify and set frame length */
/* remove padding we might have added before */
frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if (type == AR5K_PKT_TYPE_BEACON)
pkt_len = roundup(pkt_len, 4);
if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
/*
* Verify and set header length (only 5210)
*/
if (ah->ah_version == AR5K_AR5210) {
if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
return -EINVAL;
tx_ctl->tx_control_0 |=
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
}
/*Differences between 5210-5211*/
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
case AR5K_PKT_TYPE_PROBE_RESP:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
break;
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
break;
default:
frame_type = type;
break;
}
tx_ctl->tx_control_0 |=
AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
} else {
tx_ctl->tx_control_0 |=
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
AR5K_REG_SM(antenna_mode,
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |=
AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
}
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
}
#define _TX_FLAGS_5211(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
}
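/*
 * For example, _TX_FLAGS(0, INTREQ) expands to:
 *
 *	if (flags & AR5K_TXDESC_INTREQ) {
 *		tx_ctl->tx_control_0 |= AR5K_2W_TX_DESC_CTL0_INTREQ;
 *	}
 */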
_TX_FLAGS(0, CLRDMASK);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
if (ah->ah_version == AR5K_AR5211) {
_TX_FLAGS_5211(0, VEOL);
_TX_FLAGS_5211(1, NOACK);
}
#undef _TX_FLAGS
#undef _TX_FLAGS_5211
/*
* WEP crap
*/
if (key_index != AR5K_TXKEYIX_INVALID) {
tx_ctl->tx_control_0 |=
AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |=
AR5K_REG_SM(key_index,
AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
}
/*
* RTS/CTS Duration [5210 ?]
*/
if ((ah->ah_version == AR5K_AR5210) &&
(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
tx_ctl->tx_control_1 |= rtscts_duration &
AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
return 0;
}
/**
* ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: Any combination of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put in the duration field of the RTS/CTS header
*
* Internal function to initialize a 4-Word TX control descriptor
* found on AR5212 and later MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input
*/
static int
ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
/*
* Use local variables for these to reduce load/store access on
* uncached memory
*/
u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
/*
* Validate input
* - Zero retries don't make sense.
* - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
tx_power += ah->ah_txpower.txp_offset;
if (tx_power > AR5K_TUNE_MAX_TXPOWER)
tx_power = AR5K_TUNE_MAX_TXPOWER;
/* Clear descriptor status area */
memset(&desc->ud.ds_tx5212.tx_stat, 0,
sizeof(desc->ud.ds_tx5212.tx_stat));
/* Setup control descriptor */
/* Verify and set frame length */
/* remove padding we might have added before */
frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if (type == AR5K_PKT_TYPE_BEACON)
pkt_len = roundup(pkt_len, 4);
if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
}
_TX_FLAGS(0, CLRDMASK);
_TX_FLAGS(0, VEOL);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
_TX_FLAGS(0, CTSENA);
_TX_FLAGS(1, NOACK);
#undef _TX_FLAGS
/*
* WEP crap
*/
if (key_index != AR5K_TXKEYIX_INVALID) {
txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
txctl1 |= AR5K_REG_SM(key_index,
AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
}
/*
* RTS/CTS
*/
if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
if ((flags & AR5K_TXDESC_RTSENA) &&
(flags & AR5K_TXDESC_CTSENA))
return -EINVAL;
txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
txctl3 |= AR5K_REG_SM(rtscts_rate,
AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
}
tx_ctl->tx_control_0 = txctl0;
tx_ctl->tx_control_1 = txctl1;
tx_ctl->tx_control_2 = txctl2;
tx_ctl->tx_control_3 = txctl3;
return 0;
}
/**
* ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @tx_rate1: HW idx for rate used on transmission series 1
* @tx_tries1: Max number of retransmissions for transmission series 1
* @tx_rate2: HW idx for rate used on transmission series 2
* @tx_tries2: Max number of retransmissions for transmission series 2
* @tx_rate3: HW idx for rate used on transmission series 3
* @tx_tries3: Max number of retransmissions for transmission series 3
*
* Multi rate retry (MRR) tx control descriptors are available only on AR5212
* MACs; they are part of the normal 4-word tx control descriptor (see above),
* but we handle them through a separate function for better abstraction.
*
* Returns 0 on success or -EINVAL on invalid input
*/
int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u_int tx_rate1, u_int tx_tries1,
u_int tx_rate2, u_int tx_tries2,
u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
/* no mrr support for cards older than 5212 */
if (ah->ah_version < AR5K_AR5212)
return 0;
/*
* Rates can be 0 as long as the retry count is 0 too.
* A zero rate and nonzero retry count will put the HW into a mode where
* it continuously sends noise on the channel, so it is important to
* avoid this.
*/
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
(tx_rate2 == 0 && tx_tries2 != 0) ||
(tx_rate3 == 0 && tx_tries3 != 0))) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
if (ah->ah_version == AR5K_AR5212) {
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
#define _XTX_TRIES(_n) \
if (tx_tries##_n) { \
tx_ctl->tx_control_2 |= \
AR5K_REG_SM(tx_tries##_n, \
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
tx_ctl->tx_control_3 |= \
AR5K_REG_SM(tx_rate##_n, \
AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
}
_XTX_TRIES(1);
_XTX_TRIES(2);
_XTX_TRIES(3);
#undef _XTX_TRIES
return 1;
}
return 0;
}
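/*
 * Illustrative use (not taken from this file; the rate indices below are
 * hypothetical HW idx values): after the first series is programmed via
 * ah->ah_setup_tx_desc(), a caller can add up to three fallback series,
 * e.g.
 *
 *	ath5k_hw_setup_mrr_tx_desc(ah, desc,
 *				   rate_idx_24mbit, 2,
 *				   rate_idx_12mbit, 2,
 *				   rate_idx_6mbit, 4);
 *
 * so the hardware steps down the rate as retransmissions accumulate.
 */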
/***********************\
* TX Status descriptors *
\***********************/
/**
* ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int
ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_tx_status *tx_status;
tx_status = &desc->ud.ds_tx5210.tx_stat;
/* No frame has been sent yet, or an error occurred */
if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
return -EINPROGRESS;
/*
* Get descriptor status
*/
ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
/*TODO: ts->ts_virtcol + test*/
ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_SEQ_NUM);
ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = 1;
ts->ts_status = 0;
ts->ts_final_idx = 0;
if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
if (tx_status->tx_status_0 &
AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
ts->ts_status |= AR5K_TXERR_XRETRY;
if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
ts->ts_status |= AR5K_TXERR_FIFO;
if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
ts->ts_status |= AR5K_TXERR_FILT;
}
return 0;
}
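/*
 * Usage note: callers reach this through ah->ah_proc_tx_desc and are
 * expected to treat -EINPROGRESS as "the hardware still owns this
 * descriptor", stopping tx completion processing at this descriptor and
 * resuming on a later pass.
 */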
/**
* ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int
ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_tx_status *tx_status;
u32 txstat0, txstat1;
tx_status = &desc->ud.ds_tx5212.tx_stat;
txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
/* No frame has been sent yet, or an error occurred */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
*/
ts->ts_tstamp = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
ts->ts_shortretry = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
ts->ts_final_retry = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
ts->ts_seqnum = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_SEQ_NUM);
ts->ts_rssi = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = (txstat1 &
AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
ts->ts_status = 0;
ts->ts_final_idx = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
/* TX error */
if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
ts->ts_status |= AR5K_TXERR_XRETRY;
if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
ts->ts_status |= AR5K_TXERR_FIFO;
if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
ts->ts_status |= AR5K_TXERR_FILT;
}
return 0;
}
/****************\
* RX Descriptors *
\****************/
/**
* ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @size: RX buffer length in bytes
* @flags: One of AR5K_RXDESC_* flags
*/
int
ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
rx_ctl = &desc->ud.ds_rx.rx_ctl;
/*
* Clear the descriptor.
* If we don't clear the status descriptor, we get too many results
* while scanning (most of them virtual), and after a few seconds of
* scanning the system hangs. M.F.
*/
memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
return -EINVAL;
/* Setup descriptor */
rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
if (flags & AR5K_RXDESC_INTREQ)
rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
return 0;
}
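/*
 * Typical use (a sketch, not from this file): after DMA-mapping a receive
 * buffer, the caller points the descriptor at it and hands it to this
 * helper, e.g.
 *
 *	ds->ds_data = bf->skbaddr;
 *	ret = ath5k_hw_setup_rx_desc(ah, ds, common->rx_bufsize, 0);
 *
 * where ds, bf and common are the caller's descriptor, buffer and common
 * state.
 */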
/**
* ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5210/5211 MAC.
*
* Returns 0 on success or -EINPROGRESS in case we haven't received the whole
* frame yet.
*/
static int
ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
/*
* Frame receive status
*/
rs->rs_datalen = rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
rs->rs_more = !!(rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_MORE);
/* TODO: this timestamp is 13 bits, but later on we assume 15 bits!
* Also the HAL code for 5210 says the timestamp is bits [10..22] of the
* TSF, and extends the timestamp here to 15 bits.
* We need to check this on a 5210...
*/
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
if (ah->ah_version == AR5K_AR5211)
rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
else
rs->rs_antenna = (rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
? 2 : 1;
/*
* Key table status
*/
if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
/*
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
/* only on 5210 */
if ((ah->ah_version == AR5K_AR5210) &&
(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
rs->rs_status |= AR5K_RXERR_FIFO;
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
}
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_DECRYPT;
}
return 0;
}
/**
* ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5212 and later MAC.
*
* Returns 0 on success or -EINPROGRESS in case we haven't received the whole
* frame yet.
*/
static int
ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
*/
rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
rs->rs_rssi = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
rs->rs_antenna = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
rs->rs_tstamp = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
/*
* Key table status
*/
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
/*
* Receive/descriptor errors
*/
if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
if (!ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_DECRYPT;
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
rs->rs_status |= AR5K_RXERR_MIC;
}
return 0;
}
/********\
* Attach *
\********/
/**
* ath5k_hw_init_desc_functions() - Init function pointers inside ah
* @ah: The &struct ath5k_hw
*
* Maps the internal descriptor functions to the function pointers on ah, used
* from above. This is used as an abstraction layer to handle the various chips
* the same way.
*/
int
ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
} else if (ah->ah_version <= AR5K_AR5211) {
ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
} else
return -ENOTSUPP;
return 0;
}
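/*
 * After this runs, the rest of the driver is chip-agnostic: it calls
 * ah->ah_setup_tx_desc(ah, desc, ...) and ah->ah_proc_rx_desc(ah, desc, &rs)
 * without knowing whether the 2-word or 4-word variants back them.
 */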
| gpl-2.0 |
MassStash/m8whl_sense | net/unix/garbage.c | 7723 | 10621 | /*
* NET3: Garbage Collector For AF_UNIX sockets
*
* Garbage Collector:
* Copyright (C) Barak A. Pearlmutter.
* Released under the GPL version 2 or later.
*
* Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
* If it doesn't work blame me, it worked when Barak sent it.
*
* Assumptions:
*
* - object w/ a bit
* - free list
*
* Current optimizations:
*
* - explicit stack instead of recursion
* - tail recurse on first born instead of immediate push/pop
* - we gather the stuff that should not be killed into tree
* and stack is just a path from root to the current pointer.
*
* Future optimizations:
*
* - don't just push entire root set; process in place
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Alan Cox 07 Sept 1997 Vmalloc internal stack as needed.
* Cope with changing max_files.
* Al Viro 11 Oct 1998
* Graph may have cycles. That is, we can send the descriptor
* of foo to bar and vice versa. Current code chokes on that.
* Fix: move SCM_RIGHTS ones into the separate list and then
* skb_free() them all instead of doing explicit fput's.
* Another problem: since fput() may block somebody may
* create a new unix_socket when we are in the middle of sweep
* phase. Fix: revert the logic wrt MARKED. Mark everything
* upon the beginning and unmark non-junk ones.
*
* [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
* sent to connect()'ed but still not accept()'ed sockets.
* Fixed. Old code had slightly different problem here:
* extra fput() in situation when we passed the descriptor via
* such socket and closed it (descriptor). That would happen on
* each unix_gc() until the accept(). Since the struct file in
* question would go to the free list and might be reused...
* That might be the reason of random oopses on filp_close()
* in unrelated processes.
*
* AV 28 Feb 1999
* Kill the explicit allocation of stack. Now we keep the tree
* with root in dummy + pointer (gc_current) to one of the nodes.
* Stack is represented as path from gc_current to dummy. Unmark
* now means "add to tree". Push == "make it a son of gc_current".
* Pop == "move gc_current to parent". We keep only pointers to
* parents (->gc_tree).
* AV 1 Mar 1999
* Damn. Added missing check for ->dead in listen queues scanning.
*
* Miklos Szeredi 25 Jun 2007
* Reimplement with a cycle collecting algorithm. This should
* solve several problems with the previous code, like being racy
* wrt receive and holding up unrelated socket operations.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
/* Internal data structures and random procedures: */
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
unsigned int unix_tot_inflight;
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = filp->f_path.dentry->d_inode;
/*
* Socket ?
*/
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
struct sock *s = sock->sk;
/*
* PF_UNIX ?
*/
if (s && sock->ops && sock->ops->family == PF_UNIX)
u_sock = s;
}
return u_sock;
}
/*
* Keep an in-flight count for the file
* descriptor if it refers to an AF_UNIX socket.
*/
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
BUG_ON(list_empty(&u->link));
}
unix_tot_inflight++;
spin_unlock(&unix_gc_lock);
}
}
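/*
 * Each queued SCM_RIGHTS copy of a unix socket's fd calls unix_inflight()
 * once; when the carrying skb is received or destroyed, unix_notinflight()
 * below undoes it, so the inflight counter tracks only references held by
 * in-flight messages.
 */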
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
spin_unlock(&unix_gc_lock);
}
}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/*
* Do we have file descriptors ?
*/
if (UNIXCB(skb).fp) {
bool hit = false;
/*
* Process the descriptors of this socket
*/
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
while (nfd--) {
/*
* Get the socket the fd matches,
* if it matches one at all
*/
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
struct unix_sock *u = unix_sk(sk);
/*
* Ignore non-candidates; they could
* have been added to the queues after
* starting the garbage collection
*/
if (u->gc_candidate) {
hit = true;
func(u);
}
}
}
if (hit && hitlist != NULL) {
__skb_unlink(skb, &x->sk_receive_queue);
__skb_queue_tail(hitlist, skb);
}
}
}
spin_unlock(&x->sk_receive_queue.lock);
}
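/*
 * unix_gc() drives the scan helpers with three callbacks defined below:
 * dec_inflight() to subtract internal in-flight references,
 * inc_inflight_move_tail() to restore them while rescanning, and
 * inc_inflight() together with a hitlist to capture the skbs that make up
 * the remaining cycles.
 */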
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
if (x->sk_state != TCP_LISTEN)
scan_inflight(x, func, hitlist);
else {
struct sk_buff *skb;
struct sk_buff *next;
struct unix_sock *u;
LIST_HEAD(embryos);
/*
* For a listening socket collect the queued embryos
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/*
* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
while (!list_empty(&embryos)) {
u = list_entry(embryos.next, struct unix_sock, link);
scan_inflight(&u->sk, func, hitlist);
list_del_init(&u->link);
}
}
}
static void dec_inflight(struct unix_sock *usk)
{
atomic_long_dec(&usk->inflight);
}
static void inc_inflight(struct unix_sock *usk)
{
atomic_long_inc(&usk->inflight);
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/*
* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
*/
if (u->gc_maybe_cycle)
list_move_tail(&u->link, &gc_candidates);
}
static bool gc_in_progress = false;
#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
{
/*
* If the number of inflight sockets is insane,
* force a garbage collection right now.
*/
if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
gc_in_progress = true;
/*
* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
* reference. Since there are no possible receivers, all
* buffers currently on the candidates' queues stay there
* during the garbage collection.
*
* We also know that no new candidate can be added onto the
* receive queues. Other, non-candidate sockets _can_ be
* added to the queues, so we must make sure only to touch
* candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
u->gc_maybe_cycle = 1;
}
}
/*
* Now remove all internal in-flight references to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
/*
* Restore the references for children of all candidates which
* have remaining references. Do this recursively, so that only
* those which form cyclic references remain.
*
* Use a "cursor" link, to make the list traversal safe, even
* though elements might be moved about.
*/
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
u = list_entry(cursor.next, struct unix_sock, link);
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
list_move_tail(&u->link, ¬_cycle_list);
u->gc_maybe_cycle = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(¬_cycle_list)) {
u = list_entry(not_cycle_list.next, struct unix_sock, link);
u->gc_candidate = 0;
list_move_tail(&u->link, &gc_inflight_list);
}
/*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;
wake_up(&unix_gc_wait);
out:
spin_unlock(&unix_gc_lock);
}
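/*
 * Illustrative userspace sketch (ours, not part of this file) of the kind
 * of garbage this collector reclaims: a socket whose only remaining
 * reference is an SCM_RIGHTS message parked on its own receive queue.
 *
 *	int sv[2];
 *	char c = 0;
 *	struct iovec iov = { .iov_base = &c, .iov_len = 1 };
 *	union {
 *		struct cmsghdr align;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} u;
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));
 *	sendmsg(sv[1], &msg, 0);
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * The sendmsg() queues an skb on sv[0]'s receive queue that holds a
 * reference to sv[0]'s own struct file. After both close() calls,
 * file_count() equals the inflight count for that socket, so unix_gc()
 * selects it as a candidate, detects the cycle and purges the skb.
 */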
| gpl-2.0 |
ticklechicken/linux | drivers/video/fbdev/geode/suspend_gx.c | 14123 | 6242 | /*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2008 Andres Salomon <dilinger@debian.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/fb.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <linux/cs5535.h>
#include <asm/delay.h>
#include "gxfb.h"
#ifdef CONFIG_PM
static void gx_save_regs(struct gxfb_par *par)
{
int i;
/* wait for the BLT engine to stop being busy */
do {
i = read_gp(par, GP_BLT_STATUS);
} while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY));
/* save MSRs */
rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
/* save registers */
memcpy(par->gp, par->gp_regs, sizeof(par->gp));
memcpy(par->dc, par->dc_regs, sizeof(par->dc));
memcpy(par->vp, par->vid_regs, sizeof(par->vp));
memcpy(par->fp, par->vid_regs + VP_FP_START, sizeof(par->fp));
/* save the palette */
write_dc(par, DC_PAL_ADDRESS, 0);
for (i = 0; i < ARRAY_SIZE(par->pal); i++)
par->pal[i] = read_dc(par, DC_PAL_DATA);
}
static void gx_set_dotpll(uint32_t dotpll_hi)
{
uint32_t dotpll_lo;
int i;
rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
/* wait for the PLL to lock */
for (i = 0; i < 200; i++) {
rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
break;
udelay(1);
}
/* PLL set, unlock */
dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
}
static void gx_restore_gfx_proc(struct gxfb_par *par)
{
int i;
for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
switch (i) {
case GP_VECTOR_MODE:
case GP_BLT_MODE:
case GP_BLT_STATUS:
case GP_HST_SRC:
/* don't restore these registers */
break;
default:
write_gp(par, i, par->gp[i]);
}
}
}
static void gx_restore_display_ctlr(struct gxfb_par *par)
{
int i;
for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
switch (i) {
case DC_UNLOCK:
/* unlock the DC; runs first */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
break;
case DC_GENERAL_CFG:
/* write without the enables */
write_dc(par, i, par->dc[i] & ~(DC_GENERAL_CFG_VIDE |
DC_GENERAL_CFG_ICNE |
DC_GENERAL_CFG_CURE |
DC_GENERAL_CFG_DFLE));
break;
case DC_DISPLAY_CFG:
/* write without the enables */
write_dc(par, i, par->dc[i] & ~(DC_DISPLAY_CFG_VDEN |
DC_DISPLAY_CFG_GDEN |
DC_DISPLAY_CFG_TGEN));
break;
case DC_RSVD_0:
case DC_RSVD_1:
case DC_RSVD_2:
case DC_RSVD_3:
case DC_RSVD_4:
case DC_LINE_CNT:
case DC_PAL_ADDRESS:
case DC_PAL_DATA:
case DC_DFIFO_DIAG:
case DC_CFIFO_DIAG:
case DC_RSVD_5:
/* don't restore these registers */
break;
default:
write_dc(par, i, par->dc[i]);
}
}
/* restore the palette */
write_dc(par, DC_PAL_ADDRESS, 0);
for (i = 0; i < ARRAY_SIZE(par->pal); i++)
write_dc(par, DC_PAL_DATA, par->pal[i]);
}
static void gx_restore_video_proc(struct gxfb_par *par)
{
int i;
wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
switch (i) {
case VP_VCFG:
/* don't enable video yet */
write_vp(par, i, par->vp[i] & ~VP_VCFG_VID_EN);
break;
case VP_DCFG:
/* don't enable CRT yet */
write_vp(par, i, par->vp[i] &
~(VP_DCFG_DAC_BL_EN | VP_DCFG_VSYNC_EN |
VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
break;
case VP_GAR:
case VP_GDR:
case VP_RSVD_0:
case VP_RSVD_1:
case VP_RSVD_2:
case VP_RSVD_3:
case VP_CRC32:
case VP_AWT:
case VP_VTM:
/* don't restore these registers */
break;
default:
write_vp(par, i, par->vp[i]);
}
}
}
static void gx_restore_regs(struct gxfb_par *par)
{
int i;
gx_set_dotpll((uint32_t) (par->msr.dotpll >> 32));
gx_restore_gfx_proc(par);
gx_restore_display_ctlr(par);
gx_restore_video_proc(par);
/* Flat Panel */
for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
if (i != FP_PM && i != FP_RSVD_0)
write_fp(par, i, par->fp[i]);
}
}
static void gx_disable_graphics(struct gxfb_par *par)
{
/* shut down the engine */
write_vp(par, VP_VCFG, par->vp[VP_VCFG] & ~VP_VCFG_VID_EN);
write_vp(par, VP_DCFG, par->vp[VP_DCFG] & ~(VP_DCFG_DAC_BL_EN |
VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
/* turn off the flat panel */
write_fp(par, FP_PM, par->fp[FP_PM] & ~FP_PM_P);
/* turn off display */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG] &
~(DC_GENERAL_CFG_VIDE | DC_GENERAL_CFG_ICNE |
DC_GENERAL_CFG_CURE | DC_GENERAL_CFG_DFLE));
write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG] &
~(DC_DISPLAY_CFG_VDEN | DC_DISPLAY_CFG_GDEN |
DC_DISPLAY_CFG_TGEN));
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
static void gx_enable_graphics(struct gxfb_par *par)
{
uint32_t fp;
fp = read_fp(par, FP_PM);
if (par->fp[FP_PM] & FP_PM_P) {
/* power on the panel if not already power{ed,ing} on */
if (!(fp & (FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
write_fp(par, FP_PM, par->fp[FP_PM]);
} else {
/* power down the panel if not already power{ed,ing} down */
if (!(fp & (FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
write_fp(par, FP_PM, par->fp[FP_PM]);
}
/* turn everything on */
write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
/* do this last; it will enable the FIFO load */
write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
/* lock the door behind us */
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
int gx_powerdown(struct fb_info *info)
{
struct gxfb_par *par = info->par;
if (par->powered_down)
return 0;
gx_save_regs(par);
gx_disable_graphics(par);
par->powered_down = 1;
return 0;
}
int gx_powerup(struct fb_info *info)
{
struct gxfb_par *par = info->par;
if (!par->powered_down)
return 0;
gx_restore_regs(par);
gx_enable_graphics(par);
par->powered_down = 0;
return 0;
}
#endif
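/*
 * These two entry points are meant to be called from the fbdev driver's PM
 * hooks. A minimal sketch, modelled on the matching gxfb PCI driver (names
 * assumed, not defined in this file):
 *
 *	static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct fb_info *info = pci_get_drvdata(pdev);
 *
 *		if (state.event == PM_EVENT_SUSPEND) {
 *			console_lock();
 *			gx_powerdown(info);
 *			fb_set_suspend(info, 1);
 *			console_unlock();
 *		}
 *		return 0;
 *	}
 *
 * with gx_powerup() called symmetrically from the resume hook.
 */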
| gpl-2.0 |
up2wing/fox-qemu-comment | qemu-2.2.0/roms/u-boot/drivers/net/calxedaxgmac.c | 44 | 14916 | /*
* Copyright 2010-2011 Calxeda, Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <malloc.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#define TX_NUM_DESC 1
#define RX_NUM_DESC 32
#define MAC_TIMEOUT (5*CONFIG_SYS_HZ)
#define ETH_BUF_SZ 2048
#define TX_BUF_SZ (ETH_BUF_SZ * TX_NUM_DESC)
#define RX_BUF_SZ (ETH_BUF_SZ * RX_NUM_DESC)
#define RXSTART 0x00000002
#define TXSTART 0x00002000
#define RXENABLE 0x00000004
#define TXENABLE 0x00000008
#define XGMAC_CONTROL_SPD 0x40000000
#define XGMAC_CONTROL_SPD_MASK 0x60000000
#define XGMAC_CONTROL_SARC 0x10000000
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR 0x04000000
#define XGMAC_CONTROL_CAR_MASK 0x06000000
#define XGMAC_CONTROL_CAR_SHIFT 25
#define XGMAC_CONTROL_DP 0x01000000
#define XGMAC_CONTROL_WD 0x00800000
#define XGMAC_CONTROL_JD 0x00400000
#define XGMAC_CONTROL_JE 0x00100000
#define XGMAC_CONTROL_LM 0x00001000
#define XGMAC_CONTROL_IPC 0x00000400
#define XGMAC_CONTROL_ACS 0x00000080
#define XGMAC_CONTROL_DDIC 0x00000010
#define XGMAC_CONTROL_TE 0x00000008
#define XGMAC_CONTROL_RE 0x00000004
#define XGMAC_DMA_BUSMODE_RESET 0x00000001
#define XGMAC_DMA_BUSMODE_DSL 0x00000004
#define XGMAC_DMA_BUSMODE_DSL_MASK 0x0000007c
#define XGMAC_DMA_BUSMODE_DSL_SHIFT 2
#define XGMAC_DMA_BUSMODE_ATDS 0x00000080
#define XGMAC_DMA_BUSMODE_PBL_MASK 0x00003f00
#define XGMAC_DMA_BUSMODE_PBL_SHIFT 8
#define XGMAC_DMA_BUSMODE_FB 0x00010000
#define XGMAC_DMA_BUSMODE_USP 0x00800000
#define XGMAC_DMA_BUSMODE_8PBL 0x01000000
#define XGMAC_DMA_BUSMODE_AAL 0x02000000
#define XGMAC_DMA_AXIMODE_ENLPI 0x80000000
#define XGMAC_DMA_AXIMODE_MGK 0x40000000
#define XGMAC_DMA_AXIMODE_WROSR 0x00100000
#define XGMAC_DMA_AXIMODE_WROSR_MASK 0x00F00000
#define XGMAC_DMA_AXIMODE_WROSR_SHIFT 20
#define XGMAC_DMA_AXIMODE_RDOSR 0x00010000
#define XGMAC_DMA_AXIMODE_RDOSR_MASK 0x000F0000
#define XGMAC_DMA_AXIMODE_RDOSR_SHIFT 16
#define XGMAC_DMA_AXIMODE_AAL 0x00001000
#define XGMAC_DMA_AXIMODE_BLEN256 0x00000080
#define XGMAC_DMA_AXIMODE_BLEN128 0x00000040
#define XGMAC_DMA_AXIMODE_BLEN64 0x00000020
#define XGMAC_DMA_AXIMODE_BLEN32 0x00000010
#define XGMAC_DMA_AXIMODE_BLEN16 0x00000008
#define XGMAC_DMA_AXIMODE_BLEN8 0x00000004
#define XGMAC_DMA_AXIMODE_BLEN4 0x00000002
#define XGMAC_DMA_AXIMODE_UNDEF 0x00000001
#define XGMAC_CORE_OMR_RTC_SHIFT 3
#define XGMAC_CORE_OMR_RTC_MASK 0x00000018
#define XGMAC_CORE_OMR_RTC 0x00000010
#define XGMAC_CORE_OMR_RSF 0x00000020
#define XGMAC_CORE_OMR_DT 0x00000040
#define XGMAC_CORE_OMR_FEF 0x00000080
#define XGMAC_CORE_OMR_EFC 0x00000100
#define XGMAC_CORE_OMR_RFA_SHIFT 9
#define XGMAC_CORE_OMR_RFA_MASK 0x00000E00
#define XGMAC_CORE_OMR_RFD_SHIFT 12
#define XGMAC_CORE_OMR_RFD_MASK 0x00007000
#define XGMAC_CORE_OMR_TTC_SHIFT 16
#define XGMAC_CORE_OMR_TTC_MASK 0x00030000
#define XGMAC_CORE_OMR_TTC 0x00020000
#define XGMAC_CORE_OMR_FTF 0x00100000
#define XGMAC_CORE_OMR_TSF 0x00200000
#define FIFO_MINUS_1K 0x0
#define FIFO_MINUS_2K 0x1
#define FIFO_MINUS_3K 0x2
#define FIFO_MINUS_4K 0x3
#define FIFO_MINUS_6K 0x4
#define FIFO_MINUS_8K 0x5
#define FIFO_MINUS_12K 0x6
#define FIFO_MINUS_16K 0x7
#define XGMAC_CORE_FLOW_PT_SHIFT 16
#define XGMAC_CORE_FLOW_PT_MASK 0xFFFF0000
#define XGMAC_CORE_FLOW_PT 0x00010000
#define XGMAC_CORE_FLOW_DZQP 0x00000080
#define XGMAC_CORE_FLOW_PLT_SHIFT 4
#define XGMAC_CORE_FLOW_PLT_MASK 0x00000030
#define XGMAC_CORE_FLOW_PLT 0x00000010
#define XGMAC_CORE_FLOW_UP 0x00000008
#define XGMAC_CORE_FLOW_RFE 0x00000004
#define XGMAC_CORE_FLOW_TFE 0x00000002
#define XGMAC_CORE_FLOW_FCB 0x00000001
/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ (0x2000 - 8)
#define RXDESC_EXT_STATUS 0x00000001
#define RXDESC_CRC_ERR 0x00000002
#define RXDESC_RX_ERR 0x00000008
#define RXDESC_RX_WDOG 0x00000010
#define RXDESC_FRAME_TYPE 0x00000020
#define RXDESC_GIANT_FRAME 0x00000080
#define RXDESC_LAST_SEG 0x00000100
#define RXDESC_FIRST_SEG 0x00000200
#define RXDESC_VLAN_FRAME 0x00000400
#define RXDESC_OVERFLOW_ERR 0x00000800
#define RXDESC_LENGTH_ERR 0x00001000
#define RXDESC_SA_FILTER_FAIL 0x00002000
#define RXDESC_DESCRIPTOR_ERR 0x00004000
#define RXDESC_ERROR_SUMMARY 0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK 0x3fff0000
#define RXDESC_DA_FILTER_FAIL 0x40000000
#define RXDESC1_END_RING 0x00008000
#define RXDESC_IP_PAYLOAD_MASK 0x00000003
#define RXDESC_IP_PAYLOAD_UDP 0x00000001
#define RXDESC_IP_PAYLOAD_TCP 0x00000002
#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
#define RXDESC_IP_HEADER_ERR 0x00000008
#define RXDESC_IP_PAYLOAD_ERR 0x00000010
#define RXDESC_IPV4_PACKET 0x00000040
#define RXDESC_IPV6_PACKET 0x00000080
#define TXDESC_UNDERFLOW_ERR 0x00000001
#define TXDESC_JABBER_TIMEOUT 0x00000002
#define TXDESC_LOCAL_FAULT 0x00000004
#define TXDESC_REMOTE_FAULT 0x00000008
#define TXDESC_VLAN_FRAME 0x00000010
#define TXDESC_FRAME_FLUSHED 0x00000020
#define TXDESC_IP_HEADER_ERR 0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY 0x00008000
#define TXDESC_SA_CTRL_INSERT 0x00040000
#define TXDESC_SA_CTRL_REPLACE 0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING 0x00200000
#define TXDESC_CSUM_IP 0x00400000
#define TXDESC_CSUM_IP_PAYLD 0x00800000
#define TXDESC_CSUM_ALL 0x00C00000
#define TXDESC_CRC_EN_REPLACE 0x01000000
#define TXDESC_CRC_EN_APPEND 0x02000000
#define TXDESC_DISABLE_PAD 0x04000000
#define TXDESC_FIRST_SEG 0x10000000
#define TXDESC_LAST_SEG 0x20000000
#define TXDESC_INTERRUPT 0x40000000
#define DESC_OWN 0x80000000
#define DESC_BUFFER1_SZ_MASK 0x00001fff
#define DESC_BUFFER2_SZ_MASK 0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET 16
struct xgmac_regs {
u32 config;
u32 framefilter;
u32 resv_1[4];
u32 flow_control;
u32 vlantag;
u32 version;
u32 vlaninclude;
u32 resv_2[2];
u32 pacestretch;
u32 vlanhash;
u32 resv_3;
u32 intreg;
struct {
u32 hi; /* 0x40 */
u32 lo; /* 0x44 */
} macaddr[16];
u32 resv_4[0xd0];
u32 core_opmode; /* 0x400 */
u32 resv_5[0x2bf];
u32 busmode; /* 0xf00 */
u32 txpoll;
u32 rxpoll;
u32 rxdesclist;
u32 txdesclist;
u32 dma_status;
u32 dma_opmode;
u32 intenable;
u32 resv_6[2];
u32 axi_mode; /* 0xf28 */
};
struct xgmac_dma_desc {
__le32 flags;
__le32 buf_size;
__le32 buf1_addr; /* Buffer 1 Address Pointer */
__le32 buf2_addr; /* Buffer 2 Address Pointer */
__le32 ext_status;
__le32 res[3];
};
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
if (buf_sz > MAX_DESC_BUF_SZ)
p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
else
p->buf_size = cpu_to_le32(buf_sz);
}
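/*
 * Example: with MAX_DESC_BUF_SZ = 0x1ff8, a buf_sz of 0x2100 is split into
 * 0x1ff8 bytes for buffer 1 and the remaining 0x108 bytes encoded in the
 * buffer 2 size field (bits 16 and up); desc_get_buf_len() below recombines
 * the two parts.
 */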
static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
u32 len = le32_to_cpu(p->buf_size);
return (len & DESC_BUFFER1_SZ_MASK) +
((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}
static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
int buf_sz)
{
struct xgmac_dma_desc *end = p + ring_size - 1;
memset(p, 0, sizeof(*p) * ring_size);
for (; p <= end; p++)
desc_set_buf_len(p, buf_sz);
end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}
static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
memset(p, 0, sizeof(*p) * ring_size);
p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}
static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->flags) & DESC_OWN;
}
static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
/* Clear all fields and set the owner */
p->flags = cpu_to_le32(DESC_OWN);
}
static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
u32 tmpflags = le32_to_cpu(p->flags);
tmpflags &= TXDESC_END_RING;
tmpflags |= flags | DESC_OWN;
p->flags = cpu_to_le32(tmpflags);
}
static inline void *desc_get_buf_addr(struct xgmac_dma_desc *p)
{
return (void *)le32_to_cpu(p->buf1_addr);
}
static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
void *paddr, int len)
{
p->buf1_addr = cpu_to_le32(paddr);
if (len > MAX_DESC_BUF_SZ)
p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}
static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
void *paddr, int len)
{
desc_set_buf_len(p, len);
desc_set_buf_addr(p, paddr, len);
}
static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
u32 data = le32_to_cpu(p->flags);
u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
if (data & RXDESC_FRAME_TYPE)
len -= 4;
return len;
}
struct calxeda_eth_dev {
struct xgmac_dma_desc rx_chain[RX_NUM_DESC];
struct xgmac_dma_desc tx_chain[TX_NUM_DESC];
char rxbuffer[RX_BUF_SZ];
u32 tx_currdesc;
u32 rx_currdesc;
struct eth_device *dev;
} __aligned(32);
/*
* Initialize a descriptor ring. Calxeda XGMAC is configured to use
* advanced descriptors.
*/
static void init_rx_desc(struct calxeda_eth_dev *priv)
{
struct xgmac_dma_desc *rxdesc = priv->rx_chain;
struct xgmac_regs *regs = (struct xgmac_regs *)priv->dev->iobase;
void *rxbuffer = priv->rxbuffer;
int i;
desc_init_rx_desc(rxdesc, RX_NUM_DESC, ETH_BUF_SZ);
writel((ulong)rxdesc, ®s->rxdesclist);
for (i = 0; i < RX_NUM_DESC; i++) {
desc_set_buf_addr(rxdesc + i, rxbuffer + (i * ETH_BUF_SZ),
ETH_BUF_SZ);
desc_set_rx_owner(rxdesc + i);
}
}
static void init_tx_desc(struct calxeda_eth_dev *priv)
{
struct xgmac_regs *regs = (struct xgmac_regs *)priv->dev->iobase;
desc_init_tx_desc(priv->tx_chain, TX_NUM_DESC);
writel((ulong)priv->tx_chain, ®s->txdesclist);
}
static int xgmac_reset(struct eth_device *dev)
{
struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
int timeout = MAC_TIMEOUT;
u32 value;
value = readl(®s->config) & XGMAC_CONTROL_SPD_MASK;
writel(XGMAC_DMA_BUSMODE_RESET, ®s->busmode);
while ((timeout-- >= 0) &&
(readl(®s->busmode) & XGMAC_DMA_BUSMODE_RESET))
udelay(1);
writel(value, ®s->config);
return timeout;
}
static void xgmac_hwmacaddr(struct eth_device *dev)
{
struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
u32 macaddr[2];
memcpy(macaddr, dev->enetaddr, 6);
writel(macaddr[1], ®s->macaddr[0].hi);
writel(macaddr[0], ®s->macaddr[0].lo);
}
static int xgmac_init(struct eth_device *dev, bd_t * bis)
{
struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
struct calxeda_eth_dev *priv = dev->priv;
int value;
if (xgmac_reset(dev) < 0)
return -1;
/* set the hardware MAC address */
xgmac_hwmacaddr(dev);
/* set the AXI bus modes */
value = XGMAC_DMA_BUSMODE_ATDS |
(16 << XGMAC_DMA_BUSMODE_PBL_SHIFT) |
XGMAC_DMA_BUSMODE_FB | XGMAC_DMA_BUSMODE_AAL;
writel(value, ®s->busmode);
value = XGMAC_DMA_AXIMODE_AAL | XGMAC_DMA_AXIMODE_BLEN16 |
XGMAC_DMA_AXIMODE_BLEN8 | XGMAC_DMA_AXIMODE_BLEN4;
writel(value, ®s->axi_mode);
/* set flow control parameters and store and forward mode */
value = (FIFO_MINUS_12K << XGMAC_CORE_OMR_RFD_SHIFT) |
(FIFO_MINUS_4K << XGMAC_CORE_OMR_RFA_SHIFT) |
XGMAC_CORE_OMR_EFC | XGMAC_CORE_OMR_TSF;
writel(value, ®s->core_opmode);
/* enable pause frames */
value = (1024 << XGMAC_CORE_FLOW_PT_SHIFT) |
(1 << XGMAC_CORE_FLOW_PLT_SHIFT) |
XGMAC_CORE_FLOW_UP | XGMAC_CORE_FLOW_RFE | XGMAC_CORE_FLOW_TFE;
writel(value, ®s->flow_control);
/* Initialize the descriptor chains */
init_rx_desc(priv);
init_tx_desc(priv);
/* these must be reset to 0, or the next start-up will cause issues */
priv->tx_currdesc = 0;
priv->rx_currdesc = 0;
/* set default core values */
value = readl(®s->config);
value &= XGMAC_CONTROL_SPD_MASK;
value |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_ACS |
XGMAC_CONTROL_IPC | XGMAC_CONTROL_CAR;
/* Everything is ready; enable both the MAC and DMA */
value |= RXENABLE | TXENABLE;
writel(value, ®s->config);
value = readl(®s->dma_opmode);
value |= RXSTART | TXSTART;
writel(value, ®s->dma_opmode);
return 0;
}
static int xgmac_tx(struct eth_device *dev, void *packet, int length)
{
struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
struct calxeda_eth_dev *priv = dev->priv;
u32 currdesc = priv->tx_currdesc;
struct xgmac_dma_desc *txdesc = &priv->tx_chain[currdesc];
int timeout;
desc_set_buf_addr_and_size(txdesc, packet, length);
desc_set_tx_owner(txdesc, TXDESC_FIRST_SEG |
TXDESC_LAST_SEG | TXDESC_CRC_EN_APPEND);
/* write poll demand */
writel(1, ®s->txpoll);
timeout = 1000000;
while (desc_get_owner(txdesc)) {
if (timeout-- < 0) {
printf("xgmac: TX timeout\n");
return -ETIMEDOUT;
}
udelay(1);
}
priv->tx_currdesc = (currdesc + 1) & (TX_NUM_DESC - 1);
return 0;
}
static int xgmac_rx(struct eth_device *dev)
{
struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
struct calxeda_eth_dev *priv = dev->priv;
u32 currdesc = priv->rx_currdesc;
struct xgmac_dma_desc *rxdesc = &priv->rx_chain[currdesc];
int length = 0;
/* check if the host has the desc */
if (desc_get_owner(rxdesc))
return -1; /* something bad happened */
length = desc_get_rx_frame_len(rxdesc);
NetReceive(desc_get_buf_addr(rxdesc), length);
/* set descriptor back to owned by XGMAC */
desc_set_rx_owner(rxdesc);
writel(1, ®s->rxpoll);
priv->rx_currdesc = (currdesc + 1) & (RX_NUM_DESC - 1);
return length;
}
static void xgmac_halt(struct eth_device *dev)
{
struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
struct calxeda_eth_dev *priv = dev->priv;
int value;
/* Disable TX/RX */
value = readl(®s->config);
value &= ~(RXENABLE | TXENABLE);
writel(value, ®s->config);
/* Disable DMA */
value = readl(®s->dma_opmode);
value &= ~(RXSTART | TXSTART);
writel(value, ®s->dma_opmode);
/* these must be reset to 0, or the next start-up will cause issues */
priv->tx_currdesc = 0;
priv->rx_currdesc = 0;
}
int calxedaxgmac_initialize(u32 id, ulong base_addr)
{
struct eth_device *dev;
struct calxeda_eth_dev *priv;
struct xgmac_regs *regs;
u32 macaddr[2];
regs = (struct xgmac_regs *)base_addr;
/* check hardware version */
if (readl(®s->version) != 0x1012)
return -1;
dev = malloc(sizeof(*dev));
if (!dev)
return 0;
memset(dev, 0, sizeof(*dev));
/* Structure must be aligned, because it contains the descriptors */
priv = memalign(32, sizeof(*priv));
if (!priv) {
free(dev);
return 0;
}
dev->iobase = (int)base_addr;
dev->priv = priv;
priv->dev = dev;
sprintf(dev->name, "xgmac%d", id);
/* The MAC address is already configured, so read it from registers. */
macaddr[1] = readl(®s->macaddr[0].hi);
macaddr[0] = readl(®s->macaddr[0].lo);
memcpy(dev->enetaddr, macaddr, 6);
dev->init = xgmac_init;
dev->send = xgmac_tx;
dev->recv = xgmac_rx;
dev->halt = xgmac_halt;
eth_register(dev);
return 1;
}
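/*
 * Board code is expected to register the MACs, e.g. (base addresses are
 * board specific; the values below are only an illustration):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		int rc = 0;
 *
 *		rc += calxedaxgmac_initialize(0, 0xfff50000);
 *		rc += calxedaxgmac_initialize(1, 0xfff51000);
 *		return rc;
 *	}
 */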
| gpl-2.0 |
davet321/rpi-linux | arch/powerpc/kernel/ptrace.c | 44 | 91881 | /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/context_tracking.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* The parameter save area on the stack is used to store arguments being
* passed to the callee function and is located at a fixed offset from the
* stack pointer.
*/
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define TVSO(f) (offsetof(struct thread_vr_state, f))
#define TFSO(f) (offsetof(struct thread_fp_state, f))
#define TSO(f) (offsetof(struct thread_struct, f))
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
GPR_OFFSET_NAME(2),
GPR_OFFSET_NAME(3),
GPR_OFFSET_NAME(4),
GPR_OFFSET_NAME(5),
GPR_OFFSET_NAME(6),
GPR_OFFSET_NAME(7),
GPR_OFFSET_NAME(8),
GPR_OFFSET_NAME(9),
GPR_OFFSET_NAME(10),
GPR_OFFSET_NAME(11),
GPR_OFFSET_NAME(12),
GPR_OFFSET_NAME(13),
GPR_OFFSET_NAME(14),
GPR_OFFSET_NAME(15),
GPR_OFFSET_NAME(16),
GPR_OFFSET_NAME(17),
GPR_OFFSET_NAME(18),
GPR_OFFSET_NAME(19),
GPR_OFFSET_NAME(20),
GPR_OFFSET_NAME(21),
GPR_OFFSET_NAME(22),
GPR_OFFSET_NAME(23),
GPR_OFFSET_NAME(24),
GPR_OFFSET_NAME(25),
GPR_OFFSET_NAME(26),
GPR_OFFSET_NAME(27),
GPR_OFFSET_NAME(28),
GPR_OFFSET_NAME(29),
GPR_OFFSET_NAME(30),
GPR_OFFSET_NAME(31),
REG_OFFSET_NAME(nip),
REG_OFFSET_NAME(msr),
REG_OFFSET_NAME(ctr),
REG_OFFSET_NAME(link),
REG_OFFSET_NAME(xer),
REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
REG_OFFSET_NAME(softe),
#else
REG_OFFSET_NAME(mq),
#endif
REG_OFFSET_NAME(trap),
REG_OFFSET_NAME(dar),
REG_OFFSET_NAME(dsisr),
REG_OFFSET_END,
};
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
/*
* If task is not current, it will have been flushed already to
* its thread_struct during __switch_to().
*
* A reclaim flushes ALL the state.
*/
if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(TM_CAUSE_SIGNAL);
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
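/*
 * A minimal usage sketch for the two lookups above (the caller and the
 * register name are hypothetical, e.g. a kprobe fetch argument resolved
 * at registration time):
 *
 *	int off = regs_query_register_offset("nip");
 *	if (off >= 0)
 *		val = *(unsigned long *)((char *)regs + off);
 */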
/*
* Does not yet catch signals sent when the child dies;
* that would be handled in exit.c or in signal.c.
*/
/*
* Set of msr bits that gdb can change on behalf of a process.
*/
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif
/*
* Max register writeable via put_reg
*/
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG PT_MQ
#else
#define PT_MAX_PUT_REG PT_CCR
#endif
static unsigned long get_user_msr(struct task_struct *task)
{
return task->thread.regs->msr | task->thread.fpexc_mode;
}
static int set_user_msr(struct task_struct *task, unsigned long msr)
{
task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
return 0;
}
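/*
 * A minimal sketch of what the masking above permits on classic (non
 * CONFIG_PPC_ADV_DEBUG_REGS) configurations; only the MSR_SE/MSR_BE bits
 * of the value passed in can take effect:
 *
 *	set_user_msr(task, get_user_msr(task) | MSR_SE);
 *	(enables single-step; any other bits in the argument are ignored)
 */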
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
return 0;
}
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
task->thread.ckpt_regs.trap = trap & 0xfff0;
return 0;
}
#endif
#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
*data = task->thread.dscr;
return 0;
}
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
task->thread.dscr = dscr;
task->thread.dscr_inherit = 1;
return 0;
}
#else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
return -EIO;
}
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
return -EIO;
}
#endif
/*
* We prevent mucking around with the reserved area of trap
* which is used internally by the kernel.
*/
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
task->thread.regs->trap = trap & 0xfff0;
return 0;
}
/*
* Get contents of register REGNO in task TASK.
*/
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
if ((task->thread.regs == NULL) || !data)
return -EIO;
if (regno == PT_MSR) {
*data = get_user_msr(task);
return 0;
}
if (regno == PT_DSCR)
return get_user_dscr(task, data);
if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
*data = ((unsigned long *)task->thread.regs)[regno];
return 0;
}
return -EIO;
}
/*
* Write contents of register REGNO in task TASK.
*/
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return set_user_msr(task, data);
if (regno == PT_TRAP)
return set_user_trap(task, data);
if (regno == PT_DSCR)
return set_user_dscr(task, data);
if (regno <= PT_MAX_PUT_REG) {
((unsigned long *)task->thread.regs)[regno] = data;
return 0;
}
return -EIO;
}
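/*
 * A minimal sketch of how the two helpers above back the PEEKUSR and
 * POKEUSR ptrace requests (the register index and the arithmetic are
 * hypothetical):
 *
 *	unsigned long val;
 *	if (!ptrace_get_reg(child, PT_R3, &val))	(read gpr[3])
 *		ptrace_put_reg(child, PT_R3, val + 1);	(write it back)
 */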
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int i, ret;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, offsetof(struct pt_regs, msr));
if (!ret) {
unsigned long msr = get_user_msr(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
offsetof(struct pt_regs, msr),
offsetof(struct pt_regs, msr) +
sizeof(msr));
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_trap(target, reg);
}
if (!ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
/*
* When the transaction is active, 'transact_fp' holds the current running
* value of all FPR registers and 'fp_state' holds the last checkpointed
* value of all FPR registers for the current transaction. When transaction
* is not active 'fp_state' holds the current running state of all the FPR
* registers. So this function which returns the current running values of
* all the FPR registers, needs to know whether any transaction is active
* or not.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*
* There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
* which determine the final code in this function. All the combinations of
* these two config options are possible except the one below as transactional
* memory config pulls in CONFIG_VSX automatically.
*
* !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
*/
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
u64 buf[33];
int i;
#endif
flush_fp_to_thread(target);
#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_TRANS_FPR(i);
buf[32] = target->thread.transact_fp.fpscr;
} else {
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
}
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#endif
#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#endif
#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
#endif
}
/*
* When the transaction is active, 'transact_fp' holds the current running
* value of all FPR registers and 'fp_state' holds the last checkpointed
* value of all FPR registers for the current transaction. When transaction
* is not active 'fp_state' holds the current running state of all the FPR
* registers. So this function which sets the current running values of
* all the FPR registers, needs to know whether any transaction is active
* or not.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*
* There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
* which determine the final code in this function. All the combinations of
* these two config options are possible except the one below as transactional
* memory config pulls in CONFIG_VSX automatically.
*
* !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
*/
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
u64 buf[33];
int i;
#endif
flush_fp_to_thread(target);
#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
for (i = 0; i < 32 ; i++)
target->thread.TS_TRANS_FPR(i) = buf[i];
target->thread.transact_fp.fpscr = buf[32];
} else {
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
}
return 0;
#endif
#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
return 0;
#endif
#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
* The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
* vrsave as the first word (offset 0) within the quadword.
*
* This definition of the VMX state is compatible with the current PPC32
* ptrace interface. This allows signal handling and ptrace to use the
* same structures. This also simplifies the implementation of a bi-arch
* (combined 32- and 64-bit) gdb.
*/
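/*
 * In byte offsets (each vector128 is 16 bytes) the layout above works
 * out to:
 *
 *	vr0..vr31 at bytes 0..511		(quadwords 0-31)
 *	vscr word at byte 32*16 + 12 = 524	(last word of quadword 32)
 *	vrsave at byte 33*16 = 528		(first word of quadword 33)
 *	total transfer: 34 * 16 = 544 bytes
 */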
static int vr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_altivec_to_thread(target);
return target->thread.used_vr ? regset->n : 0;
}
/*
* When the transaction is active, 'transact_vr' holds the current running
* value of all the VMX registers and 'vr_state' holds the last checkpointed
* value of all the VMX registers for the current transaction to fall back
* on in case it aborts. When transaction is not active 'vr_state' holds
* the current running state of all the VMX registers. So this function which
* gets the current running values of all the VMX registers, needs to know
* whether any transaction is active or not.
*
* Userspace interface buffer layout:
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
* };
*/
static int vr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct thread_vr_state *addr;
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
flush_fp_to_thread(target);
flush_tmregs_to_thread(target);
addr = &target->thread.transact_vr;
} else {
addr = &target->thread.vr_state;
}
#else
addr = &target->thread.vr_state;
#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
addr, 0,
33 * sizeof(vector128));
if (!ret) {
/*
* Copy out only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr))
vrsave.word = target->thread.transact_vrsave;
else
vrsave.word = target->thread.vrsave;
#else
vrsave.word = target->thread.vrsave;
#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
return ret;
}
/*
* When the transaction is active, 'transact_vr' holds the current running
* value of all the VMX registers and 'vr_state' holds the last checkpointed
* value of all the VMX registers for the current transaction to fall back
* on in case it aborts. When transaction is not active 'vr_state' holds
* the current running state of all the VMX registers. So this function which
* sets the current running values of all the VMX registers, needs to know
* whether any transaction is active or not.
*
* Userspace interface buffer layout:
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
* };
*/
static int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct thread_vr_state *addr;
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
flush_fp_to_thread(target);
flush_tmregs_to_thread(target);
addr = &target->thread.transact_vr;
} else {
addr = &target->thread.vr_state;
}
#else
addr = &target->thread.vr_state;
#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
addr, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr))
vrsave.word = target->thread.transact_vrsave;
else
vrsave.word = target->thread.vrsave;
#else
vrsave.word = target->thread.vrsave;
#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr))
target->thread.transact_vrsave = vrsave.word;
else
target->thread.vrsave = vrsave.word;
#else
target->thread.vrsave = vrsave.word;
#endif
}
}
return ret;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
* Currently to set and get all the VSX state, you need to call
* the fp and VMX calls as well. This only gets/sets the lower 32
* 128-bit VSX registers.
*/
static int vsr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
/*
* When the transaction is active, 'transact_fp' holds the current running
* value of all FPR registers and 'fp_state' holds the last checkpointed
* value of all FPR registers for the current transaction. When transaction
* is not active 'fp_state' holds the current running state of all the FPR
* registers. So this function which returns the current running values of
* all the FPR registers, needs to know whether any transaction is active
* or not.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 vsx[32];
* };
*/
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
u64 buf[32];
int ret, i;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
#endif
flush_vsx_to_thread(target);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.
transact_fp.fpr[i][TS_VSRLOWOFFSET];
} else {
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.
fp_state.fpr[i][TS_VSRLOWOFFSET];
}
#else
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
/*
* When the transaction is active, 'transact_fp' holds the current running
* value of all FPR registers and 'fp_state' holds the last checkpointed
* value of all FPR registers for the current transaction. When transaction
* is not active 'fp_state' holds the current running state of all the FPR
* registers. So this function which sets the current running values of all
* the FPR registers, needs to know whether any transaction is active or not.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 vsx[32];
* };
*/
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[32];
int ret, i;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
#endif
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
for (i = 0; i < 32 ; i++)
target->thread.transact_fp.
fpr[i][TS_VSRLOWOFFSET] = buf[i];
} else {
for (i = 0; i < 32 ; i++)
target->thread.fp_state.
fpr[i][TS_VSRLOWOFFSET] = buf[i];
}
#else
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
#endif
return ret;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
* For get_evrregs/set_evrregs functions 'data' has the following layout:
*
* struct {
* u32 evr[32];
* u64 acc;
* u32 spefscr;
* }
*/
static int evr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_spe_to_thread(target);
return target->thread.used_spe ? regset->n : 0;
}
static int evr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
static int evr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/**
* tm_cgpr_active - get active number of registers in CGPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the transaction checkpointed GPR category.
*/
static int tm_cgpr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cgpr_get - get CGPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets transaction checkpointed GPR registers.
*
* When the transaction is active, 'ckpt_regs' holds all the checkpointed
* GPR register values for the current transaction to fall back on if it
* aborts in between. This function gets those checkpointed GPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* struct pt_regs ckpt_regs;
* };
*/
static int tm_cgpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
0, offsetof(struct pt_regs, msr));
if (!ret) {
unsigned long msr = get_user_ckpt_msr(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
offsetof(struct pt_regs, msr),
offsetof(struct pt_regs, msr) +
sizeof(msr));
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs.orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
/**
* tm_cgpr_set - set the CGPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed GPR registers.
*
* When the transaction is active, 'ckpt_regs' holds the checkpointed
* GPR register values for the current transaction to fall back on if it
* aborts in between. This function sets those checkpointed GPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* struct pt_regs ckpt_regs;
* };
*/
static int tm_cgpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_ckpt_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs.orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_ckpt_trap(target, reg);
}
if (!ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
/**
* tm_cfpr_active - get active number of registers in CFPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the transaction checkpointed FPR category.
*/
static int tm_cfpr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cfpr_get - get CFPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets in transaction checkpointed FPR registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed FPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
*};
*/
static int tm_cfpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
u64 buf[33];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}
/**
* tm_cfpr_set - set CFPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed FPR registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* FPR register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* FPR registers. The userspace interface buffer layout is as follows.
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
*};
*/
static int tm_cfpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[33];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
return 0;
}
/**
* tm_cvmx_active - get active number of registers in CVMX
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the checkpointed VMX category.
*/
static int tm_cvmx_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cvmx_get - get CVMX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets in transaction checkpointed VMX registers.
*
* When the transaction is active 'vr_state' and 'vrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
*};
*/
static int tm_cvmx_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret) {
/*
* Copy out only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
return ret;
}
/**
* tm_cvmx_set - set CVMX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed VMX registers.
*
* When the transaction is active 'vr_state' and 'vrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
*};
*/
static int tm_cvmx_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
target->thread.vrsave = vrsave.word;
}
return ret;
}
/**
* tm_cvsx_active - get active number of registers in CVSX
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the transaction checkpointed VSX category.
*/
static int tm_cvsx_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
/**
* tm_cvsx_get - get CVSX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets in transaction checkpointed VSX registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed VSX registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* u64 vsx[32];
*};
*/
static int tm_cvsx_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
u64 buf[32];
int ret, i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
/**
* tm_cvsx_set - set CVSX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed VSX registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* VSX register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* VSX registers. The userspace interface buffer layout is as follows.
*
* struct data {
* u64 vsx[32];
*};
*/
static int tm_cvsx_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[32];
int ret, i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
/**
* tm_spr_active - get active number of registers in TM SPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks the active number of available
* registers in the transactional memory SPR category.
*/
static int tm_spr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
return regset->n;
}
/**
* tm_spr_get - get the TM related SPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets transactional memory related SPR registers.
* The userspace interface buffer layout is as follows.
*
* struct {
* u64 tm_tfhar;
* u64 tm_texasr;
* u64 tm_tfiar;
* };
*/
static int tm_spr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
/* Build tests */
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
/* Flush the states */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* TFHAR register */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfhar, 0, sizeof(u64));
/* TEXASR register */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_texasr, sizeof(u64),
2 * sizeof(u64));
/* TFIAR register */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfiar,
2 * sizeof(u64), 3 * sizeof(u64));
return ret;
}
/**
* tm_spr_set - set the TM related SPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets transactional memory related SPR registers.
* The userspace interface buffer layout is as follows.
*
* struct {
* u64 tm_tfhar;
* u64 tm_texasr;
* u64 tm_tfiar;
* };
*/
static int tm_spr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
/* Build tests */
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
/* Flush the states */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* TFHAR register */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfhar, 0, sizeof(u64));
/* TEXASR register */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_texasr, sizeof(u64),
2 * sizeof(u64));
/* TFIAR register */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfiar,
2 * sizeof(u64), 3 * sizeof(u64));
return ret;
}
static int tm_tar_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
static int tm_tar_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tar, 0, sizeof(u64));
return ret;
}
static int tm_tar_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tar, 0, sizeof(u64));
return ret;
}
static int tm_ppr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
static int tm_ppr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_ppr, 0, sizeof(u64));
return ret;
}
static int tm_ppr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_ppr, 0, sizeof(u64));
return ret;
}
static int tm_dscr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
static int tm_dscr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_dscr, 0, sizeof(u64));
return ret;
}
static int tm_dscr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_dscr, 0, sizeof(u64));
return ret;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC64
static int ppr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ppr, 0, sizeof(u64));
return ret;
}
static int ppr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ppr, 0, sizeof(u64));
return ret;
}
static int dscr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.dscr, 0, sizeof(u64));
return ret;
}
static int dscr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.dscr, 0, sizeof(u64));
return ret;
}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
static int tar_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tar, 0, sizeof(u64));
return ret;
}
static int tar_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tar, 0, sizeof(u64));
return ret;
}
static int ebb_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (target->thread.used_ebb)
return regset->n;
return 0;
}
static int ebb_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
/* Build tests */
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (!target->thread.used_ebb)
return -ENODATA;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
}
static int ebb_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
/* Build tests */
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (target->thread.used_ebb)
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbrr, 0, sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbhr, sizeof(unsigned long),
2 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.bescr,
2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
return ret;
}
static int pmu_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return regset->n;
}
static int pmu_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
/* Build tests */
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.siar, 0,
5 * sizeof(unsigned long));
}
static int pmu_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
/* Build tests */
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.siar, 0,
sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.sdar, sizeof(unsigned long),
2 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.sier, 2 * sizeof(unsigned long),
3 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.mmcr2, 3 * sizeof(unsigned long),
4 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.mmcr0, 4 * sizeof(unsigned long),
5 * sizeof(unsigned long));
return ret;
}
#endif
/*
* These are our native regset flavors.
*/
enum powerpc_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_ALTIVEC
REGSET_VMX,
#endif
#ifdef CONFIG_VSX
REGSET_VSX,
#endif
#ifdef CONFIG_SPE
REGSET_SPE,
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
REGSET_TM_CGPR, /* TM checkpointed GPR registers */
REGSET_TM_CFPR, /* TM checkpointed FPR registers */
REGSET_TM_CVMX, /* TM checkpointed VMX registers */
REGSET_TM_CVSX, /* TM checkpointed VSX registers */
REGSET_TM_SPR, /* TM specific SPR registers */
REGSET_TM_CTAR, /* TM checkpointed TAR register */
REGSET_TM_CPPR, /* TM checkpointed PPR register */
REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
REGSET_PPR, /* PPR register */
REGSET_DSCR, /* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
REGSET_TAR, /* TAR register */
REGSET_EBB, /* EBB registers */
REGSET_PMR, /* Performance Monitor Registers */
#endif
};
static const struct user_regset native_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.get = gpr_get, .set = gpr_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_VSX
[REGSET_VSX] = {
.core_note_type = NT_PPC_VSX, .n = 32,
.size = sizeof(double), .align = sizeof(double),
.active = vsr_active, .get = vsr_get, .set = vsr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
[REGSET_TM_CGPR] = {
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
},
[REGSET_TM_CFPR] = {
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
},
[REGSET_TM_CVMX] = {
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
},
[REGSET_TM_CVSX] = {
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
},
[REGSET_TM_SPR] = {
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
},
[REGSET_TM_CTAR] = {
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
},
[REGSET_TM_CPPR] = {
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
},
[REGSET_TM_CDSCR] = {
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
},
#endif
#ifdef CONFIG_PPC64
[REGSET_PPR] = {
.core_note_type = NT_PPC_PPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = ppr_get, .set = ppr_set
},
[REGSET_DSCR] = {
.core_note_type = NT_PPC_DSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = dscr_get, .set = dscr_set
},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
[REGSET_TAR] = {
.core_note_type = NT_PPC_TAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = tar_get, .set = tar_set
},
[REGSET_EBB] = {
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .get = ebb_get, .set = ebb_set
},
[REGSET_PMR] = {
.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
.size = sizeof(u64), .align = sizeof(u64),
.active = pmu_active, .get = pmu_get, .set = pmu_set
},
#endif
};
static const struct user_regset_view user_ppc_native_view = {
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
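/*
 * A minimal userspace sketch of reaching one of these regsets through
 * PTRACE_GETREGSET, selected by its NT_* core note type (pid and buffer
 * handling are hypothetical):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	(on success, iov.iov_len holds the number of bytes filled in)
 */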
#ifdef CONFIG_PPC64
#include <linux/compat.h>
static int gpr32_get_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf, bool tm_active)
{
const unsigned long *regs = &target->thread.regs->gpr[0];
const unsigned long *ckpt_regs;
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
ckpt_regs = &target->thread.ckpt_regs.gpr[0];
#endif
if (tm_active) {
regs = ckpt_regs;
} else {
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/*
* We have a partial register set.
* Fill 14-31 with bogus values.
*/
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
}
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_MSR; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
if (count > 0 && pos == PT_MSR) {
reg = get_user_msr(target);
if (kbuf)
*k++ = reg;
else if (__put_user(reg, u++))
return -EFAULT;
++pos;
--count;
}
if (kbuf)
for (; count > 0 && pos < PT_REGS_COUNT; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_REGS_COUNT; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
PT_REGS_COUNT * sizeof(reg), -1);
}
static int gpr32_set_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf, bool tm_active)
{
unsigned long *regs = &target->thread.regs->gpr[0];
unsigned long *ckpt_regs;
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
ckpt_regs = &target->thread.ckpt_regs.gpr[0];
#endif
if (tm_active) {
regs = ckpt_regs;
} else {
regs = &target->thread.regs->gpr[0];
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
}
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
regs[pos++] = *k++;
else
for (; count > 0 && pos < PT_MSR; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
if (count > 0 && pos == PT_MSR) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_msr(target, reg);
++pos;
--count;
}
if (kbuf) {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
regs[pos++] = *k++;
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
++k;
} else {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
if (__get_user(reg, u++))
return -EFAULT;
}
if (count > 0 && pos == PT_TRAP) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_trap(target, reg);
++pos;
--count;
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int tm_cgpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
}
static int tm_cgpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
}
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.get = gpr32_get, .set = gpr32_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
[REGSET_TM_CGPR] = {
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active,
.get = tm_cgpr32_get, .set = tm_cgpr32_set
},
[REGSET_TM_CFPR] = {
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
},
[REGSET_TM_CVMX] = {
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
},
[REGSET_TM_CVSX] = {
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
},
[REGSET_TM_SPR] = {
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
},
[REGSET_TM_CTAR] = {
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
},
[REGSET_TM_CPPR] = {
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
},
[REGSET_TM_CDSCR] = {
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
},
#endif
#ifdef CONFIG_PPC64
[REGSET_PPR] = {
.core_note_type = NT_PPC_PPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = ppr_get, .set = ppr_set
},
[REGSET_DSCR] = {
.core_note_type = NT_PPC_DSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = dscr_get, .set = dscr_set
},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
[REGSET_TAR] = {
.core_note_type = NT_PPC_TAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = tar_get, .set = tar_set
},
[REGSET_EBB] = {
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .get = ebb_get, .set = ebb_set
},
#endif
};
static const struct user_regset_view user_ppc_compat_view = {
.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_PPC64 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
if (test_tsk_thread_flag(task, TIF_32BIT))
return &user_ppc_compat_view;
#endif
return &user_ppc_native_view;
}
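/*
* Editor's sketch (not part of the original file) of how a tracer
* reaches the view selected above: userspace reads the GPR regset via
* PTRACE_GETREGSET with the NT_PRSTATUS note type, and for a 32-bit
* tracee on a 64-bit kernel the request is served by compat_regsets:
*
*	#include <sys/ptrace.h>
*	#include <sys/uio.h>
*	#include <elf.h>
*
*	elf_gregset_t gregs;
*	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
*	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
*/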
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.debug.dbcr0 &= ~DBCR0_BT;
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
regs->msr |= MSR_BE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* The logic to disable single stepping should be as
* simple as turning off the Instruction Complete flag.
* And, after doing so, if all debug flags are off, turn
* off DBCR0(IDM) and MSR(DE) .... Torez
*/
task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
/*
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
*/
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
/*
* All debug events were off.....
*/
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
regs->msr &= ~MSR_DE;
}
#else
regs->msr &= ~(MSR_SE | MSR_BE);
#endif
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions in PPC64.
* The SIGTRAP signal is generated automatically for us in do_dabr().
* We don't have to do anything about that here.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &(task->thread);
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
struct arch_hw_breakpoint hw_brk;
#endif
/* For ppc64 we support one DABR and no IABRs at the moment.
* For embedded processors we support one DAC and no IACs at the
* moment.
*/
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
* passed together with the data address, fitting the design of the
* DABR register, as follows:
*
* bit 0: Read flag
* bit 1: Write flag
* bit 2: Breakpoint translation
*
* Thus, we use them here accordingly.
*/
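/*
* Illustration (editor-added): a tracer arming a write watchpoint at a
* double-word-aligned address 'addr' would therefore pass
* data == (addr | 0x4 | 0x2), i.e. the translation bit plus the write
* flag, leaving the low three bits of the address itself clear.
*/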
/* Ensure breakpoint translation bit is set */
if (data && !(data & HW_BRK_TYPE_TRANSLATE))
return -EIO;
hw_brk.address = data & (~HW_BRK_TYPE_DABR);
hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
hw_brk.len = 8;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
bp = thread->ptrace_bps[0];
if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
return 0;
}
if (bp) {
attr = bp->attr;
attr.bp_addr = hw_brk.address;
arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
/* Enable breakpoint */
attr.disabled = false;
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret) {
return ret;
}
thread->ptrace_bps[0] = bp;
thread->hw_brk = hw_brk;
return 0;
}
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = hw_brk.address;
arch_bp_generic_fields(hw_brk.type,
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, NULL, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
return PTR_ERR(bp);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but here we assume only the mode bits are passed,
* so as not to cause alignment restrictions for DAC-based processors.
*/
/* DAC's hold the whole address without any mode flags */
task->thread.debug.dac1 = data & ~0x3UL;
if (task->thread.debug.dac1 == 0) {
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
task->thread.regs->msr &= ~MSR_DE;
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
return 0;
}
/* Read or Write bits must be set */
if (!(data & 0x3UL))
return -EINVAL;
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
task->thread.debug.dbcr0 |= DBCR0_IDM;
/* Check for write and read flags and set DBCR0 accordingly */
dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
if (data & 0x1UL)
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
return 0;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
user_disable_single_step(child);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
slot2_in_use = 1;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
slot4_in_use = 1;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
/* Make sure range is valid. */
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
/* We need a pair of IAC registers */
if ((!slot1_in_use) && (!slot2_in_use)) {
slot = 1;
child->thread.debug.iac1 = bp_info->addr;
child->thread.debug.iac2 = bp_info->addr2;
child->thread.debug.dbcr0 |= DBCR0_IAC1;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC12X;
else
dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if ((!slot3_in_use) && (!slot4_in_use)) {
slot = 3;
child->thread.debug.iac3 = bp_info->addr;
child->thread.debug.iac4 = bp_info->addr2;
child->thread.debug.dbcr0 |= DBCR0_IAC3;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC34X;
else
dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
} else
return -ENOSPC;
} else {
/* We only need one. If possible, leave a pair free in
* case a range is needed later.
*/
if (!slot1_in_use) {
/*
* Don't use iac1 if iac1-iac2 are free and either
* iac3 or iac4 (but not both) are free
*/
if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
slot = 1;
child->thread.debug.iac1 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC1;
goto out;
}
}
if (!slot2_in_use) {
slot = 2;
child->thread.debug.iac2 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if (!slot3_in_use) {
slot = 3;
child->thread.debug.iac3 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC3;
} else if (!slot4_in_use) {
slot = 4;
child->thread.debug.iac4 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
} else
return -ENOSPC;
}
out:
child->thread.debug.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot;
}
static int del_instruction_bp(struct task_struct *child, int slot)
{
switch (slot) {
case 1:
if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
/* address range - clear slots 1 & 2 */
child->thread.debug.iac2 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
}
child->thread.debug.iac1 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
break;
case 2:
if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
/* used in a range */
return -EINVAL;
child->thread.debug.iac2 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 3:
if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
/* address range - clear slots 3 & 4 */
child->thread.debug.iac4 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
}
child->thread.debug.iac3 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
break;
case 4:
if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
/* Used in a range */
return -EINVAL;
child->thread.debug.iac4 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
int byte_enable =
(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
& 0xf;
int condition_mode =
bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
int slot;
if (byte_enable && (condition_mode == 0))
return -EINVAL;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
slot = 1;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC1R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC1W;
child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.debug.dvc1 =
(unsigned long)bp_info->condition_value;
child->thread.debug.dbcr2 |=
((byte_enable << DBCR2_DVC1BE_SHIFT) |
(condition_mode << DBCR2_DVC1M_SHIFT));
}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
/* Both dac1 and dac2 are part of a range */
return -ENOSPC;
#endif
} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
slot = 2;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC2R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC2W;
child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.debug.dvc2 =
(unsigned long)bp_info->condition_value;
child->thread.debug.dbcr2 |=
((byte_enable << DBCR2_DVC2BE_SHIFT) |
(condition_mode << DBCR2_DVC2M_SHIFT));
}
#endif
} else
return -ENOSPC;
child->thread.debug.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot + 4;
}
static int del_dac(struct task_struct *child, int slot)
{
if (slot == 1) {
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
child->thread.debug.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
child->thread.debug.dac2 = 0;
child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
}
child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.debug.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.debug.dvc2 = 0;
#endif
child->thread.debug.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else
return -EINVAL;
return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
/* We don't allow range watchpoints to be used with DVC */
if (bp_info->condition_mode)
return -EINVAL;
/*
* Best effort to verify the address range. The user/supervisor bits
* prevent trapping in kernel space, but let's fail on an obvious bad
* range. The simple test on the mask is not fool-proof, and any
* exclusive range will spill over into kernel space.
*/
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (mode == PPC_BREAKPOINT_MODE_MASK) {
/*
* dac2 is a bitmask. Don't allow a mask that makes a
* kernel space address from a valid dac1 value
*/
if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
return -EIO;
} else {
/*
* For range breakpoints, addr2 must also be a valid address
*/
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
}
if (child->thread.debug.dbcr0 &
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
return -ENOSPC;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
child->thread.debug.dac1 = bp_info->addr;
child->thread.debug.dac2 = bp_info->addr2;
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
child->thread.debug.dbcr2 |= DBCR2_DAC12M;
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
child->thread.regs->msr |= MSR_DE;
return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
static long ppc_set_hwdebug(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int len = 0;
struct thread_struct *thread = &(child->thread);
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
struct arch_hw_breakpoint brk;
#endif
if (bp_info->version != 1)
return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Check for invalid flags and combinations
*/
if ((bp_info->trigger_type == 0) ||
(bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
PPC_BREAKPOINT_TRIGGER_RW)) ||
(bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
(bp_info->condition_mode &
~(PPC_BREAKPOINT_CONDITION_MODE |
PPC_BREAKPOINT_CONDITION_BE_ALL)))
return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
#endif
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
(bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
return -EINVAL;
return set_instruction_bp(child, bp_info);
}
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
return set_dac(child, bp_info);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
return set_dac_range(child, bp_info);
#else
return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
/*
* We only support one data breakpoint
*/
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
brk.address = bp_info->addr & ~7UL;
brk.type = HW_BRK_TYPE_TRANSLATE;
brk.len = 8;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Check if the request is for 'range' breakpoints. We can
* support it if range < 8 bytes.
*/
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
len = bp_info->addr2 - bp_info->addr;
else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
len = 1;
else
return -EINVAL;
bp = thread->ptrace_bps[0];
if (bp)
return -ENOSPC;
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
attr.bp_len = len;
arch_bp_generic_fields(brk.type, &attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, NULL, child);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
return PTR_ERR(bp);
}
return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
return -EINVAL;
if (child->thread.hw_brk.address)
return -ENOSPC;
child->thread.hw_brk = brk;
return 1;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
}
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret = 0;
struct thread_struct *thread = &(child->thread);
struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
int rc;
if (data <= 4)
rc = del_instruction_bp(child, (int)data);
else
rc = del_dac(child, (int)data - 4);
if (!rc) {
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
child->thread.debug.dbcr1)) {
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
child->thread.regs->msr &= ~MSR_DE;
}
}
return rc;
#else
if (data != 1)
return -EINVAL;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
bp = thread->ptrace_bps[0];
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
} else
ret = -ENOENT;
return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
if (child->thread.hw_brk.address == 0)
return -ENOENT;
child->thread.hw_brk.address = 0;
child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
return 0;
#endif
}
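/*
* Editor's sketch (illustrative, not part of the original file) of the
* matching userspace calls for the two helpers above, using the uapi
* types from <asm/ptrace.h>; 'watch_addr' is a hypothetical address:
*
*	struct ppc_hw_breakpoint bp = {
*		.version	= 1,
*		.trigger_type	= PPC_BREAKPOINT_TRIGGER_WRITE,
*		.addr_mode	= PPC_BREAKPOINT_MODE_EXACT,
*		.condition_mode	= PPC_BREAKPOINT_CONDITION_NONE,
*		.addr		= (__u64)watch_addr,
*	};
*	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
*	...
*	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot);
*/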
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EPERM;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long index, tmp;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_get_reg(child, (int) index, &tmp);
if (ret)
break;
} else {
unsigned int fpidx = index - PT_FPR0;
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&tmp, &child->thread.TS_FPR(fpidx),
sizeof(long));
else
tmp = child->thread.fp_state.fpscr;
}
ret = put_user(tmp, datalp);
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
unsigned int fpidx = index - PT_FPR0;
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&child->thread.TS_FPR(fpidx), &data,
sizeof(long));
else
child->thread.fp_state.fpscr = data;
ret = 0;
}
break;
}
case PPC_PTRACE_GETHWDBGINFO: {
struct ppc_debug_info dbginfo;
dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
dbginfo.data_bp_alignment = 4;
dbginfo.sizeof_condition = 4;
dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
dbginfo.features |=
PPC_DEBUG_FEATURE_DATA_BP_RANGE |
PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
dbginfo.num_instruction_bps = 0;
dbginfo.num_data_bps = 1;
dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
dbginfo.data_bp_alignment = 8;
#else
dbginfo.data_bp_alignment = 4;
#endif
dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
if (cpu_has_feature(CPU_FTR_DAWR))
dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
ret = __copy_to_user(datavp, &dbginfo,
sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
ret = ppc_set_hwdebug(child, &bp_info);
break;
}
case PPC_PTRACE_DELHWDEBUG: {
ret = ppc_del_hwdebug(child, data);
break;
}
case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr_fake;
#endif
ret = -EINVAL;
/* We only support one DABR and no IABRs at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.debug.dac1, datalp);
#else
dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
(child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
ret = put_user(dabr_fake, datalp);
#endif
break;
}
case PTRACE_SET_DEBUGREG:
ret = ptrace_set_debugreg(child, addr, data);
break;
#ifdef CONFIG_PPC64
case PTRACE_GETREGS64:
#endif
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
#endif
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#ifdef CONFIG_SECCOMP
static int do_seccomp(struct pt_regs *regs)
{
if (!test_thread_flag(TIF_SECCOMP))
return 0;
/*
* The ABI we present to seccomp tracers is that r3 contains
* the syscall return value and orig_gpr3 contains the first
* syscall parameter. This is different from the ptrace ABI, where
* both r3 and orig_gpr3 contain the first syscall parameter.
*/
regs->gpr[3] = -ENOSYS;
/*
* We use the __ version here because we have already checked
* TIF_SECCOMP. If this fails, there is nothing left to do, we
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
if (__secure_computing(NULL))
return -1;
/*
* The syscall was allowed by seccomp, restore the register
* state to what audit expects.
* Note that we use orig_gpr3, which means a seccomp tracer can
* modify the first syscall parameter (in orig_gpr3) and also
* allow the syscall to proceed.
*/
regs->gpr[3] = regs->orig_gpr3;
return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
/**
* do_syscall_trace_enter() - Do syscall tracing on kernel entry.
* @regs: the pt_regs of the task to trace (current)
*
* Performs various types of tracing on syscall entry. This includes seccomp,
* ptrace, syscall tracepoints and audit.
*
* The pt_regs are potentially visible to userspace via ptrace, so their
* contents are ABI.
*
* One or more of the tracers may modify the contents of pt_regs, in particular
* to modify arguments or even the syscall number itself.
*
* It's also possible that a tracer can choose to reject the system call. In
* that case this function will return an illegal syscall number, and will put
* an appropriate return value in regs->r3.
*
* Return: the (possibly changed) syscall number.
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
user_exit();
/*
* The tracer may decide to abort the syscall; if so,
* tracehook_report_syscall_entry() returns nonzero. Note that the
* tracer may also just change regs->gpr[0] to an invalid syscall
* number; that case is handled below on the exit path.
*/
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
goto skip;
/* Run seccomp after ptrace; allow it to set gpr[3]. */
if (do_seccomp(regs))
return -1;
/* Avoid trace and audit when syscall is invalid. */
if (regs->gpr[0] >= NR_syscalls)
goto skip;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
#ifdef CONFIG_PPC64
if (!is_32bit_task())
audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
regs->gpr[5], regs->gpr[6]);
else
#endif
audit_syscall_entry(regs->gpr[0],
regs->gpr[3] & 0xffffffff,
regs->gpr[4] & 0xffffffff,
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
/* Return the possibly modified but valid syscall number */
return regs->gpr[0];
skip:
/*
* If we are aborting explicitly, or if the syscall number is
* now invalid, set the return value to -ENOSYS.
*/
regs->gpr[3] = -ENOSYS;
return -1;
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
user_enter();
}
| gpl-2.0 |
logicbricks/linux-xlnx | mm/mempool.c | 556 | 10965 | /*
* linux/mm/mempool.c
*
* memory buffer pool support. Such pools are mostly used
* for guaranteed, deadlock-free memory allocations during
* extreme VM load.
*
* started by Ingo Molnar, Copyright (C) 2001
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
static void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
BUG_ON(pool->curr_nr <= 0);
return pool->elements[--pool->curr_nr];
}
/**
* mempool_destroy - deallocate a memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
*
* Free all reserved elements in @pool and @pool itself. This function
* only sleeps if the free_fn() function sleeps.
*/
void mempool_destroy(mempool_t *pool)
{
while (pool->curr_nr) {
void *element = remove_element(pool);
pool->free(element, pool->pool_data);
}
kfree(pool->elements);
kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
/**
* mempool_create - create a memory pool
* @min_nr: the minimum number of elements guaranteed to be
* allocated for this pool.
* @alloc_fn: user-defined element-allocation function.
* @free_fn: user-defined element-freeing function.
* @pool_data: optional private data available to the user-defined functions.
*
* this function creates and allocates a guaranteed size, preallocated
* memory pool. The pool can be used from the mempool_alloc() and mempool_free()
* functions. This function might sleep. Both the alloc_fn() and the free_fn()
* functions might sleep - as long as the mempool_alloc() function is not called
* from IRQ contexts.
*/
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
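/*
* Editor's usage sketch (not part of the original file): callers
* typically pair mempool_create() with the slab helpers declared in
* <linux/mempool.h> and defined at the bottom of this file; the cache
* argument here is a hypothetical caller-owned kmem_cache.
*/
static mempool_t * __maybe_unused
example_create_slab_pool(struct kmem_cache *cachep)
{
/* Reserve four elements so allocations can ride out VM pressure. */
return mempool_create(4, mempool_alloc_slab, mempool_free_slab,
cachep);
}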
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;
pool->elements = kmalloc_node(min_nr * sizeof(void *),
gfp_mask, node_id);
if (!pool->elements) {
kfree(pool);
return NULL;
}
spin_lock_init(&pool->lock);
pool->min_nr = min_nr;
pool->pool_data = pool_data;
init_waitqueue_head(&pool->wait);
pool->alloc = alloc_fn;
pool->free = free_fn;
/*
* First pre-allocate the guaranteed number of buffers.
*/
while (pool->curr_nr < pool->min_nr) {
void *element;
element = pool->alloc(gfp_mask, pool->pool_data);
if (unlikely(!element)) {
mempool_destroy(pool);
return NULL;
}
add_element(pool, element);
}
return pool;
}
EXPORT_SYMBOL(mempool_create_node);
/**
* mempool_resize - resize an existing memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
* @new_min_nr: the new minimum number of elements guaranteed to be
* allocated for this pool.
* @gfp_mask: the usual allocation bitmask.
*
* This function shrinks/grows the pool. In the case of growing,
* it cannot be guaranteed that the pool will be grown to the new
* size immediately, but new mempool_free() calls will refill it.
*
* Note: the caller must guarantee that no mempool_destroy() is called
* while this function is running. mempool_alloc() & mempool_free()
* might be called (e.g. from IRQ contexts) while this function executes.
*/
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
void *element;
void **new_elements;
unsigned long flags;
BUG_ON(new_min_nr <= 0);
spin_lock_irqsave(&pool->lock, flags);
if (new_min_nr <= pool->min_nr) {
while (new_min_nr < pool->curr_nr) {
element = remove_element(pool);
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data);
spin_lock_irqsave(&pool->lock, flags);
}
pool->min_nr = new_min_nr;
goto out_unlock;
}
spin_unlock_irqrestore(&pool->lock, flags);
/* Grow the pool */
new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
if (!new_elements)
return -ENOMEM;
spin_lock_irqsave(&pool->lock, flags);
if (unlikely(new_min_nr <= pool->min_nr)) {
/* Raced, other resize will do our work */
spin_unlock_irqrestore(&pool->lock, flags);
kfree(new_elements);
goto out;
}
memcpy(new_elements, pool->elements,
pool->curr_nr * sizeof(*new_elements));
kfree(pool->elements);
pool->elements = new_elements;
pool->min_nr = new_min_nr;
while (pool->curr_nr < pool->min_nr) {
spin_unlock_irqrestore(&pool->lock, flags);
element = pool->alloc(gfp_mask, pool->pool_data);
if (!element)
goto out;
spin_lock_irqsave(&pool->lock, flags);
if (pool->curr_nr < pool->min_nr) {
add_element(pool, element);
} else {
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data); /* Raced */
goto out;
}
}
out_unlock:
spin_unlock_irqrestore(&pool->lock, flags);
out:
return 0;
}
EXPORT_SYMBOL(mempool_resize);
/**
* mempool_alloc - allocate an element from a specific memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
* @gfp_mask: the usual allocation bitmask.
*
* this function only sleeps if the alloc_fn() function sleeps or
* returns NULL. Note that due to preallocation, this function
* *never* fails when called from process contexts. (it might
* fail if called from an IRQ context.)
* Note: using __GFP_ZERO is not supported.
*/
void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
void *element;
unsigned long flags;
wait_queue_t wait;
gfp_t gfp_temp;
VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
might_sleep_if(gfp_mask & __GFP_WAIT);
gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
gfp_mask |= __GFP_NOWARN; /* failures are OK */
gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
repeat_alloc:
element = pool->alloc(gfp_temp, pool->pool_data);
if (likely(element != NULL))
return element;
spin_lock_irqsave(&pool->lock, flags);
if (likely(pool->curr_nr)) {
element = remove_element(pool);
spin_unlock_irqrestore(&pool->lock, flags);
/* paired with rmb in mempool_free(), read comment there */
smp_wmb();
/*
* Update the allocation stack trace as this is more useful
* for debugging.
*/
kmemleak_update_trace(element);
return element;
}
/*
* We use gfp mask w/o __GFP_WAIT or IO for the first round. If
* alloc failed with that and @pool was empty, retry immediately.
*/
if (gfp_temp != gfp_mask) {
spin_unlock_irqrestore(&pool->lock, flags);
gfp_temp = gfp_mask;
goto repeat_alloc;
}
/* We must not sleep if !__GFP_WAIT */
if (!(gfp_mask & __GFP_WAIT)) {
spin_unlock_irqrestore(&pool->lock, flags);
return NULL;
}
/* Let's wait for someone else to return an element to @pool */
init_wait(&wait);
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(&pool->lock, flags);
/*
* FIXME: this should be io_schedule(). The timeout is there as a
* workaround for some DM problems in 2.6.18.
*/
io_schedule_timeout(5*HZ);
finish_wait(&pool->wait, &wait);
goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
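/*
* Editor's usage sketch (illustrative): the canonical alloc/free pair
* in an I/O path. GFP_NOIO may sleep but cannot recurse into I/O, so
* with the preallocated reserve mempool_alloc() never returns NULL
* from process context here.
*/
static void __maybe_unused example_pool_roundtrip(mempool_t *pool)
{
void *element = mempool_alloc(pool, GFP_NOIO);
/* ... use element, e.g. as a request descriptor ... */
mempool_free(element, pool);
}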
/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
*
* this function only sleeps if the free_fn() function sleeps.
*/
void mempool_free(void *element, mempool_t *pool)
{
unsigned long flags;
if (unlikely(element == NULL))
return;
/*
* Paired with the wmb in mempool_alloc(). The preceding read is
* for @element and the following @pool->curr_nr. This ensures
* that the visible value of @pool->curr_nr is from after the
* allocation of @element. This is necessary for fringe cases
* where @element was passed to this task without going through
* barriers.
*
* For example, assume @p is %NULL at the beginning and one task
* performs "p = mempool_alloc(...);" while another task is doing
* "while (!p) cpu_relax(); mempool_free(p, ...);". This function
* may end up using curr_nr value which is from before allocation
* of @p without the following rmb.
*/
smp_rmb();
/*
* For correctness, we need a test which is guaranteed to trigger
* if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
* without locking achieves that and refilling as soon as possible
* is desirable.
*
* Because curr_nr visible here is always a value after the
* allocation of @element, any task which decremented curr_nr below
* min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
* incremented to min_nr afterwards. If curr_nr gets incremented
* to min_nr after the allocation of @element, the elements
* allocated after that are subject to the same guarantee.
*
* Waiters happen iff curr_nr is 0 and the above guarantee also
* ensures that there will be frees which return elements to the
* pool waking up the waiters.
*/
if (unlikely(pool->curr_nr < pool->min_nr)) {
spin_lock_irqsave(&pool->lock, flags);
if (likely(pool->curr_nr < pool->min_nr)) {
add_element(pool, element);
spin_unlock_irqrestore(&pool->lock, flags);
wake_up(&pool->wait);
return;
}
spin_unlock_irqrestore(&pool->lock, flags);
}
pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
/*
* A commonly used alloc and free fn.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
struct kmem_cache *mem = pool_data;
return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
void mempool_free_slab(void *element, void *pool_data)
{
struct kmem_cache *mem = pool_data;
kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
* A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
* specified by pool_data
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
size_t size = (size_t)pool_data;
return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);
void mempool_kfree(void *element, void *pool_data)
{
kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
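/*
* Editor's note (illustrative): with the two helpers above, a pool of
* fixed-size kmalloc buffers is a one-liner; pool_data smuggles the
* allocation size through the void pointer. The 256-byte size and
* eight-element reserve are example values.
*/
static mempool_t * __maybe_unused example_create_kmalloc_pool(void)
{
return mempool_create(8, mempool_kmalloc, mempool_kfree,
(void *)(unsigned long)256);
}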
/*
* A simple mempool-backed page allocator that allocates pages
* of the order specified by pool_data.
*/
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
int order = (int)(long)pool_data;
return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);
void mempool_free_pages(void *element, void *pool_data)
{
int order = (int)(long)pool_data;
__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
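/*
* Editor's note (illustrative): the page-based twin of the kmalloc
* pool above; pool_data carries the allocation order, here order-2
* (four contiguous pages per element) as an example value.
*/
static mempool_t * __maybe_unused example_create_page_pool(void)
{
return mempool_create(2, mempool_alloc_pages, mempool_free_pages,
(void *)(long)2);
}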
| gpl-2.0 |
MeltedButter/kernel_msm | drivers/gpu/msm/kgsl_cffdump.c | 556 | 18334 | /* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* #define DEBUG */
#define ALIGN_CPU
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_debugfs.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "adreno_pm4types.h"
#include "adreno.h"
#include "adreno_cp_parser.h"
static struct rchan *chan;
static struct dentry *dir;
static int suspended;
static size_t dropped;
static size_t subbuf_size = 256*1024;
static size_t n_subbufs = 64;
/* forward declarations */
static void destroy_channel(void);
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);
static spinlock_t cffdump_lock;
static ulong serial_nr;
static ulong total_bytes;
static ulong total_syncmem;
static long last_sec;
/* Some simulators have start address of gmem at this offset */
#define KGSL_CFF_GMEM_OFFSET 0x100000
#define MEMBUF_SIZE 64
#define CFF_OP_WRITE_REG 0x00000002
struct cff_op_write_reg {
unsigned char op;
uint addr;
uint value;
} __packed;
#define CFF_OP_POLL_REG 0x00000004
struct cff_op_poll_reg {
unsigned char op;
uint addr;
uint value;
uint mask;
} __packed;
#define CFF_OP_WAIT_IRQ 0x00000005
struct cff_op_wait_irq {
unsigned char op;
} __packed;
#define CFF_OP_RMW 0x0000000a
#define CFF_OP_WRITE_MEM 0x0000000b
struct cff_op_write_mem {
unsigned char op;
uint addr;
uint value;
} __packed;
#define CFF_OP_WRITE_MEMBUF 0x0000000c
struct cff_op_write_membuf {
unsigned char op;
uint addr;
ushort count;
uint buffer[MEMBUF_SIZE];
} __packed;
#define CFF_OP_MEMORY_BASE 0x0000000d
struct cff_op_memory_base {
unsigned char op;
uint base;
uint size;
uint gmemsize;
} __packed;
#define CFF_OP_HANG 0x0000000e
struct cff_op_hang {
unsigned char op;
} __packed;
#define CFF_OP_EOF 0xffffffff
struct cff_op_eof {
unsigned char op;
} __packed;
#define CFF_OP_VERIFY_MEM_FILE 0x00000007
#define CFF_OP_WRITE_SURFACE_PARAMS 0x00000011
struct cff_op_user_event {
unsigned char op;
unsigned int op1;
unsigned int op2;
unsigned int op3;
unsigned int op4;
unsigned int op5;
} __packed;
static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
{
static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
"pqrstuvwxyz0123456789+/";
out[0] = tob64[in[0] >> 2];
out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
| ((in[2] & 0xc0) >> 6)] : '=');
out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
}
static void b64_encode(const unsigned char *in_buf, int in_size,
unsigned char *out_buf, int out_bufsize, int *out_size)
{
unsigned char in[3], out[4];
int i, len;
*out_size = 0;
while (in_size > 0) {
len = 0;
for (i = 0; i < 3; ++i) {
if (in_size-- > 0) {
in[i] = *in_buf++;
++len;
} else
in[i] = 0;
}
if (len) {
b64_encodeblock(in, out, len);
if (out_bufsize < 4) {
pr_warn("kgsl: cffdump: %s: out of buffer\n",
__func__);
return;
}
for (i = 0; i < 4; ++i)
*out_buf++ = out[i];
*out_size += 4;
out_bufsize -= 4;
}
}
}
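/*
* Worked example (editor-added): the input bytes 0x4d 0x61 0x6e ("Man")
* regroup into the 6-bit values 19, 22, 5 and 46, which index tob64[]
* to give "TWFu". A final group shorter than three bytes is zero-padded
* and the unused output slots are emitted as '='.
*/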
#define KLOG_TMPBUF_SIZE (1024)
static void klog_printk(const char *fmt, ...)
{
/* per-cpu klog formatting temporary buffer */
static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];
va_list args;
int len;
char *cbuf;
unsigned long flags;
local_irq_save(flags);
cbuf = klog_buf[smp_processor_id()];
va_start(args, fmt);
len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
total_bytes += len;
va_end(args);
relay_write(chan, cbuf, len);
local_irq_restore(flags);
}
static struct cff_op_write_membuf cff_op_write_membuf;
static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
{
void *data;
int len, out_size;
struct cff_op_write_mem cff_op_write_mem;
uint addr = cff_op_write_membuf.addr
- sizeof(uint)*cff_op_write_membuf.count;
if (!cff_op_write_membuf.count) {
pr_warn("kgsl: cffdump: membuf: count == 0, skipping");
return;
}
if (cff_op_write_membuf.count != 1) {
cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
cff_op_write_membuf.addr = addr;
len = sizeof(cff_op_write_membuf) -
sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
data = &cff_op_write_membuf;
} else {
cff_op_write_mem.op = CFF_OP_WRITE_MEM;
cff_op_write_mem.addr = addr;
cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
data = &cff_op_write_mem;
len = sizeof(cff_op_write_mem);
}
b64_encode(data, len, out_buf, out_bufsize, &out_size);
out_buf[out_size] = 0;
klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
cff_op_write_membuf.count = 0;
cff_op_write_membuf.addr = 0;
}
void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5)
{
struct cff_op_write_reg cff_op_write_reg;
struct cff_op_poll_reg cff_op_poll_reg;
struct cff_op_wait_irq cff_op_wait_irq;
struct cff_op_memory_base cff_op_memory_base;
struct cff_op_hang cff_op_hang;
struct cff_op_eof cff_op_eof;
struct cff_op_user_event cff_op_user_event;
unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
void *data;
int len = 0, out_size;
long cur_secs;
spin_lock(&cffdump_lock);
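/*
* Coalesce back-to-back CFF_OP_WRITE_MEM ops into a single membuf
* record; flush the pending buffer when the address run breaks or the
* buffer fills, and before emitting any other opcode.
*/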
if (opcode == CFF_OP_WRITE_MEM) {
if ((cff_op_write_membuf.addr != op1 &&
cff_op_write_membuf.count)
|| (cff_op_write_membuf.count == MEMBUF_SIZE))
cffdump_membuf(id, out_buf, sizeof(out_buf));
cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
cff_op_write_membuf.addr = op1 + sizeof(uint);
spin_unlock(&cffdump_lock);
return;
} else if (cff_op_write_membuf.count)
cffdump_membuf(id, out_buf, sizeof(out_buf));
spin_unlock(&cffdump_lock);
switch (opcode) {
case CFF_OP_WRITE_REG:
cff_op_write_reg.op = opcode;
cff_op_write_reg.addr = op1;
cff_op_write_reg.value = op2;
data = &cff_op_write_reg;
len = sizeof(cff_op_write_reg);
break;
case CFF_OP_POLL_REG:
cff_op_poll_reg.op = opcode;
cff_op_poll_reg.addr = op1;
cff_op_poll_reg.value = op2;
cff_op_poll_reg.mask = op3;
data = &cff_op_poll_reg;
len = sizeof(cff_op_poll_reg);
break;
case CFF_OP_WAIT_IRQ:
cff_op_wait_irq.op = opcode;
data = &cff_op_wait_irq;
len = sizeof(cff_op_wait_irq);
break;
case CFF_OP_MEMORY_BASE:
cff_op_memory_base.op = opcode;
cff_op_memory_base.base = op1;
cff_op_memory_base.size = op2;
cff_op_memory_base.gmemsize = op3;
data = &cff_op_memory_base;
len = sizeof(cff_op_memory_base);
break;
case CFF_OP_HANG:
cff_op_hang.op = opcode;
data = &cff_op_hang;
len = sizeof(cff_op_hang);
break;
case CFF_OP_EOF:
cff_op_eof.op = opcode;
data = &cff_op_eof;
len = sizeof(cff_op_eof);
break;
case CFF_OP_WRITE_SURFACE_PARAMS:
case CFF_OP_VERIFY_MEM_FILE:
cff_op_user_event.op = opcode;
cff_op_user_event.op1 = op1;
cff_op_user_event.op2 = op2;
cff_op_user_event.op3 = op3;
cff_op_user_event.op4 = op4;
cff_op_user_event.op5 = op5;
data = &cff_op_user_event;
len = sizeof(cff_op_user_event);
break;
}
if (len) {
b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
out_buf[out_size] = 0;
klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
} else
pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);
cur_secs = get_seconds();
if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
"seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
serial_nr);
last_sec = cur_secs;
}
}
EXPORT_SYMBOL(kgsl_cffdump_printline);
void kgsl_cffdump_init(void)
{
struct dentry *debugfs_dir = kgsl_get_debugfs_dir();
#ifdef ALIGN_CPU
cpumask_t mask;
cpumask_clear(&mask);
cpumask_set_cpu(0, &mask);
sched_setaffinity(0, &mask);
#endif
if (!debugfs_dir || IS_ERR(debugfs_dir)) {
KGSL_CORE_ERR("Debugfs directory is bad\n");
return;
}
spin_lock_init(&cffdump_lock);
dir = debugfs_create_dir("cff", debugfs_dir);
if (!dir) {
KGSL_CORE_ERR("debugfs_create_dir failed\n");
return;
}
chan = create_channel(subbuf_size, n_subbufs);
}
void kgsl_cffdump_destroy(void)
{
if (chan)
relay_flush(chan);
destroy_channel();
if (dir)
debugfs_remove(dir);
}
void kgsl_cffdump_open(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (!device->cff_dump_enable)
return;
/* Set the maximum possible address range */
kgsl_cffdump_memory_base(device,
adreno_dev->gmem_size + KGSL_CFF_GMEM_OFFSET,
0xFFFFFFFF -
(adreno_dev->gmem_size + KGSL_CFF_GMEM_OFFSET),
adreno_dev->gmem_size);
}
void kgsl_cffdump_memory_base(struct kgsl_device *device, unsigned int base,
unsigned int range, unsigned gmemsize)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(device->id, CFF_OP_MEMORY_BASE, base,
range, gmemsize, 0, 0);
}
void kgsl_cffdump_hang(struct kgsl_device *device)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(device->id, CFF_OP_HANG, 0, 0, 0, 0, 0);
}
void kgsl_cffdump_close(struct kgsl_device *device)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(device->id, CFF_OP_EOF, 0, 0, 0, 0, 0);
}
void kgsl_cffdump_user_event(struct kgsl_device *device,
unsigned int cff_opcode, unsigned int op1,
unsigned int op2, unsigned int op3,
unsigned int op4, unsigned int op5)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(-1, cff_opcode, op1, op2, op3, op4, op5);
}
void kgsl_cffdump_memcpy(struct kgsl_device *device,
unsigned int gpuaddr, unsigned int *ptr, size_t sizebytes)
{
int i;
if (!device || !device->cff_dump_enable)
return;
for (i = 0; i < ALIGN(sizebytes, 4) / 4; gpuaddr += 4, ptr++, i++)
kgsl_cffdump_write(device, gpuaddr, *ptr);
}
void kgsl_cffdump_syncmem(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, uint gpuaddr,
size_t sizebytes, bool clean_cache)
{
unsigned int *src;
if (!device || !device->cff_dump_enable)
return;
if (!memdesc)
return;
total_syncmem += sizebytes;
src = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR(
"cffdump: no kernel mapping for GPU address 0x%08X\n",
gpuaddr);
return;
}
if (clean_cache) {
/* Ensure that this memory region is not read from the
* cache but fetched fresh */
mb();
kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
gpuaddr - memdesc->gpuaddr,
sizebytes,
KGSL_CACHE_OP_INV);
}
kgsl_cffdump_memcpy(device, gpuaddr, src, sizebytes);
/* Unmap memory since kgsl_gpuaddr_to_vaddr was called */
kgsl_memdesc_unmap(memdesc);
}
void kgsl_cffdump_memset(struct kgsl_device *device,
unsigned int gpuaddr, unsigned char ch, size_t sizebytes)
{
int i;
if (!device || !device->cff_dump_enable)
return;
/* Expand the input char into a dword and output it */
for (i = 0; i < ALIGN(sizebytes, 4) / 4; gpuaddr += 4, i++)
kgsl_cffdump_write(device, gpuaddr,
(ch << 24) | (ch << 16) | (ch << 8) | ch);
}
void kgsl_cffdump_regwrite(struct kgsl_device *device, uint addr,
uint value)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(device->id, CFF_OP_WRITE_REG, addr, value,
0, 0, 0);
}
void kgsl_cffdump_regpoll(struct kgsl_device *device, uint addr,
uint value, uint mask)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(device->id, CFF_OP_POLL_REG, addr, value,
mask, 0, 0);
}
void kgsl_cffdump_slavewrite(struct kgsl_device *device, uint addr, uint value)
{
if (!device->cff_dump_enable)
return;
kgsl_cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0, 0, 0);
}
int kgsl_cffdump_waitirq(struct kgsl_device *device)
{
if (!device->cff_dump_enable)
return 0;
kgsl_cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0, 0, 0);
return 1;
}
EXPORT_SYMBOL(kgsl_cffdump_waitirq);
static int subbuf_start_handler(struct rchan_buf *buf,
void *subbuf, void *prev_subbuf, size_t prev_padding)
{
pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
"=%p, prev_padding=%08zx)\n", subbuf, prev_subbuf,
prev_padding);
if (relay_buf_full(buf)) {
if (!suspended) {
suspended = 1;
pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
smp_processor_id());
}
dropped++;
return 0;
} else if (suspended) {
suspended = 0;
pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
smp_processor_id());
}
subbuf_start_reserve(buf, 0);
return 1;
}
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent, unsigned short mode, struct rchan_buf *buf,
int *is_global)
{
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_handler(struct dentry *dentry)
{
pr_info("kgsl: cffdump: %s()\n", __func__);
debugfs_remove(dentry);
return 0;
}
/*
* relay callbacks
*/
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_handler,
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
/**
* create_channel - creates channel /debug/kgsl/cff/cpuXXX
*
* Creates channel along with associated produced/consumed control files
*
* Returns channel on success, NULL otherwise
*/
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
{
struct rchan *chan;
pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
"n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
chan = relay_open("cpu", dir, subbuf_size,
n_subbufs, &relay_callbacks, NULL);
if (!chan) {
KGSL_CORE_ERR("relay_open failed\n");
return NULL;
}
suspended = 0;
dropped = 0;
return chan;
}
/**
* destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
*
* Destroys channel along with associated produced/consumed control files
*/
static void destroy_channel(void)
{
pr_info("kgsl: cffdump: relay: destroy_channel\n");
if (chan) {
relay_close(chan);
chan = NULL;
}
}
int kgsl_cff_dump_enable_set(void *data, u64 val)
{
int ret = 0;
struct kgsl_device *device = (struct kgsl_device *)data;
int i;
mutex_lock(&kgsl_driver.devlock);
if (val) {
/* Check if CFF is on for some other device already */
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
if (kgsl_driver.devp[i]) {
struct kgsl_device *device_temp =
kgsl_driver.devp[i];
if (device_temp->cff_dump_enable &&
device != device_temp) {
KGSL_CORE_ERR(
"CFF is on for another device %d\n",
device_temp->id);
ret = -EINVAL;
goto done;
}
}
}
if (!device->cff_dump_enable) {
device->cff_dump_enable = 1;
/*
* force device to slumber so that we ensure that the
* start opcode in CFF is present
*/
mutex_lock(&device->mutex);
ret = kgsl_pwrctrl_change_state(device,
KGSL_STATE_SUSPEND);
ret |= kgsl_pwrctrl_change_state(device,
KGSL_STATE_SLUMBER);
if (ret)
device->cff_dump_enable = 0;
mutex_unlock(&device->mutex);
}
} else if (device->cff_dump_enable && !val) {
device->cff_dump_enable = 0;
}
done:
mutex_unlock(&kgsl_driver.devlock);
return ret;
}
EXPORT_SYMBOL(kgsl_cff_dump_enable_set);
int kgsl_cff_dump_enable_get(void *data, u64 *val)
{
struct kgsl_device *device = (struct kgsl_device *)data;
*val = device->cff_dump_enable;
return 0;
}
EXPORT_SYMBOL(kgsl_cff_dump_enable_get);
/*
* kgsl_cffdump_capture_adreno_ib_cff() - Capture CFF for an IB
* @device: Device for which CFF is to be captured
* @ptbase: The pagetable in which the IB is mapped
* @gpuaddr: Address of IB
* @dwords: Size of the IB
*
* Dumps the CFF format of the IB including all objects in it like, IB2,
* shaders, etc.
*
* Returns 0 on success else error code
*/
static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device,
struct kgsl_process_private *process,
unsigned int gpuaddr, unsigned int dwords)
{
int ret;
struct adreno_ib_object_list *ib_obj_list;
struct adreno_ib_object *ib_obj;
int i;
if (!device->cff_dump_enable)
return 0;
ret = adreno_ib_create_object_list(device, process, gpuaddr, dwords,
&ib_obj_list);
if (ret) {
KGSL_DRV_ERR(device,
"Fail to create object list for IB %x, size(dwords) %x\n",
gpuaddr, dwords);
return ret;
}
for (i = 0; i < ib_obj_list->num_objs; i++) {
ib_obj = &(ib_obj_list->obj_list[i]);
kgsl_cffdump_syncmem(device, &(ib_obj->entry->memdesc),
ib_obj->gpuaddr, ib_obj->size, false);
}
adreno_ib_destroy_obj_list(ib_obj_list);
return 0;
}
/*
* kgsl_cffdump_capture_ib_desc() - Capture CFF for a list of IB's
* @device: Device for which CFF is to be captured
* @context: The context under which the IB list executes on device
* @ibdesc: The IB list
* @numibs: Number of IB's in ibdesc
*
* Returns 0 on success else error code
*/
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
struct kgsl_cmdbatch *cmdbatch)
{
int ret = 0;
struct kgsl_memobj_node *ib;
if (!device->cff_dump_enable)
return 0;
/* Dump CFF for IB and all objects in it */
list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
ret = kgsl_cffdump_capture_adreno_ib_cff(
device, context->proc_priv, ib->gpuaddr,
ib->sizedwords);
if (ret) {
KGSL_DRV_ERR(device,
"Fail cff capture, IB %lx, size %zx\n",
ib->gpuaddr,
ib->sizedwords << 2);
break;
}
}
return ret;
}
EXPORT_SYMBOL(kgsl_cffdump_capture_ib_desc);
DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
kgsl_cff_dump_enable_set, "%llu\n");
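/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write fops that back
 * the "cff_dump" debugfs file created below: reads report the enable state
 * via kgsl_cff_dump_enable_get() and writes parse a "%llu" value into
 * kgsl_cff_dump_enable_set(). Assuming debugfs is mounted at
 * /sys/kernel/debug and a kgsl-3d0 device directory exists,
 * "echo 1 > /sys/kernel/debug/kgsl/kgsl-3d0/cff_dump" would enable CFF
 * capture for that device.
 */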
void kgsl_cffdump_debugfs_create(struct kgsl_device *device)
{
debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
&kgsl_cff_dump_enable_fops);
}
| gpl-2.0 |
gautamMalu/linux-samsung-arndale-xen | drivers/net/wireless/hostap/hostap_ap.c | 812 | 88514 | /*
* Intersil Prism2 driver with Host AP (software access point) support
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <j@w1.fi>
* Copyright (c) 2002-2005, Jouni Malinen <j@w1.fi>
*
* This file is to be included into hostap.c when S/W AP functionality is
* compiled.
*
* AP: FIX:
* - if unicast Class 2 (assoc,reassoc,disassoc) frame received from
* unauthenticated STA, send deauth. frame (8802.11: 5.5)
* - if unicast Class 3 (data with to/from DS,deauth,pspoll) frame received
* from authenticated, but unassoc STA, send disassoc frame (8802.11: 5.5)
* - if unicast Class 3 received from unauthenticated STA, send deauth. frame
* (8802.11: 5.5)
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/etherdevice.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
DEF_INTS };
module_param_array(other_ap_policy, int, NULL, 0444);
MODULE_PARM_DESC(other_ap_policy, "Other AP beacon monitoring policy (0-3)");
static int ap_max_inactivity[MAX_PARM_DEVICES] = { AP_MAX_INACTIVITY_SEC,
DEF_INTS };
module_param_array(ap_max_inactivity, int, NULL, 0444);
MODULE_PARM_DESC(ap_max_inactivity, "AP timeout (in seconds) for station "
"inactivity");
static int ap_bridge_packets[MAX_PARM_DEVICES] = { 1, DEF_INTS };
module_param_array(ap_bridge_packets, int, NULL, 0444);
MODULE_PARM_DESC(ap_bridge_packets, "Bridge packets directly between "
"stations");
static int autom_ap_wds[MAX_PARM_DEVICES] = { 0, DEF_INTS };
module_param_array(autom_ap_wds, int, NULL, 0444);
MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
"automatically");
static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
static void hostap_event_expired_sta(struct net_device *dev,
struct sta_info *sta);
static void handle_add_proc_queue(struct work_struct *work);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
static void handle_wds_oper_queue(struct work_struct *work);
static void prism2_send_mgmt(struct net_device *dev,
u16 type_subtype, char *body,
int body_len, u8 *addr, u16 tx_cb_idx);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
#ifndef PRISM2_NO_PROCFS_DEBUG
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
struct ap_data *ap = m->private;
seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
seq_printf(m, "max_inactivity=%u\n", ap->max_inactivity / HZ);
seq_printf(m, "bridge_packets=%u\n", ap->bridge_packets);
seq_printf(m, "nullfunc_ack=%u\n", ap->nullfunc_ack);
seq_printf(m, "autom_ap_wds=%u\n", ap->autom_ap_wds);
seq_printf(m, "auth_algs=%u\n", ap->local->auth_algs);
seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
return 0;
}
static int ap_debug_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, ap_debug_proc_show, PDE_DATA(inode));
}
static const struct file_operations ap_debug_proc_fops = {
.open = ap_debug_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* PRISM2_NO_PROCFS_DEBUG */
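/* The STA table is a simple chained hash: sta_hash[] is indexed by
 * STA_HASH(addr) and collisions are linked through sta->hnext. Callers of
 * the two helpers below generally hold ap->sta_table_lock (teardown in
 * hostap_free_data runs without it). */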
static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
{
sta->hnext = ap->sta_hash[STA_HASH(sta->addr)];
ap->sta_hash[STA_HASH(sta->addr)] = sta;
}
static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
{
struct sta_info *s;
s = ap->sta_hash[STA_HASH(sta->addr)];
if (s == NULL) return;
if (ether_addr_equal(s->addr, sta->addr)) {
ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
return;
}
while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr))
s = s->hnext;
if (s->hnext != NULL)
s->hnext = s->hnext->hnext;
else
printk("AP: could not remove STA %pM from hash table\n",
sta->addr);
}
static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
{
if (sta->ap && sta->local)
hostap_event_expired_sta(sta->local->dev, sta);
if (ap->proc != NULL) {
char name[20];
sprintf(name, "%pM", sta->addr);
remove_proc_entry(name, ap->proc);
}
if (sta->crypt) {
sta->crypt->ops->deinit(sta->crypt->priv);
kfree(sta->crypt);
sta->crypt = NULL;
}
skb_queue_purge(&sta->tx_buf);
ap->num_sta--;
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (sta->aid > 0)
ap->sta_aid[sta->aid - 1] = NULL;
if (!sta->ap && sta->u.sta.challenge)
kfree(sta->u.sta.challenge);
del_timer_sync(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
kfree(sta);
}
static void hostap_set_tim(local_info_t *local, int aid, int set)
{
if (local->func->set_tim)
local->func->set_tim(local->dev, aid, set);
}
static void hostap_event_new_sta(struct net_device *dev, struct sta_info *sta)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof(wrqu));
memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
wrqu.addr.sa_family = ARPHRD_ETHER;
wireless_send_event(dev, IWEVREGISTERED, &wrqu, NULL);
}
static void hostap_event_expired_sta(struct net_device *dev,
struct sta_info *sta)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof(wrqu));
memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
wrqu.addr.sa_family = ARPHRD_ETHER;
wireless_send_event(dev, IWEVEXPIRED, &wrqu, NULL);
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
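/* Per-STA inactivity state machine driven by sta->timer:
 * STA_NULLFUNC - poll the STA with a data frame and wait for the ACK
 * (hostap_ap_tx_cb_poll clears WLAN_STA_PENDING_POLL on success);
 * STA_DISASSOC - poll was not ACKed, send a disassociation frame;
 * STA_DEAUTH - still silent, send a deauthentication frame and free the
 * entry (unless it is marked WLAN_STA_PERM). Any received frame resets the
 * state back to STA_NULLFUNC via sta->last_rx. */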
static void ap_handle_timer(unsigned long data)
{
struct sta_info *sta = (struct sta_info *) data;
local_info_t *local;
struct ap_data *ap;
unsigned long next_time = 0;
int was_assoc;
if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) {
PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n");
return;
}
local = sta->local;
ap = local->ap;
was_assoc = sta->flags & WLAN_STA_ASSOC;
if (atomic_read(&sta->users) != 0)
next_time = jiffies + HZ;
else if ((sta->flags & WLAN_STA_PERM) && !(sta->flags & WLAN_STA_AUTH))
next_time = jiffies + ap->max_inactivity;
if (time_before(jiffies, sta->last_rx + ap->max_inactivity)) {
/* station activity detected; reset timeout state */
sta->timeout_next = STA_NULLFUNC;
next_time = sta->last_rx + ap->max_inactivity;
} else if (sta->timeout_next == STA_DISASSOC &&
!(sta->flags & WLAN_STA_PENDING_POLL)) {
/* STA ACKed data nullfunc frame poll */
sta->timeout_next = STA_NULLFUNC;
next_time = jiffies + ap->max_inactivity;
}
if (next_time) {
sta->timer.expires = next_time;
add_timer(&sta->timer);
return;
}
if (sta->ap)
sta->timeout_next = STA_DEAUTH;
if (sta->timeout_next == STA_DEAUTH && !(sta->flags & WLAN_STA_PERM)) {
spin_lock(&ap->sta_table_lock);
ap_sta_hash_del(ap, sta);
list_del(&sta->list);
spin_unlock(&ap->sta_table_lock);
sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
} else if (sta->timeout_next == STA_DISASSOC)
sta->flags &= ~WLAN_STA_ASSOC;
if (was_assoc && !(sta->flags & WLAN_STA_ASSOC) && !sta->ap)
hostap_event_expired_sta(local->dev, sta);
if (sta->timeout_next == STA_DEAUTH && sta->aid > 0 &&
!skb_queue_empty(&sta->tx_buf)) {
hostap_set_tim(local, sta->aid, 0);
sta->flags &= ~WLAN_STA_TIM;
}
if (sta->ap) {
if (ap->autom_ap_wds) {
PDEBUG(DEBUG_AP, "%s: removing automatic WDS "
"connection to AP %pM\n",
local->dev->name, sta->addr);
hostap_wds_link_oper(local, sta->addr, WDS_DEL);
}
} else if (sta->timeout_next == STA_NULLFUNC) {
/* send data frame to poll STA and check whether this frame
* is ACKed */
/* FIX: IEEE80211_STYPE_NULLFUNC would be more appropriate, but
* it is apparently not retried so TX Exc events are not
* received for it */
sta->flags |= WLAN_STA_PENDING_POLL;
prism2_send_mgmt(local->dev, IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_DATA, NULL, 0,
sta->addr, ap->tx_callback_poll);
} else {
int deauth = sta->timeout_next == STA_DEAUTH;
__le16 resp;
PDEBUG(DEBUG_AP, "%s: sending %s info to STA %pM"
"(last=%lu, jiffies=%lu)\n",
local->dev->name,
deauth ? "deauthentication" : "disassociation",
sta->addr, sta->last_rx, jiffies);
resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID :
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
prism2_send_mgmt(local->dev, IEEE80211_FTYPE_MGMT |
(deauth ? IEEE80211_STYPE_DEAUTH :
IEEE80211_STYPE_DISASSOC),
(char *) &resp, 2, sta->addr, 0);
}
if (sta->timeout_next == STA_DEAUTH) {
if (sta->flags & WLAN_STA_PERM) {
PDEBUG(DEBUG_AP, "%s: STA %pM"
" would have been removed, "
"but it has 'perm' flag\n",
local->dev->name, sta->addr);
} else
ap_free_sta(ap, sta);
return;
}
if (sta->timeout_next == STA_NULLFUNC) {
sta->timeout_next = STA_DISASSOC;
sta->timer.expires = jiffies + AP_DISASSOC_DELAY;
} else {
sta->timeout_next = STA_DEAUTH;
sta->timer.expires = jiffies + AP_DEAUTH_DELAY;
}
add_timer(&sta->timer);
}
void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
int resend)
{
u8 addr[ETH_ALEN];
__le16 resp;
int i;
PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
memset(addr, 0xff, ETH_ALEN);
resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
/* deauth message sent; try to resend it a few times; the message is
 * broadcast, so it may be delayed until the next DTIM; there is not much
 * else we can do at this point since the driver is going to be shut
 * down */
for (i = 0; i < 5; i++) {
prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH,
(char *) &resp, 2, addr, 0);
if (!resend || ap->num_sta <= 0)
return;
mdelay(50);
}
}
static int ap_control_proc_show(struct seq_file *m, void *v)
{
struct ap_data *ap = m->private;
char *policy_txt;
struct mac_entry *entry;
if (v == SEQ_START_TOKEN) {
switch (ap->mac_restrictions.policy) {
case MAC_POLICY_OPEN:
policy_txt = "open";
break;
case MAC_POLICY_ALLOW:
policy_txt = "allow";
break;
case MAC_POLICY_DENY:
policy_txt = "deny";
break;
default:
policy_txt = "unknown";
break;
}
seq_printf(m, "MAC policy: %s\n", policy_txt);
seq_printf(m, "MAC entries: %u\n", ap->mac_restrictions.entries);
seq_puts(m, "MAC list:\n");
return 0;
}
entry = v;
seq_printf(m, "%pM\n", entry->addr);
return 0;
}
static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
{
struct ap_data *ap = m->private;
spin_lock_bh(&ap->mac_restrictions.lock);
return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
}
static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
struct ap_data *ap = m->private;
return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
}
static void ap_control_proc_stop(struct seq_file *m, void *v)
{
struct ap_data *ap = m->private;
spin_unlock_bh(&ap->mac_restrictions.lock);
}
static const struct seq_operations ap_control_proc_seqops = {
.start = ap_control_proc_start,
.next = ap_control_proc_next,
.stop = ap_control_proc_stop,
.show = ap_control_proc_show,
};
static int ap_control_proc_open(struct inode *inode, struct file *file)
{
int ret = seq_open(file, &ap_control_proc_seqops);
if (ret == 0) {
struct seq_file *m = file->private_data;
m->private = PDE_DATA(inode);
}
return ret;
}
static const struct file_operations ap_control_proc_fops = {
.open = ap_control_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
struct mac_entry *entry;
entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL);
if (entry == NULL)
return -ENOMEM;
memcpy(entry->addr, mac, ETH_ALEN);
spin_lock_bh(&mac_restrictions->lock);
list_add_tail(&entry->list, &mac_restrictions->mac_list);
mac_restrictions->entries++;
spin_unlock_bh(&mac_restrictions->lock);
return 0;
}
int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
struct list_head *ptr;
struct mac_entry *entry;
spin_lock_bh(&mac_restrictions->lock);
for (ptr = mac_restrictions->mac_list.next;
ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
entry = list_entry(ptr, struct mac_entry, list);
if (ether_addr_equal(entry->addr, mac)) {
list_del(ptr);
kfree(entry);
mac_restrictions->entries--;
spin_unlock_bh(&mac_restrictions->lock);
return 0;
}
}
spin_unlock_bh(&mac_restrictions->lock);
return -1;
}
static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
u8 *mac)
{
struct mac_entry *entry;
int found = 0;
if (mac_restrictions->policy == MAC_POLICY_OPEN)
return 0;
spin_lock_bh(&mac_restrictions->lock);
list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
if (ether_addr_equal(entry->addr, mac)) {
found = 1;
break;
}
}
spin_unlock_bh(&mac_restrictions->lock);
if (mac_restrictions->policy == MAC_POLICY_ALLOW)
return !found;
else
return found;
}
void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
{
struct list_head *ptr, *n;
struct mac_entry *entry;
if (mac_restrictions->entries == 0)
return;
spin_lock_bh(&mac_restrictions->lock);
for (ptr = mac_restrictions->mac_list.next, n = ptr->next;
ptr != &mac_restrictions->mac_list;
ptr = n, n = ptr->next) {
entry = list_entry(ptr, struct mac_entry, list);
list_del(ptr);
kfree(entry);
}
mac_restrictions->entries = 0;
spin_unlock_bh(&mac_restrictions->lock);
}
int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
{
struct sta_info *sta;
__le16 resp;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, mac);
if (sta) {
ap_sta_hash_del(ap, sta);
list_del(&sta->list);
}
spin_unlock_bh(&ap->sta_table_lock);
if (!sta)
return -EINVAL;
resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH,
(char *) &resp, 2, sta->addr, 0);
if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
hostap_event_expired_sta(dev, sta);
ap_free_sta(ap, sta);
return 0;
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
void ap_control_kickall(struct ap_data *ap)
{
struct list_head *ptr, *n;
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list;
ptr = n, n = ptr->next) {
sta = list_entry(ptr, struct sta_info, list);
ap_sta_hash_del(ap, sta);
list_del(&sta->list);
if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
hostap_event_expired_sta(sta->local->dev, sta);
ap_free_sta(ap, sta);
}
spin_unlock_bh(&ap->sta_table_lock);
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
static int prism2_ap_proc_show(struct seq_file *m, void *v)
{
struct sta_info *sta = v;
int i;
if (v == SEQ_START_TOKEN) {
seq_printf(m, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
return 0;
}
if (!sta->ap)
return 0;
seq_printf(m, "%pM %d %d %d %d '",
sta->addr,
sta->u.ap.channel, sta->last_rx_signal,
sta->last_rx_silence, sta->last_rx_rate);
for (i = 0; i < sta->u.ap.ssid_len; i++) {
if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127)
seq_putc(m, sta->u.ap.ssid[i]);
else
seq_printf(m, "<%02x>", sta->u.ap.ssid[i]);
}
seq_putc(m, '\'');
if (sta->capability & WLAN_CAPABILITY_ESS)
seq_puts(m, " [ESS]");
if (sta->capability & WLAN_CAPABILITY_IBSS)
seq_puts(m, " [IBSS]");
if (sta->capability & WLAN_CAPABILITY_PRIVACY)
seq_puts(m, " [WEP]");
seq_putc(m, '\n');
return 0;
}
static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
{
struct ap_data *ap = m->private;
spin_lock_bh(&ap->sta_table_lock);
return seq_list_start_head(&ap->sta_list, *_pos);
}
static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
struct ap_data *ap = m->private;
return seq_list_next(v, &ap->sta_list, _pos);
}
static void prism2_ap_proc_stop(struct seq_file *m, void *v)
{
struct ap_data *ap = m->private;
spin_unlock_bh(&ap->sta_table_lock);
}
static const struct seq_operations prism2_ap_proc_seqops = {
.start = prism2_ap_proc_start,
.next = prism2_ap_proc_next,
.stop = prism2_ap_proc_stop,
.show = prism2_ap_proc_show,
};
static int prism2_ap_proc_open(struct inode *inode, struct file *file)
{
int ret = seq_open(file, &prism2_ap_proc_seqops);
if (ret == 0) {
struct seq_file *m = file->private_data;
m->private = PDE_DATA(inode);
}
return ret;
}
static const struct file_operations prism2_ap_proc_fops = {
.open = prism2_ap_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver)
{
if (!ap)
return;
if (sta_fw_ver == PRISM2_FW_VER(0,8,0)) {
PDEBUG(DEBUG_AP, "Using data::nullfunc ACK workaround - "
"firmware upgrade recommended\n");
ap->nullfunc_ack = 1;
} else
ap->nullfunc_ack = 0;
if (sta_fw_ver == PRISM2_FW_VER(1,4,2)) {
printk(KERN_WARNING "%s: Warning: secondary station firmware "
"version 1.4.2 does not seem to work in Host AP mode\n",
ap->local->dev->name);
}
}
/* Called only as a tasklet (software IRQ) */
static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
struct ieee80211_hdr *hdr;
if (!ap->local->hostapd || !ap->local->apdev) {
dev_kfree_skb(skb);
return;
}
/* Pass the TX callback frame to the hostapd; use 802.11 header version
* 1 to indicate failure (no ACK) and 2 success (frame ACKed) */
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_VERS);
hdr->frame_control |= cpu_to_le16(ok ? BIT(1) : BIT(0));
skb->dev = ap->local->apdev;
skb_pull(skb, hostap_80211_get_hdrlen(hdr->frame_control));
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = cpu_to_be16(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
/* Called only as a tasklet (software IRQ) */
static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
struct net_device *dev = ap->local->dev;
struct ieee80211_hdr *hdr;
u16 auth_alg, auth_transaction, status;
__le16 *pos;
struct sta_info *sta = NULL;
char *txt = NULL;
if (ap->local->hostapd) {
dev_kfree_skb(skb);
return;
}
hdr = (struct ieee80211_hdr *) skb->data;
if (!ieee80211_is_auth(hdr->frame_control) ||
skb->len < IEEE80211_MGMT_HDR_LEN + 6) {
printk(KERN_DEBUG "%s: hostap_ap_tx_cb_auth received invalid "
"frame\n", dev->name);
dev_kfree_skb(skb);
return;
}
pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
auth_alg = le16_to_cpu(*pos++);
auth_transaction = le16_to_cpu(*pos++);
status = le16_to_cpu(*pos++);
if (!ok) {
txt = "frame was not ACKed";
goto done;
}
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, hdr->addr1);
if (sta)
atomic_inc(&sta->users);
spin_unlock(&ap->sta_table_lock);
if (!sta) {
txt = "STA not found";
goto done;
}
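/* Transaction 2 (open system) and 4 (shared key) are the final AP-to-STA
 * frames of the respective exchanges, so an ACKed frame with a success
 * status completes authentication. */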
if (status == WLAN_STATUS_SUCCESS &&
((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 2) ||
(auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 4))) {
txt = "STA authenticated";
sta->flags |= WLAN_STA_AUTH;
sta->last_auth = jiffies;
} else if (status != WLAN_STATUS_SUCCESS)
txt = "authentication failed";
done:
if (sta)
atomic_dec(&sta->users);
if (txt) {
PDEBUG(DEBUG_AP, "%s: %pM auth_cb - alg=%d "
"trans#=%d status=%d - %s\n",
dev->name, hdr->addr1,
auth_alg, auth_transaction, status, txt);
}
dev_kfree_skb(skb);
}
/* Called only as a tasklet (software IRQ) */
static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
struct net_device *dev = ap->local->dev;
struct ieee80211_hdr *hdr;
u16 status;
__le16 *pos;
struct sta_info *sta = NULL;
char *txt = NULL;
if (ap->local->hostapd) {
dev_kfree_skb(skb);
return;
}
hdr = (struct ieee80211_hdr *) skb->data;
if ((!ieee80211_is_assoc_resp(hdr->frame_control) &&
!ieee80211_is_reassoc_resp(hdr->frame_control)) ||
skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
printk(KERN_DEBUG "%s: hostap_ap_tx_cb_assoc received invalid "
"frame\n", dev->name);
dev_kfree_skb(skb);
return;
}
if (!ok) {
txt = "frame was not ACKed";
goto done;
}
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, hdr->addr1);
if (sta)
atomic_inc(&sta->users);
spin_unlock(&ap->sta_table_lock);
if (!sta) {
txt = "STA not found";
goto done;
}
pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
pos++;
status = le16_to_cpu(*pos++);
if (status == WLAN_STATUS_SUCCESS) {
if (!(sta->flags & WLAN_STA_ASSOC))
hostap_event_new_sta(dev, sta);
txt = "STA associated";
sta->flags |= WLAN_STA_ASSOC;
sta->last_assoc = jiffies;
} else
txt = "association failed";
done:
if (sta)
atomic_dec(&sta->users);
if (txt) {
PDEBUG(DEBUG_AP, "%s: %pM assoc_cb - %s\n",
dev->name, hdr->addr1, txt);
}
dev_kfree_skb(skb);
}
/* Called only as a tasklet (software IRQ); TX callback for poll frames used
* in verifying whether the STA is still present. */
static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
struct ieee80211_hdr *hdr;
struct sta_info *sta;
if (skb->len < 24)
goto fail;
hdr = (struct ieee80211_hdr *) skb->data;
if (ok) {
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, hdr->addr1);
if (sta)
sta->flags &= ~WLAN_STA_PENDING_POLL;
spin_unlock(&ap->sta_table_lock);
} else {
PDEBUG(DEBUG_AP,
"%s: STA %pM did not ACK activity poll frame\n",
ap->local->dev->name, hdr->addr1);
}
fail:
dev_kfree_skb(skb);
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
void hostap_init_data(local_info_t *local)
{
struct ap_data *ap = local->ap;
if (ap == NULL) {
printk(KERN_WARNING "hostap_init_data: ap == NULL\n");
return;
}
memset(ap, 0, sizeof(struct ap_data));
ap->local = local;
ap->ap_policy = GET_INT_PARM(other_ap_policy, local->card_idx);
ap->bridge_packets = GET_INT_PARM(ap_bridge_packets, local->card_idx);
ap->max_inactivity =
GET_INT_PARM(ap_max_inactivity, local->card_idx) * HZ;
ap->autom_ap_wds = GET_INT_PARM(autom_ap_wds, local->card_idx);
spin_lock_init(&ap->sta_table_lock);
INIT_LIST_HEAD(&ap->sta_list);
/* Initialize task queue structure for AP management */
INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
ap->tx_callback_idx =
hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
if (ap->tx_callback_idx == 0)
printk(KERN_WARNING "%s: failed to register TX callback for "
"AP\n", local->dev->name);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
ap->tx_callback_auth =
hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
ap->tx_callback_assoc =
hostap_tx_callback_register(local, hostap_ap_tx_cb_assoc, ap);
ap->tx_callback_poll =
hostap_tx_callback_register(local, hostap_ap_tx_cb_poll, ap);
if (ap->tx_callback_auth == 0 || ap->tx_callback_assoc == 0 ||
ap->tx_callback_poll == 0)
printk(KERN_WARNING "%s: failed to register TX callback for "
"AP\n", local->dev->name);
spin_lock_init(&ap->mac_restrictions.lock);
INIT_LIST_HEAD(&ap->mac_restrictions.mac_list);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
ap->initialized = 1;
}
void hostap_init_ap_proc(local_info_t *local)
{
struct ap_data *ap = local->ap;
ap->proc = local->proc;
if (ap->proc == NULL)
return;
#ifndef PRISM2_NO_PROCFS_DEBUG
proc_create_data("ap_debug", 0, ap->proc, &ap_debug_proc_fops, ap);
#endif /* PRISM2_NO_PROCFS_DEBUG */
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
proc_create_data("ap_control", 0, ap->proc, &ap_control_proc_fops, ap);
proc_create_data("ap", 0, ap->proc, &prism2_ap_proc_fops, ap);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
}
void hostap_free_data(struct ap_data *ap)
{
struct sta_info *n, *sta;
if (ap == NULL || !ap->initialized) {
printk(KERN_DEBUG "hostap_free_data: ap has not yet been "
"initialized - skip resource freeing\n");
return;
}
flush_work(&ap->add_sta_proc_queue);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
flush_work(&ap->wds_oper_queue);
if (ap->crypt)
ap->crypt->deinit(ap->crypt_priv);
ap->crypt = ap->crypt_priv = NULL;
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
list_for_each_entry_safe(sta, n, &ap->sta_list, list) {
ap_sta_hash_del(ap, sta);
list_del(&sta->list);
if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
hostap_event_expired_sta(sta->local->dev, sta);
ap_free_sta(ap, sta);
}
#ifndef PRISM2_NO_PROCFS_DEBUG
if (ap->proc != NULL) {
remove_proc_entry("ap_debug", ap->proc);
}
#endif /* PRISM2_NO_PROCFS_DEBUG */
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (ap->proc != NULL) {
remove_proc_entry("ap", ap->proc);
remove_proc_entry("ap_control", ap->proc);
}
ap_control_flush_macs(&ap->mac_restrictions);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
ap->initialized = 0;
}
/* caller should have mutex for AP STA list handling */
static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
{
struct sta_info *s;
s = ap->sta_hash[STA_HASH(sta)];
while (s != NULL && !ether_addr_equal(s->addr, sta))
s = s->hnext;
return s;
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
/* Called from timer handler and from scheduled AP queue handlers */
static void prism2_send_mgmt(struct net_device *dev,
u16 type_subtype, char *body,
int body_len, u8 *addr, u16 tx_cb_idx)
{
struct hostap_interface *iface;
local_info_t *local;
struct ieee80211_hdr *hdr;
u16 fc;
struct sk_buff *skb;
struct hostap_skb_tx_data *meta;
int hdrlen;
iface = netdev_priv(dev);
local = iface->local;
dev = local->dev; /* always use master radio device */
iface = netdev_priv(dev);
if (!(dev->flags & IFF_UP)) {
PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt - device is not UP - "
"cannot send frame\n", dev->name);
return;
}
skb = dev_alloc_skb(sizeof(*hdr) + body_len);
if (skb == NULL) {
PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt failed to allocate "
"skb\n", dev->name);
return;
}
fc = type_subtype;
hdrlen = hostap_80211_get_hdrlen(cpu_to_le16(type_subtype));
hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen);
if (body)
memcpy(skb_put(skb, body_len), body, body_len);
memset(hdr, 0, hdrlen);
/* FIX: ctrl::ack sending used special HFA384X_TX_CTRL_802_11
* tx_control instead of using local->tx_control */
memcpy(hdr->addr1, addr, ETH_ALEN); /* DA / RA */
if (ieee80211_is_data(hdr->frame_control)) {
fc |= IEEE80211_FCTL_FROMDS;
memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* BSSID */
memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
} else if (ieee80211_is_ctl(hdr->frame_control)) {
/* control:ACK does not have addr2 or addr3 */
memset(hdr->addr2, 0, ETH_ALEN);
memset(hdr->addr3, 0, ETH_ALEN);
} else {
memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
}
hdr->frame_control = cpu_to_le16(fc);
meta = (struct hostap_skb_tx_data *) skb->cb;
memset(meta, 0, sizeof(*meta));
meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
meta->iface = iface;
meta->tx_cb_idx = tx_cb_idx;
skb->dev = dev;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
static int prism2_sta_proc_show(struct seq_file *m, void *v)
{
struct sta_info *sta = m->private;
int i;
/* FIX: possible race condition: the STA data could have just expired,
 * but the proc entry was still here so the read could have started;
 * some locking should be done here. */
seq_printf(m,
"%s=%pM\nusers=%d\naid=%d\n"
"flags=0x%04x%s%s%s%s%s%s%s\n"
"capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
sta->ap ? "AP" : "STA",
sta->addr, atomic_read(&sta->users), sta->aid,
sta->flags,
sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
sta->flags & WLAN_STA_PS ? " PS" : "",
sta->flags & WLAN_STA_TIM ? " TIM" : "",
sta->flags & WLAN_STA_PERM ? " PERM" : "",
sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "",
sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "",
sta->capability, sta->listen_interval);
/* supported_rates: 500 kbit/s units with msb ignored */
for (i = 0; i < sizeof(sta->supported_rates); i++)
if (sta->supported_rates[i] != 0)
seq_printf(m, "%d%sMbps ",
(sta->supported_rates[i] & 0x7f) / 2,
sta->supported_rates[i] & 1 ? ".5" : "");
seq_printf(m,
"\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n"
"last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n"
"tx_packets=%lu\n"
"rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n"
"last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n"
"tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n"
"tx[11M]=%d\n"
"rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n",
jiffies, sta->last_auth, sta->last_assoc, sta->last_rx,
sta->last_tx,
sta->rx_packets, sta->tx_packets, sta->rx_bytes,
sta->tx_bytes, skb_queue_len(&sta->tx_buf),
sta->last_rx_silence,
sta->last_rx_signal, sta->last_rx_rate / 10,
sta->last_rx_rate % 10 ? ".5" : "",
sta->tx_rate, sta->tx_count[0], sta->tx_count[1],
sta->tx_count[2], sta->tx_count[3], sta->rx_count[0],
sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]);
if (sta->crypt && sta->crypt->ops && sta->crypt->ops->print_stats)
sta->crypt->ops->print_stats(m, sta->crypt->priv);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (sta->ap) {
if (sta->u.ap.channel >= 0)
seq_printf(m, "channel=%d\n", sta->u.ap.channel);
seq_puts(m, "ssid=");
for (i = 0; i < sta->u.ap.ssid_len; i++) {
if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127)
seq_putc(m, sta->u.ap.ssid[i]);
else
seq_printf(m, "<%02x>", sta->u.ap.ssid[i]);
}
seq_putc(m, '\n');
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
return 0;
}
static int prism2_sta_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, prism2_sta_proc_show, PDE_DATA(inode));
}
static const struct file_operations prism2_sta_proc_fops = {
.open = prism2_sta_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void handle_add_proc_queue(struct work_struct *work)
{
struct ap_data *ap = container_of(work, struct ap_data,
add_sta_proc_queue);
struct sta_info *sta;
char name[20];
struct add_sta_proc_data *entry, *prev;
entry = ap->add_sta_proc_entries;
ap->add_sta_proc_entries = NULL;
while (entry) {
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, entry->addr);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&ap->sta_table_lock);
if (sta) {
sprintf(name, "%pM", sta->addr);
sta->proc = proc_create_data(
name, 0, ap->proc,
&prism2_sta_proc_fops, sta);
atomic_dec(&sta->users);
}
prev = entry;
entry = entry->next;
kfree(prev);
}
}
static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
{
struct sta_info *sta;
sta = kzalloc(sizeof(struct sta_info), GFP_ATOMIC);
if (sta == NULL) {
PDEBUG(DEBUG_AP, "AP: kmalloc failed\n");
return NULL;
}
/* initialize STA info data */
sta->local = ap->local;
skb_queue_head_init(&sta->tx_buf);
memcpy(sta->addr, addr, ETH_ALEN);
atomic_inc(&sta->users);
spin_lock_bh(&ap->sta_table_lock);
list_add(&sta->list, &ap->sta_list);
ap->num_sta++;
ap_sta_hash_add(ap, sta);
spin_unlock_bh(&ap->sta_table_lock);
if (ap->proc) {
struct add_sta_proc_data *entry;
/* schedule a non-interrupt context process to add a procfs
 * entry for the STA since the procfs code uses GFP_KERNEL */
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (entry) {
memcpy(entry->addr, sta->addr, ETH_ALEN);
entry->next = ap->add_sta_proc_entries;
ap->add_sta_proc_entries = entry;
schedule_work(&ap->add_sta_proc_queue);
} else
printk(KERN_DEBUG "Failed to add STA proc data\n");
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
init_timer(&sta->timer);
sta->timer.expires = jiffies + ap->max_inactivity;
sta->timer.data = (unsigned long) sta;
sta->timer.function = ap_handle_timer;
if (!ap->local->hostapd)
add_timer(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
return sta;
}
static int ap_tx_rate_ok(int rateidx, struct sta_info *sta,
local_info_t *local)
{
if (rateidx > sta->tx_max_rate ||
!(sta->tx_supp_rates & (1 << rateidx)))
return 0;
if (local->tx_rate_control != 0 &&
!(local->tx_rate_control & (1 << rateidx)))
return 0;
return 1;
}
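/* Supported rate octets are in 500 kbit/s units with the top bit (0x80)
 * flagging a basic rate, hence the & 0x7f masks below:
 * 2 -> 1 Mbps, 4 -> 2 Mbps, 11 -> 5.5 Mbps, 22 -> 11 Mbps. */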
static void prism2_check_tx_rates(struct sta_info *sta)
{
int i;
sta->tx_supp_rates = 0;
for (i = 0; i < sizeof(sta->supported_rates); i++) {
if ((sta->supported_rates[i] & 0x7f) == 2)
sta->tx_supp_rates |= WLAN_RATE_1M;
if ((sta->supported_rates[i] & 0x7f) == 4)
sta->tx_supp_rates |= WLAN_RATE_2M;
if ((sta->supported_rates[i] & 0x7f) == 11)
sta->tx_supp_rates |= WLAN_RATE_5M5;
if ((sta->supported_rates[i] & 0x7f) == 22)
sta->tx_supp_rates |= WLAN_RATE_11M;
}
sta->tx_max_rate = sta->tx_rate = sta->tx_rate_idx = 0;
if (sta->tx_supp_rates & WLAN_RATE_1M) {
sta->tx_max_rate = 0;
if (ap_tx_rate_ok(0, sta, sta->local)) {
sta->tx_rate = 10;
sta->tx_rate_idx = 0;
}
}
if (sta->tx_supp_rates & WLAN_RATE_2M) {
sta->tx_max_rate = 1;
if (ap_tx_rate_ok(1, sta, sta->local)) {
sta->tx_rate = 20;
sta->tx_rate_idx = 1;
}
}
if (sta->tx_supp_rates & WLAN_RATE_5M5) {
sta->tx_max_rate = 2;
if (ap_tx_rate_ok(2, sta, sta->local)) {
sta->tx_rate = 55;
sta->tx_rate_idx = 2;
}
}
if (sta->tx_supp_rates & WLAN_RATE_11M) {
sta->tx_max_rate = 3;
if (ap_tx_rate_ok(3, sta, sta->local)) {
sta->tx_rate = 110;
sta->tx_rate_idx = 3;
}
}
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
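/* The AP keeps a private lib80211 WEP context with a random key purely for
 * producing shared key authentication challenges (see
 * ap_auth_make_challenge() below); it is not used for data frames. */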
static void ap_crypt_init(struct ap_data *ap)
{
ap->crypt = lib80211_get_crypto_ops("WEP");
if (ap->crypt) {
if (ap->crypt->init) {
ap->crypt_priv = ap->crypt->init(0);
if (ap->crypt_priv == NULL)
ap->crypt = NULL;
else {
u8 key[WEP_KEY_LEN];
get_random_bytes(key, WEP_KEY_LEN);
ap->crypt->set_key(key, WEP_KEY_LEN, NULL,
ap->crypt_priv);
}
}
}
if (ap->crypt == NULL) {
printk(KERN_WARNING "AP could not initialize WEP: load module "
"lib80211_crypt_wep.ko\n");
}
}
/* Generate challenge data for shared key authentication. IEEE 802.11
 * specifies that the WEP algorithm is used for generating the challenge.
 * The challenge should be unique, but otherwise there is no real need for
 * randomness. Initialize WEP with a pseudo random key and then use an
 * increasing IV to get unique challenge streams.
 *
 * Called only as a scheduled task for pending AP frames.
 */
static char * ap_auth_make_challenge(struct ap_data *ap)
{
char *tmpbuf;
struct sk_buff *skb;
if (ap->crypt == NULL) {
ap_crypt_init(ap);
if (ap->crypt == NULL)
return NULL;
}
tmpbuf = kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC);
if (tmpbuf == NULL) {
PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n");
return NULL;
}
skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
ap->crypt->extra_mpdu_prefix_len +
ap->crypt->extra_mpdu_postfix_len);
if (skb == NULL) {
kfree(tmpbuf);
return NULL;
}
skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
WLAN_AUTH_CHALLENGE_LEN);
if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
dev_kfree_skb(skb);
kfree(tmpbuf);
return NULL;
}
skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len,
tmpbuf, WLAN_AUTH_CHALLENGE_LEN);
dev_kfree_skb(skb);
return tmpbuf;
}
/* Called only as a scheduled task for pending AP frames. */
static void handle_authen(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
size_t hdrlen;
struct ap_data *ap = local->ap;
char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
int len, olen;
u16 auth_alg, auth_transaction, status_code;
__le16 *pos;
u16 resp = WLAN_STATUS_SUCCESS;
struct sta_info *sta = NULL;
struct lib80211_crypt_data *crypt;
char *txt = "";
len = skb->len - IEEE80211_MGMT_HDR_LEN;
hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);
if (len < 6) {
PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload "
"(len=%d) from %pM\n", dev->name, len, hdr->addr2);
return;
}
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&local->ap->sta_table_lock);
if (sta && sta->crypt)
crypt = sta->crypt;
else {
int idx = 0;
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
crypt = local->crypt_info.crypt[idx];
}
pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
auth_alg = __le16_to_cpu(*pos);
pos++;
auth_transaction = __le16_to_cpu(*pos);
pos++;
status_code = __le16_to_cpu(*pos);
pos++;
if (ether_addr_equal(dev->dev_addr, hdr->addr2) ||
ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
txt = "authentication denied";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
if (((local->auth_algs & PRISM2_AUTH_OPEN) &&
auth_alg == WLAN_AUTH_OPEN) ||
((local->auth_algs & PRISM2_AUTH_SHARED_KEY) &&
crypt && auth_alg == WLAN_AUTH_SHARED_KEY)) {
} else {
txt = "unsupported algorithm";
resp = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
goto fail;
}
if (len >= 8) {
u8 *u = (u8 *) pos;
if (*u == WLAN_EID_CHALLENGE) {
if (*(u + 1) != WLAN_AUTH_CHALLENGE_LEN) {
txt = "invalid challenge len";
resp = WLAN_STATUS_CHALLENGE_FAIL;
goto fail;
}
if (len - 8 < WLAN_AUTH_CHALLENGE_LEN) {
txt = "challenge underflow";
resp = WLAN_STATUS_CHALLENGE_FAIL;
goto fail;
}
challenge = (char *) (u + 2);
}
}
if (sta && sta->ap) {
if (time_after(jiffies, sta->u.ap.last_beacon +
(10 * sta->listen_interval * HZ) / 1024)) {
PDEBUG(DEBUG_AP, "%s: no beacons received for a while,"
" assuming AP %pM is now STA\n",
dev->name, sta->addr);
sta->ap = 0;
sta->flags = 0;
sta->u.sta.challenge = NULL;
} else {
txt = "AP trying to authenticate?";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
}
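/* Validate the transaction number: a STA may only initiate with
 * transaction 1 (open or shared key); transaction 3 carries the shared key
 * challenge response and is valid only when we have issued a challenge
 * (we generate replies 2 and 4 below). */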
if ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) ||
(auth_alg == WLAN_AUTH_SHARED_KEY &&
(auth_transaction == 1 ||
(auth_transaction == 3 && sta != NULL &&
sta->u.sta.challenge != NULL)))) {
} else {
txt = "unknown authentication transaction number";
resp = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto fail;
}
if (sta == NULL) {
txt = "new STA";
if (local->ap->num_sta >= MAX_STA_COUNT) {
/* FIX: might try to remove some old STAs first? */
txt = "no more room for new STAs";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
sta = ap_add_sta(local->ap, hdr->addr2);
if (sta == NULL) {
txt = "ap_add_sta failed";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
}
switch (auth_alg) {
case WLAN_AUTH_OPEN:
txt = "authOK";
/* IEEE 802.11 standard is not completely clear about
 * whether STA is considered authenticated after
 * authentication OK frame has been sent or after it
 * has been ACKed. In order to reduce interoperability
 * issues, mark the STA authenticated before ACK. */
sta->flags |= WLAN_STA_AUTH;
break;
case WLAN_AUTH_SHARED_KEY:
if (auth_transaction == 1) {
if (sta->u.sta.challenge == NULL) {
sta->u.sta.challenge =
ap_auth_make_challenge(local->ap);
if (sta->u.sta.challenge == NULL) {
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
}
} else {
if (sta->u.sta.challenge == NULL ||
challenge == NULL ||
memcmp(sta->u.sta.challenge, challenge,
WLAN_AUTH_CHALLENGE_LEN) != 0 ||
!ieee80211_has_protected(hdr->frame_control)) {
txt = "challenge response incorrect";
resp = WLAN_STATUS_CHALLENGE_FAIL;
goto fail;
}
txt = "challenge OK - authOK";
/* IEEE 802.11 standard is not completely clear about
 * whether STA is considered authenticated after
 * authentication OK frame has been sent or after it
 * has been ACKed. In order to reduce interoperability
 * issues, mark the STA authenticated before ACK. */
sta->flags |= WLAN_STA_AUTH;
kfree(sta->u.sta.challenge);
sta->u.sta.challenge = NULL;
}
break;
}
fail:
pos = (__le16 *) body;
*pos = cpu_to_le16(auth_alg);
pos++;
*pos = cpu_to_le16(auth_transaction + 1);
pos++;
*pos = cpu_to_le16(resp); /* status_code */
pos++;
olen = 6;
if (resp == WLAN_STATUS_SUCCESS && sta != NULL &&
sta->u.sta.challenge != NULL &&
auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 1) {
u8 *tmp = (u8 *) pos;
*tmp++ = WLAN_EID_CHALLENGE;
*tmp++ = WLAN_AUTH_CHALLENGE_LEN;
pos++;
memcpy(pos, sta->u.sta.challenge, WLAN_AUTH_CHALLENGE_LEN);
olen += 2 + WLAN_AUTH_CHALLENGE_LEN;
}
prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH,
body, olen, hdr->addr2, ap->tx_callback_auth);
if (sta) {
sta->last_rx = jiffies;
atomic_dec(&sta->users);
}
if (resp) {
PDEBUG(DEBUG_AP, "%s: %pM auth (alg=%d "
"trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n",
dev->name, hdr->addr2,
auth_alg, auth_transaction, status_code, len,
le16_to_cpu(hdr->frame_control), resp, txt);
}
}
/* Called only as a scheduled task for pending AP frames. */
static void handle_assoc(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats, int reassoc)
{
struct net_device *dev = local->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
char body[12], *p, *lpos;
int len, left;
__le16 *pos;
u16 resp = WLAN_STATUS_SUCCESS;
struct sta_info *sta = NULL;
int send_deauth = 0;
char *txt = "";
u8 prev_ap[ETH_ALEN];
left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
if (len < (reassoc ? 10 : 4)) {
PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload "
"(len=%d, reassoc=%d) from %pM\n",
dev->name, len, reassoc, hdr->addr2);
return;
}
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta == NULL || (sta->flags & WLAN_STA_AUTH) == 0) {
spin_unlock_bh(&local->ap->sta_table_lock);
txt = "trying to associate before authentication";
send_deauth = 1;
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
sta = NULL; /* do not decrement sta->users */
goto fail;
}
atomic_inc(&sta->users);
spin_unlock_bh(&local->ap->sta_table_lock);
pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
sta->capability = __le16_to_cpu(*pos);
pos++; left -= 2;
sta->listen_interval = __le16_to_cpu(*pos);
pos++; left -= 2;
if (reassoc) {
memcpy(prev_ap, pos, ETH_ALEN);
pos++; pos++; pos++; left -= 6;
} else
memset(prev_ap, 0, ETH_ALEN);
if (left >= 2) {
unsigned int ileft;
unsigned char *u = (unsigned char *) pos;
if (*u == WLAN_EID_SSID) {
u++; left--;
ileft = *u;
u++; left--;
if (ileft > left || ileft > MAX_SSID_LEN) {
txt = "SSID overflow";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
if (ileft != strlen(local->essid) ||
memcmp(local->essid, u, ileft) != 0) {
txt = "not our SSID";
resp = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
goto fail;
}
u += ileft;
left -= ileft;
}
if (left >= 2 && *u == WLAN_EID_SUPP_RATES) {
u++; left--;
ileft = *u;
u++; left--;
if (ileft > left || ileft == 0 ||
ileft > WLAN_SUPP_RATES_MAX) {
txt = "SUPP_RATES len error";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
memset(sta->supported_rates, 0,
sizeof(sta->supported_rates));
memcpy(sta->supported_rates, u, ileft);
prism2_check_tx_rates(sta);
u += ileft;
left -= ileft;
}
if (left > 0) {
PDEBUG(DEBUG_AP, "%s: assoc from %pM"
" with extra data (%d bytes) [",
dev->name, hdr->addr2, left);
while (left > 0) {
PDEBUG2(DEBUG_AP, "<%02x>", *u);
u++; left--;
}
PDEBUG2(DEBUG_AP, "]\n");
}
} else {
txt = "frame underflow";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto fail;
}
/* get a unique AID */
if (sta->aid > 0)
txt = "OK, old AID";
else {
spin_lock_bh(&local->ap->sta_table_lock);
for (sta->aid = 1; sta->aid <= MAX_AID_TABLE_SIZE; sta->aid++)
if (local->ap->sta_aid[sta->aid - 1] == NULL)
break;
if (sta->aid > MAX_AID_TABLE_SIZE) {
sta->aid = 0;
spin_unlock_bh(&local->ap->sta_table_lock);
resp = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
txt = "no room for more AIDs";
} else {
local->ap->sta_aid[sta->aid - 1] = sta;
spin_unlock_bh(&local->ap->sta_table_lock);
txt = "OK, new AID";
}
}
fail:
pos = (__le16 *) body;
if (send_deauth) {
*pos = cpu_to_le16(WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH);
pos++;
} else {
/* FIX: CF-Pollable and CF-PollReq should be set to match the
* values in beacons/probe responses */
/* FIX: how about privacy and WEP? */
/* capability */
*pos = cpu_to_le16(WLAN_CAPABILITY_ESS);
pos++;
/* status_code */
*pos = cpu_to_le16(resp);
pos++;
*pos = cpu_to_le16((sta && sta->aid > 0 ? sta->aid : 0) |
BIT(14) | BIT(15)); /* AID */
pos++;
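/* the two MSBs of the AID field are always set (IEEE 802.11),
 * matching the encoding checked in handle_pspoll() */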
/* Supported rates (Information element) */
p = (char *) pos;
*p++ = WLAN_EID_SUPP_RATES;
lpos = p;
*p++ = 0; /* len */
if (local->tx_rate_control & WLAN_RATE_1M) {
*p++ = local->basic_rates & WLAN_RATE_1M ? 0x82 : 0x02;
(*lpos)++;
}
if (local->tx_rate_control & WLAN_RATE_2M) {
*p++ = local->basic_rates & WLAN_RATE_2M ? 0x84 : 0x04;
(*lpos)++;
}
if (local->tx_rate_control & WLAN_RATE_5M5) {
*p++ = local->basic_rates & WLAN_RATE_5M5 ?
0x8b : 0x0b;
(*lpos)++;
}
if (local->tx_rate_control & WLAN_RATE_11M) {
*p++ = local->basic_rates & WLAN_RATE_11M ?
0x96 : 0x16;
(*lpos)++;
}
pos = (__le16 *) p;
}
prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
(send_deauth ? IEEE80211_STYPE_DEAUTH :
(reassoc ? IEEE80211_STYPE_REASSOC_RESP :
IEEE80211_STYPE_ASSOC_RESP)),
body, (u8 *) pos - (u8 *) body,
hdr->addr2,
send_deauth ? 0 : local->ap->tx_callback_assoc);
if (sta) {
if (resp == WLAN_STATUS_SUCCESS) {
sta->last_rx = jiffies;
/* STA will be marked associated from TX callback, if
* AssocResp is ACKed */
}
atomic_dec(&sta->users);
}
#if 0
PDEBUG(DEBUG_AP, "%s: %pM %sassoc (len=%d "
"prev_ap=%pM) => %d(%d) (%s)\n",
dev->name,
hdr->addr2,
reassoc ? "re" : "", len,
prev_ap,
resp, send_deauth, txt);
#endif
}
/* Called only as a scheduled task for pending AP frames. */
static void handle_deauth(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
int len;
u16 reason_code;
__le16 *pos;
struct sta_info *sta = NULL;
len = skb->len - IEEE80211_MGMT_HDR_LEN;
if (len < 2) {
printk("handle_deauth - too short payload (len=%d)\n", len);
return;
}
pos = (__le16 *) body;
reason_code = le16_to_cpu(*pos);
PDEBUG(DEBUG_AP, "%s: deauthentication: %pM len=%d, "
"reason_code=%d\n", dev->name, hdr->addr2,
len, reason_code);
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta != NULL) {
if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
hostap_event_expired_sta(local->dev, sta);
sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
}
spin_unlock_bh(&local->ap->sta_table_lock);
if (sta == NULL) {
printk("%s: deauthentication from %pM, "
"reason_code=%d, but STA not authenticated\n", dev->name,
hdr->addr2, reason_code);
}
}
/* Called only as a scheduled task for pending AP frames. */
static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
int len;
u16 reason_code;
__le16 *pos;
struct sta_info *sta = NULL;
len = skb->len - IEEE80211_MGMT_HDR_LEN;
if (len < 2) {
printk("handle_disassoc - too short payload (len=%d)\n", len);
return;
}
pos = (__le16 *) body;
reason_code = le16_to_cpu(*pos);
PDEBUG(DEBUG_AP, "%s: disassociation: %pM len=%d, "
"reason_code=%d\n", dev->name, hdr->addr2,
len, reason_code);
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta != NULL) {
if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
hostap_event_expired_sta(local->dev, sta);
sta->flags &= ~WLAN_STA_ASSOC;
}
spin_unlock_bh(&local->ap->sta_table_lock);
if (sta == NULL) {
printk("%s: disassociation from %pM, "
"reason_code=%d, but STA not authenticated\n",
dev->name, hdr->addr2, reason_code);
}
}
/* Called only as a scheduled task for pending AP frames. */
static void ap_handle_data_nullfunc(local_info_t *local,
struct ieee80211_hdr *hdr)
{
struct net_device *dev = local->dev;
/* some STA firmware versions seem to require a control::ACK frame for
 * data::nullfunc, but at least Prism2 station f/w version 0.8.0 does
 * not send it, so send control::ACK for the data::nullfunc here */
printk(KERN_DEBUG "Sending control::ACK for data::nullfunc\n");
prism2_send_mgmt(dev, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK,
NULL, 0, hdr->addr2, 0);
}
/* Called only as a scheduled task for pending AP frames. */
static void ap_handle_dropped_data(local_info_t *local,
struct ieee80211_hdr *hdr)
{
struct net_device *dev = local->dev;
struct sta_info *sta;
__le16 reason;
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&local->ap->sta_table_lock);
if (sta != NULL && (sta->flags & WLAN_STA_ASSOC)) {
PDEBUG(DEBUG_AP, "ap_handle_dropped_data: STA is now okay?\n");
atomic_dec(&sta->users);
return;
}
reason = cpu_to_le16(WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
((sta == NULL || !(sta->flags & WLAN_STA_ASSOC)) ?
IEEE80211_STYPE_DEAUTH : IEEE80211_STYPE_DISASSOC),
(char *) &reason, sizeof(reason), hdr->addr2, 0);
if (sta)
atomic_dec(&sta->users);
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
/* Called only as a scheduled task for pending AP frames. */
static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
struct sk_buff *skb)
{
struct hostap_skb_tx_data *meta;
if (!(sta->flags & WLAN_STA_PS)) {
/* Station has moved to non-PS mode, so send all buffered
* frames using normal device queue. */
dev_queue_xmit(skb);
return;
}
/* add a flag for hostap_handle_sta_tx() to know that this skb should
* be passed through even though STA is using PS */
meta = (struct hostap_skb_tx_data *) skb->cb;
meta->flags |= HOSTAP_TX_FLAGS_BUFFERED_FRAME;
if (!skb_queue_empty(&sta->tx_buf)) {
/* indicate to STA that more frames follow */
meta->flags |= HOSTAP_TX_FLAGS_ADD_MOREDATA;
}
dev_queue_xmit(skb);
}
/* Called only as a scheduled task for pending AP frames. */
static void handle_pspoll(local_info_t *local,
struct ieee80211_hdr *hdr,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
struct sta_info *sta;
u16 aid;
struct sk_buff *skb;
PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control));
if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP,
"handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
hdr->addr1);
return;
}
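/* In a PS-Poll the Duration/ID field carries the AID with the two most
 * significant bits set, so validate and strip BIT(15)|BIT(14) before
 * using it as a table index. */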
aid = le16_to_cpu(hdr->duration_id);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) {
PDEBUG(DEBUG_PS, " PSPOLL and AID[15:14] not set\n");
return;
}
aid &= ~(BIT(15) | BIT(14));
if (aid == 0 || aid > MAX_AID_TABLE_SIZE) {
PDEBUG(DEBUG_PS, " invalid aid=%d\n", aid);
return;
}
PDEBUG(DEBUG_PS2, " aid=%d\n", aid);
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&local->ap->sta_table_lock);
if (sta == NULL) {
PDEBUG(DEBUG_PS, " STA not found\n");
return;
}
if (sta->aid != aid) {
PDEBUG(DEBUG_PS, " received aid=%i does not match with "
"assoc.aid=%d\n", aid, sta->aid);
/* drop the reference taken above before bailing out */
atomic_dec(&sta->users);
return;
}
/* FIX: todo:
 * - add a timeout for buffering (clear the aid in the TIM vector if the
 *   buffer timed out; the expiry time must be longer than ListenInterval
 *   for the corresponding STA; "8802-11: 11.2.1.9 AP aging function")
 * - what to do if a buffered, PS-polled, and sent frame is not ACKed by
 *   the STA; store the buffer for later use and leave the TIM aid bit
 *   set? use a TX event to check whether the frame was ACKed?
 */
while ((skb = skb_dequeue(&sta->tx_buf)) != NULL) {
/* send buffered frame .. */
PDEBUG(DEBUG_PS2, "Sending buffered frame to STA after PS POLL"
" (buffer_count=%d)\n", skb_queue_len(&sta->tx_buf));
pspoll_send_buffered(local, sta, skb);
if (sta->flags & WLAN_STA_PS) {
/* send only one buffered packet per PS Poll */
/* FIX: should ignore further PS Polls until the
* buffered packet that was just sent is acknowledged
* (Tx or TxExc event) */
break;
}
}
if (skb_queue_empty(&sta->tx_buf)) {
/* try to clear aid from TIM */
if (!(sta->flags & WLAN_STA_TIM))
PDEBUG(DEBUG_PS2, "Re-unsetting TIM for aid %d\n",
aid);
hostap_set_tim(local, aid, 0);
sta->flags &= ~WLAN_STA_TIM;
}
atomic_dec(&sta->users);
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
static void handle_wds_oper_queue(struct work_struct *work)
{
struct ap_data *ap = container_of(work, struct ap_data,
wds_oper_queue);
local_info_t *local = ap->local;
struct wds_oper_data *entry, *prev;
spin_lock_bh(&local->lock);
entry = local->ap->wds_oper_entries;
local->ap->wds_oper_entries = NULL;
spin_unlock_bh(&local->lock);
while (entry) {
PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection "
"to AP %pM\n",
local->dev->name,
entry->type == WDS_ADD ? "adding" : "removing",
entry->addr);
if (entry->type == WDS_ADD)
prism2_wds_add(local, entry->addr, 0);
else if (entry->type == WDS_DEL)
prism2_wds_del(local, entry->addr, 0, 1);
prev = entry;
entry = entry->next;
kfree(prev);
}
}
/* Called only as a scheduled task for pending AP frames. */
static void handle_beacon(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
int len, left;
u16 beacon_int, capability;
__le16 *pos;
char *ssid = NULL;
unsigned char *supp_rates = NULL;
int ssid_len = 0, supp_rates_len = 0;
struct sta_info *sta = NULL;
int new_sta = 0, channel = -1;
len = skb->len - IEEE80211_MGMT_HDR_LEN;
if (len < 8 + 2 + 2) {
printk(KERN_DEBUG "handle_beacon - too short payload "
"(len=%d)\n", len);
return;
}
pos = (__le16 *) body;
left = len;
/* Timestamp (8 octets) */
pos += 4; left -= 8;
/* Beacon interval (2 octets) */
beacon_int = le16_to_cpu(*pos);
pos++; left -= 2;
/* Capability information (2 octets) */
capability = le16_to_cpu(*pos);
pos++; left -= 2;
if (local->ap->ap_policy != AP_OTHER_AP_EVEN_IBSS &&
capability & WLAN_CAPABILITY_IBSS)
return;
if (left >= 2) {
unsigned int ileft;
unsigned char *u = (unsigned char *) pos;
if (*u == WLAN_EID_SSID) {
u++; left--;
ileft = *u;
u++; left--;
if (ileft > left || ileft > MAX_SSID_LEN) {
PDEBUG(DEBUG_AP, "SSID: overflow\n");
return;
}
if (local->ap->ap_policy == AP_OTHER_AP_SAME_SSID &&
(ileft != strlen(local->essid) ||
memcmp(local->essid, u, ileft) != 0)) {
/* not our SSID */
return;
}
ssid = u;
ssid_len = ileft;
u += ileft;
left -= ileft;
}
if (left >= 2 && *u == WLAN_EID_SUPP_RATES) {
u++; left--;
ileft = *u;
u++; left--;
if (ileft > left || ileft == 0 || ileft > 8) {
PDEBUG(DEBUG_AP, " - SUPP_RATES len error\n");
return;
}
supp_rates = u;
supp_rates_len = ileft;
u += ileft;
left -= ileft;
}
if (left >= 2 && *u == WLAN_EID_DS_PARAMS) {
u++; left--;
ileft = *u;
u++; left--;
if (ileft > left || ileft != 1) {
PDEBUG(DEBUG_AP, " - DS_PARAMS len error\n");
return;
}
channel = *u;
u += ileft;
left -= ileft;
}
}
spin_lock_bh(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta != NULL)
atomic_inc(&sta->users);
spin_unlock_bh(&local->ap->sta_table_lock);
if (sta == NULL) {
/* add new AP */
new_sta = 1;
sta = ap_add_sta(local->ap, hdr->addr2);
if (sta == NULL) {
printk(KERN_INFO "prism2: kmalloc failed for AP "
"data structure\n");
return;
}
hostap_event_new_sta(local->dev, sta);
/* mark APs authentication and associated for pseudo ad-hoc
* style communication */
sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
if (local->ap->autom_ap_wds) {
hostap_wds_link_oper(local, sta->addr, WDS_ADD);
}
}
sta->ap = 1;
if (ssid) {
sta->u.ap.ssid_len = ssid_len;
memcpy(sta->u.ap.ssid, ssid, ssid_len);
sta->u.ap.ssid[ssid_len] = '\0';
} else {
sta->u.ap.ssid_len = 0;
sta->u.ap.ssid[0] = '\0';
}
sta->u.ap.channel = channel;
sta->rx_packets++;
sta->rx_bytes += len;
sta->u.ap.last_beacon = sta->last_rx = jiffies;
sta->capability = capability;
sta->listen_interval = beacon_int;
atomic_dec(&sta->users);
if (new_sta) {
memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
if (supp_rates)
memcpy(sta->supported_rates, supp_rates,
supp_rates_len);
prism2_check_tx_rates(sta);
}
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
/* Called only as a tasklet. */
static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
struct net_device *dev = local->dev;
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
u16 fc, type, stype;
struct ieee80211_hdr *hdr;
/* FIX: should give skb->len to handler functions and check that the
* buffer is long enough */
hdr = (struct ieee80211_hdr *) skb->data;
fc = le16_to_cpu(hdr->frame_control);
type = fc & IEEE80211_FCTL_FTYPE;
stype = fc & IEEE80211_FCTL_STYPE;
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (!local->hostapd && type == IEEE80211_FTYPE_DATA) {
PDEBUG(DEBUG_AP, "handle_ap_item - data frame\n");
if (!(fc & IEEE80211_FCTL_TODS) ||
(fc & IEEE80211_FCTL_FROMDS)) {
if (stype == IEEE80211_STYPE_NULLFUNC) {
/* a nullfunc frame without ToDS seems to be used to
 * check AP association, so send a reject message to
 * speed up re-association */
ap_handle_dropped_data(local, hdr);
goto done;
}
PDEBUG(DEBUG_AP, " not ToDS frame (fc=0x%04x)\n",
fc);
goto done;
}
if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
}
if (local->ap->nullfunc_ack &&
stype == IEEE80211_STYPE_NULLFUNC)
ap_handle_data_nullfunc(local, hdr);
else
ap_handle_dropped_data(local, hdr);
goto done;
}
if (type == IEEE80211_FTYPE_MGMT && stype == IEEE80211_STYPE_BEACON) {
handle_beacon(local, skb, rx_stats);
goto done;
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
if (type == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL) {
handle_pspoll(local, hdr, rx_stats);
goto done;
}
if (local->hostapd) {
PDEBUG(DEBUG_AP, "Unknown frame in AP queue: type=0x%02x "
"subtype=0x%02x\n", type, stype);
goto done;
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (type != IEEE80211_FTYPE_MGMT) {
PDEBUG(DEBUG_AP, "handle_ap_item - not a management frame?\n");
goto done;
}
if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
}
if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
" not own MAC\n", hdr->addr3);
goto done;
}
switch (stype) {
case IEEE80211_STYPE_ASSOC_REQ:
handle_assoc(local, skb, rx_stats, 0);
break;
case IEEE80211_STYPE_ASSOC_RESP:
PDEBUG(DEBUG_AP, "==> ASSOC RESP (ignored)\n");
break;
case IEEE80211_STYPE_REASSOC_REQ:
handle_assoc(local, skb, rx_stats, 1);
break;
case IEEE80211_STYPE_REASSOC_RESP:
PDEBUG(DEBUG_AP, "==> REASSOC RESP (ignored)\n");
break;
case IEEE80211_STYPE_ATIM:
PDEBUG(DEBUG_AP, "==> ATIM (ignored)\n");
break;
case IEEE80211_STYPE_DISASSOC:
handle_disassoc(local, skb, rx_stats);
break;
case IEEE80211_STYPE_AUTH:
handle_authen(local, skb, rx_stats);
break;
case IEEE80211_STYPE_DEAUTH:
handle_deauth(local, skb, rx_stats);
break;
default:
PDEBUG(DEBUG_AP, "Unknown mgmt frame subtype 0x%02x\n",
stype >> 4);
break;
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
done:
dev_kfree_skb(skb);
}
/* Called only as a tasklet (software IRQ) */
void hostap_rx(struct net_device *dev, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct hostap_interface *iface;
local_info_t *local;
struct ieee80211_hdr *hdr;
iface = netdev_priv(dev);
local = iface->local;
if (skb->len < 16)
goto drop;
dev->stats.rx_packets++;
hdr = (struct ieee80211_hdr *) skb->data;
if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
ieee80211_is_beacon(hdr->frame_control))
goto drop;
skb->protocol = cpu_to_be16(ETH_P_HOSTAP);
handle_ap_item(local, skb, rx_stats);
return;
drop:
dev_kfree_skb(skb);
}
/* Called only as a tasklet (software IRQ) */
static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
{
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
struct hostap_80211_rx_status rx_stats;
if (skb_queue_empty(&sta->tx_buf))
return;
skb = dev_alloc_skb(16);
if (skb == NULL) {
printk(KERN_DEBUG "%s: schedule_packet_send: skb alloc "
"failed\n", local->dev->name);
return;
}
hdr = (struct ieee80211_hdr *) skb_put(skb, 16);
/* Generate a fake pspoll frame to start packet delivery */
hdr->frame_control = cpu_to_le16(
IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN);
memcpy(hdr->addr2, sta->addr, ETH_ALEN);
hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14));
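/* Per IEEE 802.11, the Duration/ID field of a PS-Poll carries the AID
* with the two most significant bits set to one; hence the
* BIT(15) | BIT(14) above. */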
PDEBUG(DEBUG_PS2,
"%s: Scheduling buffered packet delivery for STA %pM\n",
local->dev->name, sta->addr);
skb->dev = local->dev;
memset(&rx_stats, 0, sizeof(rx_stats));
hostap_rx(local->dev, skb, &rx_stats);
}
int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size,
int aplist)
{
struct ap_data *ap = local->ap;
struct list_head *ptr;
int count = 0;
spin_lock_bh(&ap->sta_table_lock);
for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
ptr = ptr->next) {
struct sta_info *sta = (struct sta_info *) ptr;
if (aplist && !sta->ap)
continue;
addr[count].sa_family = ARPHRD_ETHER;
memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
if (sta->last_rx_silence == 0)
qual[count].qual = sta->last_rx_signal < 27 ?
0 : (sta->last_rx_signal - 27) * 92 / 127;
else
qual[count].qual = sta->last_rx_signal -
sta->last_rx_silence - 35;
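/* Rough quality heuristic: with no silence (noise) sample available,
* scale the raw signal level into an approximately 0..92 range;
* otherwise use the signal-to-silence margin, offset by 35. */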
qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
qual[count].updated = sta->last_rx_updated;
sta->last_rx_updated = IW_QUAL_DBM;
count++;
if (count >= buf_size)
break;
}
spin_unlock_bh(&ap->sta_table_lock);
return count;
}
/* Translate our list of Access Points & Stations to a card independent
* format that the Wireless Tools will understand - Jean II */
int prism2_ap_translate_scan(struct net_device *dev,
struct iw_request_info *info, char *buffer)
{
struct hostap_interface *iface;
local_info_t *local;
struct ap_data *ap;
struct list_head *ptr;
struct iw_event iwe;
char *current_ev = buffer;
char *end_buf = buffer + IW_SCAN_MAX_DATA;
#if !defined(PRISM2_NO_KERNEL_IEEE80211_MGMT)
char buf[64];
#endif
iface = netdev_priv(dev);
local = iface->local;
ap = local->ap;
spin_lock_bh(&ap->sta_table_lock);
for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
ptr = ptr->next) {
struct sta_info *sta = (struct sta_info *) ptr;
/* First entry *MUST* be the AP MAC address */
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, sta->addr, ETH_ALEN);
iwe.len = IW_EV_ADDR_LEN;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_ADDR_LEN);
/* Use the mode to indicate if it's a station or
* an Access Point */
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWMODE;
if (sta->ap)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_INFRA;
iwe.len = IW_EV_UINT_LEN;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_UINT_LEN);
/* Some quality */
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVQUAL;
if (sta->last_rx_silence == 0)
iwe.u.qual.qual = sta->last_rx_signal < 27 ?
0 : (sta->last_rx_signal - 27) * 92 / 127;
else
iwe.u.qual.qual = sta->last_rx_signal -
sta->last_rx_silence - 35;
iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
iwe.u.qual.updated = sta->last_rx_updated;
iwe.len = IW_EV_QUAL_LEN;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_QUAL_LEN);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
if (sta->ap) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWESSID;
iwe.u.data.length = sta->u.ap.ssid_len;
iwe.u.data.flags = 1;
current_ev = iwe_stream_add_point(info, current_ev,
end_buf, &iwe,
sta->u.ap.ssid);
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWENCODE;
if (sta->capability & WLAN_CAPABILITY_PRIVACY)
iwe.u.data.flags =
IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
current_ev = iwe_stream_add_point(info, current_ev,
end_buf, &iwe,
sta->u.ap.ssid);
if (sta->u.ap.channel > 0 &&
sta->u.ap.channel <= FREQ_COUNT) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = freq_list[sta->u.ap.channel - 1]
* 100000;
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(
info, current_ev, end_buf, &iwe,
IW_EV_FREQ_LEN);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
sprintf(buf, "beacon_interval=%d",
sta->listen_interval);
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf, &iwe, buf);
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
sta->last_rx_updated = IW_QUAL_DBM;
/* To be continued, we should make good use of IWEVCUSTOM */
}
spin_unlock_bh(&ap->sta_table_lock);
return current_ev - buffer;
}
static int prism2_hostapd_add_sta(struct ap_data *ap,
struct prism2_hostapd_param *param)
{
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, param->sta_addr);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&ap->sta_table_lock);
if (sta == NULL) {
sta = ap_add_sta(ap, param->sta_addr);
if (sta == NULL)
return -1;
}
if (!(sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
hostap_event_new_sta(sta->local->dev, sta);
sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC;
sta->last_rx = jiffies;
sta->aid = param->u.add_sta.aid;
sta->capability = param->u.add_sta.capability;
sta->tx_supp_rates = param->u.add_sta.tx_supp_rates;
if (sta->tx_supp_rates & WLAN_RATE_1M)
sta->supported_rates[0] = 2;
if (sta->tx_supp_rates & WLAN_RATE_2M)
sta->supported_rates[1] = 4;
if (sta->tx_supp_rates & WLAN_RATE_5M5)
sta->supported_rates[2] = 11;
if (sta->tx_supp_rates & WLAN_RATE_11M)
sta->supported_rates[3] = 22;
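/* supported_rates values are 802.11 rates in 500 kbit/s units:
* 2 = 1 Mb/s, 4 = 2 Mb/s, 11 = 5.5 Mb/s, 22 = 11 Mb/s. */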
prism2_check_tx_rates(sta);
atomic_dec(&sta->users);
return 0;
}
static int prism2_hostapd_remove_sta(struct ap_data *ap,
struct prism2_hostapd_param *param)
{
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, param->sta_addr);
if (sta) {
ap_sta_hash_del(ap, sta);
list_del(&sta->list);
}
spin_unlock_bh(&ap->sta_table_lock);
if (!sta)
return -ENOENT;
if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
hostap_event_expired_sta(sta->local->dev, sta);
ap_free_sta(ap, sta);
return 0;
}
static int prism2_hostapd_get_info_sta(struct ap_data *ap,
struct prism2_hostapd_param *param)
{
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, param->sta_addr);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&ap->sta_table_lock);
if (!sta)
return -ENOENT;
param->u.get_info_sta.inactive_sec = (jiffies - sta->last_rx) / HZ;
atomic_dec(&sta->users);
return 1;
}
static int prism2_hostapd_set_flags_sta(struct ap_data *ap,
struct prism2_hostapd_param *param)
{
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, param->sta_addr);
if (sta) {
sta->flags |= param->u.set_flags_sta.flags_or;
sta->flags &= param->u.set_flags_sta.flags_and;
}
spin_unlock_bh(&ap->sta_table_lock);
if (!sta)
return -ENOENT;
return 0;
}
static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
struct prism2_hostapd_param *param)
{
struct sta_info *sta;
int rate;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, param->sta_addr);
if (sta) {
sta->rx_packets = sta->tx_packets = 0;
sta->rx_bytes = sta->tx_bytes = 0;
for (rate = 0; rate < WLAN_RATE_COUNT; rate++) {
sta->tx_count[rate] = 0;
sta->rx_count[rate] = 0;
}
}
spin_unlock_bh(&ap->sta_table_lock);
if (!sta)
return -ENOENT;
return 0;
}
int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
{
switch (param->cmd) {
case PRISM2_HOSTAPD_FLUSH:
ap_control_kickall(ap);
return 0;
case PRISM2_HOSTAPD_ADD_STA:
return prism2_hostapd_add_sta(ap, param);
case PRISM2_HOSTAPD_REMOVE_STA:
return prism2_hostapd_remove_sta(ap, param);
case PRISM2_HOSTAPD_GET_INFO_STA:
return prism2_hostapd_get_info_sta(ap, param);
case PRISM2_HOSTAPD_SET_FLAGS_STA:
return prism2_hostapd_set_flags_sta(ap, param);
case PRISM2_HOSTAPD_STA_CLEAR_STATS:
return prism2_hostapd_sta_clear_stats(ap, param);
default:
printk(KERN_WARNING "prism2_hostapd: unknown cmd=%d\n",
param->cmd);
return -EOPNOTSUPP;
}
}
/* Update station info for host-based TX rate control and return current
* TX rate */
static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev)
{
int ret = sta->tx_rate;
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
sta->tx_count[sta->tx_rate_idx]++;
sta->tx_since_last_failure++;
sta->tx_consecutive_exc = 0;
if (sta->tx_since_last_failure >= WLAN_RATE_UPDATE_COUNT &&
sta->tx_rate_idx < sta->tx_max_rate) {
/* use next higher rate */
int old_rate, new_rate;
old_rate = new_rate = sta->tx_rate_idx;
while (new_rate < sta->tx_max_rate) {
new_rate++;
if (ap_tx_rate_ok(new_rate, sta, local)) {
sta->tx_rate_idx = new_rate;
break;
}
}
if (old_rate != sta->tx_rate_idx) {
switch (sta->tx_rate_idx) {
case 0: sta->tx_rate = 10; break;
case 1: sta->tx_rate = 20; break;
case 2: sta->tx_rate = 55; break;
case 3: sta->tx_rate = 110; break;
default: sta->tx_rate = 0; break;
}
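/* tx_rate above is in 100 kbit/s units: 10 = 1 Mb/s, 20 = 2 Mb/s,
* 55 = 5.5 Mb/s, 110 = 11 Mb/s. */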
PDEBUG(DEBUG_AP, "%s: STA %pM TX rate raised to %d\n",
dev->name, sta->addr, sta->tx_rate);
}
sta->tx_since_last_failure = 0;
}
return ret;
}
/* Called only from software IRQ. Called for each TX frame prior possible
* encryption and transmit. */
ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
{
struct sta_info *sta = NULL;
struct sk_buff *skb = tx->skb;
int set_tim, ret;
struct ieee80211_hdr *hdr;
struct hostap_skb_tx_data *meta;
meta = (struct hostap_skb_tx_data *) skb->cb;
ret = AP_TX_CONTINUE;
if (local->ap == NULL || skb->len < 10 ||
meta->iface->type == HOSTAP_INTERFACE_STA)
goto out;
hdr = (struct ieee80211_hdr *) skb->data;
if (hdr->addr1[0] & 0x01) {
/* broadcast/multicast frame - no AP related processing */
if (local->ap->num_sta <= 0)
ret = AP_TX_DROP;
goto out;
}
/* unicast packet - check whether destination STA is associated */
spin_lock(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr1);
if (sta)
atomic_inc(&sta->users);
spin_unlock(&local->ap->sta_table_lock);
if (local->iw_mode == IW_MODE_MASTER && sta == NULL &&
!(meta->flags & HOSTAP_TX_FLAGS_WDS) &&
meta->iface->type != HOSTAP_INTERFACE_MASTER &&
meta->iface->type != HOSTAP_INTERFACE_AP) {
#if 0
/* This can happen, e.g., when wlan0 is added to a bridge and
* bridging code does not know which port is the correct target
* for a unicast frame. In this case, the packet is sent to all
* ports of the bridge. Since this is a valid scenario, do not
* print out any errors here. */
if (net_ratelimit()) {
printk(KERN_DEBUG "AP: drop packet to non-associated "
"STA %pM\n", hdr->addr1);
}
#endif
local->ap->tx_drop_nonassoc++;
ret = AP_TX_DROP;
goto out;
}
if (sta == NULL)
goto out;
if (!(sta->flags & WLAN_STA_AUTHORIZED))
ret = AP_TX_CONTINUE_NOT_AUTHORIZED;
/* Set tx_rate if using host-based TX rate control */
if (!local->fw_tx_rate_control)
local->ap->last_tx_rate = meta->rate =
ap_update_sta_tx_rate(sta, local->dev);
if (local->iw_mode != IW_MODE_MASTER)
goto out;
if (!(sta->flags & WLAN_STA_PS))
goto out;
if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) {
/* indicate to STA that more frames follow */
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) {
/* packet was already buffered and now send due to
* PS poll, so do not rebuffer it */
goto out;
}
if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) {
PDEBUG(DEBUG_PS, "%s: No more space in STA (%pM)'s"
"PS mode buffer\n",
local->dev->name, sta->addr);
/* Make sure that TIM is set for the station (it might not be
* after AP wlan hw reset). */
/* FIX: should fix hw reset to restore bits based on STA
* buffer state.. */
hostap_set_tim(local, sta->aid, 1);
sta->flags |= WLAN_STA_TIM;
ret = AP_TX_DROP;
goto out;
}
/* STA in PS mode, buffer frame for later delivery */
set_tim = skb_queue_empty(&sta->tx_buf);
skb_queue_tail(&sta->tx_buf, skb);
/* FIX: could save RX time to skb and expire buffered frames after
* some time if STA does not poll for them */
if (set_tim) {
if (sta->flags & WLAN_STA_TIM)
PDEBUG(DEBUG_PS2, "Re-setting TIM for aid %d\n",
sta->aid);
hostap_set_tim(local, sta->aid, 1);
sta->flags |= WLAN_STA_TIM;
}
ret = AP_TX_BUFFERED;
out:
if (sta != NULL) {
if (ret == AP_TX_CONTINUE ||
ret == AP_TX_CONTINUE_NOT_AUTHORIZED) {
sta->tx_packets++;
sta->tx_bytes += skb->len;
sta->last_tx = jiffies;
}
if ((ret == AP_TX_CONTINUE ||
ret == AP_TX_CONTINUE_NOT_AUTHORIZED) &&
sta->crypt && tx->host_encrypt) {
tx->crypt = sta->crypt;
tx->sta_ptr = sta; /* hostap_handle_sta_release() will
* be called to release sta info
* later */
} else
atomic_dec(&sta->users);
}
return ret;
}
void hostap_handle_sta_release(void *ptr)
{
struct sta_info *sta = ptr;
atomic_dec(&sta->users);
}
/* Called only as a tasklet (software IRQ) */
void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
{
struct sta_info *sta;
struct ieee80211_hdr *hdr;
struct hostap_skb_tx_data *meta;
hdr = (struct ieee80211_hdr *) skb->data;
meta = (struct hostap_skb_tx_data *) skb->cb;
spin_lock(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr1);
if (!sta) {
spin_unlock(&local->ap->sta_table_lock);
PDEBUG(DEBUG_AP, "%s: Could not find STA %pM"
" for this TX error (@%lu)\n",
local->dev->name, hdr->addr1, jiffies);
return;
}
sta->tx_since_last_failure = 0;
sta->tx_consecutive_exc++;
if (sta->tx_consecutive_exc >= WLAN_RATE_DECREASE_THRESHOLD &&
sta->tx_rate_idx > 0 && meta->rate <= sta->tx_rate) {
/* use next lower rate */
int old, rate;
old = rate = sta->tx_rate_idx;
while (rate > 0) {
rate--;
if (ap_tx_rate_ok(rate, sta, local)) {
sta->tx_rate_idx = rate;
break;
}
}
if (old != sta->tx_rate_idx) {
switch (sta->tx_rate_idx) {
case 0: sta->tx_rate = 10; break;
case 1: sta->tx_rate = 20; break;
case 2: sta->tx_rate = 55; break;
case 3: sta->tx_rate = 110; break;
default: sta->tx_rate = 0; break;
}
PDEBUG(DEBUG_AP,
"%s: STA %pM TX rate lowered to %d\n",
local->dev->name, sta->addr, sta->tx_rate);
}
sta->tx_consecutive_exc = 0;
}
spin_unlock(&local->ap->sta_table_lock);
}
static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
int pwrmgt, int type, int stype)
{
if (pwrmgt && !(sta->flags & WLAN_STA_PS)) {
sta->flags |= WLAN_STA_PS;
PDEBUG(DEBUG_PS2, "STA %pM changed to use PS "
"mode (type=0x%02X, stype=0x%02X)\n",
sta->addr, type >> 2, stype >> 4);
} else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) {
sta->flags &= ~WLAN_STA_PS;
PDEBUG(DEBUG_PS2, "STA %pM changed to not use "
"PS mode (type=0x%02X, stype=0x%02X)\n",
sta->addr, type >> 2, stype >> 4);
if (type != IEEE80211_FTYPE_CTL ||
stype != IEEE80211_STYPE_PSPOLL)
schedule_packet_send(local, sta);
}
}
/* Called only as a tasklet (software IRQ). Called for each RX frame to update
* STA power saving state. pwrmgt is a flag from 802.11 frame_control field. */
int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr)
{
struct sta_info *sta;
u16 fc;
spin_lock(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta)
atomic_inc(&sta->users);
spin_unlock(&local->ap->sta_table_lock);
if (!sta)
return -1;
fc = le16_to_cpu(hdr->frame_control);
hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
atomic_dec(&sta->users);
return 0;
}
/* Called only as a tasklet (software IRQ). Called for each RX frame after
* getting RX header and payload from hardware. */
ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats,
int wds)
{
int ret;
struct sta_info *sta;
u16 fc, type, stype;
struct ieee80211_hdr *hdr;
if (local->ap == NULL)
return AP_RX_CONTINUE;
hdr = (struct ieee80211_hdr *) skb->data;
fc = le16_to_cpu(hdr->frame_control);
type = fc & IEEE80211_FCTL_FTYPE;
stype = fc & IEEE80211_FCTL_STYPE;
spin_lock(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta)
atomic_inc(&sta->users);
spin_unlock(&local->ap->sta_table_lock);
if (sta && !(sta->flags & WLAN_STA_AUTHORIZED))
ret = AP_RX_CONTINUE_NOT_AUTHORIZED;
else
ret = AP_RX_CONTINUE;
if (fc & IEEE80211_FCTL_TODS) {
if (!wds && (sta == NULL || !(sta->flags & WLAN_STA_ASSOC))) {
if (local->hostapd) {
prism2_rx_80211(local->apdev, skb, rx_stats,
PRISM2_RX_NON_ASSOC);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
} else {
printk(KERN_DEBUG "%s: dropped received packet"
" from non-associated STA %pM"
" (type=0x%02x, subtype=0x%02x)\n",
dev->name, hdr->addr2,
type >> 2, stype >> 4);
hostap_rx(dev, skb, rx_stats);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
}
ret = AP_RX_EXIT;
goto out;
}
} else if (fc & IEEE80211_FCTL_FROMDS) {
if (!wds) {
/* FromDS frame - not for us; probably
* broadcast/multicast in another BSS - drop */
if (ether_addr_equal(hdr->addr1, dev->dev_addr)) {
printk(KERN_DEBUG "Odd.. FromDS packet "
"received with own BSSID\n");
hostap_dump_rx_80211(dev->name, skb, rx_stats);
}
ret = AP_RX_DROP;
goto out;
}
} else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
ether_addr_equal(hdr->addr1, dev->dev_addr)) {
if (local->hostapd) {
prism2_rx_80211(local->apdev, skb, rx_stats,
PRISM2_RX_NON_ASSOC);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
} else {
/* At least Lucent f/w seems to send data::nullfunc
* frames with no ToDS flag when the current AP returns
* after being unavailable for some time. Speed up
* re-association by informing the station about it not
* being associated. */
printk(KERN_DEBUG "%s: rejected received nullfunc frame"
" without ToDS from not associated STA %pM\n",
dev->name, hdr->addr2);
hostap_rx(dev, skb, rx_stats);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
}
ret = AP_RX_EXIT;
goto out;
} else if (stype == IEEE80211_STYPE_NULLFUNC) {
/* At least Lucent cards seem to send periodic nullfunc
* frames with ToDS. Let these through to update SQ
* stats and PS state. Nullfunc frames do not contain
* any data and they will be dropped below. */
} else {
/* If BSSID (Addr3) is foreign, this frame is a normal
* broadcast frame from an IBSS network. Drop it silently.
* If BSSID is own, report the dropping of this frame. */
if (ether_addr_equal(hdr->addr3, dev->dev_addr)) {
printk(KERN_DEBUG "%s: dropped received packet from %pM"
" with no ToDS flag "
"(type=0x%02x, subtype=0x%02x)\n", dev->name,
hdr->addr2, type >> 2, stype >> 4);
hostap_dump_rx_80211(dev->name, skb, rx_stats);
}
ret = AP_RX_DROP;
goto out;
}
if (sta) {
hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
type, stype);
sta->rx_packets++;
sta->rx_bytes += skb->len;
sta->last_rx = jiffies;
}
if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC &&
fc & IEEE80211_FCTL_TODS) {
if (local->hostapd) {
prism2_rx_80211(local->apdev, skb, rx_stats,
PRISM2_RX_NULLFUNC_ACK);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
} else {
/* some STA f/w's seem to require control::ACK frame
* for data::nullfunc, but Prism2 f/w 0.8.0 (at least
* from Compaq) does not send this.. Try to generate
* ACK for these frames from the host driver to make
* power saving work with, e.g., Lucent WaveLAN f/w */
hostap_rx(dev, skb, rx_stats);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
}
ret = AP_RX_EXIT;
goto out;
}
out:
if (sta)
atomic_dec(&sta->users);
return ret;
}
/* Called only as a tasklet (software IRQ) */
int hostap_handle_sta_crypto(local_info_t *local,
struct ieee80211_hdr *hdr,
struct lib80211_crypt_data **crypt,
void **sta_ptr)
{
struct sta_info *sta;
spin_lock(&local->ap->sta_table_lock);
sta = ap_get_sta(local->ap, hdr->addr2);
if (sta)
atomic_inc(&sta->users);
spin_unlock(&local->ap->sta_table_lock);
if (!sta)
return -1;
if (sta->crypt) {
*crypt = sta->crypt;
*sta_ptr = sta;
/* hostap_handle_sta_release() will be called to release STA
* info */
} else
atomic_dec(&sta->users);
return 0;
}
/* Called only as a tasklet (software IRQ) */
int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr)
{
struct sta_info *sta;
int ret = 0;
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, sta_addr);
if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap)
ret = 1;
spin_unlock(&ap->sta_table_lock);
return ret;
}
/* Called only as a tasklet (software IRQ) */
int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr)
{
struct sta_info *sta;
int ret = 0;
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, sta_addr);
if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap &&
((sta->flags & WLAN_STA_AUTHORIZED) ||
ap->local->ieee_802_1x == 0))
ret = 1;
spin_unlock(&ap->sta_table_lock);
return ret;
}
/* Called only as a tasklet (software IRQ) */
int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
{
struct sta_info *sta;
int ret = 1;
if (!ap)
return -1;
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, sta_addr);
if (sta)
ret = 0;
spin_unlock(&ap->sta_table_lock);
if (ret == 1) {
sta = ap_add_sta(ap, sta_addr);
if (!sta)
return -1;
sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
sta->ap = 1;
memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
/* No way of knowing which rates are supported since we did not
* get supported rates element from beacon/assoc req. Assume
* that remote end supports all 802.11b rates. */
sta->supported_rates[0] = 0x82;
sta->supported_rates[1] = 0x84;
sta->supported_rates[2] = 0x0b;
sta->supported_rates[3] = 0x16;
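/* These are 802.11 supported-rates IE values in 500 kbit/s units with
* the high bit marking a basic rate: 0x82 = 1 Mb/s (basic),
* 0x84 = 2 Mb/s (basic), 0x0b = 5.5 Mb/s, 0x16 = 11 Mb/s. */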
sta->tx_supp_rates = WLAN_RATE_1M | WLAN_RATE_2M |
WLAN_RATE_5M5 | WLAN_RATE_11M;
sta->tx_rate = 110;
sta->tx_max_rate = sta->tx_rate_idx = 3;
}
return ret;
}
/* Called only as a tasklet (software IRQ) */
int hostap_update_rx_stats(struct ap_data *ap,
struct ieee80211_hdr *hdr,
struct hostap_80211_rx_status *rx_stats)
{
struct sta_info *sta;
if (!ap)
return -1;
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, hdr->addr2);
if (sta) {
sta->last_rx_silence = rx_stats->noise;
sta->last_rx_signal = rx_stats->signal;
sta->last_rx_rate = rx_stats->rate;
sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
if (rx_stats->rate == 10)
sta->rx_count[0]++;
else if (rx_stats->rate == 20)
sta->rx_count[1]++;
else if (rx_stats->rate == 55)
sta->rx_count[2]++;
else if (rx_stats->rate == 110)
sta->rx_count[3]++;
}
spin_unlock(&ap->sta_table_lock);
return sta ? 0 : -1;
}
void hostap_update_rates(local_info_t *local)
{
struct sta_info *sta;
struct ap_data *ap = local->ap;
if (!ap)
return;
spin_lock_bh(&ap->sta_table_lock);
list_for_each_entry(sta, &ap->sta_list, list) {
prism2_check_tx_rates(sta);
}
spin_unlock_bh(&ap->sta_table_lock);
}
void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct lib80211_crypt_data ***crypt)
{
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
sta = ap_get_sta(ap, addr);
if (sta)
atomic_inc(&sta->users);
spin_unlock_bh(&ap->sta_table_lock);
if (!sta && permanent)
sta = ap_add_sta(ap, addr);
if (!sta)
return NULL;
if (permanent)
sta->flags |= WLAN_STA_PERM;
*crypt = &sta->crypt;
return sta;
}
void hostap_add_wds_links(local_info_t *local)
{
struct ap_data *ap = local->ap;
struct sta_info *sta;
spin_lock_bh(&ap->sta_table_lock);
list_for_each_entry(sta, &ap->sta_list, list) {
if (sta->ap)
hostap_wds_link_oper(local, sta->addr, WDS_ADD);
}
spin_unlock_bh(&ap->sta_table_lock);
schedule_work(&local->ap->wds_oper_queue);
}
void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type)
{
struct wds_oper_data *entry;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
memcpy(entry->addr, addr, ETH_ALEN);
entry->type = type;
spin_lock_bh(&local->lock);
entry->next = local->ap->wds_oper_entries;
local->ap->wds_oper_entries = entry;
spin_unlock_bh(&local->lock);
schedule_work(&local->ap->wds_oper_queue);
}
EXPORT_SYMBOL(hostap_init_data);
EXPORT_SYMBOL(hostap_init_ap_proc);
EXPORT_SYMBOL(hostap_free_data);
EXPORT_SYMBOL(hostap_check_sta_fw_version);
EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
| gpl-2.0 |
s0be/kernel_htc_msm7227 | arch/ia64/xen/irq_xen.c | 1068 | 11882 | /******************************************************************************
* arch/ia64/xen/irq_xen.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/cpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>
#include <asm/xen/privop.h>
#include "irq_xen.h"
/***************************************************************************
* pv_irq_ops
* irq operations
*/
static int
xen_assign_irq_vector(int irq)
{
struct physdev_irq irq_op;
irq_op.irq = irq;
if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
return -ENOSPC;
return irq_op.vector;
}
static void
xen_free_irq_vector(int vector)
{
struct physdev_irq irq_op;
if (vector < IA64_FIRST_DEVICE_VECTOR ||
vector > IA64_LAST_DEVICE_VECTOR)
return;
irq_op.vector = vector;
if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
__func__, vector);
}
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
struct saved_irq {
unsigned int irq;
struct irqaction *action;
};
/* 16 should be a generously optimistic value, since only a few percpu irqs
* are registered early.
*/
#define MAX_LATE_IRQ 16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;
#ifdef CONFIG_SMP
/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
* doing so ends up issuing several memory accesses on percpu data and
* thus adds unnecessary traffic to other paths.
*/
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static struct irqaction xen_ipi_irqaction = {
.handler = handle_IPI,
.flags = IRQF_DISABLED,
.name = "IPI"
};
static struct irqaction xen_resched_irqaction = {
.handler = xen_dummy_handler,
.flags = IRQF_DISABLED,
.name = "resched"
};
static struct irqaction xen_tlb_irqaction = {
.handler = xen_dummy_handler,
.flags = IRQF_DISABLED,
.name = "tlb_flush"
};
#endif
/*
* This is the Xen version of percpu irq registration, which needs to
* bind to the Xen-specific evtchn sub-system. One trick here is that
* the Xen evtchn binding interface depends on kmalloc, because the
* related port needs to be freed at device/cpu teardown. So we cache
* registrations made on the BSP before the slab allocator is ready and
* deal with them later. Instances registered after the slab is ready
* are hooked to Xen evtchns immediately.
*
* FIXME: MCA is not supported so far, and thus the "nomca" boot param
* is required.
*/
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
struct irqaction *action, int save)
{
struct irq_desc *desc;
int irq = 0;
if (xen_slab_ready) {
switch (vec) {
case IA64_TIMER_VECTOR:
snprintf(per_cpu(xen_timer_name, cpu),
sizeof(per_cpu(xen_timer_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
per_cpu(xen_timer_name, cpu), action->dev_id);
per_cpu(xen_timer_irq, cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
snprintf(per_cpu(xen_resched_name, cpu),
sizeof(per_cpu(xen_resched_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
per_cpu(xen_resched_name, cpu), action->dev_id);
per_cpu(xen_resched_irq, cpu) = irq;
break;
case IA64_IPI_VECTOR:
snprintf(per_cpu(xen_ipi_name, cpu),
sizeof(per_cpu(xen_ipi_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
action->handler, action->flags,
per_cpu(xen_ipi_name, cpu), action->dev_id);
per_cpu(xen_ipi_irq, cpu) = irq;
break;
case IA64_CMC_VECTOR:
snprintf(per_cpu(xen_cmc_name, cpu),
sizeof(per_cpu(xen_cmc_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
action->handler,
action->flags,
per_cpu(xen_cmc_name, cpu),
action->dev_id);
per_cpu(xen_cmc_irq, cpu) = irq;
break;
case IA64_CMCP_VECTOR:
snprintf(per_cpu(xen_cmcp_name, cpu),
sizeof(per_cpu(xen_cmcp_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
action->handler,
action->flags,
per_cpu(xen_cmcp_name, cpu),
action->dev_id);
per_cpu(xen_cmcp_irq, cpu) = irq;
break;
case IA64_CPEP_VECTOR:
snprintf(per_cpu(xen_cpep_name, cpu),
sizeof(per_cpu(xen_cpep_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
action->handler,
action->flags,
per_cpu(xen_cpep_name, cpu),
action->dev_id);
per_cpu(xen_cpep_irq, cpu) = irq;
break;
case IA64_CPE_VECTOR:
case IA64_MCA_RENDEZ_VECTOR:
case IA64_PERFMON_VECTOR:
case IA64_MCA_WAKEUP_VECTOR:
case IA64_SPURIOUS_INT_VECTOR:
/* No need to complain, these aren't supported. */
break;
default:
printk(KERN_WARNING "Percpu irq %d is unsupported "
"by xen!\n", vec);
break;
}
BUG_ON(irq < 0);
if (irq > 0) {
/*
* Mark percpu. Without this, migrate_irqs() will
* mark the interrupt for migrations and trigger it
* on cpu hotplug.
*/
desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
}
}
/* For BSP, we cache registered percpu irqs, and then re-walk
* them when initializing APs
*/
if (!cpu && save) {
BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
saved_percpu_irqs[saved_irq_cnt].irq = vec;
saved_percpu_irqs[saved_irq_cnt].action = action;
saved_irq_cnt++;
if (!xen_slab_ready)
late_irq_cnt++;
}
}
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
__xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}
static void
xen_bind_early_percpu_irq(void)
{
int i;
xen_slab_ready = 1;
/* There's no race when accessing this cached array, since only the
* BSP walks through this step, shortly after boot
*/
for (i = 0; i < late_irq_cnt; i++)
__xen_register_percpu_irq(smp_processor_id(),
saved_percpu_irqs[i].irq,
saved_percpu_irqs[i].action, 0);
}
/* FIXME: There's no obvious hook for checking whether the slab allocator
* is ready, so resort to the hack of utilizing a late time hook.
*/
#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD) {
/* Unregister evtchn. */
if (per_cpu(xen_cpep_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
NULL);
per_cpu(xen_cpep_irq, cpu) = -1;
}
if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
NULL);
per_cpu(xen_cmcp_irq, cpu) = -1;
}
if (per_cpu(xen_cmc_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
per_cpu(xen_cmc_irq, cpu) = -1;
}
if (per_cpu(xen_ipi_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
per_cpu(xen_ipi_irq, cpu) = -1;
}
if (per_cpu(xen_resched_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
NULL);
per_cpu(xen_resched_irq, cpu) = -1;
}
if (per_cpu(xen_timer_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
NULL);
per_cpu(xen_timer_irq, cpu) = -1;
}
}
return NOTIFY_OK;
}
static struct notifier_block unbind_evtchn_notifier = {
.notifier_call = unbind_evtchn_callback,
.priority = 0
};
#endif
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
unsigned int i;
for (i = 0; i < saved_irq_cnt; i++)
__xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
saved_percpu_irqs[i].action, 0);
#endif
}
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
unsigned int cpu = smp_processor_id();
struct callback_register event = {
.type = CALLBACKTYPE_event,
.address = { .ip = (unsigned long)&xen_event_callback },
};
if (cpu == 0) {
/* Initialization was already done for boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
/* Register the notifier only once. */
register_cpu_notifier(&unbind_evtchn_notifier);
#endif
return;
}
/* This should be piggybacked onto setting up the vcpu guest context */
BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}
void __init
xen_irq_init(void)
{
struct callback_register event = {
.type = CALLBACKTYPE_event,
.address = { .ip = (unsigned long)&xen_event_callback },
};
xen_init_IRQ();
BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
late_time_init = xen_bind_early_percpu_irq;
}
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
/* TODO: we need to call vcpu_up here */
if (unlikely(vector == ap_wakeup_vector)) {
/* XXX
* This should be in __cpu_up(cpu) in ia64 smpboot.c
* like on x86, but we don't want to modify that file;
* keep it untouched.
*/
xen_smp_intr_init_early(cpu);
xen_send_ipi(cpu, vector);
/* vcpu_prepare_and_up(cpu); */
return;
}
#endif
switch (vector) {
case IA64_IPI_VECTOR:
xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
break;
case IA64_IPI_RESCHEDULE:
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
break;
case IA64_CMCP_VECTOR:
xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
break;
case IA64_CPEP_VECTOR:
xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
break;
case IA64_TIMER_VECTOR: {
/* this is used only once by check_sal_cache_flush()
at boot time */
static int used = 0;
if (!used) {
xen_send_ipi(cpu, IA64_TIMER_VECTOR);
used = 1;
break;
}
/* fallthrough */
}
default:
printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
vector);
notify_remote_via_irq(0); /* defaults to 0 irq */
break;
}
}
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}
static void
xen_resend_irq(unsigned int vector)
{
(void)resend_irq_on_evtchn(vector);
}
const struct pv_irq_ops xen_irq_ops __initdata = {
.register_ipi = xen_register_ipi,
.assign_irq_vector = xen_assign_irq_vector,
.free_irq_vector = xen_free_irq_vector,
.register_percpu_irq = xen_register_percpu_irq,
.resend_irq = xen_resend_irq,
};
| gpl-2.0 |
NaughtyMonkey75/kernel_htc_msm8974 | arch/arm/mach-msm/pm-stats.c | 1324 | 8113 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "pm.h"
struct msm_pm_time_stats {
const char *name;
int64_t first_bucket_time;
int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int count;
int64_t total_time;
bool enabled;
};
struct msm_pm_cpu_time_stats {
struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
};
static struct msm_pm_time_stats suspend_stats;
static DEFINE_SPINLOCK(msm_pm_stats_lock);
static DEFINE_PER_CPU_SHARED_ALIGNED(
struct msm_pm_cpu_time_stats, msm_pm_stats);
/*
* Update the given statistics record with a new time sample t (in ns)
*/
static void update_stats(struct msm_pm_time_stats *stats, int64_t t)
{
int64_t bt;
int i;
if (!stats)
return;
stats->total_time += t;
stats->count++;
bt = t;
do_div(bt, stats->first_bucket_time);
if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
i = DIV_ROUND_UP(fls((uint32_t)bt),
CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
else
i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
stats->bucket[i]++;
if (t < stats->min_time[i] || !stats->max_time[i])
stats->min_time[i] = t;
if (t > stats->max_time[i])
stats->max_time[i] = t;
}
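/* Worked example (assuming hypothetical build-time values
* first_bucket_time = 1 ms and CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT = 2):
* a sample t = 7 ms gives bt = 7, fls(7) = 3, so i = DIV_ROUND_UP(3, 2)
* = 2; bucket upper bounds grow by a factor of 2^shift per bucket. */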
/*
* Add the given time data to the statistics collection.
*/
void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
struct msm_pm_time_stats *stats;
unsigned long flags;
spin_lock_irqsave(&msm_pm_stats_lock, flags);
if (id == MSM_PM_STAT_SUSPEND) {
stats = &suspend_stats;
} else {
stats = __get_cpu_var(msm_pm_stats).stats;
if (!stats[id].enabled)
goto add_bail;
stats = &stats[id];
}
update_stats(stats, t);
add_bail:
spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}
static void stats_show(struct seq_file *m,
struct msm_pm_time_stats *stats,
int cpu, int suspend)
{
int64_t bucket_time;
int64_t s;
uint32_t ns;
int i;
int bucket_count = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
int bucket_shift = CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
if (!stats || !m)
return;
s = stats->total_time;
ns = do_div(s, NSEC_PER_SEC);
if (suspend)
seq_printf(m,
"%s:\n"
" count: %7d\n"
" total_time: %lld.%09u\n",
stats->name,
stats->count,
s, ns);
else
seq_printf(m,
"[cpu %u] %s:\n"
" count: %7d\n"
" total_time: %lld.%09u\n",
cpu, stats->name,
stats->count,
s, ns);
bucket_time = stats->first_bucket_time;
for (i = 0; i < bucket_count; i++) {
s = bucket_time;
ns = do_div(s, NSEC_PER_SEC);
seq_printf(m,
" <%6lld.%09u: %7d (%lld-%lld)\n",
s, ns, stats->bucket[i],
stats->min_time[i],
stats->max_time[i]);
bucket_time <<= bucket_shift;
}
seq_printf(m, " >=%6lld.%09u: %7d (%lld-%lld)\n",
s, ns, stats->bucket[i],
stats->min_time[i],
stats->max_time[i]);
}
/*
* Write out the power management statistics.
*/
static int msm_pm_stats_show(struct seq_file *m, void *v)
{
int cpu;
int id;
unsigned long flags;
spin_lock_irqsave(&msm_pm_stats_lock, flags);
for_each_possible_cpu(cpu) {
struct msm_pm_time_stats *stats;
stats = per_cpu(msm_pm_stats, cpu).stats;
for (id = 0; id < MSM_PM_STAT_COUNT; id++) {
/* Skip the disabled ones */
if (!stats[id].enabled)
continue;
if (id == MSM_PM_STAT_SUSPEND)
continue;
stats_show(m, &stats[id], cpu, false);
}
}
stats_show(m, &suspend_stats, cpu, true);
spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
return 0;
}
#define MSM_PM_STATS_RESET "reset"
/*
* Reset the power management statistics values.
*/
static ssize_t msm_pm_write_proc(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
char buf[sizeof(MSM_PM_STATS_RESET)];
int ret;
unsigned long flags;
unsigned int cpu;
size_t len = strnlen(MSM_PM_STATS_RESET, sizeof(MSM_PM_STATS_RESET));
if (count < sizeof(MSM_PM_STATS_RESET)) {
ret = -EINVAL;
goto write_proc_failed;
}
if (copy_from_user(buf, buffer, len)) {
ret = -EFAULT;
goto write_proc_failed;
}
if (strncmp(buf, MSM_PM_STATS_RESET, len)) {
ret = -EINVAL;
goto write_proc_failed;
}
spin_lock_irqsave(&msm_pm_stats_lock, flags);
for_each_possible_cpu(cpu) {
struct msm_pm_time_stats *stats;
int i;
stats = per_cpu(msm_pm_stats, cpu).stats;
for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
memset(stats[i].bucket,
0, sizeof(stats[i].bucket));
memset(stats[i].min_time,
0, sizeof(stats[i].min_time));
memset(stats[i].max_time,
0, sizeof(stats[i].max_time));
stats[i].count = 0;
stats[i].total_time = 0;
}
}
memset(suspend_stats.bucket,
0, sizeof(suspend_stats.bucket));
memset(suspend_stats.min_time,
0, sizeof(suspend_stats.min_time));
memset(suspend_stats.max_time,
0, sizeof(suspend_stats.max_time));
suspend_stats.count = 0;
suspend_stats.total_time = 0;
spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
return count;
write_proc_failed:
return ret;
}
#undef MSM_PM_STATS_RESET
static int msm_pm_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, msm_pm_stats_show, NULL);
}
static const struct file_operations msm_pm_stats_fops = {
.open = msm_pm_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = msm_pm_write_proc,
};
void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size)
{
unsigned int cpu;
struct proc_dir_entry *d_entry;
int i = 0;
for_each_possible_cpu(cpu) {
struct msm_pm_time_stats *stats =
per_cpu(msm_pm_stats, cpu).stats;
stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_IDLE_SPIN].name = "idle-spin";
stats[MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_RETENTION].name = "retention";
stats[MSM_PM_STAT_RETENTION].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
"idle-standalone-power-collapse";
stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].name =
"idle-failed-standalone-power-collapse";
stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].
first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
"idle-power-collapse";
stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
"idle-failed-power-collapse";
stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].
first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats[MSM_PM_STAT_NOT_IDLE].name = "not-idle";
stats[MSM_PM_STAT_NOT_IDLE].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
for (i = 0; i < size; i++)
stats[enable_stats[i]].enabled = true;
}
suspend_stats.name = "system_suspend";
suspend_stats.first_bucket_time =
CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
d_entry = proc_create_data("msm_pm_stats", S_IRUGO | S_IWUSR | S_IWGRP,
NULL, &msm_pm_stats_fops, NULL);
}
| gpl-2.0 |
invisiblek/android_kernel_samsung_jaspervzw | arch/blackfin/kernel/ftrace.c | 2604 | 3249 | /*
* ftrace graph code
*
* Copyright (C) 2009-2010 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static const unsigned char mnop[] = {
0x03, 0xc0, 0x00, 0x18, /* MNOP; */
0x03, 0xc0, 0x00, 0x18, /* MNOP; */
};
static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
unsigned long dst)
{
uint32_t pcrel = (dst - src) >> 1;
insn[0] = pcrel >> 16;
insn[1] = 0xe3;
insn[2] = pcrel;
insn[3] = pcrel >> 8;
}
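/* The bytes above form a Blackfin 32-bit pcrel24 flow-control insn;
* 0xe3 in the high byte of the first 16-bit word selects CALL, and the
* 24-bit offset is counted in 16-bit units, hence the >> 1. */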
#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
unsigned long len)
{
int ret = probe_kernel_write((void *)ip, (void *)code, len);
flush_icache_range(ip, ip + len);
return ret;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
/* Turn the mcount call site into two MNOPs as those are 32bit insns */
return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
/* Restore the mcount call site */
unsigned char call[8];
call[0] = 0x67; /* [--SP] = RETS; */
call[1] = 0x01;
bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
call[6] = 0x27; /* RETS = [SP++]; */
call[7] = 0x01;
return ftrace_modify_code(rec->ip, call, sizeof(call));
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned char call[4];
unsigned long ip = (unsigned long)&ftrace_call;
bfin_make_pcrel24(call, ip, func);
return ftrace_modify_code(ip, call, sizeof(call));
}
int __init ftrace_dyn_arch_init(void *data)
{
/* the return value is passed back indirectly via data */
*(unsigned long *)data = 0;
return 0;
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)&ftrace_graph_call;
uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
jump_pcrel12 |= 0x2000;
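/* 0x2000 forms a Blackfin JUMP.S (pcrel12) instruction; the low 12 bits
* hold the pc-relative offset in 16-bit units. */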
return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
}
# endif
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
frame_pointer) == -EBUSY)
return;
trace.func = self_addr;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
return;
}
/* all is well in the world ! hijack RETS ... */
*parent = return_hooker;
}
#endif
| gpl-2.0 |
frenkowski/Tyrannus_Kernel_MM_SM-G925F | tools/perf/util/pstack.c | 2860 | 1493 | /*
* Simple pointer stack
*
* (c) 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
*/
#include "util.h"
#include "pstack.h"
#include <linux/kernel.h>
#include <stdlib.h>
struct pstack {
unsigned short top;
unsigned short max_nr_entries;
void *entries[0];
};
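/*
* Minimal usage sketch (with hypothetical pointers a and b):
*
*	struct pstack *ps = pstack__new(2);
*	pstack__push(ps, a);
*	pstack__push(ps, b);
*	pstack__pop(ps);	returns b (LIFO order)
*	pstack__delete(ps);
*/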
struct pstack *pstack__new(unsigned short max_nr_entries)
{
struct pstack *pstack = zalloc((sizeof(*pstack) +
max_nr_entries * sizeof(void *)));
if (pstack != NULL)
pstack->max_nr_entries = max_nr_entries;
return pstack;
}
void pstack__delete(struct pstack *pstack)
{
free(pstack);
}
bool pstack__empty(const struct pstack *pstack)
{
return pstack->top == 0;
}
void pstack__remove(struct pstack *pstack, void *key)
{
unsigned short i = pstack->top, last_index = pstack->top - 1;
while (i-- != 0) {
if (pstack->entries[i] == key) {
if (i < last_index)
memmove(pstack->entries + i,
pstack->entries + i + 1,
(last_index - i) * sizeof(void *));
--pstack->top;
return;
}
}
pr_err("%s: %p not on the pstack!\n", __func__, key);
}
void pstack__push(struct pstack *pstack, void *key)
{
if (pstack->top == pstack->max_nr_entries) {
pr_err("%s: top=%d, overflow!\n", __func__, pstack->top);
return;
}
pstack->entries[pstack->top++] = key;
}
void *pstack__pop(struct pstack *pstack)
{
void *ret;
if (pstack->top == 0) {
pr_err("%s: underflow!\n", __func__);
return NULL;
}
ret = pstack->entries[--pstack->top];
pstack->entries[pstack->top] = NULL;
return ret;
}
| gpl-2.0 |
ridon/ridon-kernel-mediatek-sprout | fs/jffs2/compr_rtime.c | 4908 | 2913 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*
*
* Very simple lz77-ish encoder.
*
* Theory of operation: Both encoder and decoder have a list of "last
* occurrences" for every possible source-value; after sending the
* first source-byte, the second byte indicates the "run" length of
* matches
*
* The algorithm is intended to only send "whole bytes", no bit-messing.
*
*/
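/*
* Worked example (hypothetical input): compressing "aaaa" emits the
* literal 'a' followed by the run length 3, i.e. two output bytes for
* four input bytes. Incompressible data costs two output bytes per
* input byte (literal plus a zero run length), which is why the
* encoder bails out with -1 when outpos >= pos.
*/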
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jffs2.h>
#include "compr.h"
/* _compress returns 0 on success (compressed size reported via dstlen), or -1 if the result would not be smaller than the input */
static int jffs2_rtime_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
unsigned short positions[256];
int outpos = 0;
int pos=0;
memset(positions,0,sizeof(positions));
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
int backpos, runlen=0;
unsigned char value;
value = data_in[pos];
cpage_out[outpos++] = data_in[pos++];
backpos = positions[value];
positions[value]=pos;
while ((backpos < pos) && (pos < (*sourcelen)) &&
(data_in[pos]==data_in[backpos++]) && (runlen<255)) {
pos++;
runlen++;
}
cpage_out[outpos++] = runlen;
}
if (outpos >= pos) {
/* We failed */
return -1;
}
/* Tell the caller how much we managed to compress, and how much space it took */
*sourcelen = pos;
*dstlen = outpos;
return 0;
}
static int jffs2_rtime_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
unsigned short positions[256];
int outpos = 0;
int pos=0;
memset(positions,0,sizeof(positions));
while (outpos<destlen) {
unsigned char value;
int backoffs;
int repeat;
value = data_in[pos++];
cpage_out[outpos++] = value; /* first the verbatim copied byte */
repeat = data_in[pos++];
backoffs = positions[value];
positions[value]=outpos;
if (repeat) {
if (backoffs + repeat >= outpos) {
while(repeat) {
cpage_out[outpos++] = cpage_out[backoffs++];
repeat--;
}
} else {
memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
outpos+=repeat;
}
}
}
return 0;
}
static struct jffs2_compressor jffs2_rtime_comp = {
.priority = JFFS2_RTIME_PRIORITY,
.name = "rtime",
.compr = JFFS2_COMPR_RTIME,
.compress = &jffs2_rtime_compress,
.decompress = &jffs2_rtime_decompress,
#ifdef JFFS2_RTIME_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int jffs2_rtime_init(void)
{
return jffs2_register_compressor(&jffs2_rtime_comp);
}
void jffs2_rtime_exit(void)
{
jffs2_unregister_compressor(&jffs2_rtime_comp);
}
| gpl-2.0 |
t2m-foxfone/kernel_msm | arch/arm/plat-versatile/sched-clock.c | 5932 | 1251 | /*
* linux/arch/arm/plat-versatile/sched-clock.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/sched_clock.h>
#include <plat/sched_clock.h>
static void __iomem *ctr;
static u32 notrace versatile_read_sched_clock(void)
{
if (ctr)
return readl(ctr);
return 0;
}
void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
{
ctr = reg;
setup_sched_clock(versatile_read_sched_clock, 32, rate);
}
| gpl-2.0 |
uzairabdulmajeed/uam-kernel | drivers/media/platform/s5p-tv/mixer_grp_layer.c | 7468 | 6674 | /*
* Samsung TV Mixer driver
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
*
* Tomasz Stanislawski, <t.stanislaws@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version
*/
#include "mixer.h"
#include <media/videobuf2-dma-contig.h>
/* FORMAT DEFINITIONS */
static const struct mxr_format mxr_fb_fmt_rgb565 = {
.name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.colorspace = V4L2_COLORSPACE_SRGB,
.num_planes = 1,
.plane = {
{ .width = 1, .height = 1, .size = 2 },
},
.num_subframes = 1,
.cookie = 4,
};
static const struct mxr_format mxr_fb_fmt_argb1555 = {
.name = "ARGB1555",
.num_planes = 1,
.fourcc = V4L2_PIX_FMT_RGB555,
.colorspace = V4L2_COLORSPACE_SRGB,
.plane = {
{ .width = 1, .height = 1, .size = 2 },
},
.num_subframes = 1,
.cookie = 5,
};
static const struct mxr_format mxr_fb_fmt_argb4444 = {
.name = "ARGB4444",
.num_planes = 1,
.fourcc = V4L2_PIX_FMT_RGB444,
.colorspace = V4L2_COLORSPACE_SRGB,
.plane = {
{ .width = 1, .height = 1, .size = 2 },
},
.num_subframes = 1,
.cookie = 6,
};
static const struct mxr_format mxr_fb_fmt_argb8888 = {
.name = "ARGB8888",
.fourcc = V4L2_PIX_FMT_BGR32,
.colorspace = V4L2_COLORSPACE_SRGB,
.num_planes = 1,
.plane = {
{ .width = 1, .height = 1, .size = 4 },
},
.num_subframes = 1,
.cookie = 7,
};
static const struct mxr_format *mxr_graph_format[] = {
&mxr_fb_fmt_rgb565,
&mxr_fb_fmt_argb1555,
&mxr_fb_fmt_argb4444,
&mxr_fb_fmt_argb8888,
};
/* AUXILIARY CALLBACKS */
static void mxr_graph_layer_release(struct mxr_layer *layer)
{
mxr_base_layer_unregister(layer);
mxr_base_layer_release(layer);
}
static void mxr_graph_buffer_set(struct mxr_layer *layer,
struct mxr_buffer *buf)
{
dma_addr_t addr = 0;
if (buf)
addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
}
static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
{
mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
}
static void mxr_graph_format_set(struct mxr_layer *layer)
{
mxr_reg_graph_format(layer->mdev, layer->idx,
layer->fmt, &layer->geo);
}
static inline unsigned int closest(unsigned int x, unsigned int a,
unsigned int b, unsigned long flags)
{
unsigned int mid = (a + b) / 2;
/* choosing closest value with constraints according to table:
* -------------+-----+-----+-----+-------+
* flags | 0 | LE | GE | LE|GE |
* -------------+-----+-----+-----+-------+
* x <= a | a | a | a | a |
* a < x <= mid | a | a | b | a |
* mid < x < b | b | a | b | b |
* b <= x | b | b | b | b |
* -------------+-----+-----+-----+-------+
*/
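/* e.g. closest(5, 2, 8, 0) == 2 since 5 <= mid == 5, while
* closest(5, 2, 8, V4L2_SEL_FLAG_GE) == 8. */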
/* remove all non-constraint flags */
flags &= V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE;
if (x <= a)
return a;
if (x >= b)
return b;
if (flags == V4L2_SEL_FLAG_LE)
return a;
if (flags == V4L2_SEL_FLAG_GE)
return b;
if (x <= mid)
return a;
return b;
}
static inline unsigned int do_center(unsigned int center,
unsigned int size, unsigned int upper, unsigned int flags)
{
unsigned int lower;
if (flags & MXR_NO_OFFSET)
return 0;
lower = center - min(center, size / 2);
return min(lower, upper - size);
}
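/* e.g. do_center(center = 50, size = 20, upper = 60, flags = 0) gives
* lower = 40 and returns min(40, 60 - 20) = 40: the area is centered
* where possible and clamped to fit within [0, upper]. */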
static void mxr_graph_fix_geometry(struct mxr_layer *layer,
enum mxr_geometry_stage stage, unsigned long flags)
{
struct mxr_geometry *geo = &layer->geo;
struct mxr_crop *src = &geo->src;
struct mxr_crop *dst = &geo->dst;
unsigned int x_center, y_center;
switch (stage) {
case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
flags = 0;
/* fall through */
case MXR_GEOMETRY_COMPOSE:
/* remember center of the area */
x_center = dst->x_offset + dst->width / 2;
y_center = dst->y_offset + dst->height / 2;
/* round up/down to a multiple of 2 depending on flags */
if (flags & V4L2_SEL_FLAG_LE) {
dst->width = round_down(dst->width, 2);
dst->height = round_down(dst->height, 2);
} else {
dst->width = round_up(dst->width, 2);
dst->height = round_up(dst->height, 2);
}
/* ensure that the compose rect is inside the display area */
dst->width = min(dst->width, dst->full_width);
dst->height = min(dst->height, dst->full_height);
/* ensure that compose is reachable using 2x scaling */
dst->width = min(dst->width, 2 * src->full_width);
dst->height = min(dst->height, 2 * src->full_height);
/* setup offsets */
dst->x_offset = do_center(x_center, dst->width,
dst->full_width, flags);
dst->y_offset = do_center(y_center, dst->height,
dst->full_height, flags);
flags = 0;
/* fall through */
case MXR_GEOMETRY_CROP:
/* remember center of the area */
x_center = src->x_offset + src->width / 2;
y_center = src->y_offset + src->height / 2;
/* ensure that cropping area lies inside the buffer */
if (src->full_width < dst->width)
src->width = dst->width / 2;
else
src->width = closest(src->width, dst->width / 2,
dst->width, flags);
if (src->width == dst->width)
geo->x_ratio = 0;
else
geo->x_ratio = 1;
if (src->full_height < dst->height)
src->height = dst->height / 2;
else
src->height = closest(src->height, dst->height / 2,
dst->height, flags);
if (src->height == dst->height)
geo->y_ratio = 0;
else
geo->y_ratio = 1;
/* setup offsets */
src->x_offset = do_center(x_center, src->width,
src->full_width, flags);
src->y_offset = do_center(y_center, src->height,
src->full_height, flags);
flags = 0;
/* fall through */
case MXR_GEOMETRY_SOURCE:
src->full_width = clamp_val(src->full_width,
src->width + src->x_offset, 32767);
src->full_height = clamp_val(src->full_height,
src->height + src->y_offset, 2047);
}
}
/* PUBLIC API */
struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
{
struct mxr_layer *layer;
int ret;
struct mxr_layer_ops ops = {
.release = mxr_graph_layer_release,
.buffer_set = mxr_graph_buffer_set,
.stream_set = mxr_graph_stream_set,
.format_set = mxr_graph_format_set,
.fix_geometry = mxr_graph_fix_geometry,
};
char name[32];
sprintf(name, "graph%d", idx);
layer = mxr_base_layer_create(mdev, idx, name, &ops);
if (layer == NULL) {
mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
goto fail;
}
layer->fmt_array = mxr_graph_format;
layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
ret = mxr_base_layer_register(layer);
if (ret)
goto fail_layer;
return layer;
fail_layer:
mxr_base_layer_release(layer);
fail:
return NULL;
}
| gpl-2.0 |
jtpoo3/kangaroo-m7 | fs/ext2/xattr_trusted.c | 8492 | 1419 | /*
* linux/fs/ext2/xattr_trusted.c
* Handler for trusted extended attributes.
*
* Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
#include "ext2.h"
#include "xattr.h"
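/*
 * trusted.* attributes are visible only to processes with CAP_SYS_ADMIN.
 * The list handler returns the space the prefixed name needs and copies
 * it only when the supplied buffer is large enough, so callers may probe
 * with a NULL list to size their buffer first.
 */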
static size_t
ext2_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
const char *name, size_t name_len, int type)
{
const int prefix_len = XATTR_TRUSTED_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
if (!capable(CAP_SYS_ADMIN))
return 0;
if (list && total_len <= list_size) {
memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
return total_len;
}
static int
ext2_xattr_trusted_get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name,
buffer, size);
}
static int
ext2_xattr_trusted_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
const struct xattr_handler ext2_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.list = ext2_xattr_trusted_list,
.get = ext2_xattr_trusted_get,
.set = ext2_xattr_trusted_set,
};
| gpl-2.0 |
michaelspeed/EntityMobile_hammerhead_kernel | drivers/infiniband/hw/cxgb3/cxio_hal.c | 11052 | 38223 | /*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <asm/delay.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"
static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
struct cxio_rdev *rdev;
list_for_each_entry(rdev, &rdev_list, entry)
if (!strcmp(rdev->dev_name, dev_name))
return rdev;
return NULL;
}
static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
struct cxio_rdev *rdev;
list_for_each_entry(rdev, &rdev_list, entry)
if (rdev->t3cdev_p == tdev)
return rdev;
return NULL;
}
int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit)
{
int ret;
struct t3_cqe *cqe;
u32 rptr;
struct rdma_cq_op setup;
setup.id = cq->cqid;
setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
setup.op = op;
ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
return ret;
/*
* If the rearm returned an index other than our current index,
* then there might be CQEs in flight (being DMA'd). We must wait
* here for them to complete or the consumer can miss a notification.
*/
if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
int i=0;
rptr = cq->rptr;
/*
* Keep the generation correct by bumping rptr until it
* matches the index returned by the rearm - 1.
*/
while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
rptr++;
/*
* Now rptr is the index for the (last) cqe that was
* in-flight at the time the HW rearmed the CQ. We
* spin until that CQE is valid.
*/
cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
udelay(1);
if (i++ > 1000000) {
printk(KERN_ERR "%s: stalled rnic\n",
rdev_p->dev_name);
return -EIO;
}
}
return 1;
}
return 0;
}
static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
struct rdma_cq_setup setup;
setup.id = cqid;
setup.base_addr = 0; /* NULL address */
setup.size = 0; /* disable the CQ */
setup.credits = 0;
setup.credit_thres = 0;
setup.ovfl_mode = 0;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
u64 sge_cmd;
struct t3_modify_qp_wr *wqe;
struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof(*wqe));
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
T3_SOPEOP);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
sge_cmd = qpid << 8 | 3;
wqe->sge_cmd = cpu_to_be64(sge_cmd);
skb->priority = CPL_PRIORITY_CONTROL;
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
size += 1; /* one extra page for storing cq-in-err state */
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
if (kernel) {
cq->sw_queue = kzalloc(size, GFP_KERNEL);
if (!cq->sw_queue)
return -ENOMEM;
}
cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
&(cq->dma_addr), GFP_KERNEL);
if (!cq->queue) {
kfree(cq->sw_queue);
return -ENOMEM;
}
dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, size);
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
setup.size = 1UL << cq->size_log2;
setup.credits = 65535;
setup.credit_thres = 1;
if (rdev_p->t3cdev_p->type != T3A)
setup.ovfl_mode = 0;
else
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
#ifdef notyet
int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
struct rdma_cq_setup setup;
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
setup.size = 1UL << cq->size_log2;
setup.credits = setup.size;
setup.credit_thres = setup.size; /* TBD: overflow recovery */
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
#endif
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
struct cxio_qpid_list *entry;
u32 qpid;
int i;
mutex_lock(&uctx->lock);
if (!list_empty(&uctx->qpids)) {
entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
entry);
list_del(&entry->entry);
qpid = entry->qpid;
kfree(entry);
} else {
qpid = cxio_hal_get_qpid(rdev_p->rscp);
if (!qpid)
goto out;
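/*
 * A doorbell page covers several consecutive qpids; stash the
 * unused siblings on this context's free list so later
 * allocations reuse them before taking a new page.
 */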
for (i = qpid+1; i & rdev_p->qpmask; i++) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
break;
entry->qpid = i;
list_add_tail(&entry->entry, &uctx->qpids);
}
}
out:
mutex_unlock(&uctx->lock);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}
static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
struct cxio_ucontext *uctx)
{
struct cxio_qpid_list *entry;
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
PDBG("%s qpid 0x%x\n", __func__, qpid);
entry->qpid = qpid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
mutex_unlock(&uctx->lock);
}
void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
struct list_head *pos, *nxt;
struct cxio_qpid_list *entry;
mutex_lock(&uctx->lock);
list_for_each_safe(pos, nxt, &uctx->qpids) {
entry = list_entry(pos, struct cxio_qpid_list, entry);
list_del_init(&entry->entry);
if (!(entry->qpid & rdev_p->qpmask))
cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
kfree(entry);
}
mutex_unlock(&uctx->lock);
}
void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
INIT_LIST_HEAD(&uctx->qpids);
mutex_init(&uctx->lock);
}
int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
struct t3_wq *wq, struct cxio_ucontext *uctx)
{
int depth = 1UL << wq->size_log2;
int rqsize = 1UL << wq->rq_size_log2;
wq->qpid = get_qpid(rdev_p, uctx);
if (!wq->qpid)
return -ENOMEM;
wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
if (!wq->rq)
goto err1;
wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
if (!wq->rq_addr)
goto err2;
wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
if (!wq->sq)
goto err3;
wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
depth * sizeof(union t3_wr),
&(wq->dma_addr), GFP_KERNEL);
if (!wq->queue)
goto err4;
memset(wq->queue, 0, depth * sizeof(union t3_wr));
dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
(wq->qpid << rdev_p->qpshift);
wq->rdev = rdev_p;
PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
return 0;
err4:
kfree(wq->sq);
err3:
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
kfree(wq->rq);
err1:
put_qpid(rdev_p, wq->qpid, uctx);
return -ENOMEM;
}
int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
int err;
err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
kfree(cq->sw_queue);
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
* sizeof(struct t3_cqe), cq->queue,
dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
}
int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
struct cxio_ucontext *uctx)
{
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (wq->size_log2))
* sizeof(union t3_wr), wq->queue,
dma_unmap_addr(wq, mapping));
kfree(wq->sq);
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
kfree(wq->rq);
put_qpid(rdev_p, wq->qpid, uctx);
return 0;
}
static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
struct t3_cqe cqe;
PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
wq, cq, cq->sw_rptr, cq->sw_wptr);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
V_CQE_OPCODE(T3_SEND) |
V_CQE_TYPE(0) |
V_CQE_SWCQE(1) |
V_CQE_QPID(wq->qpid) |
V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
cq->size_log2)));
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cq->sw_wptr++;
}
int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
u32 ptr;
int flushed = 0;
PDBG("%s wq %p cq %p\n", __func__, wq, cq);
/* flush RQ */
PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
wq->rq_rptr, wq->rq_wptr, count);
ptr = wq->rq_rptr + count;
while (ptr++ != wq->rq_wptr) {
insert_recv_cqe(wq, cq);
flushed++;
}
return flushed;
}
static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
struct t3_swsq *sqp)
{
struct t3_cqe cqe;
PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
wq, cq, cq->sw_rptr, cq->sw_wptr);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
V_CQE_OPCODE(sqp->opcode) |
V_CQE_TYPE(1) |
V_CQE_SWCQE(1) |
V_CQE_QPID(wq->qpid) |
V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
cq->size_log2)));
cqe.u.scqe.wrid_hi = sqp->sq_wptr;
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cq->sw_wptr++;
}
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
__u32 ptr;
int flushed = 0;
struct t3_swsq *sqp;
ptr = wq->sq_rptr + count;
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (ptr != wq->sq_wptr) {
sqp->signaled = 0;
insert_sq_cqe(wq, cq, sqp);
ptr++;
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
flushed++;
}
return flushed;
}
/*
* Move all CQEs from the HWCQ into the SWCQ.
*/
void cxio_flush_hw_cq(struct t3_cq *cq)
{
struct t3_cqe *cqe, *swcqe;
PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
cqe = cxio_next_hw_cqe(cq);
while (cqe) {
PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
__func__, cq->rptr, cq->sw_wptr);
swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
*swcqe = *cqe;
swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
cq->sw_wptr++;
cq->rptr++;
cqe = cxio_next_hw_cqe(cq);
}
}
static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
if (CQE_OPCODE(*cqe) == T3_TERMINATE)
return 0;
if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
return 0;
if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
return 0;
if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
return 0;
return 1;
}
void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
struct t3_cqe *cqe;
u32 ptr;
*count = 0;
ptr = cq->sw_rptr;
while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
if ((SQ_TYPE(*cqe) ||
((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
(CQE_QPID(*cqe) == wq->qpid))
(*count)++;
ptr++;
}
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
struct t3_cqe *cqe;
u32 ptr;
*count = 0;
PDBG("%s count zero %d\n", __func__, *count);
ptr = cq->sw_rptr;
while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
(CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
(*count)++;
ptr++;
}
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
struct rdma_cq_setup setup;
setup.id = 0;
setup.base_addr = 0; /* NULL address */
setup.size = 1; /* enable the CQ */
setup.credits = 0;
/* force SGE to redirect to RspQ and interrupt */
setup.credit_thres = 0;
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
int err;
u64 sge_cmd, ctx0, ctx1;
u64 base_addr;
struct t3_modify_qp_wr *wqe;
struct sk_buff *skb;
skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
err = cxio_hal_init_ctrl_cq(rdev_p);
if (err) {
PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
goto err;
}
rdev_p->ctrl_qp.workq = dma_alloc_coherent(
&(rdev_p->rnic_info.pdev->dev),
(1 << T3_CTRL_QP_SIZE_LOG2) *
sizeof(union t3_wr),
&(rdev_p->ctrl_qp.dma_addr),
GFP_KERNEL);
if (!rdev_p->ctrl_qp.workq) {
PDBG("%s dma_alloc_coherent failed\n", __func__);
err = -ENOMEM;
goto err;
}
dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
memset(rdev_p->ctrl_qp.workq, 0,
(1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));
mutex_init(&rdev_p->ctrl_qp.lock);
init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
/* update HW Ctrl QP context */
base_addr = rdev_p->ctrl_qp.dma_addr;
base_addr >>= 12;
ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
V_EC_BASE_LO((u32) base_addr & 0xffff));
ctx0 <<= 32;
ctx0 |= V_EC_CREDITS(FW_WR_NUM);
base_addr >>= 16;
ctx1 = (u32) base_addr;
base_addr >>= 32;
ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
V_EC_TYPE(0) | V_EC_GEN(1) |
V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof(*wqe));
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
T3_CTL_QP_TID, 7, T3_SOPEOP);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
wqe->sge_cmd = cpu_to_be64(sge_cmd);
wqe->ctx1 = cpu_to_be64(ctx1);
wqe->ctx0 = cpu_to_be64(ctx0);
PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
(unsigned long long) rdev_p->ctrl_qp.dma_addr,
rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
skb->priority = CPL_PRIORITY_CONTROL;
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
err:
kfree_skb(skb);
return err;
}
static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << T3_CTRL_QP_SIZE_LOG2)
* sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}
/* write len bytes of data into addr (32B aligned address)
* If data is NULL, clear len bytes of memory to zero.
* caller acquires the ctrl_qp lock before the call
*/
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
u32 len, void *data)
{
u32 i, nr_wqe, copy_len;
u8 *copy_data;
u8 wr_len, utx_len; /* length in 8 byte flit */
enum t3_wr_flags flag;
__be64 *wqe;
u64 utx_cmd;
addr &= 0x7FFFFFF;
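/* each WQE carries at most 96B (3 x 32B UTX units): DIV_ROUND_UP(len, 96) */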
nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
__func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
nr_wqe, data, addr);
utx_len = 3; /* in 32B unit */
for (i = 0; i < nr_wqe; i++) {
if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2)) {
PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
"wait for more space i %d\n", __func__,
rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
!Q_FULL(rdev_p->ctrl_qp.rptr,
rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2))) {
PDBG("%s ctrl_qp workq interrupted\n",
__func__);
return -ERESTARTSYS;
}
PDBG("%s ctrl_qp wakeup, continue posting work request "
"i %d\n", __func__, i);
}
wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
(1 << T3_CTRL_QP_SIZE_LOG2)));
flag = 0;
if (i == (nr_wqe - 1)) {
/* last WQE */
flag = T3_COMPLETION_FLAG;
if (len % 32)
utx_len = len / 32 + 1;
else
utx_len = len / 32;
}
/*
* Force a CQE to return the credit to the workq in case
* we posted more than half the max QP size of WRs
*/
if ((i != 0) &&
(i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
flag = T3_COMPLETION_FLAG;
PDBG("%s force completion at i %d\n", __func__, i);
}
/* build the utx mem command */
wqe += (sizeof(struct t3_bypass_wr) >> 3);
utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
utx_cmd <<= 32;
utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
*wqe = cpu_to_be64(utx_cmd);
wqe++;
copy_data = (u8 *) data + i * 96;
copy_len = len > 96 ? 96 : len;
/* clear memory content if data is NULL */
if (data)
memcpy(wqe, copy_data, copy_len);
else
memset(wqe, 0, copy_len);
if (copy_len % 32)
memset(((u8 *) wqe) + copy_len, 0,
32 - (copy_len % 32));
wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
(utx_len << 2);
wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
(1 << T3_CTRL_QP_SIZE_LOG2)));
/* wptr in the WRID[31:0] */
((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
/*
* This must be the last write with a memory barrier
* for the genbit
*/
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
Q_GENBIT(rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
wr_len, T3_SOPEOP);
if (flag == T3_COMPLETION_FLAG)
ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
len -= 96;
rdev_p->ctrl_qp.wptr++;
}
return 0;
}
/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
* OUT: stag index
* TBD: shared memory region support
*/
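/*
 * A stag is laid out as (stag_idx << 8) | 8-bit key; passing in
 * T3_STAG_UNSET requests allocation of a fresh stag index.
 */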
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum tpt_mem_type type, enum tpt_mem_perm perm,
u32 zbva, u64 to, u32 len, u8 page_size,
u32 pbl_size, u32 pbl_addr)
{
int err;
struct tpt_entry tpt;
u32 stag_idx;
u32 wptr;
if (cxio_fatal_error(rdev_p))
return -EIO;
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
if (!reset_tpt_entry && (*stag == T3_STAG_UNSET)) {
stag_idx = cxio_hal_get_stag(rdev_p->rscp);
if (!stag_idx)
return -ENOMEM;
*stag = (stag_idx << 8) | ((*stag) & 0xFF);
}
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
__func__, stag_state, type, pdid, stag_idx);
mutex_lock(&rdev_p->ctrl_qp.lock);
/* write TPT entry */
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
else {
tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
V_TPT_STAG_STATE(stag_state) |
V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
BUG_ON(page_size >= 28);
tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
V_TPT_PAGE_SIZE(page_size));
tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
tpt.len = cpu_to_be32(len);
tpt.va_hi = cpu_to_be32((u32) (to >> 32));
tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
tpt.rsvd_bind_cnt_or_pstag = 0;
tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
}
err = cxio_hal_ctrl_qp_write_mem(rdev_p,
stag_idx +
(rdev_p->rnic_info.tpt_base >> 5),
sizeof(tpt), &tpt);
/* release the stag index to free pool */
if (reset_tpt_entry)
cxio_hal_put_stag(rdev_p->rscp, stag_idx);
wptr = rdev_p->ctrl_qp.wptr;
mutex_unlock(&rdev_p->ctrl_qp.lock);
if (!err)
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
SEQ32_GE(rdev_p->ctrl_qp.rptr,
wptr)))
return -ERESTARTSYS;
return err;
}
int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
u32 pbl_addr, u32 pbl_size)
{
u32 wptr;
int err;
PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
__func__, pbl_addr, rdev_p->rnic_info.pbl_base,
pbl_size);
mutex_lock(&rdev_p->ctrl_qp.lock);
err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
pbl);
wptr = rdev_p->ctrl_qp.wptr;
mutex_unlock(&rdev_p->ctrl_qp.lock);
if (err)
return err;
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
SEQ32_GE(rdev_p->ctrl_qp.rptr,
wptr)))
return -ERESTARTSYS;
return 0;
}
int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
u8 page_size, u32 pbl_size, u32 pbl_addr)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
zbva, to, len, page_size, pbl_size, pbl_addr);
}
int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
u8 page_size, u32 pbl_size, u32 pbl_addr)
{
return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
zbva, to, len, page_size, pbl_size, pbl_addr);
}
int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
u32 pbl_addr)
{
return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
pbl_size, pbl_addr);
}
int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
0, 0);
}
int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
0, 0);
}
int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
}
int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
struct t3_rdma_init_wr *wqe;
struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
PDBG("%s rdev_p %p\n", __func__, rdev_p);
wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
wqe->wrid.id1 = 0;
wqe->qpid = cpu_to_be32(attr->qpid);
wqe->pdid = cpu_to_be32(attr->pdid);
wqe->scqid = cpu_to_be32(attr->scqid);
wqe->rcqid = cpu_to_be32(attr->rcqid);
wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
wqe->rq_size = cpu_to_be32(attr->rq_size);
wqe->mpaattrs = attr->mpaattrs;
wqe->qpcaps = attr->qpcaps;
wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
wqe->rqe_count = cpu_to_be16(attr->rqe_count);
wqe->flags_rtr_type = cpu_to_be16(attr->flags |
V_RTR_TYPE(attr->rtr_type) |
V_CHAN(attr->chan));
wqe->ord = cpu_to_be32(attr->ord);
wqe->ird = cpu_to_be32(attr->ird);
wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
wqe->irs = cpu_to_be32(attr->irs);
skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
cxio_ev_cb = ev_cb;
}
void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
cxio_ev_cb = NULL;
}
static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
{
static int cnt;
struct cxio_rdev *rdev_p = NULL;
struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
" se %0x notify %0x cqbranch %0x creditth %0x\n",
cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
RSPQ_CREDIT_THRESH(rsp_msg));
PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
"len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
if (!rdev_p) {
PDBG("%s called by t3cdev %p with null ulp\n", __func__,
t3cdev_p);
return 0;
}
if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
dev_kfree_skb_irq(skb);
} else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
dev_kfree_skb_irq(skb);
else if (cxio_ev_cb)
(*cxio_ev_cb) (rdev_p, skb);
else
dev_kfree_skb_irq(skb);
cnt++;
return 0;
}
/* Caller takes care of locking if needed */
int cxio_rdev_open(struct cxio_rdev *rdev_p)
{
struct net_device *netdev_p = NULL;
int err = 0;
if (strlen(rdev_p->dev_name)) {
if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
return -EBUSY;
}
netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
if (!netdev_p) {
return -EINVAL;
}
dev_put(netdev_p);
} else if (rdev_p->t3cdev_p) {
if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
return -EBUSY;
}
netdev_p = rdev_p->t3cdev_p->lldev;
strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
T3_MAX_DEV_NAME_LEN);
} else {
PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
return -EINVAL;
}
list_add_tail(&rdev_p->entry, &rdev_list);
PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
if (!rdev_p->t3cdev_p)
rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
rdev_p->t3cdev_p->ulp = (void *) rdev_p;
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
&(rdev_p->fw_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
printk(KERN_ERR MOD "fatal firmware version mismatch: "
"need version %u but adapter has version %u\n",
CXIO_FW_MAJ,
G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
err = -EINVAL;
goto err1;
}
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
&(rdev_p->rnic_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
&(rdev_p->port_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
/*
* qpshift is the number of bits to shift the qpid left in order
* to get the correct address of the doorbell for that qp.
*/
cxio_init_ucontext(rdev_p, &rdev_p->uctx);
rdev_p->qpshift = PAGE_SHIFT -
ilog2(65536 >>
ilog2(rdev_p->rnic_info.udbell_len >>
PAGE_SHIFT));
rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
"pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
__func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
rdev_p->rnic_info.pbl_base,
rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
rdev_p->rnic_info.rqt_top);
PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
"qpnr %d qpmask 0x%x\n",
rdev_p->rnic_info.udbell_len,
rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
err = cxio_hal_init_ctrl_qp(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
__func__, err);
goto err1;
}
err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
T3_MAX_NUM_PD);
if (err) {
printk(KERN_ERR "%s error %d initializing hal resources.\n",
__func__, err);
goto err2;
}
err = cxio_hal_pblpool_create(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
__func__, err);
goto err3;
}
err = cxio_hal_rqtpool_create(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
__func__, err);
goto err4;
}
return 0;
err4:
cxio_hal_pblpool_destroy(rdev_p);
err3:
cxio_hal_destroy_resource(rdev_p->rscp);
err2:
cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
rdev_p->t3cdev_p->ulp = NULL;
list_del(&rdev_p->entry);
return err;
}
void cxio_rdev_close(struct cxio_rdev *rdev_p)
{
if (rdev_p) {
cxio_hal_pblpool_destroy(rdev_p);
cxio_hal_rqtpool_destroy(rdev_p);
list_del(&rdev_p->entry);
cxio_hal_destroy_ctrl_qp(rdev_p);
cxio_hal_destroy_resource(rdev_p->rscp);
rdev_p->t3cdev_p->ulp = NULL;
}
}
int __init cxio_hal_init(void)
{
if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
return -ENOMEM;
t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
return 0;
}
void __exit cxio_hal_exit(void)
{
struct cxio_rdev *rdev, *tmp;
t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
cxio_rdev_close(rdev);
cxio_hal_destroy_rhdl_resource();
}
static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
{
struct t3_swsq *sqp;
__u32 ptr = wq->sq_rptr;
int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (count--)
if (!sqp->signaled) {
ptr++;
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
} else if (sqp->complete) {
/*
* Insert this completed cqe into the swcq.
*/
PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
__func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
= sqp->cqe;
cq->sw_wptr++;
sqp->signaled = 0;
break;
} else
break;
}
static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
struct t3_cqe *read_cqe)
{
read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
read_cqe->len = wq->oldest_read->read_len;
read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
V_CQE_OPCODE(T3_READ_REQ) |
V_CQE_TYPE(1));
}
/*
* Advance wq->oldest_read to the next read wr in the SWSQ, or set it to NULL.
*/
static void advance_oldest_read(struct t3_wq *wq)
{
u32 rptr = wq->oldest_read - wq->sq + 1;
u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
if (wq->oldest_read->opcode == T3_READ_REQ)
return;
rptr++;
}
wq->oldest_read = NULL;
}
/*
* cxio_poll_cq
*
* Caller must:
* check the validity of the first CQE,
* supply the wq associated with the qpid.
*
* credit: cq credit to return to sge.
* cqe_flushed: 1 iff the CQE is flushed.
* cqe: copy of the polled CQE.
*
* return value:
* 0 CQE returned,
* -1 CQE skipped, try again.
*/
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
int ret = 0;
struct t3_cqe *hw_cqe, read_cqe;
*cqe_flushed = 0;
*credit = 0;
hw_cqe = cxio_next_cqe(cq);
PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
" opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
__func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
CQE_WRID_LOW(*hw_cqe));
/*
* skip cqe's not affiliated with a QP.
*/
if (wq == NULL) {
ret = -1;
goto skip_cqe;
}
/*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
* 3) read_len not reflected from the wr.
* 4) cq_type is RQ_TYPE not SQ_TYPE.
*/
if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
/*
* If this is an unsolicited read response, then the read
* was generated by the kernel driver as part of peer-2-peer
* connection setup. So ignore the completion.
*/
if (!wq->oldest_read) {
if (CQE_STATUS(*hw_cqe))
wq->error = 1;
ret = -1;
goto skip_cqe;
}
/*
* Don't write to the HWCQ, so create a new read req CQE
* in local memory.
*/
create_read_req_cqe(wq, hw_cqe, &read_cqe);
hw_cqe = &read_cqe;
advance_oldest_read(wq);
}
/*
* T3A: Discard TERMINATE CQEs.
*/
if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
ret = -1;
wq->error = 1;
goto skip_cqe;
}
if (CQE_STATUS(*hw_cqe) || wq->error) {
*cqe_flushed = wq->error;
wq->error = 1;
/*
* T3A inserts errors into the CQE. We cannot return
* these as work completions.
*/
/* incoming write failures */
if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
&& RQ_TYPE(*hw_cqe)) {
ret = -1;
goto skip_cqe;
}
/* incoming read request failures */
if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
ret = -1;
goto skip_cqe;
}
/* incoming SEND with no receive posted failures */
if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
ret = -1;
goto skip_cqe;
}
BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
goto proc_cqe;
}
/*
* RECV completion.
*/
if (RQ_TYPE(*hw_cqe)) {
/*
* HW only validates 4 bits of MSN. So we must validate that
* the MSN in the SEND is the next expected MSN. If it's not,
* then we complete this with TPT_ERR_MSN and mark the wq in
* error.
*/
if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
wq->error = 1;
ret = -1;
goto skip_cqe;
}
if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
wq->error = 1;
hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
goto proc_cqe;
}
goto proc_cqe;
}
/*
* If we get here its a send completion.
*
* Handle out of order completion. These get stuffed
* in the SW SQ. Then the SW SQ is walked to move any
* now in-order completions into the SW CQ. This handles
* 2 cases:
* 1) reaping unsignaled WRs when the first subsequent
* signaled WR is completed.
* 2) out of order read completions.
*/
if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
struct t3_swsq *sqp;
PDBG("%s out of order completion going in swsq at idx %ld\n",
__func__,
Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
sqp = wq->sq +
Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
sqp->cqe = *hw_cqe;
sqp->complete = 1;
ret = -1;
goto flush_wq;
}
proc_cqe:
*cqe = *hw_cqe;
/*
* Reap the associated WR(s) that are freed up with this
* completion.
*/
if (SQ_TYPE(*hw_cqe)) {
wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
PDBG("%s completing sq idx %ld\n", __func__,
Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
*cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
wq->sq_rptr++;
} else {
PDBG("%s completing rq idx %ld\n", __func__,
Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
*cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
cxio_hal_pblpool_free(wq->rdev,
wq->rq[Q_PTR2IDX(wq->rq_rptr,
wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
wq->rq_rptr++;
}
flush_wq:
/*
* Flush any completed cqes that are now in-order.
*/
flush_completed_wrs(wq, cq);
skip_cqe:
if (SW_CQE(*hw_cqe)) {
PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
__func__, cq, cq->cqid, cq->sw_rptr);
++cq->sw_rptr;
} else {
PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
__func__, cq, cq->cqid, cq->rptr);
++cq->rptr;
/*
* T3A: compute credits.
*/
if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
|| ((cq->rptr - cq->wptr) >= 128)) {
*credit = cq->rptr - cq->wptr;
cq->wptr = cq->rptr;
}
}
return ret;
}
| gpl-2.0 |
lycanthia/Find7-Kernel-Source-4.3 | arch/m32r/lib/delay.c | 13612 | 2985 | /*
* linux/arch/m32r/lib/delay.c
*
* Copyright (c) 2002 Hitoshi Yamamoto, Hirokazu Takata
* Copyright (c) 2004 Hirokazu Takata
*/
#include <linux/param.h>
#include <linux/module.h>
#ifdef CONFIG_SMP
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/smp.h>
#endif /* CONFIG_SMP */
#include <asm/processor.h>
void __delay(unsigned long loops)
{
#ifdef CONFIG_ISA_DUAL_ISSUE
__asm__ __volatile__ (
"beqz %0, 2f \n\t"
"addi %0, #-1 \n\t"
" .fillinsn \n\t"
"1: \n\t"
"cmpz %0 || addi %0, #-1 \n\t"
"bc 2f || cmpz %0 \n\t"
"bc 2f || addi %0, #-1 \n\t"
"cmpz %0 || addi %0, #-1 \n\t"
"bc 2f || cmpz %0 \n\t"
"bnc 1b || addi %0, #-1 \n\t"
" .fillinsn \n\t"
"2: \n\t"
: "+r" (loops)
: "r" (0)
: "cbit"
);
#else
__asm__ __volatile__ (
"beqz %0, 2f \n\t"
" .fillinsn \n\t"
"1: \n\t"
"addi %0, #-1 \n\t"
"blez %0, 2f \n\t"
"addi %0, #-1 \n\t"
"blez %0, 2f \n\t"
"addi %0, #-1 \n\t"
"blez %0, 2f \n\t"
"addi %0, #-1 \n\t"
"bgtz %0, 1b \n\t"
" .fillinsn \n\t"
"2: \n\t"
: "+r" (loops)
: "r" (0)
);
#endif
}
void __const_udelay(unsigned long xloops)
{
#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
/*
* loops [1] = (xloops >> 32) [sec] * loops_per_jiffy [1/jiffy]
* * HZ [jiffy/sec]
* = (xloops >> 32) [sec] * (loops_per_jiffy * HZ) [1/sec]
* = (((xloops * loops_per_jiffy) >> 32) * HZ) [1]
*
* NOTE:
* - '[]' depicts variable's dimension in the above equation.
* - "rac" instruction rounds the accumulator in word size.
*/
__asm__ __volatile__ (
"srli %0, #1 \n\t"
"mulwhi %0, %1 ; a0 \n\t"
"mulwu1 %0, %1 ; a1 \n\t"
"sadd ; a0 += (a1 >> 16) \n\t"
"rac a0, a0, #1 \n\t"
"mvfacmi %0, a0 \n\t"
: "+r" (xloops)
: "r" (current_cpu_data.loops_per_jiffy)
: "a0", "a1"
);
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
/*
* u64 ull;
* ull = (u64)xloops * (u64)current_cpu_data.loops_per_jiffy;
* xloops = (ull >> 32);
*/
__asm__ __volatile__ (
"and3 r4, %0, #0xffff \n\t"
"and3 r5, %1, #0xffff \n\t"
"mul r4, r5 \n\t"
"srl3 r6, %0, #16 \n\t"
"srli r4, #16 \n\t"
"mul r5, r6 \n\t"
"add r4, r5 \n\t"
"and3 r5, %0, #0xffff \n\t"
"srl3 r6, %1, #16 \n\t"
"mul r5, r6 \n\t"
"add r4, r5 \n\t"
"srl3 r5, %0, #16 \n\t"
"srli r4, #16 \n\t"
"mul r5, r6 \n\t"
"add r4, r5 \n\t"
"mv %0, r4 \n\t"
: "+r" (xloops)
: "r" (current_cpu_data.loops_per_jiffy)
: "r4", "r5", "r6"
);
#else
#error unknown isa configuration
#endif
__delay(xloops * HZ);
}
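/*
 * The scaling constants below are 2**32 / 10**6 and 2**32 / 10**9,
 * rounded up (0x000010c7 = 4295, 0x00005 = 5). Multiplying by them lets
 * the implicit ">> 32" in __const_udelay() stand in for the division,
 * so no runtime divide is needed.
 */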
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
| gpl-2.0 |
cm-maya/android_kernel_hp_maya | arch/m68k/platform/5407/gpio.c | 14892 | 1361 | /*
* Coldfire generic GPIO support
*
* (C) Copyright 2009, Steven King <sfking@fdwdc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfgpio.h>
static struct mcf_gpio_chip mcf_gpio_chips[] = {
{
.gpio_chip = {
.label = "PP",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value,
.ngpio = 16,
},
.pddr = (void __iomem *) MCFSIM_PADDR,
.podr = (void __iomem *) MCFSIM_PADAT,
.ppdr = (void __iomem *) MCFSIM_PADAT,
},
};
static int __init mcf_gpio_init(void)
{
unsigned i;
for (i = 0; i < ARRAY_SIZE(mcf_gpio_chips); i++)
(void)gpiochip_add(&mcf_gpio_chips[i].gpio_chip);
return 0;
}
core_initcall(mcf_gpio_init);
| gpl-2.0 |
alive4ever/source | target/linux/ar71xx/files/arch/mips/ath79/mach-gl-ar750.c | 45 | 3800 | /*
* GL.iNet GL-AR750 board support
*
* Copyright (C) 2018 Piotr Dymacz <pepe2k@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "dev-ap9x-pci.h"
#include "dev-eth.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-m25p80.h"
#include "dev-usb.h"
#include "dev-wmac.h"
#include "machtypes.h"
#define GL_AR750_GPIO_LED_POWER 12
#define GL_AR750_GPIO_LED_WLAN2G 14
#define GL_AR750_GPIO_LED_WLAN5G 13
#define GL_AR750_GPIO_BTN_RESET 3
#define GL_AR750_GPIO_BTN_SW1 0
#define GL_AR750_GPIO_I2C_SCL 16
#define GL_AR750_GPIO_I2C_SDA 17
#define GL_AR750_GPIO_USB_POWER 2
#define GL_AR750_KEYS_POLL_INTERVAL 20
#define GL_AR750_KEYS_DEBOUNCE_INTERVAL (3 * GL_AR750_KEYS_POLL_INTERVAL)
#define GL_AR750_MAC0_OFFSET 0
#define GL_AR750_WMAC2G_CALDATA_OFFSET 0x1000
#define GL_AR750_WMAC5G_CALDATA_OFFSET 0x5000
static struct gpio_led gl_ar750_leds_gpio[] __initdata = {
{
.name = "gl-ar750:white:power",
.gpio = GL_AR750_GPIO_LED_POWER,
.default_state = LEDS_GPIO_DEFSTATE_KEEP,
.active_low = 1,
}, {
.name = "gl-ar750:white:wlan2g",
.gpio = GL_AR750_GPIO_LED_WLAN2G,
.active_low = 1,
}, {
.name = "gl-ar750:white:wlan5g",
.gpio = GL_AR750_GPIO_LED_WLAN5G,
.active_low = 1,
},
};
static struct gpio_keys_button gl_ar750_gpio_keys[] __initdata = {
{
.desc = "reset",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = GL_AR750_KEYS_DEBOUNCE_INTERVAL,
.gpio = GL_AR750_GPIO_BTN_RESET,
.active_low = 1,
}, {
.desc = "sw1",
.type = EV_KEY,
.code = BTN_0,
.debounce_interval = GL_AR750_KEYS_DEBOUNCE_INTERVAL,
.gpio = GL_AR750_GPIO_BTN_SW1,
.active_low = 1,
},
};
static struct i2c_gpio_platform_data gl_ar750_i2c_gpio_data = {
.sda_pin = GL_AR750_GPIO_I2C_SDA,
.scl_pin = GL_AR750_GPIO_I2C_SCL,
};
static struct platform_device gl_ar750_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
.platform_data = &gl_ar750_i2c_gpio_data,
},
};
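/*
 * 0x1f050000 is the flash-mapped ART (radio calibration) partition;
 * the MAC address and per-band WLAN calibration data are read from the
 * offsets defined above.
 */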
static void __init gl_ar750_setup(void)
{
u8 *art = (u8 *) KSEG1ADDR(0x1f050000);
ath79_register_m25p80(NULL);
ath79_setup_ar933x_phy4_switch(false, false);
ath79_register_mdio(0, 0x0);
ath79_switch_data.phy4_mii_en = 1;
ath79_switch_data.phy_poll_mask = 0xfc;
/* WAN */
ath79_eth0_data.duplex = DUPLEX_FULL;
ath79_eth0_data.phy_if_mode = PHY_INTERFACE_MODE_MII;
ath79_eth0_data.phy_mask = BIT(4);
ath79_eth0_data.speed = SPEED_100;
ath79_init_mac(ath79_eth0_data.mac_addr, art + GL_AR750_MAC0_OFFSET, 0);
ath79_register_eth(0);
/* LAN */
ath79_eth1_data.duplex = DUPLEX_FULL;
ath79_eth1_data.phy_if_mode = PHY_INTERFACE_MODE_GMII;
ath79_init_mac(ath79_eth1_data.mac_addr, art + GL_AR750_MAC0_OFFSET, 1);
ath79_register_eth(1);
/* Disable JTAG (enables GPIO0-3) */
ath79_gpio_function_enable(AR934X_GPIO_FUNC_JTAG_DISABLE);
ath79_register_leds_gpio(-1, ARRAY_SIZE(gl_ar750_leds_gpio),
gl_ar750_leds_gpio);
ath79_register_gpio_keys_polled(-1, GL_AR750_KEYS_POLL_INTERVAL,
ARRAY_SIZE(gl_ar750_gpio_keys),
gl_ar750_gpio_keys);
gpio_request_one(GL_AR750_GPIO_USB_POWER,
GPIOF_OUT_INIT_HIGH | GPIOF_EXPORT_DIR_FIXED,
"USB power");
platform_device_register(&gl_ar750_i2c_gpio);
ath79_register_usb();
ath79_register_wmac(art + GL_AR750_WMAC2G_CALDATA_OFFSET, NULL);
ap91_pci_init(art + GL_AR750_WMAC5G_CALDATA_OFFSET, NULL);
}
MIPS_MACHINE(ATH79_MACH_GL_AR750, "GL-AR750", "GL.iNet GL-AR750",
gl_ar750_setup);
| gpl-2.0 |
virtuoso/linux-perf | drivers/clk/shmobile/r8a7795-cpg-mssr.c | 45 | 12276 | /*
* r8a7795 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2015 Glider bvba
*
* Based on clk-rcar-gen3.c
*
* Copyright (C) 2015 Renesas Electronics Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*/
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
#include "renesas-cpg-mssr.h"
enum clk_ids {
/* Core Clock Outputs exported to DT */
LAST_DT_CORE_CLK = R8A7795_CLK_OSC,
/* External Input Clocks */
CLK_EXTAL,
CLK_EXTALR,
/* Internal Core Clocks */
CLK_MAIN,
CLK_PLL0,
CLK_PLL1,
CLK_PLL2,
CLK_PLL3,
CLK_PLL4,
CLK_PLL1_DIV2,
CLK_PLL1_DIV4,
CLK_S0,
CLK_S1,
CLK_S2,
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
/* Module Clocks */
MOD_CLK_BASE
};
enum r8a7795_clk_types {
CLK_TYPE_GEN3_MAIN = CLK_TYPE_CUSTOM,
CLK_TYPE_GEN3_PLL0,
CLK_TYPE_GEN3_PLL1,
CLK_TYPE_GEN3_PLL2,
CLK_TYPE_GEN3_PLL3,
CLK_TYPE_GEN3_PLL4,
};
static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN),
DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7795_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED("zx", R8A7795_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED("s0d1", R8A7795_CLK_S0D1, CLK_S0, 1, 1),
DEF_FIXED("s0d4", R8A7795_CLK_S0D4, CLK_S0, 4, 1),
DEF_FIXED("s1d1", R8A7795_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A7795_CLK_S1D2, CLK_S1, 2, 1),
DEF_FIXED("s1d4", R8A7795_CLK_S1D4, CLK_S1, 4, 1),
DEF_FIXED("s2d1", R8A7795_CLK_S2D1, CLK_S2, 1, 1),
DEF_FIXED("s2d2", R8A7795_CLK_S2D2, CLK_S2, 2, 1),
DEF_FIXED("s2d4", R8A7795_CLK_S2D4, CLK_S2, 4, 1),
DEF_FIXED("s3d1", R8A7795_CLK_S3D1, CLK_S3, 1, 1),
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV2, 0x250),
};
static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("scif5", 202, R8A7795_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7795_CLK_S3D4),
DEF_MOD("scif3", 204, R8A7795_CLK_S3D4),
DEF_MOD("scif1", 206, R8A7795_CLK_S3D4),
DEF_MOD("scif0", 207, R8A7795_CLK_S3D4),
DEF_MOD("msiof3", 208, R8A7795_CLK_MSO),
DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S3D1),
DEF_MOD("scif2", 310, R8A7795_CLK_S3D4),
DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4),
DEF_MOD("audmac1", 501, R8A7795_CLK_S3D4),
DEF_MOD("hscif4", 516, R8A7795_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A7795_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1),
DEF_MOD("vspd2", 621, R8A7795_CLK_S2D1),
DEF_MOD("vspd1", 622, R8A7795_CLK_S2D1),
DEF_MOD("vspd0", 623, R8A7795_CLK_S2D1),
DEF_MOD("vspbc", 624, R8A7795_CLK_S2D1),
DEF_MOD("vspbd", 626, R8A7795_CLK_S2D1),
DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1),
DEF_MOD("vspi1", 630, R8A7795_CLK_S2D1),
DEF_MOD("vspi0", 631, R8A7795_CLK_S2D1),
DEF_MOD("ehci2", 701, R8A7795_CLK_S3D4),
DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4),
DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4),
DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4),
DEF_MOD("du3", 721, R8A7795_CLK_S2D1),
DEF_MOD("du2", 722, R8A7795_CLK_S2D1),
DEF_MOD("du1", 723, R8A7795_CLK_S2D1),
DEF_MOD("du0", 724, R8A7795_CLK_S2D1),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
DEF_MOD("etheravb", 812, R8A7795_CLK_S3D2),
DEF_MOD("sata0", 815, R8A7795_CLK_S3D2),
DEF_MOD("gpio7", 905, R8A7795_CLK_CP),
DEF_MOD("gpio6", 906, R8A7795_CLK_CP),
DEF_MOD("gpio5", 907, R8A7795_CLK_CP),
DEF_MOD("gpio4", 908, R8A7795_CLK_CP),
DEF_MOD("gpio3", 909, R8A7795_CLK_CP),
DEF_MOD("gpio2", 910, R8A7795_CLK_CP),
DEF_MOD("gpio1", 911, R8A7795_CLK_CP),
DEF_MOD("gpio0", 912, R8A7795_CLK_CP),
DEF_MOD("i2c6", 918, R8A7795_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A7795_CLK_S3D2),
DEF_MOD("i2c4", 927, R8A7795_CLK_S3D2),
DEF_MOD("i2c3", 928, R8A7795_CLK_S3D2),
DEF_MOD("i2c2", 929, R8A7795_CLK_S3D2),
DEF_MOD("i2c1", 930, R8A7795_CLK_S3D2),
DEF_MOD("i2c0", 931, R8A7795_CLK_S3D2),
DEF_MOD("ssi-all", 1005, R8A7795_CLK_S3D4),
DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
DEF_MOD("scu-all", 1017, R8A7795_CLK_S3D4),
DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
};
static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
MOD_CLK_ID(408), /* INTC-AP (GIC) */
};
#define CPG_PLL0CR 0x00d8
#define CPG_PLL2CR 0x002c
#define CPG_PLL4CR 0x01f4
/*
* CPG Clock Data
*/
/*
* MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4
* 14 13 19 17 (MHz)
*-------------------------------------------------------------------
* 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144
* 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144
* 0 0 1 0 Prohibited setting
* 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144
* 0 1 0 0 20 x 1 x150 x160 x120 x160 x120
* 0 1 0 1 20 x 1 x150 x160 x120 x106 x120
* 0 1 1 0 Prohibited setting
* 0 1 1 1 20 x 1 x150 x160 x120 x160 x120
* 1 0 0 0 25 x 1 x120 x128 x96 x128 x96
* 1 0 0 1 25 x 1 x120 x128 x96 x84 x96
* 1 0 1 0 Prohibited setting
* 1 0 1 1 25 x 1 x120 x128 x96 x128 x96
* 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144
* 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144
* 1 1 1 0 Prohibited setting
* 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144
*/
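/*
 * The table above is indexed by a 4-bit value built from the mode pins:
 * MD14 -> bit 3, MD13 -> bit 2, MD19 -> bit 1, MD17 -> bit 0.
 */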
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
(((md) & BIT(13)) >> 11) | \
(((md) & BIT(19)) >> 18) | \
(((md) & BIT(17)) >> 17))
struct cpg_pll_config {
unsigned int extal_div;
unsigned int pll1_mult;
unsigned int pll3_mult;
};
static const struct cpg_pll_config cpg_pll_configs[16] __initconst = {
/* EXTAL div PLL1 mult PLL3 mult */
{ 1, 192, 192, },
{ 1, 192, 128, },
{ 0, /* Prohibited setting */ },
{ 1, 192, 192, },
{ 1, 160, 160, },
{ 1, 160, 106, },
{ 0, /* Prohibited setting */ },
{ 1, 160, 160, },
{ 1, 128, 128, },
{ 1, 128, 84, },
{ 0, /* Prohibited setting */ },
{ 1, 128, 128, },
{ 2, 192, 192, },
{ 2, 192, 128, },
{ 0, /* Prohibited setting */ },
{ 2, 192, 192, },
};
static const struct cpg_pll_config *cpg_pll_config __initdata;
static
struct clk * __init r8a7795_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
struct clk **clks,
void __iomem *base)
{
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
u32 value;
parent = clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
switch (core->type) {
case CLK_TYPE_GEN3_MAIN:
div = cpg_pll_config->extal_div;
break;
case CLK_TYPE_GEN3_PLL0:
/*
* PLL0 is a configurable multiplier clock. Register it as a
* fixed factor clock for now as there's no generic multiplier
* clock implementation and we currently have no need to change
* the multiplier value.
*/
value = readl(base + CPG_PLL0CR);
mult = (((value >> 24) & 0x7f) + 1) * 2;
break;
case CLK_TYPE_GEN3_PLL1:
mult = cpg_pll_config->pll1_mult;
break;
case CLK_TYPE_GEN3_PLL2:
/*
* PLL2 is a configurable multiplier clock. Register it as a
* fixed factor clock for now as there's no generic multiplier
* clock implementation and we currently have no need to change
* the multiplier value.
*/
value = readl(base + CPG_PLL2CR);
mult = (((value >> 24) & 0x7f) + 1) * 2;
break;
case CLK_TYPE_GEN3_PLL3:
mult = cpg_pll_config->pll3_mult;
break;
case CLK_TYPE_GEN3_PLL4:
/*
* PLL4 is a configurable multiplier clock. Register it as a
* fixed factor clock for now as there's no generic multiplier
* clock implementation and we currently have no need to change
* the multiplier value.
*/
value = readl(base + CPG_PLL4CR);
mult = (((value >> 24) & 0x7f) + 1) * 2;
break;
default:
return ERR_PTR(-EINVAL);
}
return clk_register_fixed_factor(NULL, core->name,
__clk_get_name(parent), 0, mult, div);
}
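/*
* Example PLL0CR decode (hypothetical register value, field layout as
* used above): value 0x59000000 gives ((0x59 & 0x7f) + 1) * 2 = 180,
* i.e. the x180 PLL0 multiplier the mode-pin table lists for a
* 16.66 MHz EXTAL.
*/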
/*
* Reset register definitions.
*/
#define MODEMR 0xe6160060
static u32 rcar_gen3_read_mode_pins(void)
{
void __iomem *modemr = ioremap_nocache(MODEMR, 4);
u32 mode;
BUG_ON(!modemr);
mode = ioread32(modemr);
iounmap(modemr);
return mode;
}
static int __init r8a7795_cpg_mssr_init(struct device *dev)
{
u32 cpg_mode = rcar_gen3_read_mode_pins();
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
if (!cpg_pll_config->extal_div) {
dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
return -EINVAL;
}
return 0;
}
const struct cpg_mssr_info r8a7795_cpg_mssr_info __initconst = {
/* Core Clocks */
.core_clks = r8a7795_core_clks,
.num_core_clks = ARRAY_SIZE(r8a7795_core_clks),
.last_dt_core_clk = LAST_DT_CORE_CLK,
.num_total_core_clks = MOD_CLK_BASE,
/* Module Clocks */
.mod_clks = r8a7795_mod_clks,
.num_mod_clks = ARRAY_SIZE(r8a7795_mod_clks),
.num_hw_mod_clks = 12 * 32,
/* Critical Module Clocks */
.crit_mod_clks = r8a7795_crit_mod_clks,
.num_crit_mod_clks = ARRAY_SIZE(r8a7795_crit_mod_clks),
/* Callbacks */
.init = r8a7795_cpg_mssr_init,
.cpg_clk_register = r8a7795_cpg_clk_register,
};
| gpl-2.0 |
mynew4/Trinitycore-1 | src/server/scripts/Kalimdor/RazorfenKraul/instance_razorfen_kraul.cpp | 45 | 3018 | /*
* Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Instance_Razorfen_Kraul
SD%Complete:
SDComment:
SDCategory: Razorfen Kraul
EndScriptData */
#include "ScriptPCH.h"
#include "razorfen_kraul.h"
#define WARD_KEEPERS_NR 2
class instance_razorfen_kraul : public InstanceMapScript
{
public:
instance_razorfen_kraul() : InstanceMapScript("instance_razorfen_kraul", 47) { }
InstanceScript* GetInstanceScript(InstanceMap* map) const
{
return new instance_razorfen_kraul_InstanceMapScript(map);
}
struct instance_razorfen_kraul_InstanceMapScript : public InstanceScript
{
instance_razorfen_kraul_InstanceMapScript(Map* map) : InstanceScript(map) {}
uint64 DoorWardGUID;
int WardKeeperDeath;
void Initialize()
{
WardKeeperDeath = 0;
DoorWardGUID = 0;
}
Player* GetPlayerInMap()
{
Map::PlayerList const& players = instance->GetPlayers();
if (!players.isEmpty())
{
for (Map::PlayerList::const_iterator itr = players.begin(); itr != players.end(); ++itr)
{
if (Player* player = itr->getSource())
return player;
}
}
sLog->outDebug(LOG_FILTER_TSCR, "TSCR: Instance Razorfen Kraul: GetPlayerInMap, but PlayerList is empty!");
return NULL;
}
void OnGameObjectCreate(GameObject* go)
{
switch (go->GetEntry())
{
case 21099: DoorWardGUID = go->GetGUID(); break;
}
}
void Update(uint32 /*diff*/)
{
if (WardKeeperDeath == WARD_KEEPERS_NR)
if (GameObject* go = instance->GetGameObject(DoorWardGUID))
{
go->SetUInt32Value(GAMEOBJECT_FLAGS, 33);
go->SetGoState(GO_STATE_ACTIVE);
}
}
void SetData(uint32 type, uint32 /*data*/)
{
switch (type)
{
case EVENT_WARD_KEEPER: WardKeeperDeath++; break;
}
}
};
};
void AddSC_instance_razorfen_kraul()
{
new instance_razorfen_kraul();
}
| gpl-2.0 |
larks/linux-rcu | drivers/net/irda/w83977af_ir.c | 557 | 32914 | /*********************************************************************
*
* Filename: w83977af_ir.c
* Version: 1.0
* Description: FIR driver for the Winbond W83977AF Super I/O chip
* Status: Experimental.
* Author: Paul VanderSpek
* Created at: Wed Nov 4 11:46:16 1998
* Modified at: Fri Jan 28 12:10:59 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
* Copyright (c) 1998-1999 Rebel.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Paul VanderSpek nor Rebel.com admit liability nor provide
* warranty for any of this software. This material is provided "AS-IS"
* and at no charge.
*
* If you find bugs in this file, it's very likely that the same bug
* will also be in pc87108.c since the implementations are quite
* similar.
*
* Notice that all functions that need to access the chip in _any_
* way must save the BSR register on entry and restore it on exit.
* It is _very_ important to follow this policy!
*
* __u8 bank;
*
* bank = inb( iobase+BSR);
*
* do_your_stuff_here();
*
* outb( bank, iobase+BSR);
*
********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"
#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
#undef CONFIG_USE_INTERNAL_TIMER /* Just cannot make that timer work */
#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200
static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07; /* 1 ms or more */
#define CHIP_IO_EXTENT 8
static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;
static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
/* Some prototypes */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
unsigned int dma);
static int w83977af_close(struct w83977af_ir *self);
static int w83977af_probe(int iobase, int irq, int dma);
static int w83977af_dma_receive(struct w83977af_ir *self);
static int w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int w83977af_is_receiving(struct w83977af_ir *self);
static int w83977af_net_open(struct net_device *dev);
static int w83977af_net_close(struct net_device *dev);
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/*
* Function w83977af_init ()
*
* Initialize chip. Just try to find out how many chips we are dealing with
* and where they are
*/
static int __init w83977af_init(void)
{
int i;
IRDA_DEBUG(0, "%s()\n", __func__ );
for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}
return -ENODEV;
}
/*
* Function w83977af_cleanup ()
*
* Close all configured chips
*
*/
static void __exit w83977af_cleanup(void)
{
int i;
IRDA_DEBUG(4, "%s()\n", __func__ );
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
}
}
static const struct net_device_ops w83977_netdev_ops = {
.ndo_open = w83977af_net_open,
.ndo_stop = w83977af_net_close,
.ndo_start_xmit = w83977af_hard_xmit,
.ndo_do_ioctl = w83977af_net_ioctl,
};
/*
* Function w83977af_open (iobase, irq)
*
* Open driver instance
*
*/
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
unsigned int dma)
{
struct net_device *dev;
struct w83977af_ir *self;
int err;
IRDA_DEBUG(0, "%s()\n", __func__ );
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
__func__ , iobase);
return -ENODEV;
}
if (w83977af_probe(iobase, irq, dma) == -1) {
err = -1;
goto err_out;
}
/*
* Allocate new instance of the driver
*/
dev = alloc_irdadev(sizeof(struct w83977af_ir));
if (dev == NULL) {
printk( KERN_ERR "IrDA: Can't allocate memory for "
"IrDA control block!\n");
err = -ENOMEM;
goto err_out;
}
self = netdev_priv(dev);
spin_lock_init(&self->lock);
/* Initialize IO */
self->io.fir_base = iobase;
self->io.irq = irq;
self->io.fir_ext = CHIP_IO_EXTENT;
self->io.dma = dma;
self->io.fifo_size = 32;
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
/* The only value we must override is the baudrate */
/* FIXME: The HP HDLS-1100 does not support 1152000! */
self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
/* The HP HDLS-1100 needs 1 ms according to the specs */
self->qos.min_turn_time.bits = qos_mtt_bits;
irda_qos_bits_to_value(&self->qos);
/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
self->rx_buff.truesize = 14384;
self->tx_buff.truesize = 4000;
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out1;
}
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
self->tx_buff.data = self->tx_buff.head;
self->rx_buff.data = self->rx_buff.head;
self->netdev = dev;
dev->netdev_ops = &w83977_netdev_ops;
err = register_netdev(dev);
if (err) {
IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
goto err_out3;
}
IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
/* Need to store self somewhere */
dev_self[i] = self;
return 0;
err_out3:
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out2:
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out1:
free_netdev(dev);
err_out:
release_region(iobase, CHIP_IO_EXTENT);
return err;
}
/*
* Function w83977af_close (self)
*
* Close driver instance
*
*/
static int w83977af_close(struct w83977af_ir *self)
{
int iobase;
IRDA_DEBUG(0, "%s()\n", __func__ );
iobase = self->io.fir_base;
#ifdef CONFIG_USE_W977_PNP
/* enter PnP configuration mode */
w977_efm_enter(efio);
w977_select_device(W977_DEVICE_IR, efio);
/* Deactivate device */
w977_write_reg(0x30, 0x00, efio);
w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */
/* Remove netdevice */
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
__func__ , self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
free_netdev(self->netdev);
return 0;
}
static int w83977af_probe(int iobase, int irq, int dma)
{
int version;
int i;
for (i=0; i < 2; i++) {
IRDA_DEBUG( 0, "%s()\n", __func__ );
#ifdef CONFIG_USE_W977_PNP
/* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
w977_select_device(W977_DEVICE_IR, efbase[i]);
/* Configure PnP port, IRQ, and DMA channel */
w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
/* Netwinder uses 1 higher than Linux */
w977_write_reg(0x74, dma+1, efbase[i]);
#else
w977_write_reg(0x74, dma, efbase[i]);
#endif /*CONFIG_ARCH_NETWINDER */
w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
/* Set append hardware CRC, enable IR bank selection */
w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
/* Activate device */
w977_write_reg(0x30, 0x01, efbase[i]);
w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
/* Disable Advanced mode */
switch_bank(iobase, SET2);
outb(0x00, iobase+2); /* outb() takes the value first, then the port */
/* Turn on UART (global) interrupts */
switch_bank(iobase, SET0);
outb(HCR_EN_IRQ, iobase+HCR);
/* Switch to advanced mode */
switch_bank(iobase, SET2);
outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
/* Set default IR-mode */
switch_bank(iobase, SET0);
outb(HCR_SIR, iobase+HCR);
/* Read the Advanced IR ID */
switch_bank(iobase, SET3);
version = inb(iobase+AUID);
/* Should be 0x1? */
if (0x10 == (version & 0xf0)) {
efio = efbase[i];
/* Set FIFO size to 32 */
switch_bank(iobase, SET2);
outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
/* Set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
UFR_EN_FIFO,iobase+UFR);
/* Receiver frame length */
switch_bank(iobase, SET4);
outb(2048 & 0xff, iobase+6);
outb((2048 >> 8) & 0x1f, iobase+7);
/*
* Init HP HSDL-1100 transceiver.
*
* Set IRX_MSL since we have 2 receive paths, IRRX
* and IRRXH. Clear IRSL0D since we want IRSL0 to
* be an input pin used for IRRXH
*
* IRRX pin 37 connected to receiver
* IRTX pin 38 connected to transmitter
* FIRRX pin 39 connected to receiver (IRSL0)
* CIRRX pin 40 connected to pin 37
*/
switch_bank(iobase, SET7);
outb(0x40, iobase+7);
IRDA_MESSAGE("W83977AF (IR) driver loaded. "
"Version: 0x%02x\n", version);
return 0;
} else {
/* Try next extended function register address */
IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
}
}
return -1;
}
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
int ir_mode = HCR_SIR;
int iobase;
__u8 set;
iobase = self->io.fir_base;
/* Update accounting for new speed */
self->io.speed = speed;
/* Save current bank */
set = inb(iobase+SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
outb(0, iobase+ICR);
/* Select Set 2 */
switch_bank(iobase, SET2);
outb(0x00, iobase+ABHL);
switch (speed) {
case 9600: outb(0x0c, iobase+ABLL); break;
case 19200: outb(0x06, iobase+ABLL); break;
case 38400: outb(0x03, iobase+ABLL); break;
case 57600: outb(0x02, iobase+ABLL); break;
case 115200: outb(0x01, iobase+ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
break;
case 1152000:
ir_mode = HCR_MIR_1152;
IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
break;
case 4000000:
ir_mode = HCR_FIR;
IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
break;
default:
ir_mode = HCR_FIR;
IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
break;
}
/* Set speed mode */
switch_bank(iobase, SET0);
outb(ir_mode, iobase+HCR);
/* set FIFO size to 32 */
switch_bank(iobase, SET2);
outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
/* set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
outb(0x00, iobase+UFR); /* Reset */
outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
outb(0xa7, iobase+UFR);
netif_wake_queue(self->netdev);
/* Enable some interrupts so we can receive frames */
switch_bank(iobase, SET0);
if (speed > PIO_MAX_SPEED) {
outb(ICR_EFSFI, iobase+ICR);
w83977af_dma_receive(self);
} else
outb(ICR_ERBRI, iobase+ICR);
/* Restore SSR */
outb(set, iobase+SSR);
}
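/*
* The ABLL divisors in the switch above are simply 115200 / baud
* (0x0c = 12 for 9600, 0x06 for 19200, ..., 0x01 for 115200), i.e.
* the usual 115.2 kbaud base clock divided down.
*/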
/*
* Function w83977af_hard_xmit (skb, dev)
*
* Sets up a DMA transfer to send the current frame.
*
*/
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct w83977af_ir *self;
__s32 speed;
int iobase;
__u8 set;
int mtt;
self = netdev_priv(dev);
iobase = self->io.fir_base;
IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
(int) skb->len);
/* Lock transmit buffer */
netif_stop_queue(dev);
/* Check if we need to change the speed */
speed = irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1)) {
/* Check for empty frame */
if (!skb->len) {
w83977af_change_speed(self, speed);
dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
self->new_speed = speed;
}
/* Save current set */
set = inb(iobase+SSR);
/* Decide if we should use PIO or DMA transfer */
if (self->io.speed > PIO_MAX_SPEED) {
self->tx_buff.data = self->tx_buff.head;
skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
self->tx_buff.len = skb->len;
mtt = irda_get_mtt(skb);
#ifdef CONFIG_USE_INTERNAL_TIMER
if (mtt > 50) {
/* Adjust for timer resolution (us -> ms, rounding up) */
mtt = mtt / 1000 + 1;
/* Setup timer */
switch_bank(iobase, SET4);
outb(mtt & 0xff, iobase+TMRL);
outb((mtt >> 8) & 0x0f, iobase+TMRH);
/* Start timer */
outb(IR_MSL_EN_TMR, iobase+IR_MSL);
self->io.direction = IO_XMIT;
/* Enable timer interrupt */
switch_bank(iobase, SET0);
outb(ICR_ETMRI, iobase+ICR);
} else {
#endif
IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
switch_bank(iobase, SET0);
outb(ICR_EDMAI, iobase+ICR);
w83977af_dma_write(self, iobase);
#ifdef CONFIG_USE_INTERNAL_TIMER
}
#endif
} else {
self->tx_buff.data = self->tx_buff.head;
self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
self->tx_buff.truesize);
/* Add interrupt on tx low level (will fire immediately) */
switch_bank(iobase, SET0);
outb(ICR_ETXTHI, iobase+ICR);
}
dev->trans_start = jiffies;
dev_kfree_skb(skb);
/* Restore set register */
outb(set, iobase+SSR);
return NETDEV_TX_OK;
}
/*
* Function w83977af_dma_write (self, iobase)
*
* Send frame using DMA
*
*/
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
unsigned long flags;
__u8 hcr;
#endif
IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
set = inb(iobase+SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
/* Choose transmit DMA channel */
switch_bank(iobase, SET2);
outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
clear_dma_ff(self->io.dma);
set_dma_mode(self->io.dma, DMA_MODE_READ);
set_dma_addr(self->io.dma, self->tx_buff_dma);
set_dma_count(self->io.dma, self->tx_buff.len);
#else
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
#endif
self->io.direction = IO_XMIT;
/* Enable DMA */
switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
spin_unlock_irqrestore(&self->lock, flags);
#else
outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif
/* Restore set register */
outb(set, iobase+SSR);
}
/*
* Function w83977af_pio_write (iobase, buf, len, fifo_size)
*
*
*
*/
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
int actual = 0;
__u8 set;
IRDA_DEBUG(4, "%s()\n", __func__ );
/* Save current bank */
set = inb(iobase+SSR);
switch_bank(iobase, SET0);
if (!(inb_p(iobase+USR) & USR_TSRE)) {
IRDA_DEBUG(4,
"%s(), warning, FIFO not empty yet!\n", __func__ );
fifo_size -= 17;
IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
__func__ , fifo_size);
}
/* Fill FIFO with current frame */
while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
outb(buf[actual++], iobase+TBR);
}
IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
__func__ , fifo_size, actual, len);
/* Restore bank */
outb(set, iobase+SSR);
return actual;
}
/*
* Function w83977af_dma_xmit_complete (self)
*
* The transfer of a frame is finished, so do the necessary things
*
*
*/
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
int iobase;
__u8 set;
IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.fir_base;
/* Save current set */
set = inb(iobase+SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
/* Check for underrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
/* Clear bit, by writing 1 to it */
outb(AUDR_UNDR, iobase+AUDR);
} else
self->netdev->stats.tx_packets++;
if (self->new_speed) {
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
/* Unlock tx_buff and request another frame */
/* Tell the network layer, that we want more frames */
netif_wake_queue(self->netdev);
/* Restore set */
outb(set, iobase+SSR);
}
/*
* Function w83977af_dma_receive (self)
*
* Get ready for receiving a frame. The device will initiate a DMA
* if it starts to receive a frame.
*
*/
static int w83977af_dma_receive(struct w83977af_ir *self)
{
int iobase;
__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
unsigned long flags;
__u8 hcr;
#endif
IRDA_ASSERT(self != NULL, return -1;);
IRDA_DEBUG(4, "%s\n", __func__ );
iobase= self->io.fir_base;
/* Save current set */
set = inb(iobase+SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
/* Choose DMA Rx, DMA Fairness, and Advanced mode */
switch_bank(iobase, SET2);
outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
iobase+ADCR1);
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
clear_dma_ff(self->io.dma);
set_dma_mode(self->io.dma, DMA_MODE_READ);
set_dma_addr(self->io.dma, self->rx_buff_dma);
set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
DMA_MODE_READ);
#endif
/*
* Reset Rx FIFO. This will also flush the ST_FIFO, it's very
* important that we don't reset the Tx FIFO since it might not
* be finished transmitting yet
*/
switch_bank(iobase, SET0);
outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
/* Enable DMA */
switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
spin_unlock_irqrestore(&self->lock, flags);
#else
outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
/* Restore set */
outb(set, iobase+SSR);
return 0;
}
/*
* Function w83977af_dma_receive_complete (self)
*
* Finished with receiving a frame
*
*/
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
struct sk_buff *skb;
struct st_fifo *st_fifo;
int len;
int iobase;
__u8 set;
__u8 status;
IRDA_DEBUG(4, "%s\n", __func__ );
st_fifo = &self->st_fifo;
iobase = self->io.fir_base;
/* Save current set */
set = inb(iobase+SSR);
/* Read status FIFO */
switch_bank(iobase, SET5);
while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
st_fifo->entries[st_fifo->tail].status = status;
st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
st_fifo->tail++;
st_fifo->len++;
}
while (st_fifo->len) {
/* Get first entry */
status = st_fifo->entries[st_fifo->head].status;
len = st_fifo->entries[st_fifo->head].len;
st_fifo->head++;
st_fifo->len--;
/* Check for errors */
if (status & FS_FO_ERR_MSK) {
if (status & FS_FO_LST_FR) {
/* Add number of lost frames to stats */
self->netdev->stats.rx_errors += len;
} else {
/* Skip frame */
self->netdev->stats.rx_errors++;
self->rx_buff.data += len;
if (status & FS_FO_MX_LEX)
self->netdev->stats.rx_length_errors++;
if (status & FS_FO_PHY_ERR)
self->netdev->stats.rx_frame_errors++;
if (status & FS_FO_CRC_ERR)
self->netdev->stats.rx_crc_errors++;
}
/* The errors below can be reported in both cases */
if (status & FS_FO_RX_OV)
self->netdev->stats.rx_fifo_errors++;
if (status & FS_FO_FSF_OV)
self->netdev->stats.rx_fifo_errors++;
} else {
/* Check if we have transferred all data to memory */
switch_bank(iobase, SET0);
if (inb(iobase+USR) & USR_RDR) {
#ifdef CONFIG_USE_INTERNAL_TIMER
/* Put this entry back in fifo */
st_fifo->head--;
st_fifo->len++;
st_fifo->entries[st_fifo->head].status = status;
st_fifo->entries[st_fifo->head].len = len;
/* Restore set register */
outb(set, iobase+SSR);
return FALSE; /* I'll be back! */
#else
udelay(80); /* Should be enough!? */
#endif
}
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
printk(KERN_INFO
"%s(), memory squeeze, dropping frame.\n", __func__);
/* Restore set register */
outb(set, iobase+SSR);
return FALSE;
}
/* Align to 20 bytes */
skb_reserve(skb, 1);
/* Copy frame without CRC */
if (self->io.speed < 4000000) {
skb_put(skb, len-2);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 2);
} else {
skb_put(skb, len-4);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 4);
}
/* Move to next frame */
self->rx_buff.data += len;
self->netdev->stats.rx_packets++;
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
netif_rx(skb);
}
}
/* Restore set register */
outb(set, iobase+SSR);
return TRUE;
}
/*
* Function w83977af_pio_receive (self)
*
* Receive all data in receiver FIFO
*
*/
static void w83977af_pio_receive(struct w83977af_ir *self)
{
__u8 byte = 0x00;
int iobase;
IRDA_DEBUG(4, "%s()\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.fir_base;
/* Receive all characters in Rx FIFO */
do {
byte = inb(iobase+RBR);
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
byte);
} while (inb(iobase+USR) & USR_RDR); /* Data available */
}
/*
* Function w83977af_sir_interrupt (self, isr)
*
* Handle SIR interrupt
*
*/
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
int actual;
__u8 new_icr = 0;
__u8 set;
int iobase;
IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
if (isr & ISR_TXTH_I) {
/* Write data left in transmit buffer */
actual = w83977af_pio_write(self->io.fir_base,
self->tx_buff.data,
self->tx_buff.len,
self->io.fifo_size);
self->tx_buff.data += actual;
self->tx_buff.len -= actual;
self->io.direction = IO_XMIT;
/* Check if finished */
if (self->tx_buff.len > 0) {
new_icr |= ICR_ETXTHI;
} else {
set = inb(iobase+SSR);
switch_bank(iobase, SET0);
outb(AUDR_SFEND, iobase+AUDR);
outb(set, iobase+SSR);
self->netdev->stats.tx_packets++;
/* Feed me more packets */
netif_wake_queue(self->netdev);
new_icr |= ICR_ETBREI;
}
}
/* Check if transmission has completed */
if (isr & ISR_TXEMP_I) {
/* Check if we need to change the speed? */
if (self->new_speed) {
IRDA_DEBUG(2,
"%s(), Changing speed!\n", __func__ );
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
/* Turn around and get ready to receive some data */
self->io.direction = IO_RECV;
new_icr |= ICR_ERBRI;
}
/* Rx FIFO threshold or timeout */
if (isr & ISR_RXTH_I) {
w83977af_pio_receive(self);
/* Keep receiving */
new_icr |= ICR_ERBRI;
}
return new_icr;
}
/*
* Function w83977af_fir_interrupt (self, isr)
*
* Handle MIR/FIR interrupt
*
*/
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
__u8 new_icr = 0;
__u8 set;
int iobase;
iobase = self->io.fir_base;
set = inb(iobase+SSR);
/* End of frame detected in FIFO */
if (isr & (ISR_FEND_I|ISR_FSF_I)) {
if (w83977af_dma_receive_complete(self)) {
/* Wait for next status FIFO interrupt */
new_icr |= ICR_EFSFI;
} else {
/* DMA not finished yet */
/* Set timer value, resolution 1 ms */
switch_bank(iobase, SET4);
outb(0x01, iobase+TMRL); /* 1 ms */
outb(0x00, iobase+TMRH);
/* Start timer */
outb(IR_MSL_EN_TMR, iobase+IR_MSL);
new_icr |= ICR_ETMRI;
}
}
/* Timer finished */
if (isr & ISR_TMR_I) {
/* Disable timer */
switch_bank(iobase, SET4);
outb(0, iobase+IR_MSL);
/* Clear timer event */
/* switch_bank(iobase, SET0); */
/* outb(ASCR_CTE, iobase+ASCR); */
/* Check if this is a TX timer interrupt */
if (self->io.direction == IO_XMIT) {
w83977af_dma_write(self, iobase);
new_icr |= ICR_EDMAI;
} else {
/* Check if DMA has now finished */
w83977af_dma_receive_complete(self);
new_icr |= ICR_EFSFI;
}
}
/* Finished with DMA */
if (isr & ISR_DMA_I) {
w83977af_dma_xmit_complete(self);
/* Check if there are more frames to be transmitted */
/* if (irda_device_txqueue_empty(self)) { */
/* Prepare for receive
*
* ** Netwinder Tx DMA likes that we do this anyway **
*/
w83977af_dma_receive(self);
new_icr = ICR_EFSFI;
/* } */
}
/* Restore set */
outb(set, iobase+SSR);
return new_icr;
}
/*
* Function w83977af_interrupt (irq, dev_id, regs)
*
* An interrupt from the chip has arrived. Time to do some work
*
*/
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct w83977af_ir *self;
__u8 set, icr, isr;
int iobase;
self = netdev_priv(dev);
iobase = self->io.fir_base;
/* Save current bank */
set = inb(iobase+SSR);
switch_bank(iobase, SET0);
icr = inb(iobase+ICR);
isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
outb(0, iobase+ICR); /* Disable interrupts */
if (isr) {
/* Dispatch interrupt handler for the current speed */
if (self->io.speed > PIO_MAX_SPEED )
icr = w83977af_fir_interrupt(self, isr);
else
icr = w83977af_sir_interrupt(self, isr);
}
outb(icr, iobase+ICR); /* Restore (new) interrupts */
outb(set, iobase+SSR); /* Restore bank register */
return IRQ_RETVAL(isr);
}
/*
* Function w83977af_is_receiving (self)
*
* Return TRUE if we are currently receiving a frame
*
*/
static int w83977af_is_receiving(struct w83977af_ir *self)
{
int status = FALSE;
int iobase;
__u8 set;
IRDA_ASSERT(self != NULL, return FALSE;);
if (self->io.speed > 115200) {
iobase = self->io.fir_base;
/* Check if rx FIFO is not empty */
set = inb(iobase+SSR);
switch_bank(iobase, SET2);
if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
/* We are receiving something */
status = TRUE;
}
outb(set, iobase+SSR);
} else
status = (self->rx_buff.state != OUTSIDE_FRAME);
return status;
}
/*
* Function w83977af_net_open (dev)
*
* Start the device
*
*/
static int w83977af_net_open(struct net_device *dev)
{
struct w83977af_ir *self;
int iobase;
char hwname[32];
__u8 set;
IRDA_DEBUG(0, "%s()\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
iobase = self->io.fir_base;
if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
(void *) dev)) {
return -EAGAIN;
}
/*
* Always allocate the DMA channel after the IRQ,
* and clean up on failure.
*/
if (request_dma(self->io.dma, dev->name)) {
free_irq(self->io.irq, dev); /* dev_id must match request_irq() above */
return -EAGAIN;
}
/* Save current set */
set = inb(iobase+SSR);
/* Enable some interrupts so we can receive frames again */
switch_bank(iobase, SET0);
if (self->io.speed > 115200) {
outb(ICR_EFSFI, iobase+ICR);
w83977af_dma_receive(self);
} else
outb(ICR_ERBRI, iobase+ICR);
/* Restore bank register */
outb(set, iobase+SSR);
/* Ready to play! */
netif_start_queue(dev);
/* Give self a hardware name */
sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
/*
* Open new IrLAP layer instance, now that everything should be
* initialized properly
*/
self->irlap = irlap_open(dev, &self->qos, hwname);
return 0;
}
/*
* Function w83977af_net_close (dev)
*
* Stop the device
*
*/
static int w83977af_net_close(struct net_device *dev)
{
struct w83977af_ir *self;
int iobase;
__u8 set;
IRDA_DEBUG(0, "%s()\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
iobase = self->io.fir_base;
/* Stop device */
netif_stop_queue(dev);
/* Stop and remove instance of IrLAP */
if (self->irlap)
irlap_close(self->irlap);
self->irlap = NULL;
disable_dma(self->io.dma);
/* Save current set */
set = inb(iobase+SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
outb(0, iobase+ICR);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
/* Restore bank register */
outb(set, iobase+SSR);
return 0;
}
/*
* Function w83977af_net_ioctl (dev, rq, cmd)
*
* Process IOCTL commands for this device
*
*/
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct if_irda_req *irq = (struct if_irda_req *) rq;
struct w83977af_ir *self;
unsigned long flags;
int ret = 0;
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
spin_lock_irqsave(&self->lock, flags);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
}
w83977af_change_speed(self, irq->ifr_baudrate);
break;
case SIOCSMEDIABUSY: /* Set media busy */
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
}
irda_device_set_media_busy(self->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
irq->ifr_receiving = w83977af_is_receiving(self);
break;
default:
ret = -EOPNOTSUPP;
}
out:
spin_unlock_irqrestore(&self->lock, flags);
return ret;
}
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Mimimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
/*
* Function init_module (void)
*
*
*
*/
module_init(w83977af_init);
/*
* Function cleanup_module (void)
*
*
*
*/
module_exit(w83977af_cleanup);
| gpl-2.0 |
ericli1989/linux-2.6.32.67 | drivers/pcmcia/au1000_xxs1500.c | 557 | 4352 | /*
*
* MyCable board specific pcmcia routines.
*
* Copyright 2003 MontaVista Software Inc.
* Author: Pete Popov, MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/types.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/bus_ops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/au1000.h>
#include <asm/au1000_pcmcia.h>
#define PCMCIA_MAX_SOCK 0
#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
#define PCMCIA_IRQ AU1000_GPIO_4
#if 0
/* __func__ is not a string literal, so it cannot be concatenated */
#define DEBUG(x, args...) printk("%s: " x, __func__, ##args)
#else
#define DEBUG(x,args...)
#endif
static int xxs1500_pcmcia_init(struct pcmcia_init *init)
{
return PCMCIA_NUM_SOCKS;
}
static int xxs1500_pcmcia_shutdown(void)
{
/* turn off power */
au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
GPIO2_OUTPUT);
au_sync_delay(100);
/* assert reset */
au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
GPIO2_OUTPUT);
au_sync_delay(100);
return 0;
}
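/*
* Note on the GPIO2_OUTPUT writes above (based on the Au1x00 GPIO2
* layout, where bits 31:16 act as per-bit write enables for bits
* 15:0): each value sets the data bit together with its enable bit
* 16 positions higher, e.g. (1<<14)|(1<<30) latches only bit 14.
*/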
static int
xxs1500_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
{
u32 inserted; u32 vs;
unsigned long gpio, gpio2;
if(sock > PCMCIA_MAX_SOCK) return -1;
gpio = au_readl(SYS_PINSTATERD);
gpio2 = au_readl(GPIO2_PINSTATE);
vs = gpio2 & ((1<<8) | (1<<9));
inserted = (!(gpio & 0x1) && !(gpio & 0x2));
state->ready = 0;
state->vs_Xv = 0;
state->vs_3v = 0;
state->detect = 0;
if (inserted) {
switch (vs) {
case 0:
case 1:
case 2:
state->vs_3v=1;
break;
case 3: /* 5V */
default:
/* return without setting 'detect' */
printk(KERN_ERR "au1x00_cs: unsupported VS\n",
vs);
return;
}
state->detect = 1;
}
if (state->detect) {
state->ready = 1;
}
state->bvd1= gpio2 & (1<<10);
state->bvd2 = gpio2 & (1<<11);
state->wrprot=0;
return 1;
}
static int xxs1500_pcmcia_get_irq_info(struct pcmcia_irq_info *info)
{
if(info->sock > PCMCIA_MAX_SOCK) return -1;
info->irq = PCMCIA_IRQ;
return 0;
}
static int
xxs1500_pcmcia_configure_socket(const struct pcmcia_configure *configure)
{
if(configure->sock > PCMCIA_MAX_SOCK) return -1;
DEBUG("Vcc %dV Vpp %dV, reset %d\n",
configure->vcc, configure->vpp, configure->reset);
switch(configure->vcc){
case 33: /* Vcc 3.3V */
/* turn on power */
DEBUG("turn on power\n");
au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<14))|(1<<30),
GPIO2_OUTPUT);
au_sync_delay(100);
break;
case 50: /* Vcc 5V */
default: /* what's this ? */
printk(KERN_ERR "au1x00_cs: unsupported VCC\n");
/* fall through - treat unsupported Vcc as 0 and power off */
case 0: /* Vcc 0 */
/* turn off power */
au_sync_delay(100);
au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
GPIO2_OUTPUT);
break;
}
if (!configure->reset) {
DEBUG("deassert reset\n");
au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<4))|(1<<20),
GPIO2_OUTPUT);
au_sync_delay(100);
au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<5))|(1<<21),
GPIO2_OUTPUT);
}
else {
DEBUG("assert reset\n");
au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
GPIO2_OUTPUT);
}
au_sync_delay(100);
return 0;
}
struct pcmcia_low_level xxs1500_pcmcia_ops = {
xxs1500_pcmcia_init,
xxs1500_pcmcia_shutdown,
xxs1500_pcmcia_socket_state,
xxs1500_pcmcia_get_irq_info,
xxs1500_pcmcia_configure_socket
};
| gpl-2.0 |
uwehermann/easybox-904-xdsl-firmware | linux/linux-2.6.32.32/drivers/rtc/rtc-wm8350.c | 557 | 12575 | /*
* Real Time Clock driver for Wolfson Microelectronics WM8350
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
*
* Author: Liam Girdwood
* linux@wolfsonmicro.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
#include <linux/mfd/wm8350/rtc.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#define WM8350_SET_ALM_RETRIES 5
#define WM8350_SET_TIME_RETRIES 5
#define WM8350_GET_TIME_RETRIES 5
#define to_wm8350_from_rtc_dev(d) container_of(d, struct wm8350, rtc.pdev.dev)
/*
* Read current time and date in RTC
*/
static int wm8350_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
u16 time1[4], time2[4];
int retries = WM8350_GET_TIME_RETRIES, ret;
/*
* Read the time twice and compare.
* If time1 == time2, then time is valid else retry.
*/
do {
ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES,
4, time1);
if (ret < 0)
return ret;
ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES,
4, time2);
if (ret < 0)
return ret;
if (memcmp(time1, time2, sizeof(time1)) == 0) {
tm->tm_sec = time1[0] & WM8350_RTC_SECS_MASK;
tm->tm_min = (time1[0] & WM8350_RTC_MINS_MASK)
>> WM8350_RTC_MINS_SHIFT;
tm->tm_hour = time1[1] & WM8350_RTC_HRS_MASK;
tm->tm_wday = ((time1[1] >> WM8350_RTC_DAY_SHIFT)
& 0x7) - 1;
tm->tm_mon = ((time1[2] & WM8350_RTC_MTH_MASK)
>> WM8350_RTC_MTH_SHIFT) - 1;
tm->tm_mday = (time1[2] & WM8350_RTC_DATE_MASK);
tm->tm_year = ((time1[3] & WM8350_RTC_YHUNDREDS_MASK)
>> WM8350_RTC_YHUNDREDS_SHIFT) * 100;
tm->tm_year += time1[3] & WM8350_RTC_YUNITS_MASK;
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon,
tm->tm_year);
tm->tm_year -= 1900;
dev_dbg(dev, "Read (%d left): %04x %04x %04x %04x\n",
retries,
time1[0], time1[1], time1[2], time1[3]);
return 0;
}
} while (retries--);
dev_err(dev, "timed out reading RTC time\n");
return -EIO;
}
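/*
* The double-read above is the usual defence against sampling a
* multi-word counter while it ticks: only two identical back-to-back
* 4-word snapshots are accepted, otherwise we retry (up to
* WM8350_GET_TIME_RETRIES times) before giving up with -EIO.
*/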
/*
* Set current time and date in RTC
*/
static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
u16 time[4];
u16 rtc_ctrl;
int ret, retries = WM8350_SET_TIME_RETRIES;
time[0] = tm->tm_sec;
time[0] |= tm->tm_min << WM8350_RTC_MINS_SHIFT;
time[1] = tm->tm_hour;
time[1] |= (tm->tm_wday + 1) << WM8350_RTC_DAY_SHIFT;
time[2] = tm->tm_mday;
time[2] |= (tm->tm_mon + 1) << WM8350_RTC_MTH_SHIFT;
time[3] = ((tm->tm_year + 1900) / 100) << WM8350_RTC_YHUNDREDS_SHIFT;
time[3] |= (tm->tm_year + 1900) % 100;
dev_dbg(dev, "Setting: %04x %04x %04x %04x\n",
time[0], time[1], time[2], time[3]);
/* Set RTC_SET to stop the clock */
ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET);
if (ret < 0)
return ret;
/* Wait until confirmation of stopping */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
if (!retries) {
dev_err(dev, "timed out on set confirmation\n");
return -EIO;
}
/* Write time to RTC */
ret = wm8350_block_write(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time);
if (ret < 0)
return ret;
/* Clear RTC_SET to start the clock */
ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_SET);
return ret;
}
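/*
* Packing example (field layout per the shifts above): for the year
* 2009 (tm_year = 109), time[3] becomes
* (20 << WM8350_RTC_YHUNDREDS_SHIFT) | 9 -- the century and the
* two-digit year are stored as separate fields.
*/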
/*
* Read alarm time and date in RTC
*/
static int wm8350_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
u16 time[4];
int ret;
ret = wm8350_block_read(wm8350, WM8350_ALARM_SECONDS_MINUTES, 4, time);
if (ret < 0)
return ret;
tm->tm_sec = time[0] & WM8350_RTC_ALMSECS_MASK;
if (tm->tm_sec == WM8350_RTC_ALMSECS_MASK)
tm->tm_sec = -1;
tm->tm_min = time[0] & WM8350_RTC_ALMMINS_MASK;
if (tm->tm_min == WM8350_RTC_ALMMINS_MASK)
tm->tm_min = -1;
else
tm->tm_min >>= WM8350_RTC_ALMMINS_SHIFT;
tm->tm_hour = time[1] & WM8350_RTC_ALMHRS_MASK;
if (tm->tm_hour == WM8350_RTC_ALMHRS_MASK)
tm->tm_hour = -1;
tm->tm_wday = ((time[1] >> WM8350_RTC_ALMDAY_SHIFT) & 0x7) - 1;
if (tm->tm_wday > 7)
tm->tm_wday = -1;
tm->tm_mon = time[2] & WM8350_RTC_ALMMTH_MASK;
if (tm->tm_mon == WM8350_RTC_ALMMTH_MASK)
tm->tm_mon = -1;
else
tm->tm_mon = (tm->tm_mon >> WM8350_RTC_ALMMTH_SHIFT) - 1;
tm->tm_mday = (time[2] & WM8350_RTC_ALMDATE_MASK);
if (tm->tm_mday == WM8350_RTC_ALMDATE_MASK)
tm->tm_mday = -1;
tm->tm_year = -1;
alrm->enabled = !(time[3] & WM8350_RTC_ALMSTS);
return 0;
}
static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
{
int retries = WM8350_SET_ALM_RETRIES;
u16 rtc_ctrl;
int ret;
/* Set RTC_SET to stop the clock */
ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_ALMSET);
if (ret < 0)
return ret;
/* Wait until confirmation of stopping */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
return -ETIMEDOUT;
return 0;
}
static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
{
int ret;
int retries = WM8350_SET_ALM_RETRIES;
u16 rtc_ctrl;
ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_ALMSET);
if (ret < 0)
return ret;
/* Wait until confirmation */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
if (rtc_ctrl & WM8350_RTC_ALMSTS)
return -ETIMEDOUT;
return 0;
}
static int wm8350_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
if (enabled)
return wm8350_rtc_start_alarm(wm8350);
else
return wm8350_rtc_stop_alarm(wm8350);
}
static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
u16 time[3];
int ret;
memset(time, 0, sizeof(time));
if (tm->tm_sec != -1)
time[0] |= tm->tm_sec;
else
time[0] |= WM8350_RTC_ALMSECS_MASK;
if (tm->tm_min != -1)
time[0] |= tm->tm_min << WM8350_RTC_ALMMINS_SHIFT;
else
time[0] |= WM8350_RTC_ALMMINS_MASK;
if (tm->tm_hour != -1)
time[1] |= tm->tm_hour;
else
time[1] |= WM8350_RTC_ALMHRS_MASK;
if (tm->tm_wday != -1)
time[1] |= (tm->tm_wday + 1) << WM8350_RTC_ALMDAY_SHIFT;
else
time[1] |= WM8350_RTC_ALMDAY_MASK;
if (tm->tm_mday != -1)
time[2] |= tm->tm_mday;
else
time[2] |= WM8350_RTC_ALMDATE_MASK;
if (tm->tm_mon != -1)
time[2] |= (tm->tm_mon + 1) << WM8350_RTC_ALMMTH_SHIFT;
else
time[2] |= WM8350_RTC_ALMMTH_MASK;
ret = wm8350_rtc_stop_alarm(wm8350);
if (ret < 0)
return ret;
/* Write time to RTC */
ret = wm8350_block_write(wm8350, WM8350_ALARM_SECONDS_MINUTES,
3, time);
if (ret < 0)
return ret;
if (alrm->enabled)
ret = wm8350_rtc_start_alarm(wm8350);
return ret;
}
static int wm8350_rtc_update_irq_enable(struct device *dev,
unsigned int enabled)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
if (enabled)
wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_SEC);
else
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
return 0;
}
static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq,
void *data)
{
struct rtc_device *rtc = wm8350->rtc.rtc;
int ret;
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
/* Make it one shot */
ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_ALMSET);
if (ret != 0) {
dev_err(&(wm8350->rtc.pdev->dev),
"Failed to disable alarm: %d\n", ret);
}
}
static void wm8350_rtc_update_handler(struct wm8350 *wm8350, int irq,
void *data)
{
struct rtc_device *rtc = wm8350->rtc.rtc;
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF);
}
static const struct rtc_class_ops wm8350_rtc_ops = {
.read_time = wm8350_rtc_readtime,
.set_time = wm8350_rtc_settime,
.read_alarm = wm8350_rtc_readalarm,
.set_alarm = wm8350_rtc_setalarm,
.alarm_irq_enable = wm8350_rtc_alarm_irq_enable,
.update_irq_enable = wm8350_rtc_update_irq_enable,
};
#ifdef CONFIG_PM
static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret = 0;
u16 reg;
reg = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
if (device_may_wakeup(&wm8350->rtc.pdev->dev) &&
reg & WM8350_RTC_ALMSTS) {
ret = wm8350_rtc_stop_alarm(wm8350);
if (ret != 0)
dev_err(&pdev->dev, "Failed to stop RTC alarm: %d\n",
ret);
}
return ret;
}
static int wm8350_rtc_resume(struct platform_device *pdev)
{
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret;
if (wm8350->rtc.alarm_enabled) {
ret = wm8350_rtc_start_alarm(wm8350);
if (ret != 0)
dev_err(&pdev->dev,
"Failed to restart RTC alarm: %d\n", ret);
}
return 0;
}
#else
#define wm8350_rtc_suspend NULL
#define wm8350_rtc_resume NULL
#endif
static int wm8350_rtc_probe(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
int ret = 0;
u16 timectl, power5;
timectl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
if (timectl & WM8350_RTC_BCD) {
dev_err(&pdev->dev, "RTC BCD mode not supported\n");
return -EINVAL;
}
if (timectl & WM8350_RTC_12HR) {
dev_err(&pdev->dev, "RTC 12 hour mode not supported\n");
return -EINVAL;
}
/* enable the RTC if it's not already enabled */
power5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);
if (!(power5 & WM8350_RTC_TICK_ENA)) {
dev_info(wm8350->dev, "Starting RTC\n");
wm8350_reg_unlock(wm8350);
ret = wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5,
WM8350_RTC_TICK_ENA);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable RTC: %d\n", ret);
return ret;
}
wm8350_reg_lock(wm8350);
}
if (timectl & WM8350_RTC_STS) {
int retries;
ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_SET);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start: %d\n", ret);
return ret;
}
retries = WM8350_SET_TIME_RETRIES;
do {
timectl = wm8350_reg_read(wm8350,
WM8350_RTC_TIME_CONTROL);
} while (timectl & WM8350_RTC_STS && --retries);
if (retries == 0) {
dev_err(&pdev->dev, "failed to start: timeout\n");
return -ENODEV;
}
}
device_init_wakeup(&pdev->dev, 1);
wm_rtc->rtc = rtc_device_register("wm8350", &pdev->dev,
&wm8350_rtc_ops, THIS_MODULE);
if (IS_ERR(wm_rtc->rtc)) {
ret = PTR_ERR(wm_rtc->rtc);
dev_err(&pdev->dev, "failed to register RTC: %d\n", ret);
return ret;
}
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_PER);
wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
wm8350_rtc_update_handler, NULL);
wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
wm8350_rtc_alarm_handler, NULL);
wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_ALM);
return 0;
}
static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM);
rtc_device_unregister(wm_rtc->rtc);
return 0;
}
static struct platform_driver wm8350_rtc_driver = {
.probe = wm8350_rtc_probe,
.remove = __devexit_p(wm8350_rtc_remove),
.suspend = wm8350_rtc_suspend,
.resume = wm8350_rtc_resume,
.driver = {
.name = "wm8350-rtc",
},
};
static int __init wm8350_rtc_init(void)
{
return platform_driver_register(&wm8350_rtc_driver);
}
module_init(wm8350_rtc_init);
static void __exit wm8350_rtc_exit(void)
{
platform_driver_unregister(&wm8350_rtc_driver);
}
module_exit(wm8350_rtc_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM8350");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-rtc");
| gpl-2.0 |
androidbftab1/bf-kernel-4.2 | arch/parisc/kernel/process.c | 813 | 8186 | /*
* PARISC Architecture-dependent parts of process handling
* based on the work for i386
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
#include <asm/unwind.h>
#include <asm/sections.h>
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) may cause a broadcast reset to lockup
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
/*
** If user has modified the Firmware Selftest Bitmap,
** run the tests specified in the bitmap after the
** system is rebooted w/PDC_DO_RESET.
**
** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
**
** Using "directed resets" at each processor with the MEM_TOC
** vector cleared will also avoid running destructive
** memory self tests. (Not implemented yet)
*/
if (ftc_bitmap) {
pdc_do_firm_test_reset(ftc_bitmap);
}
#endif
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* "Normal" system reset */
pdc_do_reset();
/* Nope...box should reset with just CMD_RESET now */
gsc_writel(CMD_RESET, COMMAND_GLOBAL);
/* Wait for RESET to lay us to rest. */
while (1) ;
}
void machine_halt(void)
{
/*
** The LED/ChassisCodes are updated by the led_halt()
** function, called by the reboot notifier chain.
*/
}
void (*chassis_power_off)(void);
/*
* This routine is called from sys_reboot to actually turn off the
* machine
*/
void machine_power_off(void)
{
/* If there is a registered power off handler, call it. */
if (chassis_power_off)
chassis_power_off();
/* Put the soft power button back under hardware control.
* If the user had already pressed the power button, the
* following call will immediately power off. */
pdc_soft_power_button(0);
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
}
void flush_thread(void)
{
/* Only needs to handle fpu stuff or perf monitors.
** REVISIT: several arches implement a "lazy fpu state".
*/
}
void release_thread(struct task_struct *dead_task)
{
}
/*
* Fill in the FPU structure for a core dump.
*/
int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
if (regs == NULL)
return 0;
memcpy(r, regs->fr, sizeof *r);
return 1;
}
int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
memcpy(r, tsk->thread.regs.fr, sizeof(*r));
return 1;
}
/*
* Copy architecture-specific thread state
*/
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
/* We have to use void * instead of a function pointer, because
* function pointers aren't a pointer to the function on 64-bit.
* Make them const so the compiler knows they live in .text */
extern void * const ret_from_kernel_thread;
extern void * const child_return;
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
if (!usp) /* idle thread */
return 0;
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &ret_from_kernel_thread;
/*
* Copy function and argument to be called from
* ret_from_kernel_thread.
*/
#ifdef CONFIG_64BIT
cregs->gr[27] = ((unsigned long *)usp)[3];
cregs->gr[26] = ((unsigned long *)usp)[2];
#else
cregs->gr[26] = usp;
#endif
cregs->gr[25] = kthread_arg;
} else {
/* user thread */
/* usp must be word aligned. This also prevents users from
* passing in the value 1 (which is the signal for a special
* return for a kernel thread) */
if (usp) {
usp = ALIGN(usp, 4);
if (likely(usp))
cregs->gr[30] = usp;
}
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &child_return;
/* Setup thread TLS area from the 4th parameter in clone */
if (clone_flags & CLONE_SETTLS)
cregs->cr27 = cregs->gr[23];
}
return 0;
}
unsigned long thread_saved_pc(struct task_struct *t)
{
return t->thread.regs.kpc;
}
unsigned long
get_wchan(struct task_struct *p)
{
struct unwind_frame_info info;
unsigned long ip;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* These bracket the sleeping functions..
*/
unwind_frame_init_from_blocked_task(&info, p);
do {
if (unwind_once(&info) < 0)
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
return 0;
}
#ifdef CONFIG_64BIT
void *dereference_function_descriptor(void *ptr)
{
Elf64_Fdesc *desc = ptr;
void *p;
if (!probe_kernel_address(&desc->addr, p))
ptr = p;
return ptr;
}
#endif
static inline unsigned long brk_rnd(void)
{
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
else
return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
if (ret < mm->brk)
return mm->brk;
return ret;
}
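/*
* Worked example (illustrative, not part of the original source): with
* 4 KiB pages (PAGE_SHIFT == 12) a 32-bit task draws its offset from
* (get_random_int() & 0x7ffUL) << 12, i.e. anything up to 8 MiB - 4 KiB,
* while a 64-bit task uses (get_random_int() & 0x3ffffUL) << 12, up to
* 1 GiB - 4 KiB, matching the "8MB for 32bit, 1GB for 64bit" note above.
*/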
| gpl-2.0 |
yazidkucrit/android_kernel_KeongBalap | security/selinux/hooks.c | 1837 | 144771 | /*
* NSA Security-Enhanced Linux (SELinux) security module
*
* This file contains the SELinux hook function implementations.
*
* Authors: Stephen Smalley, <sds@epoch.ncsc.mil>
* Chris Vance, <cvance@nai.com>
* Wayne Salamon, <wsalamon@nai.com>
* James Morris <jmorris@redhat.com>
*
* Copyright (C) 2001,2002 Networks Associates Technology, Inc.
* Copyright (C) 2003-2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Eric Paris <eparis@redhat.com>
* Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
* <dgoeddel@trustedcs.com>
* Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul.moore@hp.com>
* Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
* Yuichi Nakamura <ynakam@hitachisoft.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kd.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/errno.h>
#include <linux/ext2_fs.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/capability.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/tty.h>
#include <net/icmp.h>
#include <net/ip.h> /* for local_port_range[] */
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h> /* for network interface checks */
#include <linux/netlink.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dccp.h>
#include <linux/quota.h>
#include <linux/un.h> /* for Unix socket types */
#include <net/af_unix.h> /* for Unix socket types */
#include <linux/parser.h>
#include <linux/nfs_mount.h>
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/personality.h>
#include <linux/audit.h>
#include <linux/string.h>
#include <linux/selinux.h>
#include <linux/mutex.h>
#include <linux/posix-timers.h>
#include <linux/syslog.h>
#include <linux/user_namespace.h>
#include "avc.h"
#include "objsec.h"
#include "netif.h"
#include "netnode.h"
#include "netport.h"
#include "xfrm.h"
#include "netlabel.h"
#include "audit.h"
#define NUM_SEL_MNT_OPTS 5
extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
extern struct security_operations *security_ops;
/* SECMARK reference count */
atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
int selinux_enforcing;
static int __init enforcing_setup(char *str)
{
unsigned long enforcing;
if (!strict_strtoul(str, 0, &enforcing))
selinux_enforcing = enforcing ? 1 : 0;
return 1;
}
__setup("enforcing=", enforcing_setup);
#endif
#ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE;
static int __init selinux_enabled_setup(char *str)
{
unsigned long enabled;
if (!strict_strtoul(str, 0, &enabled))
selinux_enabled = enabled ? 1 : 0;
return 1;
}
__setup("selinux=", selinux_enabled_setup);
#else
int selinux_enabled = 1;
#endif
static struct kmem_cache *sel_inode_cache;
/**
* selinux_secmark_enabled - Check to see if SECMARK is currently enabled
*
* Description:
* This function checks the SECMARK reference counter to see if any SECMARK
* targets are currently configured; if the reference counter is greater than
* zero, SECMARK is considered to be enabled. Returns true (1) if SECMARK is
* enabled, false (0) if SECMARK is disabled.
*
*/
static int selinux_secmark_enabled(void)
{
return (atomic_read(&selinux_secmark_refcount) > 0);
}
/*
* initialise the security for the init task
*/
static void cred_init_security(void)
{
struct cred *cred = (struct cred *) current->real_cred;
struct task_security_struct *tsec;
tsec = kzalloc(sizeof(struct task_security_struct), GFP_KERNEL);
if (!tsec)
panic("SELinux: Failed to initialize initial task.\n");
tsec->osid = tsec->sid = SECINITSID_KERNEL;
cred->security = tsec;
}
/*
* get the security ID of a set of credentials
*/
static inline u32 cred_sid(const struct cred *cred)
{
const struct task_security_struct *tsec;
tsec = cred->security;
return tsec->sid;
}
/*
* get the objective security ID of a task
*/
static inline u32 task_sid(const struct task_struct *task)
{
u32 sid;
rcu_read_lock();
sid = cred_sid(__task_cred(task));
rcu_read_unlock();
return sid;
}
/*
* get the subjective security ID of the current task
*/
static inline u32 current_sid(void)
{
const struct task_security_struct *tsec = current_security();
return tsec->sid;
}
/* Allocate and free functions for each kind of security blob. */
static int inode_alloc_security(struct inode *inode)
{
struct inode_security_struct *isec;
u32 sid = current_sid();
isec = kmem_cache_zalloc(sel_inode_cache, GFP_NOFS);
if (!isec)
return -ENOMEM;
mutex_init(&isec->lock);
INIT_LIST_HEAD(&isec->list);
isec->inode = inode;
isec->sid = SECINITSID_UNLABELED;
isec->sclass = SECCLASS_FILE;
isec->task_sid = sid;
inode->i_security = isec;
return 0;
}
static void inode_free_security(struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
struct superblock_security_struct *sbsec = inode->i_sb->s_security;
spin_lock(&sbsec->isec_lock);
if (!list_empty(&isec->list))
list_del_init(&isec->list);
spin_unlock(&sbsec->isec_lock);
inode->i_security = NULL;
kmem_cache_free(sel_inode_cache, isec);
}
static int file_alloc_security(struct file *file)
{
struct file_security_struct *fsec;
u32 sid = current_sid();
fsec = kzalloc(sizeof(struct file_security_struct), GFP_KERNEL);
if (!fsec)
return -ENOMEM;
fsec->sid = sid;
fsec->fown_sid = sid;
file->f_security = fsec;
return 0;
}
static void file_free_security(struct file *file)
{
struct file_security_struct *fsec = file->f_security;
file->f_security = NULL;
kfree(fsec);
}
static int superblock_alloc_security(struct super_block *sb)
{
struct superblock_security_struct *sbsec;
sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
if (!sbsec)
return -ENOMEM;
mutex_init(&sbsec->lock);
INIT_LIST_HEAD(&sbsec->isec_head);
spin_lock_init(&sbsec->isec_lock);
sbsec->sb = sb;
sbsec->sid = SECINITSID_UNLABELED;
sbsec->def_sid = SECINITSID_FILE;
sbsec->mntpoint_sid = SECINITSID_UNLABELED;
sb->s_security = sbsec;
return 0;
}
static void superblock_free_security(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
sb->s_security = NULL;
kfree(sbsec);
}
/* The security server must be initialized before
any labeling or access decisions can be provided. */
extern int ss_initialized;
/* The file system's label must be initialized prior to use. */
static const char *labeling_behaviors[6] = {
"uses xattr",
"uses transition SIDs",
"uses task SIDs",
"uses genfs_contexts",
"not configured for labeling",
"uses mountpoint labeling",
};
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
static inline int inode_doinit(struct inode *inode)
{
return inode_doinit_with_dentry(inode, NULL);
}
enum {
Opt_error = -1,
Opt_context = 1,
Opt_fscontext = 2,
Opt_defcontext = 3,
Opt_rootcontext = 4,
Opt_labelsupport = 5,
};
static const match_table_t tokens = {
{Opt_context, CONTEXT_STR "%s"},
{Opt_fscontext, FSCONTEXT_STR "%s"},
{Opt_defcontext, DEFCONTEXT_STR "%s"},
{Opt_rootcontext, ROOTCONTEXT_STR "%s"},
{Opt_labelsupport, LABELSUPP_STR},
{Opt_error, NULL},
};
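/*
* Illustrative mount invocations matched by the token table above (the
* context values are made up for the example):
*
*   mount -o context=system_u:object_r:tmp_t:s0 ...
*   mount -o fscontext=system_u:object_r:fs_t:s0 ...
*
* Each *_STR prefix is defined in the SELinux headers and, for the four
* context options, already carries the trailing '='; LABELSUPP_STR is a
* bare flag that takes no argument, hence no "%s" in its pattern.
*/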
#define SEL_MOUNT_FAIL_MSG "SELinux: duplicate or incompatible mount options\n"
static int may_context_mount_sb_relabel(u32 sid,
struct superblock_security_struct *sbsec,
const struct cred *cred)
{
const struct task_security_struct *tsec = cred->security;
int rc;
rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELFROM, NULL);
if (rc)
return rc;
rc = avc_has_perm(tsec->sid, sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELTO, NULL);
return rc;
}
static int may_context_mount_inode_relabel(u32 sid,
struct superblock_security_struct *sbsec,
const struct cred *cred)
{
const struct task_security_struct *tsec = cred->security;
int rc;
rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELFROM, NULL);
if (rc)
return rc;
rc = avc_has_perm(sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__ASSOCIATE, NULL);
return rc;
}
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
struct dentry *root = sb->s_root;
struct inode *root_inode = root->d_inode;
int rc = 0;
if (sbsec->behavior == SECURITY_FS_USE_XATTR) {
/* Make sure that the xattr handler exists and that no
error other than -ENODATA is returned by getxattr on
the root directory. -ENODATA is ok, as this may be
the first boot of the SELinux kernel before we have
assigned xattr values to the filesystem. */
if (!root_inode->i_op->getxattr) {
printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
"xattr support\n", sb->s_id, sb->s_type->name);
rc = -EOPNOTSUPP;
goto out;
}
rc = root_inode->i_op->getxattr(root, XATTR_NAME_SELINUX, NULL, 0);
if (rc < 0 && rc != -ENODATA) {
if (rc == -EOPNOTSUPP)
printk(KERN_WARNING "SELinux: (dev %s, type "
"%s) has no security xattr handler\n",
sb->s_id, sb->s_type->name);
else
printk(KERN_WARNING "SELinux: (dev %s, type "
"%s) getxattr errno %d\n", sb->s_id,
sb->s_type->name, -rc);
goto out;
}
}
sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
sb->s_id, sb->s_type->name);
else
printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n",
sb->s_id, sb->s_type->name,
labeling_behaviors[sbsec->behavior-1]);
if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
sbsec->behavior == SECURITY_FS_USE_NONE ||
sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
sbsec->flags &= ~SE_SBLABELSUPP;
/* Special handling for sysfs. Is genfs but also has a setxattr handler. */
if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
sbsec->flags |= SE_SBLABELSUPP;
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
/* Initialize any other inodes associated with the superblock, e.g.
inodes created prior to initial policy load or inodes created
during get_sb by a pseudo filesystem that directly
populates itself. */
spin_lock(&sbsec->isec_lock);
next_inode:
if (!list_empty(&sbsec->isec_head)) {
struct inode_security_struct *isec =
list_entry(sbsec->isec_head.next,
struct inode_security_struct, list);
struct inode *inode = isec->inode;
spin_unlock(&sbsec->isec_lock);
inode = igrab(inode);
if (inode) {
if (!IS_PRIVATE(inode))
inode_doinit(inode);
iput(inode);
}
spin_lock(&sbsec->isec_lock);
list_del_init(&isec->list);
goto next_inode;
}
spin_unlock(&sbsec->isec_lock);
out:
return rc;
}
/*
* This function should allow an FS to ask what its mount security
* options were so it can use those later for submounts, displaying
* mount options, or whatever.
*/
static int selinux_get_mnt_opts(const struct super_block *sb,
struct security_mnt_opts *opts)
{
int rc = 0, i;
struct superblock_security_struct *sbsec = sb->s_security;
char *context = NULL;
u32 len;
char tmp;
security_init_mnt_opts(opts);
if (!(sbsec->flags & SE_SBINITIALIZED))
return -EINVAL;
if (!ss_initialized)
return -EINVAL;
tmp = sbsec->flags & SE_MNTMASK;
/* count the number of mount options for this sb */
for (i = 0; i < 8; i++) {
if (tmp & 0x01)
opts->num_mnt_opts++;
tmp >>= 1;
}
/* Check if the Label support flag is set */
if (sbsec->flags & SE_SBLABELSUPP)
opts->num_mnt_opts++;
opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
if (!opts->mnt_opts) {
rc = -ENOMEM;
goto out_free;
}
opts->mnt_opts_flags = kcalloc(opts->num_mnt_opts, sizeof(int), GFP_ATOMIC);
if (!opts->mnt_opts_flags) {
rc = -ENOMEM;
goto out_free;
}
i = 0;
if (sbsec->flags & FSCONTEXT_MNT) {
rc = security_sid_to_context(sbsec->sid, &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = FSCONTEXT_MNT;
}
if (sbsec->flags & CONTEXT_MNT) {
rc = security_sid_to_context(sbsec->mntpoint_sid, &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = CONTEXT_MNT;
}
if (sbsec->flags & DEFCONTEXT_MNT) {
rc = security_sid_to_context(sbsec->def_sid, &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = DEFCONTEXT_MNT;
}
if (sbsec->flags & ROOTCONTEXT_MNT) {
struct inode *root = sbsec->sb->s_root->d_inode;
struct inode_security_struct *isec = root->i_security;
rc = security_sid_to_context(isec->sid, &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
}
if (sbsec->flags & SE_SBLABELSUPP) {
opts->mnt_opts[i] = NULL;
opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
}
BUG_ON(i != opts->num_mnt_opts);
return 0;
out_free:
security_free_mnt_opts(opts);
return rc;
}
static int bad_option(struct superblock_security_struct *sbsec, char flag,
u32 old_sid, u32 new_sid)
{
char mnt_flags = sbsec->flags & SE_MNTMASK;
/* check if the old mount command had the same options */
if (sbsec->flags & SE_SBINITIALIZED)
if (!(sbsec->flags & flag) ||
(old_sid != new_sid))
return 1;
/* check if we were passed the same options twice,
* aka someone passed context=a,context=b
*/
if (!(sbsec->flags & SE_SBINITIALIZED))
if (mnt_flags & flag)
return 1;
return 0;
}
/*
* Allow filesystems with binary mount data to explicitly set mount point
* labeling information.
*/
static int selinux_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
{
const struct cred *cred = current_cred();
int rc = 0, i;
struct superblock_security_struct *sbsec = sb->s_security;
const char *name = sb->s_type->name;
struct inode *inode = sbsec->sb->s_root->d_inode;
struct inode_security_struct *root_isec = inode->i_security;
u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
u32 defcontext_sid = 0;
char **mount_options = opts->mnt_opts;
int *flags = opts->mnt_opts_flags;
int num_opts = opts->num_mnt_opts;
mutex_lock(&sbsec->lock);
if (!ss_initialized) {
if (!num_opts) {
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
server is ready to handle calls. */
goto out;
}
rc = -EINVAL;
printk(KERN_WARNING "SELinux: Unable to set superblock options "
"before the security server is initialized\n");
goto out;
}
/*
* Binary mount data FS will come through this function twice. Once
* from an explicit call and once from the generic calls from the vfs.
* Since the generic VFS calls will not contain any security mount data
* we need to skip the double mount verification.
*
* This does open a hole in which we will not notice if the first
* mount using this sb set explicit options and a second mount using
* this sb does not set any security options. (The first options
* will be used for both mounts)
*/
if ((sbsec->flags & SE_SBINITIALIZED) && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
&& (num_opts == 0))
goto out;
/*
* parse the mount options, check if they are valid sids.
* also check if someone is trying to mount the same sb more
* than once with different security options.
*/
for (i = 0; i < num_opts; i++) {
u32 sid;
if (flags[i] == SE_SBLABELSUPP)
continue;
rc = security_context_to_sid(mount_options[i],
strlen(mount_options[i]), &sid);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
mount_options[i], sb->s_id, name, rc);
goto out;
}
switch (flags[i]) {
case FSCONTEXT_MNT:
fscontext_sid = sid;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
fscontext_sid))
goto out_double_mount;
sbsec->flags |= FSCONTEXT_MNT;
break;
case CONTEXT_MNT:
context_sid = sid;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
context_sid))
goto out_double_mount;
sbsec->flags |= CONTEXT_MNT;
break;
case ROOTCONTEXT_MNT:
rootcontext_sid = sid;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
rootcontext_sid))
goto out_double_mount;
sbsec->flags |= ROOTCONTEXT_MNT;
break;
case DEFCONTEXT_MNT:
defcontext_sid = sid;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
defcontext_sid))
goto out_double_mount;
sbsec->flags |= DEFCONTEXT_MNT;
break;
default:
rc = -EINVAL;
goto out;
}
}
if (sbsec->flags & SE_SBINITIALIZED) {
/* previously mounted with options, but not on this attempt? */
if ((sbsec->flags & SE_MNTMASK) && !num_opts)
goto out_double_mount;
rc = 0;
goto out;
}
if (strcmp(sb->s_type->name, "proc") == 0)
sbsec->flags |= SE_SBPROC;
/* Determine the labeling behavior to use for this filesystem type. */
rc = security_fs_use((sbsec->flags & SE_SBPROC) ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid);
if (rc) {
printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n",
__func__, sb->s_type->name, rc);
goto out;
}
/* sets the context of the superblock for the fs being mounted. */
if (fscontext_sid) {
rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, cred);
if (rc)
goto out;
sbsec->sid = fscontext_sid;
}
/*
* Switch to using mount point labeling behavior.
* sets the label used on all files below the mountpoint, and will set
* the superblock context if not already set.
*/
if (context_sid) {
if (!fscontext_sid) {
rc = may_context_mount_sb_relabel(context_sid, sbsec,
cred);
if (rc)
goto out;
sbsec->sid = context_sid;
} else {
rc = may_context_mount_inode_relabel(context_sid, sbsec,
cred);
if (rc)
goto out;
}
if (!rootcontext_sid)
rootcontext_sid = context_sid;
sbsec->mntpoint_sid = context_sid;
sbsec->behavior = SECURITY_FS_USE_MNTPOINT;
}
if (rootcontext_sid) {
rc = may_context_mount_inode_relabel(rootcontext_sid, sbsec,
cred);
if (rc)
goto out;
root_isec->sid = rootcontext_sid;
root_isec->initialized = 1;
}
if (defcontext_sid) {
if (sbsec->behavior != SECURITY_FS_USE_XATTR) {
rc = -EINVAL;
printk(KERN_WARNING "SELinux: defcontext option is "
"invalid for this filesystem type\n");
goto out;
}
if (defcontext_sid != sbsec->def_sid) {
rc = may_context_mount_inode_relabel(defcontext_sid,
sbsec, cred);
if (rc)
goto out;
}
sbsec->def_sid = defcontext_sid;
}
rc = sb_finish_set_opts(sb);
out:
mutex_unlock(&sbsec->lock);
return rc;
out_double_mount:
rc = -EINVAL;
printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different "
"security settings for (dev %s, type %s)\n", sb->s_id, name);
goto out;
}
static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
const struct superblock_security_struct *oldsbsec = oldsb->s_security;
struct superblock_security_struct *newsbsec = newsb->s_security;
int set_fscontext = (oldsbsec->flags & FSCONTEXT_MNT);
int set_context = (oldsbsec->flags & CONTEXT_MNT);
int set_rootcontext = (oldsbsec->flags & ROOTCONTEXT_MNT);
/*
* If the parent was able to be mounted it clearly had no special LSM
* mount options, thus we can safely deal with this superblock later.
*/
if (!ss_initialized)
return;
/* how can we clone if the old one wasn't set up?? */
BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
/* if fs is reusing a sb, just let its options stand... */
if (newsbsec->flags & SE_SBINITIALIZED)
return;
mutex_lock(&newsbsec->lock);
newsbsec->flags = oldsbsec->flags;
newsbsec->sid = oldsbsec->sid;
newsbsec->def_sid = oldsbsec->def_sid;
newsbsec->behavior = oldsbsec->behavior;
if (set_context) {
u32 sid = oldsbsec->mntpoint_sid;
if (!set_fscontext)
newsbsec->sid = sid;
if (!set_rootcontext) {
struct inode *newinode = newsb->s_root->d_inode;
struct inode_security_struct *newisec = newinode->i_security;
newisec->sid = sid;
}
newsbsec->mntpoint_sid = sid;
}
if (set_rootcontext) {
const struct inode *oldinode = oldsb->s_root->d_inode;
const struct inode_security_struct *oldisec = oldinode->i_security;
struct inode *newinode = newsb->s_root->d_inode;
struct inode_security_struct *newisec = newinode->i_security;
newisec->sid = oldisec->sid;
}
sb_finish_set_opts(newsb);
mutex_unlock(&newsbsec->lock);
}
static int selinux_parse_opts_str(char *options,
struct security_mnt_opts *opts)
{
char *p;
char *context = NULL, *defcontext = NULL;
char *fscontext = NULL, *rootcontext = NULL;
int rc, num_mnt_opts = 0;
opts->num_mnt_opts = 0;
/* Standard string-based options. */
while ((p = strsep(&options, "|")) != NULL) {
int token;
substring_t args[MAX_OPT_ARGS];
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_context:
if (context || defcontext) {
rc = -EINVAL;
printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
goto out_err;
}
context = match_strdup(&args[0]);
if (!context) {
rc = -ENOMEM;
goto out_err;
}
break;
case Opt_fscontext:
if (fscontext) {
rc = -EINVAL;
printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
goto out_err;
}
fscontext = match_strdup(&args[0]);
if (!fscontext) {
rc = -ENOMEM;
goto out_err;
}
break;
case Opt_rootcontext:
if (rootcontext) {
rc = -EINVAL;
printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
goto out_err;
}
rootcontext = match_strdup(&args[0]);
if (!rootcontext) {
rc = -ENOMEM;
goto out_err;
}
break;
case Opt_defcontext:
if (context || defcontext) {
rc = -EINVAL;
printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
goto out_err;
}
defcontext = match_strdup(&args[0]);
if (!defcontext) {
rc = -ENOMEM;
goto out_err;
}
break;
case Opt_labelsupport:
break;
default:
rc = -EINVAL;
printk(KERN_WARNING "SELinux: unknown mount option\n");
goto out_err;
}
}
rc = -ENOMEM;
opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
if (!opts->mnt_opts)
goto out_err;
opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
if (!opts->mnt_opts_flags) {
kfree(opts->mnt_opts);
goto out_err;
}
if (fscontext) {
opts->mnt_opts[num_mnt_opts] = fscontext;
opts->mnt_opts_flags[num_mnt_opts++] = FSCONTEXT_MNT;
}
if (context) {
opts->mnt_opts[num_mnt_opts] = context;
opts->mnt_opts_flags[num_mnt_opts++] = CONTEXT_MNT;
}
if (rootcontext) {
opts->mnt_opts[num_mnt_opts] = rootcontext;
opts->mnt_opts_flags[num_mnt_opts++] = ROOTCONTEXT_MNT;
}
if (defcontext) {
opts->mnt_opts[num_mnt_opts] = defcontext;
opts->mnt_opts_flags[num_mnt_opts++] = DEFCONTEXT_MNT;
}
opts->num_mnt_opts = num_mnt_opts;
return 0;
out_err:
kfree(context);
kfree(defcontext);
kfree(fscontext);
kfree(rootcontext);
return rc;
}
/*
* parse the string mount options and call selinux_set_mnt_opts() to set
* up the sbsec
*/
static int superblock_doinit(struct super_block *sb, void *data)
{
int rc = 0;
char *options = data;
struct security_mnt_opts opts;
security_init_mnt_opts(&opts);
if (!data)
goto out;
BUG_ON(sb->s_type->fs_flags & FS_BINARY_MOUNTDATA);
rc = selinux_parse_opts_str(options, &opts);
if (rc)
goto out_err;
out:
rc = selinux_set_mnt_opts(sb, &opts);
out_err:
security_free_mnt_opts(&opts);
return rc;
}
static void selinux_write_opts(struct seq_file *m,
struct security_mnt_opts *opts)
{
int i;
char *prefix;
for (i = 0; i < opts->num_mnt_opts; i++) {
char *has_comma;
if (opts->mnt_opts[i])
has_comma = strchr(opts->mnt_opts[i], ',');
else
has_comma = NULL;
switch (opts->mnt_opts_flags[i]) {
case CONTEXT_MNT:
prefix = CONTEXT_STR;
break;
case FSCONTEXT_MNT:
prefix = FSCONTEXT_STR;
break;
case ROOTCONTEXT_MNT:
prefix = ROOTCONTEXT_STR;
break;
case DEFCONTEXT_MNT:
prefix = DEFCONTEXT_STR;
break;
case SE_SBLABELSUPP:
seq_putc(m, ',');
seq_puts(m, LABELSUPP_STR);
continue;
default:
BUG();
return;
};
/* we need a comma before each option */
seq_putc(m, ',');
seq_puts(m, prefix);
if (has_comma)
seq_putc(m, '\"');
seq_puts(m, opts->mnt_opts[i]);
if (has_comma)
seq_putc(m, '\"');
}
}
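/*
* Example of the output generated above (illustrative): a superblock
* mounted with a context value containing a comma shows up in
* /proc/mounts with the value quoted, e.g. ,context="a,b". The quotes
* keep selinux_sb_copy_data() from splitting the value at the comma on
* a later remount.
*/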
static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
{
struct security_mnt_opts opts;
int rc;
rc = selinux_get_mnt_opts(sb, &opts);
if (rc) {
/* before policy load we may get EINVAL, don't show anything */
if (rc == -EINVAL)
rc = 0;
return rc;
}
selinux_write_opts(m, &opts);
security_free_mnt_opts(&opts);
return rc;
}
static inline u16 inode_mode_to_security_class(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFSOCK:
return SECCLASS_SOCK_FILE;
case S_IFLNK:
return SECCLASS_LNK_FILE;
case S_IFREG:
return SECCLASS_FILE;
case S_IFBLK:
return SECCLASS_BLK_FILE;
case S_IFDIR:
return SECCLASS_DIR;
case S_IFCHR:
return SECCLASS_CHR_FILE;
case S_IFIFO:
return SECCLASS_FIFO_FILE;
}
return SECCLASS_FILE;
}
static inline int default_protocol_stream(int protocol)
{
return (protocol == IPPROTO_IP || protocol == IPPROTO_TCP);
}
static inline int default_protocol_dgram(int protocol)
{
return (protocol == IPPROTO_IP || protocol == IPPROTO_UDP);
}
static inline u16 socket_type_to_security_class(int family, int type, int protocol)
{
switch (family) {
case PF_UNIX:
switch (type) {
case SOCK_STREAM:
case SOCK_SEQPACKET:
return SECCLASS_UNIX_STREAM_SOCKET;
case SOCK_DGRAM:
return SECCLASS_UNIX_DGRAM_SOCKET;
}
break;
case PF_INET:
case PF_INET6:
switch (type) {
case SOCK_STREAM:
if (default_protocol_stream(protocol))
return SECCLASS_TCP_SOCKET;
else
return SECCLASS_RAWIP_SOCKET;
case SOCK_DGRAM:
if (default_protocol_dgram(protocol))
return SECCLASS_UDP_SOCKET;
else
return SECCLASS_RAWIP_SOCKET;
case SOCK_DCCP:
return SECCLASS_DCCP_SOCKET;
default:
return SECCLASS_RAWIP_SOCKET;
}
break;
case PF_NETLINK:
switch (protocol) {
case NETLINK_ROUTE:
return SECCLASS_NETLINK_ROUTE_SOCKET;
case NETLINK_FIREWALL:
return SECCLASS_NETLINK_FIREWALL_SOCKET;
case NETLINK_INET_DIAG:
return SECCLASS_NETLINK_TCPDIAG_SOCKET;
case NETLINK_NFLOG:
return SECCLASS_NETLINK_NFLOG_SOCKET;
case NETLINK_XFRM:
return SECCLASS_NETLINK_XFRM_SOCKET;
case NETLINK_SELINUX:
return SECCLASS_NETLINK_SELINUX_SOCKET;
case NETLINK_AUDIT:
return SECCLASS_NETLINK_AUDIT_SOCKET;
case NETLINK_IP6_FW:
return SECCLASS_NETLINK_IP6FW_SOCKET;
case NETLINK_DNRTMSG:
return SECCLASS_NETLINK_DNRT_SOCKET;
case NETLINK_KOBJECT_UEVENT:
return SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET;
default:
return SECCLASS_NETLINK_SOCKET;
}
case PF_PACKET:
return SECCLASS_PACKET_SOCKET;
case PF_KEY:
return SECCLASS_KEY_SOCKET;
case PF_APPLETALK:
return SECCLASS_APPLETALK_SOCKET;
}
return SECCLASS_SOCKET;
}
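/*
* Illustrative mappings (not part of the original source):
*   socket(PF_INET, SOCK_STREAM, 0)             -> SECCLASS_TCP_SOCKET
*   socket(PF_INET, SOCK_STREAM, IPPROTO_SCTP)  -> SECCLASS_RAWIP_SOCKET
*   socket(PF_NETLINK, SOCK_RAW, NETLINK_AUDIT) -> SECCLASS_NETLINK_AUDIT_SOCKET
*/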
#ifdef CONFIG_PROC_FS
static int selinux_proc_get_sid(struct dentry *dentry,
u16 tclass,
u32 *sid)
{
int rc;
char *buffer, *path;
buffer = (char *)__get_free_page(GFP_KERNEL);
if (!buffer)
return -ENOMEM;
path = dentry_path_raw(dentry, buffer, PAGE_SIZE);
if (IS_ERR(path))
rc = PTR_ERR(path);
else {
/* each process gets a /proc/PID/ entry. Strip off the
* PID part to get a valid selinux labeling.
* e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
while (path[1] >= '0' && path[1] <= '9') {
path[1] = '/';
path++;
}
rc = security_genfs_sid("proc", path, tclass, sid);
}
free_page((unsigned long)buffer);
return rc;
}
#else
static int selinux_proc_get_sid(struct dentry *dentry,
u16 tclass,
u32 *sid)
{
return -EINVAL;
}
#endif
/* The inode's security attributes must be initialized before first use. */
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry)
{
struct superblock_security_struct *sbsec = NULL;
struct inode_security_struct *isec = inode->i_security;
u32 sid;
struct dentry *dentry;
#define INITCONTEXTLEN 255
char *context = NULL;
unsigned len = 0;
int rc = 0;
if (isec->initialized)
goto out;
mutex_lock(&isec->lock);
if (isec->initialized)
goto out_unlock;
sbsec = inode->i_sb->s_security;
if (!(sbsec->flags & SE_SBINITIALIZED)) {
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
server is ready to handle calls. */
spin_lock(&sbsec->isec_lock);
if (list_empty(&isec->list))
list_add(&isec->list, &sbsec->isec_head);
spin_unlock(&sbsec->isec_lock);
goto out_unlock;
}
switch (sbsec->behavior) {
case SECURITY_FS_USE_XATTR:
if (!inode->i_op->getxattr) {
isec->sid = sbsec->def_sid;
break;
}
/* Need a dentry, since the xattr API requires one.
Life would be simpler if we could just pass the inode. */
if (opt_dentry) {
/* Called from d_instantiate or d_splice_alias. */
dentry = dget(opt_dentry);
} else {
/* Called from selinux_complete_init, try to find a dentry. */
dentry = d_find_alias(inode);
}
if (!dentry) {
/*
* this can be hit on boot when a file is accessed
* before the policy is loaded. When we load policy we
* may find inodes that have no dentry on the
* sbsec->isec_head list. No reason to complain as these
* will get fixed up the next time we go through
* inode_doinit with a dentry, before these inodes could
* be used again by userspace.
*/
goto out_unlock;
}
len = INITCONTEXTLEN;
context = kmalloc(len+1, GFP_NOFS);
if (!context) {
rc = -ENOMEM;
dput(dentry);
goto out_unlock;
}
context[len] = '\0';
rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
context, len);
if (rc == -ERANGE) {
kfree(context);
/* Need a larger buffer. Query for the right size. */
rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
NULL, 0);
if (rc < 0) {
dput(dentry);
goto out_unlock;
}
len = rc;
context = kmalloc(len+1, GFP_NOFS);
if (!context) {
rc = -ENOMEM;
dput(dentry);
goto out_unlock;
}
context[len] = '\0';
rc = inode->i_op->getxattr(dentry,
XATTR_NAME_SELINUX,
context, len);
}
dput(dentry);
if (rc < 0) {
if (rc != -ENODATA) {
printk(KERN_WARNING "SELinux: %s: getxattr returned "
"%d for dev=%s ino=%ld\n", __func__,
-rc, inode->i_sb->s_id, inode->i_ino);
kfree(context);
goto out_unlock;
}
/* Map ENODATA to the default file SID */
sid = sbsec->def_sid;
rc = 0;
} else {
rc = security_context_to_sid_default(context, rc, &sid,
sbsec->def_sid,
GFP_NOFS);
if (rc) {
char *dev = inode->i_sb->s_id;
unsigned long ino = inode->i_ino;
if (rc == -EINVAL) {
if (printk_ratelimit())
printk(KERN_NOTICE "SELinux: inode=%lu on dev=%s was found to have an invalid "
"context=%s. This indicates you may need to relabel the inode or the "
"filesystem in question.\n", ino, dev, context);
} else {
printk(KERN_WARNING "SELinux: %s: context_to_sid(%s) "
"returned %d for dev=%s ino=%ld\n",
__func__, context, -rc, dev, ino);
}
kfree(context);
/* Leave with the unlabeled SID */
rc = 0;
break;
}
}
kfree(context);
isec->sid = sid;
break;
case SECURITY_FS_USE_TASK:
isec->sid = isec->task_sid;
break;
case SECURITY_FS_USE_TRANS:
/* Default to the fs SID. */
isec->sid = sbsec->sid;
/* Try to obtain a transition SID. */
isec->sclass = inode_mode_to_security_class(inode->i_mode);
rc = security_transition_sid(isec->task_sid, sbsec->sid,
isec->sclass, NULL, &sid);
if (rc)
goto out_unlock;
isec->sid = sid;
break;
case SECURITY_FS_USE_MNTPOINT:
isec->sid = sbsec->mntpoint_sid;
break;
default:
/* Default to the fs superblock SID. */
isec->sid = sbsec->sid;
if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
if (opt_dentry) {
isec->sclass = inode_mode_to_security_class(inode->i_mode);
rc = selinux_proc_get_sid(opt_dentry,
isec->sclass,
&sid);
if (rc)
goto out_unlock;
isec->sid = sid;
}
}
break;
}
isec->initialized = 1;
out_unlock:
mutex_unlock(&isec->lock);
out:
if (isec->sclass == SECCLASS_FILE)
isec->sclass = inode_mode_to_security_class(inode->i_mode);
return rc;
}
/* Convert a Linux signal to an access vector. */
static inline u32 signal_to_av(int sig)
{
u32 perm = 0;
switch (sig) {
case SIGCHLD:
/* Commonly granted from child to parent. */
perm = PROCESS__SIGCHLD;
break;
case SIGKILL:
/* Cannot be caught or ignored */
perm = PROCESS__SIGKILL;
break;
case SIGSTOP:
/* Cannot be caught or ignored */
perm = PROCESS__SIGSTOP;
break;
default:
/* All other signals. */
perm = PROCESS__SIGNAL;
break;
}
return perm;
}
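/*
* Example (illustrative): kill(pid, SIGTERM) falls into the default arm
* and is checked with PROCESS__SIGNAL, while kill(pid, SIGKILL) is
* checked against the distinct PROCESS__SIGKILL vector, so policy can
* grant ordinary signals without granting the uncatchable ones.
*/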
/*
* Check permission between a pair of credentials
* fork check, ptrace check, etc.
*/
static int cred_has_perm(const struct cred *actor,
const struct cred *target,
u32 perms)
{
u32 asid = cred_sid(actor), tsid = cred_sid(target);
return avc_has_perm(asid, tsid, SECCLASS_PROCESS, perms, NULL);
}
/*
* Check permission between a pair of tasks, e.g. signal checks,
* fork check, ptrace check, etc.
* tsk1 is the actor and tsk2 is the target
* - this uses the default subjective creds of tsk1
*/
static int task_has_perm(const struct task_struct *tsk1,
const struct task_struct *tsk2,
u32 perms)
{
const struct task_security_struct *__tsec1, *__tsec2;
u32 sid1, sid2;
rcu_read_lock();
__tsec1 = __task_cred(tsk1)->security; sid1 = __tsec1->sid;
__tsec2 = __task_cred(tsk2)->security; sid2 = __tsec2->sid;
rcu_read_unlock();
return avc_has_perm(sid1, sid2, SECCLASS_PROCESS, perms, NULL);
}
/*
* Check permission between current and another task, e.g. signal checks,
* fork check, ptrace check, etc.
* current is the actor and tsk2 is the target
* - this uses current's subjective creds
*/
static int current_has_perm(const struct task_struct *tsk,
u32 perms)
{
u32 sid, tsid;
sid = current_sid();
tsid = task_sid(tsk);
return avc_has_perm(sid, tsid, SECCLASS_PROCESS, perms, NULL);
}
#if CAP_LAST_CAP > 63
#error Fix SELinux to handle capabilities > 63.
#endif
/* Check whether a task is allowed to use a capability. */
static int task_has_capability(struct task_struct *tsk,
const struct cred *cred,
int cap, int audit)
{
struct common_audit_data ad;
struct av_decision avd;
u16 sclass;
u32 sid = cred_sid(cred);
u32 av = CAP_TO_MASK(cap);
int rc;
COMMON_AUDIT_DATA_INIT(&ad, CAP);
ad.tsk = tsk;
ad.u.cap = cap;
switch (CAP_TO_INDEX(cap)) {
case 0:
sclass = SECCLASS_CAPABILITY;
break;
case 1:
sclass = SECCLASS_CAPABILITY2;
break;
default:
printk(KERN_ERR
"SELinux: out of range capability %d\n", cap);
BUG();
return -EINVAL;
}
rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
if (audit == SECURITY_CAP_AUDIT) {
int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
if (rc2)
return rc2;
}
return rc;
}
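/*
* Example of the index split above (illustrative): CAP_SYS_ADMIN (21)
* yields CAP_TO_INDEX == 0 and is checked in the "capability" class,
* while CAP_MAC_ADMIN (33) yields index 1 and lands in "capability2";
* the CAP_LAST_CAP guard above is what keeps two classes sufficient.
*/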
/* Check whether a task is allowed to use a system operation. */
static int task_has_system(struct task_struct *tsk,
u32 perms)
{
u32 sid = task_sid(tsk);
return avc_has_perm(sid, SECINITSID_KERNEL,
SECCLASS_SYSTEM, perms, NULL);
}
/* Check whether a task has a particular permission to an inode.
The 'adp' parameter is optional and allows other audit
data to be passed (e.g. the dentry). */
static int inode_has_perm(const struct cred *cred,
struct inode *inode,
u32 perms,
struct common_audit_data *adp,
unsigned flags)
{
struct inode_security_struct *isec;
u32 sid;
validate_creds(cred);
if (unlikely(IS_PRIVATE(inode)))
return 0;
sid = cred_sid(cred);
isec = inode->i_security;
return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
}
static int inode_has_perm_noadp(const struct cred *cred,
struct inode *inode,
u32 perms,
unsigned flags)
{
struct common_audit_data ad;
COMMON_AUDIT_DATA_INIT(&ad, INODE);
ad.u.inode = inode;
return inode_has_perm(cred, inode, perms, &ad, flags);
}
/* Same as inode_has_perm, but pass explicit audit data containing
the dentry to help the auditing code to more easily generate the
pathname if needed. */
static inline int dentry_has_perm(const struct cred *cred,
struct dentry *dentry,
u32 av)
{
struct inode *inode = dentry->d_inode;
struct common_audit_data ad;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
return inode_has_perm(cred, inode, av, &ad, 0);
}
/* Same as inode_has_perm, but pass explicit audit data containing
the path to help the auditing code to more easily generate the
pathname if needed. */
static inline int path_has_perm(const struct cred *cred,
struct path *path,
u32 av)
{
struct inode *inode = path->dentry->d_inode;
struct common_audit_data ad;
COMMON_AUDIT_DATA_INIT(&ad, PATH);
ad.u.path = *path;
return inode_has_perm(cred, inode, av, &ad, 0);
}
/* Check whether a task can use an open file descriptor to
access an inode in a given way. Check access to the
descriptor itself, and then use dentry_has_perm to
check a particular permission to the file.
Access to the descriptor is implicitly granted if it
has the same SID as the process. If av is zero, then
access to the file is not checked, e.g. for cases
where only the descriptor is affected like seek. */
static int file_has_perm(const struct cred *cred,
struct file *file,
u32 av)
{
struct file_security_struct *fsec = file->f_security;
struct inode *inode = file->f_path.dentry->d_inode;
struct common_audit_data ad;
u32 sid = cred_sid(cred);
int rc;
COMMON_AUDIT_DATA_INIT(&ad, PATH);
ad.u.path = file->f_path;
if (sid != fsec->sid) {
rc = avc_has_perm(sid, fsec->sid,
SECCLASS_FD,
FD__USE,
&ad);
if (rc)
goto out;
}
/* av is zero if only checking access to the descriptor. */
rc = 0;
if (av)
rc = inode_has_perm(cred, inode, av, &ad, 0);
out:
return rc;
}
/* Check whether a task can create a file. */
static int may_create(struct inode *dir,
struct dentry *dentry,
u16 tclass)
{
const struct task_security_struct *tsec = current_security();
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid;
struct common_audit_data ad;
int rc;
dsec = dir->i_security;
sbsec = dir->i_sb->s_security;
sid = tsec->sid;
newsid = tsec->create_sid;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
DIR__ADD_NAME | DIR__SEARCH,
&ad);
if (rc)
return rc;
if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid, tclass,
&dentry->d_name, &newsid);
if (rc)
return rc;
}
rc = avc_has_perm(sid, newsid, tclass, FILE__CREATE, &ad);
if (rc)
return rc;
return avc_has_perm(newsid, sbsec->sid,
SECCLASS_FILESYSTEM,
FILESYSTEM__ASSOCIATE, &ad);
}
/* Check whether a task can create a key. */
static int may_create_key(u32 ksid,
struct task_struct *ctx)
{
u32 sid = task_sid(ctx);
return avc_has_perm(sid, ksid, SECCLASS_KEY, KEY__CREATE, NULL);
}
#define MAY_LINK 0
#define MAY_UNLINK 1
#define MAY_RMDIR 2
/* Check whether a task can link, unlink, or rmdir a file/directory. */
static int may_link(struct inode *dir,
struct dentry *dentry,
int kind)
{
struct inode_security_struct *dsec, *isec;
struct common_audit_data ad;
u32 sid = current_sid();
u32 av;
int rc;
dsec = dir->i_security;
isec = dentry->d_inode->i_security;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
av = DIR__SEARCH;
av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR, av, &ad);
if (rc)
return rc;
switch (kind) {
case MAY_LINK:
av = FILE__LINK;
break;
case MAY_UNLINK:
av = FILE__UNLINK;
break;
case MAY_RMDIR:
av = DIR__RMDIR;
break;
default:
printk(KERN_WARNING "SELinux: %s: unrecognized kind %d\n",
__func__, kind);
return 0;
}
rc = avc_has_perm(sid, isec->sid, isec->sclass, av, &ad);
return rc;
}
static inline int may_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry)
{
struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
struct common_audit_data ad;
u32 sid = current_sid();
u32 av;
int old_is_dir, new_is_dir;
int rc;
old_dsec = old_dir->i_security;
old_isec = old_dentry->d_inode->i_security;
old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
new_dsec = new_dir->i_security;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = old_dentry;
rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
DIR__REMOVE_NAME | DIR__SEARCH, &ad);
if (rc)
return rc;
rc = avc_has_perm(sid, old_isec->sid,
old_isec->sclass, FILE__RENAME, &ad);
if (rc)
return rc;
if (old_is_dir && new_dir != old_dir) {
rc = avc_has_perm(sid, old_isec->sid,
old_isec->sclass, DIR__REPARENT, &ad);
if (rc)
return rc;
}
ad.u.dentry = new_dentry;
av = DIR__ADD_NAME | DIR__SEARCH;
if (new_dentry->d_inode)
av |= DIR__REMOVE_NAME;
rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
if (rc)
return rc;
if (new_dentry->d_inode) {
new_isec = new_dentry->d_inode->i_security;
new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
rc = avc_has_perm(sid, new_isec->sid,
new_isec->sclass,
(new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
if (rc)
return rc;
}
return 0;
}
/* Check whether a task can perform a filesystem operation. */
static int superblock_has_perm(const struct cred *cred,
struct super_block *sb,
u32 perms,
struct common_audit_data *ad)
{
struct superblock_security_struct *sbsec;
u32 sid = cred_sid(cred);
sbsec = sb->s_security;
return avc_has_perm(sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad);
}
/* Convert a Linux mode and permission mask to an access vector. */
static inline u32 file_mask_to_av(int mode, int mask)
{
u32 av = 0;
if ((mode & S_IFMT) != S_IFDIR) {
if (mask & MAY_EXEC)
av |= FILE__EXECUTE;
if (mask & MAY_READ)
av |= FILE__READ;
if (mask & MAY_APPEND)
av |= FILE__APPEND;
else if (mask & MAY_WRITE)
av |= FILE__WRITE;
} else {
if (mask & MAY_EXEC)
av |= DIR__SEARCH;
if (mask & MAY_WRITE)
av |= DIR__WRITE;
if (mask & MAY_READ)
av |= DIR__READ;
}
return av;
}
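/*
* Illustrative mapping (not in the original source): for a regular file,
* MAY_READ|MAY_WRITE becomes FILE__READ|FILE__WRITE, and MAY_APPEND is
* deliberately tested before MAY_WRITE so O_APPEND writers only need
* FILE__APPEND; for a directory, MAY_EXEC maps to DIR__SEARCH rather
* than an execute permission.
*/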
/* Convert a Linux file to an access vector. */
static inline u32 file_to_av(struct file *file)
{
u32 av = 0;
if (file->f_mode & FMODE_READ)
av |= FILE__READ;
if (file->f_mode & FMODE_WRITE) {
if (file->f_flags & O_APPEND)
av |= FILE__APPEND;
else
av |= FILE__WRITE;
}
if (!av) {
/*
* Special file opened with flags 3 for ioctl-only use.
*/
av = FILE__IOCTL;
}
return av;
}
/*
* Convert a file to an access vector and include the correct
* open permission.
*/
static inline u32 open_file_to_av(struct file *file)
{
u32 av = file_to_av(file);
if (selinux_policycap_openperm)
av |= FILE__OPEN;
return av;
}
/* Hook functions begin here. */
static int selinux_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int rc;
rc = cap_ptrace_access_check(child, mode);
if (rc)
return rc;
if (mode == PTRACE_MODE_READ) {
u32 sid = current_sid();
u32 csid = task_sid(child);
return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL);
}
return current_has_perm(child, PROCESS__PTRACE);
}
static int selinux_ptrace_traceme(struct task_struct *parent)
{
int rc;
rc = cap_ptrace_traceme(parent);
if (rc)
return rc;
return task_has_perm(parent, current, PROCESS__PTRACE);
}
static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
int error;
error = current_has_perm(target, PROCESS__GETCAP);
if (error)
return error;
return cap_capget(target, effective, inheritable, permitted);
}
static int selinux_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
int error;
error = cap_capset(new, old,
effective, inheritable, permitted);
if (error)
return error;
return cred_has_perm(old, new, PROCESS__SETCAP);
}
/*
* (This comment used to live with the selinux_task_setuid hook,
* which was removed).
*
* Since setuid only affects the current process, and since the SELinux
* controls are not based on the Linux identity attributes, SELinux does not
* need to control this operation. However, SELinux does control the use of
* the CAP_SETUID and CAP_SETGID capabilities using the capable hook.
*/
static int selinux_capable(struct task_struct *tsk, const struct cred *cred,
struct user_namespace *ns, int cap, int audit)
{
int rc;
rc = cap_capable(tsk, cred, ns, cap, audit);
if (rc)
return rc;
return task_has_capability(tsk, cred, cap, audit);
}
static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
{
const struct cred *cred = current_cred();
int rc = 0;
if (!sb)
return 0;
switch (cmds) {
case Q_SYNC:
case Q_QUOTAON:
case Q_QUOTAOFF:
case Q_SETINFO:
case Q_SETQUOTA:
rc = superblock_has_perm(cred, sb, FILESYSTEM__QUOTAMOD, NULL);
break;
case Q_GETFMT:
case Q_GETINFO:
case Q_GETQUOTA:
rc = superblock_has_perm(cred, sb, FILESYSTEM__QUOTAGET, NULL);
break;
default:
rc = 0; /* let the kernel handle invalid cmds */
break;
}
return rc;
}
static int selinux_quota_on(struct dentry *dentry)
{
const struct cred *cred = current_cred();
return dentry_has_perm(cred, dentry, FILE__QUOTAON);
}
static int selinux_syslog(int type)
{
int rc;
switch (type) {
case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
rc = task_has_system(current, SYSTEM__SYSLOG_READ);
break;
case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
break;
case SYSLOG_ACTION_CLOSE: /* Close log */
case SYSLOG_ACTION_OPEN: /* Open log */
case SYSLOG_ACTION_READ: /* Read from log */
case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
default:
rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
break;
}
return rc;
}
/*
* Check that a process has enough memory to allocate a new virtual
* mapping. 0 means there is enough memory for the allocation to
* succeed and -ENOMEM implies there is not.
*
* Do not audit the selinux permission check, as this is applied to all
* processes that allocate mappings.
*/
static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
{
int rc, cap_sys_admin = 0;
rc = selinux_capable(current, current_cred(),
&init_user_ns, CAP_SYS_ADMIN,
SECURITY_CAP_NOAUDIT);
if (rc == 0)
cap_sys_admin = 1;
return __vm_enough_memory(mm, pages, cap_sys_admin);
}
/* binprm security operations */
static int selinux_bprm_set_creds(struct linux_binprm *bprm)
{
const struct task_security_struct *old_tsec;
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
struct inode *inode = bprm->file->f_path.dentry->d_inode;
int rc;
rc = cap_bprm_set_creds(bprm);
if (rc)
return rc;
/* SELinux context only depends on initial program or script and not
* the script interpreter */
if (bprm->cred_prepared)
return 0;
old_tsec = current_security();
new_tsec = bprm->cred->security;
isec = inode->i_security;
/* Default to the current task SID. */
new_tsec->sid = old_tsec->sid;
new_tsec->osid = old_tsec->sid;
/* Reset fs, key, and sock SIDs on execve. */
new_tsec->create_sid = 0;
new_tsec->keycreate_sid = 0;
new_tsec->sockcreate_sid = 0;
if (old_tsec->exec_sid) {
new_tsec->sid = old_tsec->exec_sid;
/* Reset exec SID on execve. */
new_tsec->exec_sid = 0;
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
SECCLASS_PROCESS, NULL,
&new_tsec->sid);
if (rc)
return rc;
}
COMMON_AUDIT_DATA_INIT(&ad, PATH);
ad.u.path = bprm->file->f_path;
if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
new_tsec->sid = old_tsec->sid;
if (new_tsec->sid == old_tsec->sid) {
rc = avc_has_perm(old_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
if (rc)
return rc;
} else {
/* Check permissions for the transition. */
rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__TRANSITION, &ad);
if (rc)
return rc;
rc = avc_has_perm(new_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__ENTRYPOINT, &ad);
if (rc)
return rc;
/* Check for shared state */
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__SHARE,
NULL);
if (rc)
return -EPERM;
}
/* Make sure that anyone attempting to ptrace over a task that
* changes its SID has the appropriate permit */
if (bprm->unsafe &
(LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
struct task_struct *tracer;
struct task_security_struct *sec;
u32 ptsid = 0;
rcu_read_lock();
tracer = tracehook_tracer_task(current);
if (likely(tracer != NULL)) {
sec = __task_cred(tracer)->security;
ptsid = sec->sid;
}
rcu_read_unlock();
if (ptsid != 0) {
rc = avc_has_perm(ptsid, new_tsec->sid,
SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (rc)
return -EPERM;
}
}
/* Clear any possibly unsafe personality bits on exec: */
bprm->per_clear |= PER_CLEAR_ON_SETID;
}
return 0;
}
static int selinux_bprm_secureexec(struct linux_binprm *bprm)
{
const struct task_security_struct *tsec = current_security();
u32 sid, osid;
int atsecure = 0;
sid = tsec->sid;
osid = tsec->osid;
if (osid != sid) {
/* Enable secure mode for SID transitions unless
the noatsecure permission is granted between
the two SIDs, i.e. ahp returns 0. */
atsecure = avc_has_perm(osid, sid,
SECCLASS_PROCESS,
PROCESS__NOATSECURE, NULL);
}
return (atsecure || cap_bprm_secureexec(bprm));
}
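/*
* Example effect (illustrative): on a domain transition (osid != sid)
* with no process:noatsecure allow rule, avc_has_perm() returns nonzero,
* the hook reports a secure exec, and the new image sees AT_SECURE=1 in
* its auxiliary vector, at which point libc ignores LD_PRELOAD-style
* environment variables.
*/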
extern struct vfsmount *selinuxfs_mount;
extern struct dentry *selinux_null;
/* Derived from fs/exec.c:flush_old_files. */
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
{
struct common_audit_data ad;
struct file *file, *devnull = NULL;
struct tty_struct *tty;
struct fdtable *fdt;
long j = -1;
int drop_tty = 0;
tty = get_current_tty();
if (tty) {
spin_lock(&tty_files_lock);
if (!list_empty(&tty->tty_files)) {
struct tty_file_private *file_priv;
struct inode *inode;
/* Revalidate access to controlling tty.
Use inode_has_perm on the tty inode directly rather
than using file_has_perm, as this particular open
file may belong to another process and we are only
interested in the inode-based check here. */
file_priv = list_first_entry(&tty->tty_files,
struct tty_file_private, list);
file = file_priv->file;
inode = file->f_path.dentry->d_inode;
if (inode_has_perm_noadp(cred, inode,
FILE__READ | FILE__WRITE, 0)) {
drop_tty = 1;
}
}
spin_unlock(&tty_files_lock);
tty_kref_put(tty);
}
/* Reset controlling tty. */
if (drop_tty)
no_tty();
/* Revalidate access to inherited open files. */
COMMON_AUDIT_DATA_INIT(&ad, INODE);
spin_lock(&files->file_lock);
for (;;) {
unsigned long set, i;
int fd;
j++;
i = j * __NFDBITS;
fdt = files_fdtable(files);
if (i >= fdt->max_fds)
break;
set = fdt->open_fds->fds_bits[j];
if (!set)
continue;
spin_unlock(&files->file_lock);
for ( ; set ; i++, set >>= 1) {
if (set & 1) {
file = fget(i);
if (!file)
continue;
if (file_has_perm(cred,
file,
file_to_av(file))) {
sys_close(i);
fd = get_unused_fd();
if (fd != i) {
if (fd >= 0)
put_unused_fd(fd);
fput(file);
continue;
}
if (devnull) {
get_file(devnull);
} else {
devnull = dentry_open(
dget(selinux_null),
mntget(selinuxfs_mount),
O_RDWR, cred);
if (IS_ERR(devnull)) {
devnull = NULL;
put_unused_fd(fd);
fput(file);
continue;
}
}
fd_install(fd, devnull);
}
fput(file);
}
}
spin_lock(&files->file_lock);
}
spin_unlock(&files->file_lock);
}
/*
* Prepare a process for imminent new credential changes due to exec
*/
static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
{
struct task_security_struct *new_tsec;
struct rlimit *rlim, *initrlim;
int rc, i;
new_tsec = bprm->cred->security;
if (new_tsec->sid == new_tsec->osid)
return;
/* Close files for which the new task SID is not authorized. */
flush_unauthorized_files(bprm->cred, current->files);
/* Always clear parent death signal on SID transitions. */
current->pdeath_signal = 0;
/* Check whether the new SID can inherit resource limits from the old
* SID. If not, reset all soft limits to the lower of the current
* task's hard limit and the init task's soft limit.
*
* Note that the setting of hard limits (even to lower them) can be
* controlled by the setrlimit check. The inclusion of the init task's
* soft limit into the computation is to avoid resetting soft limits
* higher than the default soft limit for cases where the default is
* lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK.
*/
rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
PROCESS__RLIMITINH, NULL);
if (rc) {
/* protect against do_prlimit() */
task_lock(current);
for (i = 0; i < RLIM_NLIMITS; i++) {
rlim = current->signal->rlim + i;
initrlim = init_task.signal->rlim + i;
rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
}
task_unlock(current);
update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
}
}
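/*
* Worked example of the clamp above (illustrative numbers): if the
* exec'd domain is denied process:rlimitinh and RLIMIT_NOFILE has
* rlim_max == 4096 while init's soft limit is 1024, the loop resets
* rlim_cur to min(4096, 1024) == 1024 for the new SID.
*/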
/*
* Clean up the process immediately after the installation of new credentials
* due to exec
*/
static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
{
const struct task_security_struct *tsec = current_security();
struct itimerval itimer;
u32 osid, sid;
int rc, i;
osid = tsec->osid;
sid = tsec->sid;
if (sid == osid)
return;
/* Check whether the new SID can inherit signal state from the old SID.
* If not, clear itimers to avoid subsequent signal generation and
* flush and unblock signals.
*
* This must occur _after_ the task SID has been updated so that any
* kill done after the flush will be checked against the new SID.
*/
rc = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
if (rc) {
memset(&itimer, 0, sizeof itimer);
for (i = 0; i < 3; i++)
do_setitimer(i, &itimer, NULL);
spin_lock_irq(¤t->sighand->siglock);
if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
__flush_signals(current);
flush_signal_handlers(current, 1);
sigemptyset(¤t->blocked);
}
spin_unlock_irq(¤t->sighand->siglock);
}
/* Wake up the parent if it is waiting so that it can recheck
* wait permission to the new task SID. */
read_lock(&tasklist_lock);
__wake_up_parent(current, current->real_parent);
read_unlock(&tasklist_lock);
}
/* superblock security operations */
static int selinux_sb_alloc_security(struct super_block *sb)
{
return superblock_alloc_security(sb);
}
static void selinux_sb_free_security(struct super_block *sb)
{
superblock_free_security(sb);
}
static inline int match_prefix(char *prefix, int plen, char *option, int olen)
{
if (plen > olen)
return 0;
return !memcmp(prefix, option, plen);
}
static inline int selinux_option(char *option, int len)
{
return (match_prefix(CONTEXT_STR, sizeof(CONTEXT_STR)-1, option, len) ||
match_prefix(FSCONTEXT_STR, sizeof(FSCONTEXT_STR)-1, option, len) ||
match_prefix(DEFCONTEXT_STR, sizeof(DEFCONTEXT_STR)-1, option, len) ||
match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len) ||
match_prefix(LABELSUPP_STR, sizeof(LABELSUPP_STR)-1, option, len));
}
static inline void take_option(char **to, char *from, int *first, int len)
{
if (!*first) {
**to = ',';
*to += 1;
} else
*first = 0;
memcpy(*to, from, len);
*to += len;
}
static inline void take_selinux_option(char **to, char *from, int *first,
int len)
{
int current_size = 0;
if (!*first) {
**to = '|';
*to += 1;
} else
*first = 0;
while (current_size < len) {
if (*from != '"') {
**to = *from;
*to += 1;
}
from += 1;
current_size += 1;
}
}
static int selinux_sb_copy_data(char *orig, char *copy)
{
int fnosec, fsec, rc = 0;
char *in_save, *in_curr, *in_end;
char *sec_curr, *nosec_save, *nosec;
int open_quote = 0;
in_curr = orig;
sec_curr = copy;
nosec = (char *)get_zeroed_page(GFP_KERNEL);
if (!nosec) {
rc = -ENOMEM;
goto out;
}
nosec_save = nosec;
fnosec = fsec = 1;
in_save = in_end = orig;
do {
if (*in_end == '"')
open_quote = !open_quote;
if ((*in_end == ',' && open_quote == 0) ||
*in_end == '\0') {
int len = in_end - in_curr;
if (selinux_option(in_curr, len))
take_selinux_option(&sec_curr, in_curr, &fsec, len);
else
take_option(&nosec, in_curr, &fnosec, len);
in_curr = in_end + 1;
}
} while (*in_end++);
strcpy(in_save, nosec_save);
free_page((unsigned long)nosec_save);
out:
return rc;
}
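/*
 * Illustrative sketch of the split performed above. Given raw mount
 * data such as (hypothetical)
 *
 *	ro,context="sys_u:obj_r:tmp_t:s0",noatime
 *
 * selinux_sb_copy_data() leaves "ro,noatime" in the original buffer
 * for the filesystem to parse and copies the context option into the
 * security data page; the open_quote tracking keeps commas inside
 * quoted values from being treated as option separators.
 */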
static int selinux_sb_remount(struct super_block *sb, void *data)
{
int rc, i, *flags;
struct security_mnt_opts opts;
char *secdata, **mount_options;
struct superblock_security_struct *sbsec = sb->s_security;
if (!(sbsec->flags & SE_SBINITIALIZED))
return 0;
if (!data)
return 0;
if (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
return 0;
security_init_mnt_opts(&opts);
secdata = alloc_secdata();
if (!secdata)
return -ENOMEM;
rc = selinux_sb_copy_data(data, secdata);
if (rc)
goto out_free_secdata;
rc = selinux_parse_opts_str(secdata, &opts);
if (rc)
goto out_free_secdata;
mount_options = opts.mnt_opts;
flags = opts.mnt_opts_flags;
for (i = 0; i < opts.num_mnt_opts; i++) {
u32 sid;
size_t len;
if (flags[i] == SE_SBLABELSUPP)
continue;
len = strlen(mount_options[i]);
rc = security_context_to_sid(mount_options[i], len, &sid);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
mount_options[i], sb->s_id, sb->s_type->name, rc);
goto out_free_opts;
}
rc = -EINVAL;
switch (flags[i]) {
case FSCONTEXT_MNT:
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
goto out_bad_option;
break;
case CONTEXT_MNT:
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
goto out_bad_option;
break;
case ROOTCONTEXT_MNT: {
struct inode_security_struct *root_isec;
root_isec = sb->s_root->d_inode->i_security;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
goto out_bad_option;
break;
}
case DEFCONTEXT_MNT:
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
goto out_bad_option;
break;
default:
goto out_free_opts;
}
}
rc = 0;
out_free_opts:
security_free_mnt_opts(&opts);
out_free_secdata:
free_secdata(secdata);
return rc;
out_bad_option:
printk(KERN_WARNING "SELinux: unable to change security options "
"during remount (dev %s, type=%s)\n", sb->s_id,
sb->s_type->name);
goto out_free_opts;
}
static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
int rc;
rc = superblock_doinit(sb, data);
if (rc)
return rc;
/* Allow all mounts performed by the kernel */
if (flags & MS_KERNMOUNT)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = sb->s_root;
return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
}
static int selinux_sb_statfs(struct dentry *dentry)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry->d_sb->s_root;
return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
static int selinux_mount(char *dev_name,
struct path *path,
char *type,
unsigned long flags,
void *data)
{
const struct cred *cred = current_cred();
if (flags & MS_REMOUNT)
return superblock_has_perm(cred, path->mnt->mnt_sb,
FILESYSTEM__REMOUNT, NULL);
else
return path_has_perm(cred, path, FILE__MOUNTON);
}
static int selinux_umount(struct vfsmount *mnt, int flags)
{
const struct cred *cred = current_cred();
return superblock_has_perm(cred, mnt->mnt_sb,
FILESYSTEM__UNMOUNT, NULL);
}
/* inode security operations */
static int selinux_inode_alloc_security(struct inode *inode)
{
return inode_alloc_security(inode);
}
static void selinux_inode_free_security(struct inode *inode)
{
inode_free_security(inode);
}
static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, char **name,
void **value, size_t *len)
{
const struct task_security_struct *tsec = current_security();
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid, clen;
int rc;
char *namep = NULL, *context;
dsec = dir->i_security;
sbsec = dir->i_sb->s_security;
sid = tsec->sid;
newsid = tsec->create_sid;
if ((sbsec->flags & SE_SBINITIALIZED) &&
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
newsid = sbsec->mntpoint_sid;
else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
qstr, &newsid);
if (rc) {
printk(KERN_WARNING "%s: "
"security_transition_sid failed, rc=%d (dev=%s "
"ino=%ld)\n",
__func__,
-rc, inode->i_sb->s_id, inode->i_ino);
return rc;
}
}
/* Possibly defer initialization to selinux_complete_init. */
if (sbsec->flags & SE_SBINITIALIZED) {
struct inode_security_struct *isec = inode->i_security;
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
}
if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
return -EOPNOTSUPP;
if (name) {
namep = kstrdup(XATTR_SELINUX_SUFFIX, GFP_NOFS);
if (!namep)
return -ENOMEM;
*name = namep;
}
if (value && len) {
rc = security_sid_to_context_force(newsid, &context, &clen);
if (rc) {
kfree(namep);
return rc;
}
*value = context;
*len = clen;
}
return 0;
}
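/*
 * Policy sketch (assumption; standard policy syntax rather than
 * anything defined here): the transition computed by
 * security_transition_sid() above is usually driven by a rule like
 *
 *	type_transition mydomain_t etc_t : file myconf_t;
 *
 * so that files created by mydomain_t in etc_t directories get the
 * myconf_t type instead of inheriting the directory's label. All
 * type names are representative examples.
 */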
static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int mask)
{
return may_create(dir, dentry, SECCLASS_FILE);
}
static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
return may_link(dir, old_dentry, MAY_LINK);
}
static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
{
return may_link(dir, dentry, MAY_UNLINK);
}
static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const char *name)
{
return may_create(dir, dentry, SECCLASS_LNK_FILE);
}
static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, int mask)
{
return may_create(dir, dentry, SECCLASS_DIR);
}
static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
return may_link(dir, dentry, MAY_RMDIR);
}
static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
return may_create(dir, dentry, inode_mode_to_security_class(mode));
}
static int selinux_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
struct inode *new_inode, struct dentry *new_dentry)
{
return may_rename(old_inode, old_dentry, new_inode, new_dentry);
}
static int selinux_inode_readlink(struct dentry *dentry)
{
const struct cred *cred = current_cred();
return dentry_has_perm(cred, dentry, FILE__READ);
}
static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *nameidata)
{
const struct cred *cred = current_cred();
return dentry_has_perm(cred, dentry, FILE__READ);
}
static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
u32 perms;
bool from_access;
from_access = mask & MAY_ACCESS;
mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
/* No permission to check. Existence test. */
if (!mask)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, INODE);
ad.u.inode = inode;
if (from_access)
ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS;
perms = file_mask_to_av(inode->i_mode, mask);
return inode_has_perm(cred, inode, perms, &ad, flags);
}
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
unsigned int ia_valid = iattr->ia_valid;
/* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
if (ia_valid & ATTR_FORCE) {
ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE |
ATTR_FORCE);
if (!ia_valid)
return 0;
}
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
return dentry_has_perm(cred, dentry, FILE__WRITE);
}
static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
const struct cred *cred = current_cred();
struct path path;
path.dentry = dentry;
path.mnt = mnt;
return path_has_perm(cred, &path, FILE__GETATTR);
}
static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
{
const struct cred *cred = current_cred();
if (!strncmp(name, XATTR_SECURITY_PREFIX,
sizeof XATTR_SECURITY_PREFIX - 1)) {
if (!strcmp(name, XATTR_NAME_CAPS)) {
if (!capable(CAP_SETFCAP))
return -EPERM;
} else if (!capable(CAP_SYS_ADMIN)) {
/* A different attribute in the security namespace.
Restrict to administrator. */
return -EPERM;
}
}
/* Not an attribute we recognize, so just check the
ordinary setattr permission. */
return dentry_has_perm(cred, dentry, FILE__SETATTR);
}
static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
struct inode_security_struct *isec = inode->i_security;
struct superblock_security_struct *sbsec;
struct common_audit_data ad;
u32 newsid, sid = current_sid();
int rc = 0;
if (strcmp(name, XATTR_NAME_SELINUX))
return selinux_inode_setotherxattr(dentry, name);
sbsec = inode->i_sb->s_security;
if (!(sbsec->flags & SE_SBLABELSUPP))
return -EOPNOTSUPP;
if (!inode_owner_or_capable(inode))
return -EPERM;
COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
ad.u.dentry = dentry;
rc = avc_has_perm(sid, isec->sid, isec->sclass,
FILE__RELABELFROM, &ad);
if (rc)
return rc;
rc = security_context_to_sid(value, size, &newsid);
if (rc == -EINVAL) {
if (!capable(CAP_MAC_ADMIN))
return rc;
rc = security_context_to_sid_force(value, size, &newsid);
}
if (rc)
return rc;
rc = avc_has_perm(sid, newsid, isec->sclass,
FILE__RELABELTO, &ad);
if (rc)
return rc;
rc = security_validate_transition(isec->sid, newsid, sid,
isec->sclass);
if (rc)
return rc;
return avc_has_perm(newsid,
sbsec->sid,
SECCLASS_FILESYSTEM,
FILESYSTEM__ASSOCIATE,
&ad);
}
static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size,
int flags)
{
struct inode *inode = dentry->d_inode;
struct inode_security_struct *isec = inode->i_security;
u32 newsid;
int rc;
if (strcmp(name, XATTR_NAME_SELINUX)) {
/* Not an attribute we recognize, so nothing to do. */
return;
}
rc = security_context_to_sid_force(value, size, &newsid);
if (rc) {
printk(KERN_ERR "SELinux: unable to map context to SID"
"for (%s, %lu), rc=%d\n",
inode->i_sb->s_id, inode->i_ino, -rc);
return;
}
isec->sid = newsid;
return;
}
static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
{
const struct cred *cred = current_cred();
return dentry_has_perm(cred, dentry, FILE__GETATTR);
}
static int selinux_inode_listxattr(struct dentry *dentry)
{
const struct cred *cred = current_cred();
return dentry_has_perm(cred, dentry, FILE__GETATTR);
}
static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
{
if (strcmp(name, XATTR_NAME_SELINUX))
return selinux_inode_setotherxattr(dentry, name);
/* No one is allowed to remove a SELinux security label.
You can change the label, but all data must be labeled. */
return -EACCES;
}
/*
* Copy the inode security context value to the user.
*
* Permission check is handled by selinux_inode_getxattr hook.
*/
static int selinux_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
u32 size;
int error;
char *context = NULL;
struct inode_security_struct *isec = inode->i_security;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
/*
* If the caller has CAP_MAC_ADMIN, then get the raw context
* value even if it is not defined by current policy; otherwise,
* use the in-core value under current policy.
* Use the non-auditing forms of the permission checks since
* getxattr may be called by unprivileged processes commonly
* and lack of permission just means that we fall back to the
* in-core context value, not a denial.
*/
error = selinux_capable(current, current_cred(),
&init_user_ns, CAP_MAC_ADMIN,
SECURITY_CAP_NOAUDIT);
if (!error)
error = security_sid_to_context_force(isec->sid, &context,
&size);
else
error = security_sid_to_context(isec->sid, &context, &size);
if (error)
return error;
error = size;
if (alloc) {
*buffer = context;
goto out_nofree;
}
kfree(context);
out_nofree:
return error;
}
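/*
 * Userspace view (illustrative; the path is only an example): the
 * value produced above is what a getxattr(2) caller sees, e.g.
 *
 *	char buf[256];
 *	ssize_t n = getxattr("/etc/passwd", "security.selinux",
 *			     buf, sizeof(buf));
 *
 * An unprivileged caller gets the in-core context under the current
 * policy, while a CAP_MAC_ADMIN caller may instead receive a raw
 * context that the loaded policy cannot interpret.
 */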
static int selinux_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct inode_security_struct *isec = inode->i_security;
u32 newsid;
int rc;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
if (!value || !size)
return -EACCES;
rc = security_context_to_sid((void *)value, size, &newsid);
if (rc)
return rc;
isec->sid = newsid;
isec->initialized = 1;
return 0;
}
static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
const int len = sizeof(XATTR_NAME_SELINUX);
if (buffer && len <= buffer_size)
memcpy(buffer, XATTR_NAME_SELINUX, len);
return len;
}
static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
{
struct inode_security_struct *isec = inode->i_security;
*secid = isec->sid;
}
/* file security operations */
static int selinux_revalidate_file_permission(struct file *file, int mask)
{
const struct cred *cred = current_cred();
struct inode *inode = file->f_path.dentry->d_inode;
/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
mask |= MAY_APPEND;
return file_has_perm(cred, file,
file_mask_to_av(inode->i_mode, mask));
}
static int selinux_file_permission(struct file *file, int mask)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
u32 sid = current_sid();
if (!mask)
/* No permission to check. Existence test. */
return 0;
if (sid == fsec->sid && fsec->isid == isec->sid &&
fsec->pseqno == avc_policy_seqno())
/* No change since dentry_open check. */
return 0;
return selinux_revalidate_file_permission(file, mask);
}
static int selinux_file_alloc_security(struct file *file)
{
return file_alloc_security(file);
}
static void selinux_file_free_security(struct file *file)
{
file_free_security(file);
}
static int selinux_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const struct cred *cred = current_cred();
int error = 0;
switch (cmd) {
case FIONREAD:
/* fall through */
case FIBMAP:
/* fall through */
case FIGETBSZ:
/* fall through */
case EXT2_IOC_GETFLAGS:
/* fall through */
case EXT2_IOC_GETVERSION:
error = file_has_perm(cred, file, FILE__GETATTR);
break;
case EXT2_IOC_SETFLAGS:
/* fall through */
case EXT2_IOC_SETVERSION:
error = file_has_perm(cred, file, FILE__SETATTR);
break;
/* sys_ioctl() checks */
case FIONBIO:
/* fall through */
case FIOASYNC:
error = file_has_perm(cred, file, 0);
break;
case KDSKBENT:
case KDSKBSENT:
error = task_has_capability(current, cred, CAP_SYS_TTY_CONFIG,
SECURITY_CAP_AUDIT);
break;
/* default case assumes that the command will go
* to the file's ioctl() function.
*/
default:
error = file_has_perm(cred, file, FILE__IOCTL);
}
return error;
}
static int default_noexec;
static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
{
const struct cred *cred = current_cred();
int rc = 0;
if (default_noexec &&
(prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
/*
* We are making executable an anonymous mapping or a
* private file mapping that will also be writable.
* This has an additional check.
*/
rc = cred_has_perm(cred, cred, PROCESS__EXECMEM);
if (rc)
goto error;
}
if (file) {
/* read access is always possible with a mapping */
u32 av = FILE__READ;
/* write access only matters if the mapping is shared */
if (shared && (prot & PROT_WRITE))
av |= FILE__WRITE;
if (prot & PROT_EXEC)
av |= FILE__EXECUTE;
return file_has_perm(cred, file, av);
}
error:
return rc;
}
static int selinux_file_mmap(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags,
unsigned long addr, unsigned long addr_only)
{
int rc = 0;
u32 sid = current_sid();
/*
* notice that we are intentionally putting the SELinux check before
* the secondary cap_file_mmap check. This is such a likely attempt
* at bad behaviour/exploit that we always want to get the AVC, even
* if DAC would have also denied the operation.
*/
if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
MEMPROTECT__MMAP_ZERO, NULL);
if (rc)
return rc;
}
/* do DAC check on address space usage */
rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
if (rc || addr_only)
return rc;
if (selinux_checkreqprot)
prot = reqprot;
return file_map_prot_check(file, prot,
(flags & MAP_TYPE) == MAP_SHARED);
}
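/*
 * Example (illustrative): an anonymous mapping requested as both
 * writable and executable, e.g.
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
 *	     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * reaches file_map_prot_check() with file == NULL and PROT_EXEC set
 * and therefore requires PROCESS__EXECMEM when default_noexec is in
 * effect. With selinux_checkreqprot enabled, the caller's requested
 * protection (reqprot) is checked instead of the protection the
 * kernel will actually apply.
 */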
static int selinux_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot,
unsigned long prot)
{
const struct cred *cred = current_cred();
if (selinux_checkreqprot)
prot = reqprot;
if (default_noexec &&
(prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
int rc = 0;
if (vma->vm_start >= vma->vm_mm->start_brk &&
vma->vm_end <= vma->vm_mm->brk) {
rc = cred_has_perm(cred, cred, PROCESS__EXECHEAP);
} else if (!vma->vm_file &&
vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) {
rc = current_has_perm(current, PROCESS__EXECSTACK);
} else if (vma->vm_file && vma->anon_vma) {
/*
* We are making executable a file mapping that has
* had some COW done. Since pages might have been
* written, check ability to execute the possibly
* modified content. This typically should only
* occur for text relocations.
*/
rc = file_has_perm(cred, vma->vm_file, FILE__EXECMOD);
}
if (rc)
return rc;
}
return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
}
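/*
 * Summary sketch of the checks above (restating the code, not adding
 * policy):
 *
 *	heap made executable               -> process execheap
 *	stack made executable              -> process execstack
 *	COW'd file mapping made executable -> file execmod
 *
 * Text relocations in shared objects are the usual legitimate
 * trigger for the execmod case.
 */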
static int selinux_file_lock(struct file *file, unsigned int cmd)
{
const struct cred *cred = current_cred();
return file_has_perm(cred, file, FILE__LOCK);
}
static int selinux_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const struct cred *cred = current_cred();
int err = 0;
switch (cmd) {
case F_SETFL:
if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
err = -EINVAL;
break;
}
if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) {
err = file_has_perm(cred, file, FILE__WRITE);
break;
}
/* fall through */
case F_SETOWN:
case F_SETSIG:
case F_GETFL:
case F_GETOWN:
case F_GETSIG:
/* Just check FD__USE permission */
err = file_has_perm(cred, file, 0);
break;
case F_GETLK:
case F_SETLK:
case F_SETLKW:
#if BITS_PER_LONG == 32
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
#endif
if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
err = -EINVAL;
break;
}
err = file_has_perm(cred, file, FILE__LOCK);
break;
}
return err;
}
static int selinux_file_set_fowner(struct file *file)
{
struct file_security_struct *fsec;
fsec = file->f_security;
fsec->fown_sid = current_sid();
return 0;
}
static int selinux_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int signum)
{
struct file *file;
u32 sid = task_sid(tsk);
u32 perm;
struct file_security_struct *fsec;
/* struct fown_struct is never outside the context of a struct file */
file = container_of(fown, struct file, f_owner);
fsec = file->f_security;
if (!signum)
perm = signal_to_av(SIGIO); /* as per send_sigio_to_task */
else
perm = signal_to_av(signum);
return avc_has_perm(fsec->fown_sid, sid,
SECCLASS_PROCESS, perm, NULL);
}
static int selinux_file_receive(struct file *file)
{
const struct cred *cred = current_cred();
return file_has_perm(cred, file, file_to_av(file));
}
static int selinux_dentry_open(struct file *file, const struct cred *cred)
{
struct file_security_struct *fsec;
struct inode *inode;
struct inode_security_struct *isec;
inode = file->f_path.dentry->d_inode;
fsec = file->f_security;
isec = inode->i_security;
/*
* Save inode label and policy sequence number
* at open-time so that selinux_file_permission
* can determine whether revalidation is necessary.
* Task label is already saved in the file security
* struct as its SID.
*/
fsec->isid = isec->sid;
fsec->pseqno = avc_policy_seqno();
/*
* Since the inode label or policy seqno may have changed
* between the selinux_inode_permission check and the saving
* of state above, recheck that access is still permitted.
* Otherwise, access might never be revalidated against the
* new inode label or new policy.
* This check is not redundant - do not remove.
*/
return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
}
/* task security operations */
static int selinux_task_create(unsigned long clone_flags)
{
return current_has_perm(current, PROCESS__FORK);
}
/*
* allocate the SELinux part of blank credentials
*/
static int selinux_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
struct task_security_struct *tsec;
tsec = kzalloc(sizeof(struct task_security_struct), gfp);
if (!tsec)
return -ENOMEM;
cred->security = tsec;
return 0;
}
/*
* detach and free the LSM part of a set of credentials
*/
static void selinux_cred_free(struct cred *cred)
{
struct task_security_struct *tsec = cred->security;
/*
* cred->security == NULL if security_cred_alloc_blank() or
* security_prepare_creds() returned an error.
*/
BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
cred->security = (void *) 0x7UL;
kfree(tsec);
}
/*
* prepare a new set of credentials for modification
*/
static int selinux_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
const struct task_security_struct *old_tsec;
struct task_security_struct *tsec;
old_tsec = old->security;
tsec = kmemdup(old_tsec, sizeof(struct task_security_struct), gfp);
if (!tsec)
return -ENOMEM;
new->security = tsec;
return 0;
}
/*
* transfer the SELinux data to a blank set of creds
*/
static void selinux_cred_transfer(struct cred *new, const struct cred *old)
{
const struct task_security_struct *old_tsec = old->security;
struct task_security_struct *tsec = new->security;
*tsec = *old_tsec;
}
/*
* set the security data for a kernel service
* - all the creation contexts are set to unlabelled
*/
static int selinux_kernel_act_as(struct cred *new, u32 secid)
{
struct task_security_struct *tsec = new->security;
u32 sid = current_sid();
int ret;
ret = avc_has_perm(sid, secid,
SECCLASS_KERNEL_SERVICE,
KERNEL_SERVICE__USE_AS_OVERRIDE,
NULL);
if (ret == 0) {
tsec->sid = secid;
tsec->create_sid = 0;
tsec->keycreate_sid = 0;
tsec->sockcreate_sid = 0;
}
return ret;
}
/*
* set the file creation context in a security record to the same as the
* objective context of the specified inode
*/
static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
struct task_security_struct *tsec = new->security;
u32 sid = current_sid();
int ret;
ret = avc_has_perm(sid, isec->sid,
SECCLASS_KERNEL_SERVICE,
KERNEL_SERVICE__CREATE_FILES_AS,
NULL);
if (ret == 0)
tsec->create_sid = isec->sid;
return ret;
}
static int selinux_kernel_module_request(char *kmod_name)
{
u32 sid;
struct common_audit_data ad;
sid = task_sid(current);
COMMON_AUDIT_DATA_INIT(&ad, KMOD);
ad.u.kmod_name = kmod_name;
return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
SYSTEM__MODULE_REQUEST, &ad);
}
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
return current_has_perm(p, PROCESS__SETPGID);
}
static int selinux_task_getpgid(struct task_struct *p)
{
return current_has_perm(p, PROCESS__GETPGID);
}
static int selinux_task_getsid(struct task_struct *p)
{
return current_has_perm(p, PROCESS__GETSESSION);
}
static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
{
*secid = task_sid(p);
}
static int selinux_task_setnice(struct task_struct *p, int nice)
{
int rc;
rc = cap_task_setnice(p, nice);
if (rc)
return rc;
return current_has_perm(p, PROCESS__SETSCHED);
}
static int selinux_task_setioprio(struct task_struct *p, int ioprio)
{
int rc;
rc = cap_task_setioprio(p, ioprio);
if (rc)
return rc;
return current_has_perm(p, PROCESS__SETSCHED);
}
static int selinux_task_getioprio(struct task_struct *p)
{
return current_has_perm(p, PROCESS__GETSCHED);
}
static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim)
{
struct rlimit *old_rlim = p->signal->rlim + resource;
/* Control the ability to change the hard limit (whether
lowering or raising it), so that the hard limit can
later be used as a safe reset point for the soft limit
upon context transitions. See selinux_bprm_committing_creds. */
if (old_rlim->rlim_max != new_rlim->rlim_max)
return current_has_perm(p, PROCESS__SETRLIMIT);
return 0;
}
static int selinux_task_setscheduler(struct task_struct *p)
{
int rc;
rc = cap_task_setscheduler(p);
if (rc)
return rc;
return current_has_perm(p, PROCESS__SETSCHED);
}
static int selinux_task_getscheduler(struct task_struct *p)
{
return current_has_perm(p, PROCESS__GETSCHED);
}
static int selinux_task_movememory(struct task_struct *p)
{
return current_has_perm(p, PROCESS__SETSCHED);
}
static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid)
{
u32 perm;
int rc;
if (!sig)
perm = PROCESS__SIGNULL; /* null signal; existence test */
else
perm = signal_to_av(sig);
if (secid)
rc = avc_has_perm(secid, task_sid(p),
SECCLASS_PROCESS, perm, NULL);
else
rc = current_has_perm(p, perm);
return rc;
}
static int selinux_task_wait(struct task_struct *p)
{
return task_has_perm(p, current, PROCESS__SIGCHLD);
}
static void selinux_task_to_inode(struct task_struct *p,
struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
u32 sid = task_sid(p);
isec->sid = sid;
isec->initialized = 1;
}
/* Returns error only if unable to parse addresses */
static int selinux_parse_skb_ipv4(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto)
{
int offset, ihlen, ret = -EINVAL;
struct iphdr _iph, *ih;
offset = skb_network_offset(skb);
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
goto out;
ihlen = ih->ihl * 4;
if (ihlen < sizeof(_iph))
goto out;
ad->u.net.v4info.saddr = ih->saddr;
ad->u.net.v4info.daddr = ih->daddr;
ret = 0;
if (proto)
*proto = ih->protocol;
switch (ih->protocol) {
case IPPROTO_TCP: {
struct tcphdr _tcph, *th;
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
offset += ihlen;
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL)
break;
ad->u.net.sport = th->source;
ad->u.net.dport = th->dest;
break;
}
case IPPROTO_UDP: {
struct udphdr _udph, *uh;
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
offset += ihlen;
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL)
break;
ad->u.net.sport = uh->source;
ad->u.net.dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
struct dccp_hdr _dccph, *dh;
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
offset += ihlen;
dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
if (dh == NULL)
break;
ad->u.net.sport = dh->dccph_sport;
ad->u.net.dport = dh->dccph_dport;
break;
}
default:
break;
}
out:
return ret;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Returns error only if unable to parse addresses */
static int selinux_parse_skb_ipv6(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto)
{
u8 nexthdr;
int ret = -EINVAL, offset;
struct ipv6hdr _ipv6h, *ip6;
offset = skb_network_offset(skb);
ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
if (ip6 == NULL)
goto out;
ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
ret = 0;
nexthdr = ip6->nexthdr;
offset += sizeof(_ipv6h);
offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
if (offset < 0)
goto out;
if (proto)
*proto = nexthdr;
switch (nexthdr) {
case IPPROTO_TCP: {
struct tcphdr _tcph, *th;
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL)
break;
ad->u.net.sport = th->source;
ad->u.net.dport = th->dest;
break;
}
case IPPROTO_UDP: {
struct udphdr _udph, *uh;
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL)
break;
ad->u.net.sport = uh->source;
ad->u.net.dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
struct dccp_hdr _dccph, *dh;
dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
if (dh == NULL)
break;
ad->u.net.sport = dh->dccph_sport;
ad->u.net.dport = dh->dccph_dport;
break;
}
/* includes fragments */
default:
break;
}
out:
return ret;
}
#endif /* IPV6 */
static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
char **_addrp, int src, u8 *proto)
{
char *addrp;
int ret;
switch (ad->u.net.family) {
case PF_INET:
ret = selinux_parse_skb_ipv4(skb, ad, proto);
if (ret)
goto parse_error;
addrp = (char *)(src ? &ad->u.net.v4info.saddr :
&ad->u.net.v4info.daddr);
goto okay;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case PF_INET6:
ret = selinux_parse_skb_ipv6(skb, ad, proto);
if (ret)
goto parse_error;
addrp = (char *)(src ? &ad->u.net.v6info.saddr :
&ad->u.net.v6info.daddr);
goto okay;
#endif /* IPV6 */
default:
addrp = NULL;
goto okay;
}
parse_error:
printk(KERN_WARNING
"SELinux: failure in selinux_parse_skb(),"
" unable to parse packet\n");
return ret;
okay:
if (_addrp)
*_addrp = addrp;
return 0;
}
/**
* selinux_skb_peerlbl_sid - Determine the peer label of a packet
* @skb: the packet
* @family: protocol family
* @sid: the packet's peer label SID
*
* Description:
* Check the various different forms of network peer labeling and determine
* the peer label/SID for the packet; most of the magic actually occurs in
* the security server function security_net_peersid_cmp(). The function
* returns zero if the value in @sid is valid (although it may be SECSID_NULL)
* or -EACCES if @sid is invalid due to inconsistencies with the different
* peer labels.
*
*/
static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
{
int err;
u32 xfrm_sid;
u32 nlbl_sid;
u32 nlbl_type;
selinux_skb_xfrm_sid(skb, &xfrm_sid);
selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
printk(KERN_WARNING
"SELinux: failure in selinux_skb_peerlbl_sid(),"
" unable to determine packet's peer label\n");
return -EACCES;
}
return 0;
}
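/*
 * Resolution sketch (assumption about the security server's
 * behavior; see security_net_peersid_resolve() for the authoritative
 * rules): if only one of nlbl_sid/xfrm_sid carries a label, that SID
 * is returned; if both do, they must be consistent or the error path
 * above fires; if neither does, *sid is left as SECSID_NULL, which
 * the caller must still treat as success.
 */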
/* socket security operations */
static int socket_sockcreate_sid(const struct task_security_struct *tsec,
u16 secclass, u32 *socksid)
{
if (tsec->sockcreate_sid > SECSID_NULL) {
*socksid = tsec->sockcreate_sid;
return 0;
}
return security_transition_sid(tsec->sid, tsec->sid, secclass, NULL,
socksid);
}
static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
{
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
u32 tsid = task_sid(task);
if (sksec->sid == SECINITSID_KERNEL)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sk = sk;
return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);
}
static int selinux_socket_create(int family, int type,
int protocol, int kern)
{
const struct task_security_struct *tsec = current_security();
u32 newsid;
u16 secclass;
int rc;
if (kern)
return 0;
secclass = socket_type_to_security_class(family, type, protocol);
rc = socket_sockcreate_sid(tsec, secclass, &newsid);
if (rc)
return rc;
return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
}
static int selinux_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
const struct task_security_struct *tsec = current_security();
struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
struct sk_security_struct *sksec;
int err = 0;
isec->sclass = socket_type_to_security_class(family, type, protocol);
if (kern)
isec->sid = SECINITSID_KERNEL;
else {
err = socket_sockcreate_sid(tsec, isec->sclass, &(isec->sid));
if (err)
return err;
}
isec->initialized = 1;
if (sock->sk) {
sksec = sock->sk->sk_security;
sksec->sid = isec->sid;
sksec->sclass = isec->sclass;
err = selinux_netlbl_socket_post_create(sock->sk, family);
}
return err;
}
/* Range of port numbers used to automatically bind.
Need to determine whether we should perform a name_bind
permission check between the socket and the port number. */
static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
{
struct sock *sk = sock->sk;
u16 family;
int err;
err = sock_has_perm(current, sk, SOCKET__BIND);
if (err)
goto out;
/*
* If PF_INET or PF_INET6, check name_bind permission for the port.
* Multiple address binding for SCTP is not supported yet: we just
* check the first address now.
*/
family = sk->sk_family;
if (family == PF_INET || family == PF_INET6) {
char *addrp;
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
u32 sid, node_perm;
if (family == PF_INET) {
addr4 = (struct sockaddr_in *)address;
snum = ntohs(addr4->sin_port);
addrp = (char *)&addr4->sin_addr.s_addr;
} else {
addr6 = (struct sockaddr_in6 *)address;
snum = ntohs(addr6->sin6_port);
addrp = (char *)&addr6->sin6_addr.s6_addr;
}
if (snum) {
int low, high;
inet_get_local_port_range(&low, &high);
if (snum < max(PROT_SOCK, low) || snum > high) {
err = sel_netport_sid(sk->sk_protocol,
snum, &sid);
if (err)
goto out;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sport = htons(snum);
ad.u.net.family = family;
err = avc_has_perm(sksec->sid, sid,
sksec->sclass,
SOCKET__NAME_BIND, &ad);
if (err)
goto out;
}
}
switch (sksec->sclass) {
case SECCLASS_TCP_SOCKET:
node_perm = TCP_SOCKET__NODE_BIND;
break;
case SECCLASS_UDP_SOCKET:
node_perm = UDP_SOCKET__NODE_BIND;
break;
case SECCLASS_DCCP_SOCKET:
node_perm = DCCP_SOCKET__NODE_BIND;
break;
default:
node_perm = RAWIP_SOCKET__NODE_BIND;
break;
}
err = sel_netnode_sid(addrp, family, &sid);
if (err)
goto out;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sport = htons(snum);
ad.u.net.family = family;
if (family == PF_INET)
ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
else
ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
err = avc_has_perm(sksec->sid, sid,
sksec->sclass, node_perm, &ad);
if (err)
goto out;
}
out:
return err;
}
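/*
 * Policy sketch (standard policy syntax; type names are examples):
 * for ports outside the local ephemeral range, the name_bind check
 * above is satisfied by a rule such as
 *
 *	allow httpd_t http_port_t : tcp_socket name_bind;
 *
 * and the subsequent node_bind check does the same for the address
 * being bound.
 */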
static int selinux_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
{
struct sock *sk = sock->sk;
struct sk_security_struct *sksec = sk->sk_security;
int err;
err = sock_has_perm(current, sk, SOCKET__CONNECT);
if (err)
return err;
/*
* If a TCP or DCCP socket, check name_connect permission for the port.
*/
if (sksec->sclass == SECCLASS_TCP_SOCKET ||
sksec->sclass == SECCLASS_DCCP_SOCKET) {
struct common_audit_data ad;
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
u32 sid, perm;
if (sk->sk_family == PF_INET) {
addr4 = (struct sockaddr_in *)address;
if (addrlen < sizeof(struct sockaddr_in))
return -EINVAL;
snum = ntohs(addr4->sin_port);
} else {
addr6 = (struct sockaddr_in6 *)address;
if (addrlen < SIN6_LEN_RFC2133)
return -EINVAL;
snum = ntohs(addr6->sin6_port);
}
err = sel_netport_sid(sk->sk_protocol, snum, &sid);
if (err)
goto out;
perm = (sksec->sclass == SECCLASS_TCP_SOCKET) ?
TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.dport = htons(snum);
ad.u.net.family = sk->sk_family;
err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
if (err)
goto out;
}
err = selinux_netlbl_socket_connect(sk, address);
out:
return err;
}
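/*
 * Policy sketch (assumption; standard policy syntax): the
 * name_connect check above corresponds to rules like
 *
 *	allow mydomain_t http_port_t : tcp_socket name_connect;
 *
 * and applies only to TCP and DCCP sockets; other socket classes are
 * covered by the generic SOCKET__CONNECT check alone.
 */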
static int selinux_socket_listen(struct socket *sock, int backlog)
{
return sock_has_perm(current, sock->sk, SOCKET__LISTEN);
}
static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
{
int err;
struct inode_security_struct *isec;
struct inode_security_struct *newisec;
err = sock_has_perm(current, sock->sk, SOCKET__ACCEPT);
if (err)
return err;
newisec = SOCK_INODE(newsock)->i_security;
isec = SOCK_INODE(sock)->i_security;
newisec->sclass = isec->sclass;
newisec->sid = isec->sid;
newisec->initialized = 1;
return 0;
}
static int selinux_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
return sock_has_perm(current, sock->sk, SOCKET__WRITE);
}
static int selinux_socket_recvmsg(struct socket *sock, struct msghdr *msg,
int size, int flags)
{
return sock_has_perm(current, sock->sk, SOCKET__READ);
}
static int selinux_socket_getsockname(struct socket *sock)
{
return sock_has_perm(current, sock->sk, SOCKET__GETATTR);
}
static int selinux_socket_getpeername(struct socket *sock)
{
return sock_has_perm(current, sock->sk, SOCKET__GETATTR);
}
static int selinux_socket_setsockopt(struct socket *sock, int level, int optname)
{
int err;
err = sock_has_perm(current, sock->sk, SOCKET__SETOPT);
if (err)
return err;
return selinux_netlbl_socket_setsockopt(sock, level, optname);
}
static int selinux_socket_getsockopt(struct socket *sock, int level,
int optname)
{
return sock_has_perm(current, sock->sk, SOCKET__GETOPT);
}
static int selinux_socket_shutdown(struct socket *sock, int how)
{
return sock_has_perm(current, sock->sk, SOCKET__SHUTDOWN);
}
static int selinux_socket_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
{
struct sk_security_struct *sksec_sock = sock->sk_security;
struct sk_security_struct *sksec_other = other->sk_security;
struct sk_security_struct *sksec_new = newsk->sk_security;
struct common_audit_data ad;
int err;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sk = other;
err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
sksec_other->sclass,
UNIX_STREAM_SOCKET__CONNECTTO, &ad);
if (err)
return err;
/* server child socket */
sksec_new->peer_sid = sksec_sock->sid;
err = security_sid_mls_copy(sksec_other->sid, sksec_sock->sid,
&sksec_new->sid);
if (err)
return err;
/* connecting socket */
sksec_sock->peer_sid = sksec_new->sid;
return 0;
}
static int selinux_socket_unix_may_send(struct socket *sock,
struct socket *other)
{
struct sk_security_struct *ssec = sock->sk->sk_security;
struct sk_security_struct *osec = other->sk->sk_security;
struct common_audit_data ad;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sk = other->sk;
return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
&ad);
}
static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
u32 peer_sid,
struct common_audit_data *ad)
{
int err;
u32 if_sid;
u32 node_sid;
err = sel_netif_sid(ifindex, &if_sid);
if (err)
return err;
err = avc_has_perm(peer_sid, if_sid,
SECCLASS_NETIF, NETIF__INGRESS, ad);
if (err)
return err;
err = sel_netnode_sid(addrp, family, &node_sid);
if (err)
return err;
return avc_has_perm(peer_sid, node_sid,
SECCLASS_NODE, NODE__RECVFROM, ad);
}
static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
u16 family)
{
int err = 0;
struct sk_security_struct *sksec = sk->sk_security;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
char *addrp;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = skb->skb_iif;
ad.u.net.family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
if (selinux_secmark_enabled()) {
err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
PACKET__RECV, &ad);
if (err)
return err;
}
err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
if (err)
return err;
err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
return err;
}
static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
struct sk_security_struct *sksec = sk->sk_security;
u16 family = sk->sk_family;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
if (family != PF_INET && family != PF_INET6)
return 0;
/* Handle mapped IPv4 packets arriving via IPv6 sockets */
if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
family = PF_INET;
/* If any sort of compatibility mode is enabled then handoff processing
* to the selinux_sock_rcv_skb_compat() function to deal with the
* special handling. We do this in an attempt to keep this function
* as fast and as clean as possible. */
if (!selinux_policycap_netpeer)
return selinux_sock_rcv_skb_compat(sk, skb, family);
secmark_active = selinux_secmark_enabled();
peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
if (!secmark_active && !peerlbl_active)
return 0;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = skb->skb_iif;
ad.u.net.family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
if (peerlbl_active) {
u32 peer_sid;
err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
if (err)
return err;
err = selinux_inet_sys_rcv_skb(skb->skb_iif, addrp, family,
peer_sid, &ad);
if (err) {
selinux_netlbl_err(skb, err, 0);
return err;
}
err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
if (err)
selinux_netlbl_err(skb, err, 0);
}
if (secmark_active) {
err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
PACKET__RECV, &ad);
if (err)
return err;
}
return err;
}
static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
{
int err = 0;
char *scontext;
u32 scontext_len;
struct sk_security_struct *sksec = sock->sk->sk_security;
u32 peer_sid = SECSID_NULL;
if (sksec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
sksec->sclass == SECCLASS_TCP_SOCKET)
peer_sid = sksec->peer_sid;
if (peer_sid == SECSID_NULL)
return -ENOPROTOOPT;
err = security_sid_to_context(peer_sid, &scontext, &scontext_len);
if (err)
return err;
if (scontext_len > len) {
err = -ERANGE;
goto out_len;
}
if (copy_to_user(optval, scontext, scontext_len))
err = -EFAULT;
out_len:
if (put_user(scontext_len, optlen))
err = -EFAULT;
kfree(scontext);
return err;
}
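/*
 * Userspace view (illustrative): this hook backs
 * getsockopt(SOL_SOCKET, SO_PEERSEC), e.g.
 *
 *	char ctx[256];
 *	socklen_t len = sizeof(ctx);
 *	getsockopt(fd, SOL_SOCKET, SO_PEERSEC, ctx, &len);
 *
 * which fills ctx with the peer's context for connected UNIX stream
 * and TCP sockets, or fails with ENOPROTOOPT when no peer label is
 * available.
 */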
static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
u32 peer_secid = SECSID_NULL;
u16 family;
if (skb && skb->protocol == htons(ETH_P_IP))
family = PF_INET;
else if (skb && skb->protocol == htons(ETH_P_IPV6))
family = PF_INET6;
else if (sock)
family = sock->sk->sk_family;
else
goto out;
if (sock && family == PF_UNIX)
selinux_inode_getsecid(SOCK_INODE(sock), &peer_secid);
else if (skb)
selinux_skb_peerlbl_sid(skb, family, &peer_secid);
out:
*secid = peer_secid;
if (peer_secid == SECSID_NULL)
return -EINVAL;
return 0;
}
static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
{
struct sk_security_struct *sksec;
sksec = kzalloc(sizeof(*sksec), priority);
if (!sksec)
return -ENOMEM;
sksec->peer_sid = SECINITSID_UNLABELED;
sksec->sid = SECINITSID_UNLABELED;
selinux_netlbl_sk_security_reset(sksec);
sk->sk_security = sksec;
return 0;
}
static void selinux_sk_free_security(struct sock *sk)
{
struct sk_security_struct *sksec = sk->sk_security;
sk->sk_security = NULL;
selinux_netlbl_sk_security_free(sksec);
kfree(sksec);
}
static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
{
struct sk_security_struct *sksec = sk->sk_security;
struct sk_security_struct *newsksec = newsk->sk_security;
newsksec->sid = sksec->sid;
newsksec->peer_sid = sksec->peer_sid;
newsksec->sclass = sksec->sclass;
selinux_netlbl_sk_security_reset(newsksec);
}
static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
{
if (!sk)
*secid = SECINITSID_ANY_SOCKET;
else {
struct sk_security_struct *sksec = sk->sk_security;
*secid = sksec->sid;
}
}
static void selinux_sock_graft(struct sock *sk, struct socket *parent)
{
struct inode_security_struct *isec = SOCK_INODE(parent)->i_security;
struct sk_security_struct *sksec = sk->sk_security;
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
sk->sk_family == PF_UNIX)
isec->sid = sksec->sid;
sksec->sclass = isec->sclass;
}
static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
struct sk_security_struct *sksec = sk->sk_security;
int err;
u16 family = sk->sk_family;
u32 newsid;
u32 peersid;
/* handle mapped IPv4 packets arriving via IPv6 sockets */
if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
family = PF_INET;
err = selinux_skb_peerlbl_sid(skb, family, &peersid);
if (err)
return err;
if (peersid == SECSID_NULL) {
req->secid = sksec->sid;
req->peer_secid = SECSID_NULL;
} else {
err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
if (err)
return err;
req->secid = newsid;
req->peer_secid = peersid;
}
return selinux_netlbl_inet_conn_request(req, family);
}
static void selinux_inet_csk_clone(struct sock *newsk,
const struct request_sock *req)
{
struct sk_security_struct *newsksec = newsk->sk_security;
newsksec->sid = req->secid;
newsksec->peer_sid = req->peer_secid;
/* NOTE: Ideally, we should also get the isec->sid for the
new socket in sync, but we don't have the isec available yet.
So we will wait until sock_graft to do it, by which
time it will have been created and available. */
/* We don't need to take any sort of lock here as we are the only
* thread with access to newsksec */
selinux_netlbl_inet_csk_clone(newsk, req->rsk_ops->family);
}
static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
{
u16 family = sk->sk_family;
struct sk_security_struct *sksec = sk->sk_security;
/* handle mapped IPv4 packets arriving via IPv6 sockets */
if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
family = PF_INET;
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
static int selinux_secmark_relabel_packet(u32 sid)
{
const struct task_security_struct *__tsec;
u32 tsid;
__tsec = current_security();
tsid = __tsec->sid;
return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
}
static void selinux_secmark_refcount_inc(void)
{
atomic_inc(&selinux_secmark_refcount);
}
static void selinux_secmark_refcount_dec(void)
{
atomic_dec(&selinux_secmark_refcount);
}
static void selinux_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
fl->flowi_secid = req->secid;
}
static int selinux_tun_dev_create(void)
{
u32 sid = current_sid();
/* we aren't taking into account the "sockcreate" SID since the socket
* that is being created here is not a socket in the traditional sense;
* instead it is a private sock, accessible only to the kernel, and
* representing a wide range of network traffic spanning multiple
* connections unlike traditional sockets - check the TUN driver to
* get a better understanding of why this socket is special */
return avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
NULL);
}
static void selinux_tun_dev_post_create(struct sock *sk)
{
struct sk_security_struct *sksec = sk->sk_security;
/* we don't currently perform any NetLabel based labeling here and it
* isn't clear that we would want to do so anyway; while we could apply
* labeling without the support of the TUN user the resulting labeled
* traffic from the other end of the connection would almost certainly
* cause confusion to the TUN user that had no idea network labeling
* protocols were being used */
/* see the comments in selinux_tun_dev_create() about why we don't use
* the sockcreate SID here */
sksec->sid = current_sid();
sksec->sclass = SECCLASS_TUN_SOCKET;
}
static int selinux_tun_dev_attach(struct sock *sk)
{
struct sk_security_struct *sksec = sk->sk_security;
u32 sid = current_sid();
int err;
err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__RELABELFROM, NULL);
if (err)
return err;
err = avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__RELABELTO, NULL);
if (err)
return err;
sksec->sid = sid;
return 0;
}
static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
{
int err = 0;
u32 perm;
struct nlmsghdr *nlh;
struct sk_security_struct *sksec = sk->sk_security;
if (skb->len < NLMSG_SPACE(0)) {
err = -EINVAL;
goto out;
}
nlh = nlmsg_hdr(skb);
err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
if (err) {
if (err == -EINVAL) {
audit_log(current->audit_context, GFP_KERNEL, AUDIT_SELINUX_ERR,
"SELinux: unrecognized netlink message"
" type=%hu for sclass=%hu\n",
nlh->nlmsg_type, sksec->sclass);
if (!selinux_enforcing || security_get_allow_unknown())
err = 0;
}
/* Ignore */
if (err == -ENOENT)
err = 0;
goto out;
}
err = sock_has_perm(current, sk, perm);
out:
return err;
}
#ifdef CONFIG_NETFILTER
static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
u16 family)
{
int err;
char *addrp;
u32 peer_sid;
struct common_audit_data ad;
u8 secmark_active;
u8 netlbl_active;
u8 peerlbl_active;
if (!selinux_policycap_netpeer)
return NF_ACCEPT;
secmark_active = selinux_secmark_enabled();
netlbl_active = netlbl_enabled();
peerlbl_active = netlbl_active || selinux_xfrm_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
return NF_DROP;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = ifindex;
ad.u.net.family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
return NF_DROP;
if (peerlbl_active) {
err = selinux_inet_sys_rcv_skb(ifindex, addrp, family,
peer_sid, &ad);
if (err) {
selinux_netlbl_err(skb, err, 1);
return NF_DROP;
}
}
if (secmark_active)
if (avc_has_perm(peer_sid, skb->secmark,
SECCLASS_PACKET, PACKET__FORWARD_IN, &ad))
return NF_DROP;
if (netlbl_active)
/* we do this in the FORWARD path and not the POST_ROUTING
* path because we want to make sure we apply the necessary
* labeling before IPsec is applied so we can leverage AH
* protection */
if (selinux_netlbl_skbuff_setsid(skb, family, peer_sid) != 0)
return NF_DROP;
return NF_ACCEPT;
}
static unsigned int selinux_ipv4_forward(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return selinux_ip_forward(skb, in->ifindex, PF_INET);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static unsigned int selinux_ipv6_forward(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return selinux_ip_forward(skb, in->ifindex, PF_INET6);
}
#endif /* IPV6 */
static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family)
{
u32 sid;
if (!netlbl_enabled())
return NF_ACCEPT;
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
if (skb->sk) {
struct sk_security_struct *sksec = skb->sk->sk_security;
sid = sksec->sid;
} else
sid = SECINITSID_KERNEL;
if (selinux_netlbl_skbuff_setsid(skb, family, sid) != 0)
return NF_DROP;
return NF_ACCEPT;
}
static unsigned int selinux_ipv4_output(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return selinux_ip_output(skb, PF_INET);
}
static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
int ifindex,
u16 family)
{
struct sock *sk = skb->sk;
struct sk_security_struct *sksec;
struct common_audit_data ad;
char *addrp;
u8 proto;
if (sk == NULL)
return NF_ACCEPT;
sksec = sk->sk_security;
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = ifindex;
ad.u.net.family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
return NF_DROP;
if (selinux_secmark_enabled())
if (avc_has_perm(sksec->sid, skb->secmark,
SECCLASS_PACKET, PACKET__SEND, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
return NF_DROP_ERR(-ECONNREFUSED);
return NF_ACCEPT;
}
static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
u16 family)
{
u32 secmark_perm;
u32 peer_sid;
struct sock *sk;
struct common_audit_data ad;
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
/* If any sort of compatibility mode is enabled then handoff processing
* to the selinux_ip_postroute_compat() function to deal with the
* special handling. We do this in an attempt to keep this function
* as fast and as clean as possible. */
if (!selinux_policycap_netpeer)
return selinux_ip_postroute_compat(skb, ifindex, family);
#ifdef CONFIG_XFRM
/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
* packet transformation so allow the packet to pass without any checks
* since we'll have another chance to perform access control checks
* when the packet is on its final way out.
* NOTE: there appear to be some IPv6 multicast cases where skb->dst
* is NULL, in this case go ahead and apply access control. */
if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
return NF_ACCEPT;
#endif
secmark_active = selinux_secmark_enabled();
peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
/* if the packet is being forwarded then get the peer label from the
* packet itself; otherwise check to see if it is from a local
* application or the kernel, if from an application get the peer label
* from the sending socket, otherwise use the kernel's sid */
sk = skb->sk;
if (sk == NULL) {
if (skb->skb_iif) {
secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
return NF_DROP;
} else {
secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL;
}
} else {
struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid;
secmark_perm = PACKET__SEND;
}
COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = ifindex;
ad.u.net.family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
return NF_DROP;
if (secmark_active)
if (avc_has_perm(peer_sid, skb->secmark,
SECCLASS_PACKET, secmark_perm, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
if (peerlbl_active) {
u32 if_sid;
u32 node_sid;
if (sel_netif_sid(ifindex, &if_sid))
return NF_DROP;
if (avc_has_perm(peer_sid, if_sid,
SECCLASS_NETIF, NETIF__EGRESS, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
if (sel_netnode_sid(addrp, family, &node_sid))
return NF_DROP;
if (avc_has_perm(peer_sid, node_sid,
SECCLASS_NODE, NODE__SENDTO, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
}
return NF_ACCEPT;
}
static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return selinux_ip_postroute(skb, out->ifindex, PF_INET);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return selinux_ip_postroute(skb, out->ifindex, PF_INET6);
}
#endif /* IPV6 */
#endif /* CONFIG_NETFILTER */
static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
{
int err;
err = cap_netlink_send(sk, skb);
if (err)
return err;
return selinux_nlmsg_perm(sk, skb);
}
static int selinux_netlink_recv(struct sk_buff *skb, int capability)
{
int err;
struct common_audit_data ad;
u32 sid;
err = cap_netlink_recv(skb, capability);
if (err)
return err;
COMMON_AUDIT_DATA_INIT(&ad, CAP);
ad.u.cap = capability;
security_task_getsecid(current, &sid);
return avc_has_perm(sid, sid, SECCLASS_CAPABILITY,
CAP_TO_MASK(capability), &ad);
}
static int ipc_alloc_security(struct task_struct *task,
struct kern_ipc_perm *perm,
u16 sclass)
{
struct ipc_security_struct *isec;
u32 sid;
isec = kzalloc(sizeof(struct ipc_security_struct), GFP_KERNEL);
if (!isec)
return -ENOMEM;
sid = task_sid(task);
isec->sclass = sclass;
isec->sid = sid;
perm->security = isec;
return 0;
}
static void ipc_free_security(struct kern_ipc_perm *perm)
{
struct ipc_security_struct *isec = perm->security;
perm->security = NULL;
kfree(isec);
}
static int msg_msg_alloc_security(struct msg_msg *msg)
{
struct msg_security_struct *msec;
msec = kzalloc(sizeof(struct msg_security_struct), GFP_KERNEL);
if (!msec)
return -ENOMEM;
msec->sid = SECINITSID_UNLABELED;
msg->security = msec;
return 0;
}
static void msg_msg_free_security(struct msg_msg *msg)
{
struct msg_security_struct *msec = msg->security;
msg->security = NULL;
kfree(msec);
}
static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
u32 perms)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
isec = ipc_perms->security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = ipc_perms->key;
return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
}
static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
{
return msg_msg_alloc_security(msg);
}
static void selinux_msg_msg_free_security(struct msg_msg *msg)
{
msg_msg_free_security(msg);
}
/* message queue security operations */
static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
int rc;
rc = ipc_alloc_security(current, &msq->q_perm, SECCLASS_MSGQ);
if (rc)
return rc;
isec = msq->q_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
MSGQ__CREATE, &ad);
if (rc) {
ipc_free_security(&msq->q_perm);
return rc;
}
return 0;
}
static void selinux_msg_queue_free_security(struct msg_queue *msq)
{
ipc_free_security(&msq->q_perm);
}
static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
isec = msq->q_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
MSGQ__ASSOCIATE, &ad);
}
static int selinux_msg_queue_msgctl(struct msg_queue *msq, int cmd)
{
int err;
int perms;
switch (cmd) {
case IPC_INFO:
case MSG_INFO:
/* No specific object, just general system-wide information. */
return task_has_system(current, SYSTEM__IPC_INFO);
case IPC_STAT:
case MSG_STAT:
perms = MSGQ__GETATTR | MSGQ__ASSOCIATE;
break;
case IPC_SET:
perms = MSGQ__SETATTR;
break;
case IPC_RMID:
perms = MSGQ__DESTROY;
break;
default:
return 0;
}
err = ipc_has_perm(&msq->q_perm, perms);
return err;
}
static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg)
{
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
u32 sid = current_sid();
int rc;
isec = msq->q_perm.security;
msec = msg->security;
/*
* First time through, need to assign label to the message
*/
if (msec->sid == SECINITSID_UNLABELED) {
/*
* Compute new sid based on current process and
* message queue this message will be stored in
*/
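/* For example (illustrative, not from this file): a policy rule
 * such as "type_transition user_t user_msgq_t : msg user_msg_t;"
 * would cause the message to be labeled user_msg_t here.
 */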
rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG,
NULL, &msec->sid);
if (rc)
return rc;
}
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
/* Can this process write to the queue? */
rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
MSGQ__WRITE, &ad);
if (!rc)
/* Can this process send the message */
rc = avc_has_perm(sid, msec->sid, SECCLASS_MSG,
MSG__SEND, &ad);
if (!rc)
/* Can the message be put in the queue? */
rc = avc_has_perm(msec->sid, isec->sid, SECCLASS_MSGQ,
MSGQ__ENQUEUE, &ad);
return rc;
}
static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
struct task_struct *target,
long type, int mode)
{
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
u32 sid = task_sid(target);
int rc;
isec = msq->q_perm.security;
msec = msg->security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid,
SECCLASS_MSGQ, MSGQ__READ, &ad);
if (!rc)
rc = avc_has_perm(sid, msec->sid,
SECCLASS_MSG, MSG__RECEIVE, &ad);
return rc;
}
/* Shared Memory security operations */
static int selinux_shm_alloc_security(struct shmid_kernel *shp)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
int rc;
rc = ipc_alloc_security(current, &shp->shm_perm, SECCLASS_SHM);
if (rc)
return rc;
isec = shp->shm_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = shp->shm_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
SHM__CREATE, &ad);
if (rc) {
ipc_free_security(&shp->shm_perm);
return rc;
}
return 0;
}
static void selinux_shm_free_security(struct shmid_kernel *shp)
{
ipc_free_security(&shp->shm_perm);
}
static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
isec = shp->shm_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = shp->shm_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
SHM__ASSOCIATE, &ad);
}
/* Note, at this point, shp is locked down */
static int selinux_shm_shmctl(struct shmid_kernel *shp, int cmd)
{
int perms;
int err;
switch (cmd) {
case IPC_INFO:
case SHM_INFO:
/* No specific object, just general system-wide information. */
return task_has_system(current, SYSTEM__IPC_INFO);
case IPC_STAT:
case SHM_STAT:
perms = SHM__GETATTR | SHM__ASSOCIATE;
break;
case IPC_SET:
perms = SHM__SETATTR;
break;
case SHM_LOCK:
case SHM_UNLOCK:
perms = SHM__LOCK;
break;
case IPC_RMID:
perms = SHM__DESTROY;
break;
default:
return 0;
}
err = ipc_has_perm(&shp->shm_perm, perms);
return err;
}
static int selinux_shm_shmat(struct shmid_kernel *shp,
char __user *shmaddr, int shmflg)
{
u32 perms;
if (shmflg & SHM_RDONLY)
perms = SHM__READ;
else
perms = SHM__READ | SHM__WRITE;
return ipc_has_perm(&shp->shm_perm, perms);
}
/* Semaphore security operations */
static int selinux_sem_alloc_security(struct sem_array *sma)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
int rc;
rc = ipc_alloc_security(current, &sma->sem_perm, SECCLASS_SEM);
if (rc)
return rc;
isec = sma->sem_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = sma->sem_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
SEM__CREATE, &ad);
if (rc) {
ipc_free_security(&sma->sem_perm);
return rc;
}
return 0;
}
static void selinux_sem_free_security(struct sem_array *sma)
{
ipc_free_security(&sma->sem_perm);
}
static int selinux_sem_associate(struct sem_array *sma, int semflg)
{
struct ipc_security_struct *isec;
struct common_audit_data ad;
u32 sid = current_sid();
isec = sma->sem_perm.security;
COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = sma->sem_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
SEM__ASSOCIATE, &ad);
}
/* Note, at this point, sma is locked down */
static int selinux_sem_semctl(struct sem_array *sma, int cmd)
{
int err;
u32 perms;
switch (cmd) {
case IPC_INFO:
case SEM_INFO:
/* No specific object, just general system-wide information. */
return task_has_system(current, SYSTEM__IPC_INFO);
case GETPID:
case GETNCNT:
case GETZCNT:
perms = SEM__GETATTR;
break;
case GETVAL:
case GETALL:
perms = SEM__READ;
break;
case SETVAL:
case SETALL:
perms = SEM__WRITE;
break;
case IPC_RMID:
perms = SEM__DESTROY;
break;
case IPC_SET:
perms = SEM__SETATTR;
break;
case IPC_STAT:
case SEM_STAT:
perms = SEM__GETATTR | SEM__ASSOCIATE;
break;
default:
return 0;
}
err = ipc_has_perm(&sma->sem_perm, perms);
return err;
}
static int selinux_sem_semop(struct sem_array *sma,
struct sembuf *sops, unsigned nsops, int alter)
{
u32 perms;
if (alter)
perms = SEM__READ | SEM__WRITE;
else
perms = SEM__READ;
return ipc_has_perm(&sma->sem_perm, perms);
}
static int selinux_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
{
u32 av = 0;
if (flag & S_IRUGO)
av |= IPC__UNIX_READ;
if (flag & S_IWUGO)
av |= IPC__UNIX_WRITE;
if (av == 0)
return 0;
return ipc_has_perm(ipcp, av);
}
static void selinux_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
struct ipc_security_struct *isec = ipcp->security;
*secid = isec->sid;
}
static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (inode)
inode_doinit_with_dentry(inode, dentry);
}
static int selinux_getprocattr(struct task_struct *p,
char *name, char **value)
{
const struct task_security_struct *__tsec;
u32 sid;
int error;
unsigned len;
if (current != p) {
error = current_has_perm(p, PROCESS__GETATTR);
if (error)
return error;
}
rcu_read_lock();
__tsec = __task_cred(p)->security;
if (!strcmp(name, "current"))
sid = __tsec->sid;
else if (!strcmp(name, "prev"))
sid = __tsec->osid;
else if (!strcmp(name, "exec"))
sid = __tsec->exec_sid;
else if (!strcmp(name, "fscreate"))
sid = __tsec->create_sid;
else if (!strcmp(name, "keycreate"))
sid = __tsec->keycreate_sid;
else if (!strcmp(name, "sockcreate"))
sid = __tsec->sockcreate_sid;
else
goto invalid;
rcu_read_unlock();
if (!sid)
return 0;
error = security_sid_to_context(sid, value, &len);
if (error)
return error;
return len;
invalid:
rcu_read_unlock();
return -EINVAL;
}
static int selinux_setprocattr(struct task_struct *p,
char *name, void *value, size_t size)
{
struct task_security_struct *tsec;
struct task_struct *tracer;
struct cred *new;
u32 sid = 0, ptsid;
int error;
char *str = value;
if (current != p) {
/* SELinux only allows a process to change its own
security attributes. */
return -EACCES;
}
/*
* Basic control over ability to set these attributes at all.
* current == p, but we'll pass them separately in case the
* above restriction is ever removed.
*/
if (!strcmp(name, "exec"))
error = current_has_perm(p, PROCESS__SETEXEC);
else if (!strcmp(name, "fscreate"))
error = current_has_perm(p, PROCESS__SETFSCREATE);
else if (!strcmp(name, "keycreate"))
error = current_has_perm(p, PROCESS__SETKEYCREATE);
else if (!strcmp(name, "sockcreate"))
error = current_has_perm(p, PROCESS__SETSOCKCREATE);
else if (!strcmp(name, "current"))
error = current_has_perm(p, PROCESS__SETCURRENT);
else
error = -EINVAL;
if (error)
return error;
/* Obtain a SID for the context, if one was specified. */
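/* A context here is a string such as "system_u:object_r:tmp_t:s0"
 * (illustrative), typically written from userspace through
 * /proc/self/attr/<name>; a trailing newline from shell writes
 * is stripped below before the lookup.
 */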
if (size && str[0] && str[0] != '\n') {
if (str[size-1] == '\n') {
str[size-1] = 0;
size--;
}
error = security_context_to_sid(value, size, &sid);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
if (!capable(CAP_MAC_ADMIN))
return error;
error = security_context_to_sid_force(value, size,
&sid);
}
if (error)
return error;
}
new = prepare_creds();
if (!new)
return -ENOMEM;
/* Permission checking based on the specified context is
performed during the actual operation (execve,
open/mkdir/...), when we know the full context of the
operation. See selinux_bprm_set_creds for the execve
checks and may_create for the file creation checks. The
operation will then fail if the context is not permitted. */
tsec = new->security;
if (!strcmp(name, "exec")) {
tsec->exec_sid = sid;
} else if (!strcmp(name, "fscreate")) {
tsec->create_sid = sid;
} else if (!strcmp(name, "keycreate")) {
error = may_create_key(sid, p);
if (error)
goto abort_change;
tsec->keycreate_sid = sid;
} else if (!strcmp(name, "sockcreate")) {
tsec->sockcreate_sid = sid;
} else if (!strcmp(name, "current")) {
error = -EINVAL;
if (sid == 0)
goto abort_change;
/* Only allow single threaded processes to change context */
error = -EPERM;
if (!current_is_single_threaded()) {
error = security_bounded_transition(tsec->sid, sid);
if (error)
goto abort_change;
}
/* Check permissions for the transition. */
error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS,
PROCESS__DYNTRANSITION, NULL);
if (error)
goto abort_change;
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
task_lock(p);
tracer = tracehook_tracer_task(p);
if (tracer)
ptsid = task_sid(tracer);
task_unlock(p);
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (error)
goto abort_change;
}
tsec->sid = sid;
} else {
error = -EINVAL;
goto abort_change;
}
commit_creds(new);
return size;
abort_change:
abort_creds(new);
return error;
}
static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return security_sid_to_context(secid, secdata, seclen);
}
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
return security_context_to_sid(secdata, seclen, secid);
}
static void selinux_release_secctx(char *secdata, u32 seclen)
{
kfree(secdata);
}
/*
* called with inode->i_mutex locked
*/
static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
}
/*
* called with inode->i_mutex locked
*/
static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
return __vfs_setxattr_noperm(dentry, XATTR_NAME_SELINUX, ctx, ctxlen, 0);
}
static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
int len = 0;
len = selinux_inode_getsecurity(inode, XATTR_SELINUX_SUFFIX,
ctx, true);
if (len < 0)
return len;
*ctxlen = len;
return 0;
}
#ifdef CONFIG_KEYS
static int selinux_key_alloc(struct key *k, const struct cred *cred,
unsigned long flags)
{
const struct task_security_struct *tsec;
struct key_security_struct *ksec;
ksec = kzalloc(sizeof(struct key_security_struct), GFP_KERNEL);
if (!ksec)
return -ENOMEM;
tsec = cred->security;
if (tsec->keycreate_sid)
ksec->sid = tsec->keycreate_sid;
else
ksec->sid = tsec->sid;
k->security = ksec;
return 0;
}
static void selinux_key_free(struct key *k)
{
struct key_security_struct *ksec = k->security;
k->security = NULL;
kfree(ksec);
}
static int selinux_key_permission(key_ref_t key_ref,
const struct cred *cred,
key_perm_t perm)
{
struct key *key;
struct key_security_struct *ksec;
u32 sid;
/* if no specific permissions are requested, we skip the
permission check. No serious additional covert channels
appear to be created. */
if (perm == 0)
return 0;
sid = cred_sid(cred);
key = key_ref_to_ptr(key_ref);
ksec = key->security;
return avc_has_perm(sid, ksec->sid, SECCLASS_KEY, perm, NULL);
}
static int selinux_key_getsecurity(struct key *key, char **_buffer)
{
struct key_security_struct *ksec = key->security;
char *context = NULL;
unsigned len;
int rc;
rc = security_sid_to_context(ksec->sid, &context, &len);
if (!rc)
rc = len;
*_buffer = context;
return rc;
}
#endif
static struct security_operations selinux_ops = {
.name = "selinux",
.ptrace_access_check = selinux_ptrace_access_check,
.ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
.capset = selinux_capset,
.capable = selinux_capable,
.quotactl = selinux_quotactl,
.quota_on = selinux_quota_on,
.syslog = selinux_syslog,
.vm_enough_memory = selinux_vm_enough_memory,
.netlink_send = selinux_netlink_send,
.netlink_recv = selinux_netlink_recv,
.bprm_set_creds = selinux_bprm_set_creds,
.bprm_committing_creds = selinux_bprm_committing_creds,
.bprm_committed_creds = selinux_bprm_committed_creds,
.bprm_secureexec = selinux_bprm_secureexec,
.sb_alloc_security = selinux_sb_alloc_security,
.sb_free_security = selinux_sb_free_security,
.sb_copy_data = selinux_sb_copy_data,
.sb_remount = selinux_sb_remount,
.sb_kern_mount = selinux_sb_kern_mount,
.sb_show_options = selinux_sb_show_options,
.sb_statfs = selinux_sb_statfs,
.sb_mount = selinux_mount,
.sb_umount = selinux_umount,
.sb_set_mnt_opts = selinux_set_mnt_opts,
.sb_clone_mnt_opts = selinux_sb_clone_mnt_opts,
.sb_parse_opts_str = selinux_parse_opts_str,
.inode_alloc_security = selinux_inode_alloc_security,
.inode_free_security = selinux_inode_free_security,
.inode_init_security = selinux_inode_init_security,
.inode_create = selinux_inode_create,
.inode_link = selinux_inode_link,
.inode_unlink = selinux_inode_unlink,
.inode_symlink = selinux_inode_symlink,
.inode_mkdir = selinux_inode_mkdir,
.inode_rmdir = selinux_inode_rmdir,
.inode_mknod = selinux_inode_mknod,
.inode_rename = selinux_inode_rename,
.inode_readlink = selinux_inode_readlink,
.inode_follow_link = selinux_inode_follow_link,
.inode_permission = selinux_inode_permission,
.inode_setattr = selinux_inode_setattr,
.inode_getattr = selinux_inode_getattr,
.inode_setxattr = selinux_inode_setxattr,
.inode_post_setxattr = selinux_inode_post_setxattr,
.inode_getxattr = selinux_inode_getxattr,
.inode_listxattr = selinux_inode_listxattr,
.inode_removexattr = selinux_inode_removexattr,
.inode_getsecurity = selinux_inode_getsecurity,
.inode_setsecurity = selinux_inode_setsecurity,
.inode_listsecurity = selinux_inode_listsecurity,
.inode_getsecid = selinux_inode_getsecid,
.file_permission = selinux_file_permission,
.file_alloc_security = selinux_file_alloc_security,
.file_free_security = selinux_file_free_security,
.file_ioctl = selinux_file_ioctl,
.file_mmap = selinux_file_mmap,
.file_mprotect = selinux_file_mprotect,
.file_lock = selinux_file_lock,
.file_fcntl = selinux_file_fcntl,
.file_set_fowner = selinux_file_set_fowner,
.file_send_sigiotask = selinux_file_send_sigiotask,
.file_receive = selinux_file_receive,
.dentry_open = selinux_dentry_open,
.task_create = selinux_task_create,
.cred_alloc_blank = selinux_cred_alloc_blank,
.cred_free = selinux_cred_free,
.cred_prepare = selinux_cred_prepare,
.cred_transfer = selinux_cred_transfer,
.kernel_act_as = selinux_kernel_act_as,
.kernel_create_files_as = selinux_kernel_create_files_as,
.kernel_module_request = selinux_kernel_module_request,
.task_setpgid = selinux_task_setpgid,
.task_getpgid = selinux_task_getpgid,
.task_getsid = selinux_task_getsid,
.task_getsecid = selinux_task_getsecid,
.task_setnice = selinux_task_setnice,
.task_setioprio = selinux_task_setioprio,
.task_getioprio = selinux_task_getioprio,
.task_setrlimit = selinux_task_setrlimit,
.task_setscheduler = selinux_task_setscheduler,
.task_getscheduler = selinux_task_getscheduler,
.task_movememory = selinux_task_movememory,
.task_kill = selinux_task_kill,
.task_wait = selinux_task_wait,
.task_to_inode = selinux_task_to_inode,
.ipc_permission = selinux_ipc_permission,
.ipc_getsecid = selinux_ipc_getsecid,
.msg_msg_alloc_security = selinux_msg_msg_alloc_security,
.msg_msg_free_security = selinux_msg_msg_free_security,
.msg_queue_alloc_security = selinux_msg_queue_alloc_security,
.msg_queue_free_security = selinux_msg_queue_free_security,
.msg_queue_associate = selinux_msg_queue_associate,
.msg_queue_msgctl = selinux_msg_queue_msgctl,
.msg_queue_msgsnd = selinux_msg_queue_msgsnd,
.msg_queue_msgrcv = selinux_msg_queue_msgrcv,
.shm_alloc_security = selinux_shm_alloc_security,
.shm_free_security = selinux_shm_free_security,
.shm_associate = selinux_shm_associate,
.shm_shmctl = selinux_shm_shmctl,
.shm_shmat = selinux_shm_shmat,
.sem_alloc_security = selinux_sem_alloc_security,
.sem_free_security = selinux_sem_free_security,
.sem_associate = selinux_sem_associate,
.sem_semctl = selinux_sem_semctl,
.sem_semop = selinux_sem_semop,
.d_instantiate = selinux_d_instantiate,
.getprocattr = selinux_getprocattr,
.setprocattr = selinux_setprocattr,
.secid_to_secctx = selinux_secid_to_secctx,
.secctx_to_secid = selinux_secctx_to_secid,
.release_secctx = selinux_release_secctx,
.inode_notifysecctx = selinux_inode_notifysecctx,
.inode_setsecctx = selinux_inode_setsecctx,
.inode_getsecctx = selinux_inode_getsecctx,
.unix_stream_connect = selinux_socket_unix_stream_connect,
.unix_may_send = selinux_socket_unix_may_send,
.socket_create = selinux_socket_create,
.socket_post_create = selinux_socket_post_create,
.socket_bind = selinux_socket_bind,
.socket_connect = selinux_socket_connect,
.socket_listen = selinux_socket_listen,
.socket_accept = selinux_socket_accept,
.socket_sendmsg = selinux_socket_sendmsg,
.socket_recvmsg = selinux_socket_recvmsg,
.socket_getsockname = selinux_socket_getsockname,
.socket_getpeername = selinux_socket_getpeername,
.socket_getsockopt = selinux_socket_getsockopt,
.socket_setsockopt = selinux_socket_setsockopt,
.socket_shutdown = selinux_socket_shutdown,
.socket_sock_rcv_skb = selinux_socket_sock_rcv_skb,
.socket_getpeersec_stream = selinux_socket_getpeersec_stream,
.socket_getpeersec_dgram = selinux_socket_getpeersec_dgram,
.sk_alloc_security = selinux_sk_alloc_security,
.sk_free_security = selinux_sk_free_security,
.sk_clone_security = selinux_sk_clone_security,
.sk_getsecid = selinux_sk_getsecid,
.sock_graft = selinux_sock_graft,
.inet_conn_request = selinux_inet_conn_request,
.inet_csk_clone = selinux_inet_csk_clone,
.inet_conn_established = selinux_inet_conn_established,
.secmark_relabel_packet = selinux_secmark_relabel_packet,
.secmark_refcount_inc = selinux_secmark_refcount_inc,
.secmark_refcount_dec = selinux_secmark_refcount_dec,
.req_classify_flow = selinux_req_classify_flow,
.tun_dev_create = selinux_tun_dev_create,
.tun_dev_post_create = selinux_tun_dev_post_create,
.tun_dev_attach = selinux_tun_dev_attach,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
.xfrm_policy_clone_security = selinux_xfrm_policy_clone,
.xfrm_policy_free_security = selinux_xfrm_policy_free,
.xfrm_policy_delete_security = selinux_xfrm_policy_delete,
.xfrm_state_alloc_security = selinux_xfrm_state_alloc,
.xfrm_state_free_security = selinux_xfrm_state_free,
.xfrm_state_delete_security = selinux_xfrm_state_delete,
.xfrm_policy_lookup = selinux_xfrm_policy_lookup,
.xfrm_state_pol_flow_match = selinux_xfrm_state_pol_flow_match,
.xfrm_decode_session = selinux_xfrm_decode_session,
#endif
#ifdef CONFIG_KEYS
.key_alloc = selinux_key_alloc,
.key_free = selinux_key_free,
.key_permission = selinux_key_permission,
.key_getsecurity = selinux_key_getsecurity,
#endif
#ifdef CONFIG_AUDIT
.audit_rule_init = selinux_audit_rule_init,
.audit_rule_known = selinux_audit_rule_known,
.audit_rule_match = selinux_audit_rule_match,
.audit_rule_free = selinux_audit_rule_free,
#endif
};
static __init int selinux_init(void)
{
if (!security_module_enable(&selinux_ops)) {
selinux_enabled = 0;
return 0;
}
if (!selinux_enabled) {
printk(KERN_INFO "SELinux: Disabled at boot.\n");
return 0;
}
printk(KERN_INFO "SELinux: Initializing.\n");
/* Set the security state for the initial task. */
cred_init_security();
default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
sel_inode_cache = kmem_cache_create("selinux_inode_security",
sizeof(struct inode_security_struct),
0, SLAB_PANIC, NULL);
avc_init();
if (register_security(&selinux_ops))
panic("SELinux: Unable to register with kernel.\n");
if (selinux_enforcing)
printk(KERN_DEBUG "SELinux: Starting in enforcing mode\n");
else
printk(KERN_DEBUG "SELinux: Starting in permissive mode\n");
return 0;
}
static void delayed_superblock_init(struct super_block *sb, void *unused)
{
superblock_doinit(sb, NULL);
}
void selinux_complete_init(void)
{
printk(KERN_DEBUG "SELinux: Completing initialization.\n");
/* Set up any superblocks initialized prior to the policy load. */
printk(KERN_DEBUG "SELinux: Setting up existing superblocks.\n");
iterate_supers(delayed_superblock_init, NULL);
}
/* SELinux requires early initialization in order to label
all processes and objects when they are created. */
security_initcall(selinux_init);
#if defined(CONFIG_NETFILTER)
static struct nf_hook_ops selinux_ipv4_ops[] = {
{
.hook = selinux_ipv4_postroute,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv4_forward,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
{
.hook = selinux_ipv4_output,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
}
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static struct nf_hook_ops selinux_ipv6_ops[] = {
{
.hook = selinux_ipv6_postroute,
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv6_forward,
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SELINUX_FIRST,
}
};
#endif /* IPV6 */
static int __init selinux_nf_ip_init(void)
{
int err = 0;
if (!selinux_enabled)
goto out;
printk(KERN_DEBUG "SELinux: Registering netfilter hooks\n");
err = nf_register_hooks(selinux_ipv4_ops, ARRAY_SIZE(selinux_ipv4_ops));
if (err)
panic("SELinux: nf_register_hooks for IPv4: error %d\n", err);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
err = nf_register_hooks(selinux_ipv6_ops, ARRAY_SIZE(selinux_ipv6_ops));
if (err)
panic("SELinux: nf_register_hooks for IPv6: error %d\n", err);
#endif /* IPV6 */
out:
return err;
}
__initcall(selinux_nf_ip_init);
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static void selinux_nf_ip_exit(void)
{
printk(KERN_DEBUG "SELinux: Unregistering netfilter hooks\n");
nf_unregister_hooks(selinux_ipv4_ops, ARRAY_SIZE(selinux_ipv4_ops));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
nf_unregister_hooks(selinux_ipv6_ops, ARRAY_SIZE(selinux_ipv6_ops));
#endif /* IPV6 */
}
#endif
#else /* CONFIG_NETFILTER */
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
#define selinux_nf_ip_exit()
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static int selinux_disabled;
int selinux_disable(void)
{
extern void exit_sel_fs(void);
if (ss_initialized) {
/* Not permitted after initial policy load. */
return -EINVAL;
}
if (selinux_disabled) {
/* Only do this once. */
return -EINVAL;
}
printk(KERN_INFO "SELinux: Disabled at runtime.\n");
selinux_disabled = 1;
selinux_enabled = 0;
reset_security_ops();
/* Try to destroy the avc node cache */
avc_disable();
/* Unregister netfilter hooks. */
selinux_nf_ip_exit();
/* Unregister selinuxfs. */
exit_sel_fs();
return 0;
}
#endif
| gpl-2.0 |
NooNameR/qsd8x50-bravo- | drivers/net/irda/sir_dev.c | 1837 | 24832 | /*********************************************************************
*
* sir_dev.c: irda sir network device
*
* Copyright (c) 2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "sir-dev.h"
static struct workqueue_struct *irda_sir_wq;
/* STATE MACHINE */
/* substate handler of the config-fsm to handle the cases where we want
* to wait for transmit completion before changing the port configuration
*/
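/* Returns 0 once transmission has fully completed, a positive
 * delay in msecs after which the caller should poll again, or a
 * negative errno; see how sirdev_config_fsm() consumes the return
 * value in SIRDEV_STATE_DONGLE_CHECK below.
 */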
static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
struct sir_fsm *fsm = &dev->fsm;
unsigned next_state, delay;
unsigned bytes_left;
do {
next_state = fsm->substate; /* default: stay in current substate */
delay = 0;
switch(fsm->substate) {
case SIRDEV_STATE_WAIT_XMIT:
if (dev->drv->chars_in_buffer)
bytes_left = dev->drv->chars_in_buffer(dev);
else
bytes_left = 0;
if (!bytes_left) {
next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
break;
}
if (dev->speed > 115200)
delay = (bytes_left*8*10000) / (dev->speed/100);
else if (dev->speed > 0)
delay = (bytes_left*10*10000) / (dev->speed/100);
else
delay = 0;
/* expected delay (usec) until remaining bytes are sent */
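/* Worked example (illustrative): at 9600 baud with 16 bytes
 * left, delay = (16*10*10000) / (9600/100) = 16666 usec, which
 * the rounding below turns into a 17 msec sleep.
 */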
if (delay < 100) {
udelay(delay);
delay = 0;
break;
}
/* sleep some longer delay (msec) */
delay = (delay+999) / 1000;
break;
case SIRDEV_STATE_WAIT_UNTIL_SENT:
/* block until the underlying hardware buffers are empty */
if (dev->drv->wait_until_sent)
dev->drv->wait_until_sent(dev);
next_state = SIRDEV_STATE_TX_DONE;
break;
case SIRDEV_STATE_TX_DONE:
return 0;
default:
IRDA_ERROR("%s - undefined state\n", __func__);
return -EINVAL;
}
fsm->substate = next_state;
} while (delay == 0);
return delay;
}
/*
* Function sirdev_config_fsm
*
* State machine to handle the configuration of the device (and attached dongle, if any).
* This handler is scheduled for execution in kIrDAd context, so we can sleep.
* However, kIrDAd is shared by all sir_dev devices, so we'd better not sleep there too
* long. Instead, for longer delays we start a timer to reschedule us later.
* On entry, fsm->sem is always locked and the netdev xmit queue stopped.
* Both must be unlocked/restarted on completion - but only on final exit.
*/
static void sirdev_config_fsm(struct work_struct *work)
{
struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
struct sir_fsm *fsm = &dev->fsm;
int next_state;
int ret = -1;
unsigned delay;
IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
do {
IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
__func__, fsm->state, fsm->substate);
next_state = fsm->state;
delay = 0;
switch(fsm->state) {
case SIRDEV_STATE_DONGLE_OPEN:
if (dev->dongle_drv != NULL) {
ret = sirdev_put_dongle(dev);
if (ret) {
fsm->result = -EINVAL;
next_state = SIRDEV_STATE_ERROR;
break;
}
}
/* Initialize dongle */
ret = sirdev_get_dongle(dev, fsm->param);
if (ret) {
fsm->result = ret;
next_state = SIRDEV_STATE_ERROR;
break;
}
/* Dongles are powered through the modem control lines which
* were just set during open. Before resetting, let's wait for
* the power to stabilize. This is what some dongle drivers did
* in open before, while others didn't - should be safe anyway.
*/
delay = 50;
fsm->substate = SIRDEV_STATE_DONGLE_RESET;
next_state = SIRDEV_STATE_DONGLE_RESET;
fsm->param = 9600;
break;
case SIRDEV_STATE_DONGLE_CLOSE:
/* shouldn't we just treat this as success? */
if (dev->dongle_drv == NULL) {
fsm->result = -EINVAL;
next_state = SIRDEV_STATE_ERROR;
break;
}
ret = sirdev_put_dongle(dev);
if (ret) {
fsm->result = ret;
next_state = SIRDEV_STATE_ERROR;
break;
}
next_state = SIRDEV_STATE_DONE;
break;
case SIRDEV_STATE_SET_DTR_RTS:
ret = sirdev_set_dtr_rts(dev,
(fsm->param&0x02) ? TRUE : FALSE,
(fsm->param&0x01) ? TRUE : FALSE);
next_state = SIRDEV_STATE_DONE;
break;
case SIRDEV_STATE_SET_SPEED:
fsm->substate = SIRDEV_STATE_WAIT_XMIT;
next_state = SIRDEV_STATE_DONGLE_CHECK;
break;
case SIRDEV_STATE_DONGLE_CHECK:
ret = sirdev_tx_complete_fsm(dev);
if (ret < 0) {
fsm->result = ret;
next_state = SIRDEV_STATE_ERROR;
break;
}
if ((delay=ret) != 0)
break;
if (dev->dongle_drv) {
fsm->substate = SIRDEV_STATE_DONGLE_RESET;
next_state = SIRDEV_STATE_DONGLE_RESET;
}
else {
dev->speed = fsm->param;
next_state = SIRDEV_STATE_PORT_SPEED;
}
break;
case SIRDEV_STATE_DONGLE_RESET:
if (dev->dongle_drv->reset) {
ret = dev->dongle_drv->reset(dev);
if (ret < 0) {
fsm->result = ret;
next_state = SIRDEV_STATE_ERROR;
break;
}
}
else
ret = 0;
if ((delay=ret) == 0) {
/* set serial port according to dongle default speed */
if (dev->drv->set_speed)
dev->drv->set_speed(dev, dev->speed);
fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
next_state = SIRDEV_STATE_DONGLE_SPEED;
}
break;
case SIRDEV_STATE_DONGLE_SPEED:
if (dev->dongle_drv->reset) {
ret = dev->dongle_drv->set_speed(dev, fsm->param);
if (ret < 0) {
fsm->result = ret;
next_state = SIRDEV_STATE_ERROR;
break;
}
}
else
ret = 0;
if ((delay=ret) == 0)
next_state = SIRDEV_STATE_PORT_SPEED;
break;
case SIRDEV_STATE_PORT_SPEED:
/* Finally we are ready to change the serial port speed */
if (dev->drv->set_speed)
dev->drv->set_speed(dev, dev->speed);
dev->new_speed = 0;
next_state = SIRDEV_STATE_DONE;
break;
case SIRDEV_STATE_DONE:
/* Signal network layer so it can send more frames */
netif_wake_queue(dev->netdev);
next_state = SIRDEV_STATE_COMPLETE;
break;
default:
IRDA_ERROR("%s - undefined state\n", __func__);
fsm->result = -EINVAL;
/* fall thru */
case SIRDEV_STATE_ERROR:
IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
netif_stop_queue(dev->netdev);
#else
netif_wake_queue(dev->netdev);
#endif
/* fall thru */
case SIRDEV_STATE_COMPLETE:
/* config change finished, so we are not busy any longer */
sirdev_enable_rx(dev);
up(&fsm->sem);
return;
}
fsm->state = next_state;
} while(!delay);
queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}
/* schedule some device configuration task for execution by kIrDAd
* on behalf of the above state machine.
* can be called from process or interrupt/tasklet context.
*/
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
struct sir_fsm *fsm = &dev->fsm;
IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
initial_state, param);
if (down_trylock(&fsm->sem)) {
if (in_interrupt() || in_atomic() || irqs_disabled()) {
IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
return -EWOULDBLOCK;
} else
down(&fsm->sem);
}
if (fsm->state == SIRDEV_STATE_DEAD) {
/* race with sirdev_close should never happen */
IRDA_ERROR("%s(), instance staled!\n", __func__);
up(&fsm->sem);
return -ESTALE; /* or better EPIPE? */
}
netif_stop_queue(dev->netdev);
atomic_set(&dev->enable_rx, 0);
fsm->state = initial_state;
fsm->param = param;
fsm->result = 0;
INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
queue_delayed_work(irda_sir_wq, &fsm->work, 0);
return 0;
}
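/* Callers that may sleep can then block on fsm.sem to wait for
 * the scheduled request to complete, as sirdev_set_dongle()
 * below demonstrates.
 */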
/***************************************************************************/
void sirdev_enable_rx(struct sir_dev *dev)
{
if (unlikely(atomic_read(&dev->enable_rx)))
return;
/* flush rx-buffer - should also help in case of problems with echo cancellation */
dev->rx_buff.data = dev->rx_buff.head;
dev->rx_buff.len = 0;
dev->rx_buff.in_frame = FALSE;
dev->rx_buff.state = OUTSIDE_FRAME;
atomic_set(&dev->enable_rx, 1);
}
static int sirdev_is_receiving(struct sir_dev *dev)
{
if (!atomic_read(&dev->enable_rx))
return 0;
return dev->rx_buff.state != OUTSIDE_FRAME;
}
int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
int err;
IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
err = sirdev_schedule_dongle_open(dev, type);
if (unlikely(err))
return err;
down(&dev->fsm.sem); /* block until config change completed */
err = dev->fsm.result;
up(&dev->fsm.sem);
return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);
/* used by dongle drivers for dongle programming */
int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
unsigned long flags;
int ret;
if (unlikely(len > dev->tx_buff.truesize))
return -ENOSPC;
spin_lock_irqsave(&dev->tx_lock, flags); /* serialize with other tx operations */
while (dev->tx_buff.len > 0) { /* wait until tx idle */
spin_unlock_irqrestore(&dev->tx_lock, flags);
msleep(10);
spin_lock_irqsave(&dev->tx_lock, flags);
}
dev->tx_buff.data = dev->tx_buff.head;
memcpy(dev->tx_buff.data, buf, len);
dev->tx_buff.len = len;
ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (ret > 0) {
IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
dev->tx_buff.data += ret;
dev->tx_buff.len -= ret;
dev->raw_tx = 1;
ret = len; /* all data is going to be sent */
}
spin_unlock_irqrestore(&dev->tx_lock, flags);
return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);
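/* Illustrative sketch only - a dongle driver might program its
 * hardware from a context that may sleep roughly like this (the
 * command bytes and function name are hypothetical):
 *
 * static int example_dongle_program(struct sir_dev *dev)
 * {
 *	static const char cmd[] = { 0x55, 0xaa };
 *	int ret = sirdev_raw_write(dev, cmd, sizeof(cmd));
 *
 *	return (ret == sizeof(cmd)) ? 0 : (ret < 0 ? ret : -EIO);
 * }
 */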
/* seems some dongle drivers may need this */
int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
int count;
if (atomic_read(&dev->enable_rx))
return -EIO; /* fail if we expect irda-frames */
count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
if (count > 0) {
memcpy(buf, dev->rx_buff.data, count);
dev->rx_buff.data += count;
dev->rx_buff.len -= count;
}
/* remaining stuff gets flushed when re-enabling normal rx */
return count;
}
EXPORT_SYMBOL(sirdev_raw_read);
int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
int ret = -ENXIO;
if (dev->drv->set_dtr_rts)
ret = dev->drv->set_dtr_rts(dev, dtr, rts);
return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);
/**********************************************************************/
/* called from client driver - likely with bh-context - to indicate
* it made some progress with transmission. Hence we send the next
* chunk, if any, or complete the skb otherwise
*/
void sirdev_write_complete(struct sir_dev *dev)
{
unsigned long flags;
struct sk_buff *skb;
int actual = 0;
int err;
spin_lock_irqsave(&dev->tx_lock, flags);
IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
__func__, dev->tx_buff.len);
if (likely(dev->tx_buff.len > 0)) {
/* Write data left in transmit buffer */
actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (likely(actual>0)) {
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
else if (unlikely(actual<0)) {
/* could be dropped later when we have tx_timeout to recover */
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
__func__, actual);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
dev_kfree_skb_any(skb);
dev->netdev->stats.tx_errors++;
dev->netdev->stats.tx_dropped++;
}
dev->tx_buff.len = 0;
}
if (dev->tx_buff.len > 0)
goto done; /* more data to send later */
}
if (unlikely(dev->raw_tx != 0)) {
/* in raw mode we are just done now after the buffer was sent
* completely. Since this was requested by some dongle driver
* running under the control of the irda-thread we must take
* care here not to re-enable the queue. The queue will be
* restarted when the irda-thread has completed the request.
*/
IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
dev->raw_tx = 0;
goto done; /* no post-frame handling in raw mode */
}
/* we have finished now sending this skb.
* update statistics and free the skb.
* finally we check and trigger a pending speed change, if any.
* if not we switch to rx mode and wake the queue for further
* packets.
* note the scheduled speed request blocks until the lower
* client driver and the corresponding hardware has really
* finished sending all data (xmit fifo drained f.e.)
* before the speed change gets finally done and the queue
* re-activated.
*/
IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
dev->netdev->stats.tx_packets++;
dev->netdev->stats.tx_bytes += skb->len;
dev_kfree_skb_any(skb);
}
if (unlikely(dev->new_speed > 0)) {
IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
err = sirdev_schedule_speed(dev, dev->new_speed);
if (unlikely(err)) {
/* should never happen
* forget the speed change and hope the stack recovers
*/
IRDA_ERROR("%s - schedule speed change failed: %d\n",
__func__, err);
netif_wake_queue(dev->netdev);
}
/* else: success
* speed change in progress now
* on completion dev->new_speed gets cleared,
* rx-reenabled and the queue restarted
*/
}
else {
sirdev_enable_rx(dev);
netif_wake_queue(dev->netdev);
}
done:
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);
/* called from client driver - likely with bh-context - to give us
* some more received bytes. We put them into the rx-buffer,
* normally unwrapping and building LAP-skb's (unless rx disabled)
*/
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
if (!dev || !dev->netdev) {
IRDA_WARNING("%s(), not ready yet!\n", __func__);
return -1;
}
if (!dev->irlap) {
IRDA_WARNING("%s - too early: %p / %zd!\n",
__func__, cp, count);
return -1;
}
if (cp==NULL) {
/* error already at lower level receive
* just update stats and set media busy
*/
irda_device_set_media_busy(dev->netdev, TRUE);
dev->netdev->stats.rx_dropped++;
IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
return 0;
}
/* Read the characters into the buffer */
if (likely(atomic_read(&dev->enable_rx))) {
while (count--)
/* Unwrap and destuff one byte */
async_unwrap_char(dev->netdev, &dev->netdev->stats,
&dev->rx_buff, *cp++);
} else {
while (count--) {
/* rx not enabled: save the raw bytes and never
* trigger any netif_rx. The received bytes are flushed
* later when we re-enable rx but might be read meanwhile
* by the dongle driver.
*/
dev->rx_buff.data[dev->rx_buff.len++] = *cp++;
/* What should we do when the buffer is full? */
if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
dev->rx_buff.len = 0;
}
}
return 0;
}
EXPORT_SYMBOL(sirdev_receive);
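/* Illustrative sketch only - a lower-level client driver would
 * typically feed received bytes in from its rx path (names here
 * are hypothetical):
 *
 * static void example_uart_rx(struct sir_dev *dev,
 *			       const unsigned char *buf, size_t n)
 * {
 *	if (sirdev_receive(dev, buf, n) < 0)
 *		pr_warn("sir_dev not ready, %zu bytes dropped\n", n);
 * }
 */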
/**********************************************************************/
/* callbacks from network layer */
static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct sir_dev *dev = netdev_priv(ndev);
unsigned long flags;
int actual = 0;
int err;
s32 speed;
IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
netif_stop_queue(ndev);
IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
speed = irda_get_next_speed(skb);
if ((speed != dev->speed) && (speed != -1)) {
if (!skb->len) {
err = sirdev_schedule_speed(dev, speed);
if (unlikely(err == -EWOULDBLOCK)) {
/* Failed to initiate the speed change, likely the fsm
* is still busy (pretty unlikely, but...)
* We refuse to accept the skb and return with the queue
* stopped so the network layer will retry after the
* fsm completes and wakes the queue.
*/
return NETDEV_TX_BUSY;
}
else if (unlikely(err)) {
/* other fatal error - forget the speed change and
* hope the stack will recover somehow
*/
netif_start_queue(ndev);
}
/* else: success
* speed change in progress now
* on completion the queue gets restarted
*/
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
} else
dev->new_speed = speed;
}
/* Init tx buffer*/
dev->tx_buff.data = dev->tx_buff.head;
/* Check problems */
if(spin_is_locked(&dev->tx_lock)) {
IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
}
/* serialize with write completion */
spin_lock_irqsave(&dev->tx_lock, flags);
/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);
/* transmission will start now - disable receive.
* if we are just in the middle of an incoming frame,
* treat it as a collision. Probably it's a good idea to
* reset the rx_buff to OUTSIDE_FRAME in this case too?
*/
atomic_set(&dev->enable_rx, 0);
if (unlikely(sirdev_is_receiving(dev)))
dev->netdev->stats.collisions++;
actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (likely(actual > 0)) {
dev->tx_skb = skb;
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
else if (unlikely(actual < 0)) {
/* could be dropped later when we have tx_timeout to recover */
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
__func__, actual);
dev_kfree_skb_any(skb);
dev->netdev->stats.tx_errors++;
dev->netdev->stats.tx_dropped++;
netif_wake_queue(ndev);
}
spin_unlock_irqrestore(&dev->tx_lock, flags);
return NETDEV_TX_OK;
}
/* called from network layer with rtnl hold */
static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct if_irda_req *irq = (struct if_irda_req *) rq;
struct sir_dev *dev = netdev_priv(ndev);
int ret = 0;
IRDA_ASSERT(dev != NULL, return -1;);
IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
/* cannot sleep here for completion
* we are called from network layer with rtnl hold
*/
break;
case SIOCSDONGLE: /* Set dongle */
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
/* cannot sleep here for completion
* we are called from network layer with rtnl hold
*/
break;
case SIOCSMEDIABUSY: /* Set media busy */
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
irda_device_set_media_busy(dev->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
irq->ifr_receiving = sirdev_is_receiving(dev);
break;
case SIOCSDTRRTS:
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
/* cannot sleep here for completion
* we are called from network layer with rtnl hold
*/
break;
case SIOCSMODE:
#if 0
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
ret = sirdev_schedule_mode(dev, irq->ifr_mode);
/* cannot sleep here for completion
* we are called from network layer with rtnl hold
*/
break;
#endif
default:
ret = -EOPNOTSUPP;
}
return ret;
}
/* ----------------------------------------------------------------------------- */
#define SIRBUF_ALLOCSIZE 4269 /* worst case size of a wrapped IrLAP frame */
static int sirdev_alloc_buffers(struct sir_dev *dev)
{
dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
/* Bootstrap ZeroCopy Rx */
dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
GFP_KERNEL);
if (dev->rx_buff.skb == NULL)
return -ENOMEM;
skb_reserve(dev->rx_buff.skb, 1);
dev->rx_buff.head = dev->rx_buff.skb->data;
dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
if (dev->tx_buff.head == NULL) {
kfree_skb(dev->rx_buff.skb);
dev->rx_buff.skb = NULL;
dev->rx_buff.head = NULL;
return -ENOMEM;
}
dev->tx_buff.data = dev->tx_buff.head;
dev->rx_buff.data = dev->rx_buff.head;
dev->tx_buff.len = 0;
dev->rx_buff.len = 0;
dev->rx_buff.in_frame = FALSE;
dev->rx_buff.state = OUTSIDE_FRAME;
return 0;
};
static void sirdev_free_buffers(struct sir_dev *dev)
{
kfree_skb(dev->rx_buff.skb);
kfree(dev->tx_buff.head);
dev->rx_buff.head = dev->tx_buff.head = NULL;
dev->rx_buff.skb = NULL;
}
static int sirdev_open(struct net_device *ndev)
{
struct sir_dev *dev = netdev_priv(ndev);
const struct sir_driver *drv = dev->drv;
if (!drv)
return -ENODEV;
/* increase the reference count of the driver module before doing serious stuff */
if (!try_module_get(drv->owner))
return -ESTALE;
IRDA_DEBUG(2, "%s()\n", __func__);
if (sirdev_alloc_buffers(dev))
goto errout_dec;
if (!dev->drv->start_dev || dev->drv->start_dev(dev))
goto errout_free;
sirdev_enable_rx(dev);
dev->raw_tx = 0;
netif_start_queue(ndev);
dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
if (!dev->irlap)
goto errout_stop;
netif_wake_queue(ndev);
IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
return 0;
errout_stop:
atomic_set(&dev->enable_rx, 0);
if (dev->drv->stop_dev)
dev->drv->stop_dev(dev);
errout_free:
sirdev_free_buffers(dev);
errout_dec:
module_put(drv->owner);
return -EAGAIN;
}
static int sirdev_close(struct net_device *ndev)
{
struct sir_dev *dev = netdev_priv(ndev);
const struct sir_driver *drv;
// IRDA_DEBUG(0, "%s\n", __func__);
netif_stop_queue(ndev);
down(&dev->fsm.sem); /* block on pending config completion */
atomic_set(&dev->enable_rx, 0);
if (unlikely(!dev->irlap))
goto out;
irlap_close(dev->irlap);
dev->irlap = NULL;
drv = dev->drv;
if (unlikely(!drv || !dev->priv))
goto out;
if (drv->stop_dev)
drv->stop_dev(dev);
sirdev_free_buffers(dev);
module_put(drv->owner);
out:
dev->speed = 0;
up(&dev->fsm.sem);
return 0;
}
static const struct net_device_ops sirdev_ops = {
.ndo_start_xmit = sirdev_hard_xmit,
.ndo_open = sirdev_open,
.ndo_stop = sirdev_close,
.ndo_do_ioctl = sirdev_ioctl,
};
/* ----------------------------------------------------------------------------- */
struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
struct net_device *ndev;
struct sir_dev *dev;
IRDA_DEBUG(0, "%s - %s\n", __func__, name);
/* instead of adding tests to protect against drv->do_write==NULL
* at several places we refuse to create a sir_dev instance for
* drivers which don't implement do_write.
*/
if (!drv || !drv->do_write)
return NULL;
/*
* Allocate new instance of the device
*/
ndev = alloc_irdadev(sizeof(*dev));
if (ndev == NULL) {
IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
goto out;
}
dev = netdev_priv(ndev);
irda_init_max_qos_capabilies(&dev->qos);
dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
irda_qos_bits_to_value(&dev->qos);
strncpy(dev->hwname, name, sizeof(dev->hwname)-1);
atomic_set(&dev->enable_rx, 0);
dev->tx_skb = NULL;
spin_lock_init(&dev->tx_lock);
sema_init(&dev->fsm.sem, 1);
dev->drv = drv;
dev->netdev = ndev;
/* Override the network functions we need to use */
ndev->netdev_ops = &sirdev_ops;
if (register_netdev(ndev)) {
IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
goto out_freenetdev;
}
return dev;
out_freenetdev:
free_netdev(ndev);
out:
return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);
int sirdev_put_instance(struct sir_dev *dev)
{
int err = 0;
IRDA_DEBUG(0, "%s\n", __func__);
atomic_set(&dev->enable_rx, 0);
netif_carrier_off(dev->netdev);
netif_device_detach(dev->netdev);
if (dev->dongle_drv)
err = sirdev_schedule_dongle_close(dev);
if (err)
IRDA_ERROR("%s - error %d\n", __func__, err);
sirdev_close(dev->netdev);
down(&dev->fsm.sem);
dev->fsm.state = SIRDEV_STATE_DEAD; /* mark stale */
dev->dongle_drv = NULL;
dev->priv = NULL;
up(&dev->fsm.sem);
/* Remove netdevice */
unregister_netdev(dev->netdev);
free_netdev(dev->netdev);
return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);
static int __init sir_wq_init(void)
{
irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
if (!irda_sir_wq)
return -ENOMEM;
return 0;
}
static void __exit sir_wq_exit(void)
{
destroy_workqueue(irda_sir_wq);
}
module_init(sir_wq_init);
module_exit(sir_wq_exit);
MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mythos234/SimplKernel-LL-BOFJ | drivers/misc/lattice-ecp3-config.c | 2093 | 5888 | /*
* Copyright (C) 2012 Stefan Roese <sr@denx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#define FIRMWARE_NAME "lattice-ecp3.bit"
/*
* The JTAG ID's of the supported FPGA's. The ID is 32bit wide
* reversed as noted in the manual.
*/
#define ID_ECP3_17 0xc2088080
#define ID_ECP3_35 0xc2048080
/* FPGA commands */
#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
#define FPGA_CMD_CLEAR 0x70
#define FPGA_CMD_REFRESH 0x71
#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
/*
* The status register is 32bit reversed, DONE is bit 17 from the TN1222.pdf
* (LatticeECP3 Slave SPI Port User's Guide)
*/
#define FPGA_STATUS_DONE 0x00020000
#define FPGA_STATUS_CLEARED 0x00010000
#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
#define FPGA_CLEAR_MSLEEP 10
#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
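/* i.e. at most FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP = 500 polls
 * of the status register in the clear loop below.
 */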
struct fpga_data {
struct completion fw_loaded;
};
struct ecp3_dev {
u32 jedec_id;
char *name;
};
static const struct ecp3_dev ecp3_dev[] = {
{
.jedec_id = ID_ECP3_17,
.name = "Lattice ECP3-17",
},
{
.jedec_id = ID_ECP3_35,
.name = "Lattice ECP3-35",
},
};
static void firmware_load(const struct firmware *fw, void *context)
{
struct spi_device *spi = (struct spi_device *)context;
struct fpga_data *data = spi_get_drvdata(spi);
u8 *buffer;
int ret;
u8 txbuf[8];
u8 rxbuf[8];
int rx_len = 8;
int i;
u32 jedec_id;
u32 status;
if (fw->size == 0) {
dev_err(&spi->dev, "Error: Firmware size is 0!\n");
return;
}
/* Fill dummy data (24 stuffing bits for commands) */
txbuf[1] = 0x00;
txbuf[2] = 0x00;
txbuf[3] = 0x00;
/* Trying to speak with the FPGA via SPI... */
txbuf[0] = FPGA_CMD_READ_ID;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
jedec_id = *(u32 *)&rxbuf[4];
for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
if (jedec_id == ecp3_dev[i].jedec_id)
break;
}
if (i == ARRAY_SIZE(ecp3_dev)) {
dev_err(&spi->dev,
"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
jedec_id);
return;
}
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
txbuf[0] = FPGA_CMD_READ_STATUS;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
buffer = kzalloc(fw->size + 8, GFP_KERNEL);
if (!buffer) {
dev_err(&spi->dev, "Error: Can't allocate memory!\n");
return;
}
/*
* Insert WRITE_INC command into stream (one SPI frame)
*/
buffer[0] = FPGA_CMD_WRITE_INC;
buffer[1] = 0xff;
buffer[2] = 0xff;
buffer[3] = 0xff;
memcpy(buffer + 4, fw->data, fw->size);
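/* The buffer now holds 1 command byte + 3 stuffing bytes followed
 * by the bitstream; the remaining 4 bytes of the fw->size + 8
 * allocation stay zero (kzalloc) and are clocked out as trailing
 * padding by the spi_write() below.
 */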
txbuf[0] = FPGA_CMD_REFRESH;
ret = spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_WRITE_EN;
ret = spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_CLEAR;
ret = spi_write(spi, txbuf, 4);
/*
* Wait for FPGA memory to become cleared
*/
for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
txbuf[0] = FPGA_CMD_READ_STATUS;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = *(u32 *)&rxbuf[4];
if (status == FPGA_STATUS_CLEARED)
break;
msleep(FPGA_CLEAR_MSLEEP);
}
if (i == FPGA_CLEAR_LOOP_COUNT) {
dev_err(&spi->dev,
"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
status);
kfree(buffer);
return;
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
ret = spi_write(spi, buffer, fw->size + 8);
txbuf[0] = FPGA_CMD_WRITE_DIS;
ret = spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_READ_STATUS;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
status = *(u32 *)&rxbuf[4];
/* Check result */
if (status & FPGA_STATUS_DONE)
dev_info(&spi->dev, "FPGA succesfully configured!\n");
else
dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
/*
* Don't forget to release the firmware again
*/
release_firmware(fw);
kfree(buffer);
complete(&data->fw_loaded);
}
static int lattice_ecp3_probe(struct spi_device *spi)
{
struct fpga_data *data;
int err;
data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
return -ENOMEM;
}
spi_set_drvdata(spi, data);
init_completion(&data->fw_loaded);
err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
FIRMWARE_NAME, &spi->dev,
GFP_KERNEL, spi, firmware_load);
if (err) {
dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
return err;
}
dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
return 0;
}
static int lattice_ecp3_remove(struct spi_device *spi)
{
struct fpga_data *data = spi_get_drvdata(spi);
wait_for_completion(&data->fw_loaded);
return 0;
}
static const struct spi_device_id lattice_ecp3_id[] = {
{ "ecp3-17", 0 },
{ "ecp3-35", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
static struct spi_driver lattice_ecp3_driver = {
.driver = {
.name = "lattice-ecp3",
.owner = THIS_MODULE,
},
.probe = lattice_ecp3_probe,
.remove = lattice_ecp3_remove,
.id_table = lattice_ecp3_id,
};
module_spi_driver(lattice_ecp3_driver);
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CandyDevices/kernel_htc_msm8994 | arch/mips/pci/pci-ar724x.c | 2093 | 10051 | /*
* Atheros AR724X PCI host controller driver
*
* Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
* Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
#define AR724X_PCI_RESET_LINK_UP BIT(0)
#define AR724X_PCI_INT_DEV0 BIT(14)
#define AR724X_PCI_IRQ_COUNT 1
#define AR7240_BAR0_WAR_VALUE 0xffff
#define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \
PCI_COMMAND_MASTER | \
PCI_COMMAND_INVALIDATE | \
PCI_COMMAND_PARITY | \
PCI_COMMAND_SERR | \
PCI_COMMAND_FAST_BACK)
struct ar724x_pci_controller {
void __iomem *devcfg_base;
void __iomem *ctrl_base;
void __iomem *crp_base;
int irq;
int irq_base;
bool link_up;
bool bar0_is_cached;
u32 bar0_value;
spinlock_t lock;
struct pci_controller pci_controller;
struct resource io_res;
struct resource mem_res;
};
static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc)
{
u32 reset;
reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET);
return reset & AR724X_PCI_RESET_LINK_UP;
}
static inline struct ar724x_pci_controller *
pci_bus_to_ar724x_controller(struct pci_bus *bus)
{
struct pci_controller *hose;
hose = (struct pci_controller *) bus->sysdata;
return container_of(hose, struct ar724x_pci_controller, pci_controller);
}
static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
int where, int size, u32 value)
{
unsigned long flags;
void __iomem *base;
u32 data;
int s;
WARN_ON(where & (size - 1));
if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
base = apc->crp_base;
spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
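/* Read-modify-write within the aligned 32-bit word: e.g. a
 * one-byte write at where == 0x41 lands in bits 15:8 (s == 8)
 * of the word at offset 0x40.
 */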
switch (size) {
case 1:
s = ((where & 3) * 8);
data &= ~(0xff << s);
data |= ((value & 0xff) << s);
break;
case 2:
s = ((where & 2) * 8);
data &= ~(0xffff << s);
data |= ((value & 0xffff) << s);
break;
case 4:
data = value;
break;
default:
spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
__raw_writel(data, base + (where & ~3));
/* flush write */
__raw_readl(base + (where & ~3));
spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
{
struct ar724x_pci_controller *apc;
unsigned long flags;
void __iomem *base;
u32 data;
apc = pci_bus_to_ar724x_controller(bus);
if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
base = apc->devcfg_base;
spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
case 1:
if (where & 1)
data >>= 8;
if (where & 2)
data >>= 16;
data &= 0xff;
break;
case 2:
if (where & 2)
data >>= 16;
data &= 0xffff;
break;
case 4:
break;
default:
spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
spin_unlock_irqrestore(&apc->lock, flags);
if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
apc->bar0_is_cached) {
/* use the cached value */
*value = apc->bar0_value;
} else {
*value = data;
}
return PCIBIOS_SUCCESSFUL;
}
static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t value)
{
struct ar724x_pci_controller *apc;
unsigned long flags;
void __iomem *base;
u32 data;
int s;
apc = pci_bus_to_ar724x_controller(bus);
if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
if (value != 0xffffffff) {
/*
* WAR for a hw issue. If the BAR0 register of the
* device is set to the proper base address, the
* memory space of the device is not accessible.
*
* Cache the intended value so it can be read back,
* and write a SoC specific constant value to the
* BAR0 register in order to make the device memory
* accessible.
*/
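/* E.g. (illustrative): a driver writing 0x10000000 to BAR0 will
 * read back 0x10000000 via the cache in ar724x_pci_read(), while
 * the register itself holds AR7240_BAR0_WAR_VALUE (0xffff).
 */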
apc->bar0_is_cached = true;
apc->bar0_value = value;
value = AR7240_BAR0_WAR_VALUE;
} else {
apc->bar0_is_cached = false;
}
}
base = apc->devcfg_base;
spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
case 1:
s = ((where & 3) * 8);
data &= ~(0xff << s);
data |= ((value & 0xff) << s);
break;
case 2:
s = ((where & 2) * 8);
data &= ~(0xffff << s);
data |= ((value & 0xffff) << s);
break;
case 4:
data = value;
break;
default:
spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
__raw_writel(data, base + (where & ~3));
/* flush write */
__raw_readl(base + (where & ~3));
spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops ar724x_pci_ops = {
.read = ar724x_pci_read,
.write = ar724x_pci_write,
};
static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct ar724x_pci_controller *apc;
void __iomem *base;
u32 pending;
apc = irq_get_handler_data(irq);
base = apc->ctrl_base;
pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
__raw_readl(base + AR724X_PCI_REG_INT_MASK);
if (pending & AR724X_PCI_INT_DEV0)
generic_handle_irq(apc->irq_base + 0);
else
spurious_interrupt();
}
static void ar724x_pci_irq_unmask(struct irq_data *d)
{
struct ar724x_pci_controller *apc;
void __iomem *base;
int offset;
u32 t;
apc = irq_data_get_irq_chip_data(d);
base = apc->ctrl_base;
offset = d->irq - apc->irq_base;
switch (offset) {
case 0:
t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
__raw_writel(t | AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_MASK);
/* flush write */
__raw_readl(base + AR724X_PCI_REG_INT_MASK);
}
}
static void ar724x_pci_irq_mask(struct irq_data *d)
{
struct ar724x_pci_controller *apc;
void __iomem *base;
int offset;
u32 t;
apc = irq_data_get_irq_chip_data(d);
base = apc->ctrl_base;
offset = d->irq - apc->irq_base;
switch (offset) {
case 0:
t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
__raw_writel(t & ~AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_MASK);
/* flush write */
__raw_readl(base + AR724X_PCI_REG_INT_MASK);
t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
__raw_writel(t | AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_STATUS);
/* flush write */
__raw_readl(base + AR724X_PCI_REG_INT_STATUS);
}
}
static struct irq_chip ar724x_pci_irq_chip = {
.name = "AR724X PCI ",
.irq_mask = ar724x_pci_irq_mask,
.irq_unmask = ar724x_pci_irq_unmask,
.irq_mask_ack = ar724x_pci_irq_mask,
};
static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
int id)
{
void __iomem *base;
int i;
base = apc->ctrl_base;
__raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
__raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT);
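/* each controller owns its own contiguous block of
* AR724X_PCI_IRQ_COUNT virtual IRQs above ATH79_PCI_IRQ_BASE
*/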
for (i = apc->irq_base;
i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) {
irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
handle_level_irq);
irq_set_chip_data(i, apc);
}
irq_set_handler_data(apc->irq, apc);
irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
}
static int ar724x_pci_probe(struct platform_device *pdev)
{
struct ar724x_pci_controller *apc;
struct resource *res;
int id;
id = pdev->id;
if (id == -1)
id = 0;
apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller),
GFP_KERNEL);
if (!apc)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
if (!res)
return -EINVAL;
apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->ctrl_base))
return PTR_ERR(apc->ctrl_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
if (!res)
return -EINVAL;
apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->devcfg_base))
return PTR_ERR(apc->devcfg_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
if (!res)
return -EINVAL;
apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->crp_base))
return PTR_ERR(apc->crp_base);
apc->irq = platform_get_irq(pdev, 0);
if (apc->irq < 0)
return -EINVAL;
spin_lock_init(&apc->lock);
res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
if (!res)
return -EINVAL;
apc->io_res.parent = res;
apc->io_res.name = "PCI IO space";
apc->io_res.start = res->start;
apc->io_res.end = res->end;
apc->io_res.flags = IORESOURCE_IO;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
if (!res)
return -EINVAL;
apc->mem_res.parent = res;
apc->mem_res.name = "PCI memory space";
apc->mem_res.start = res->start;
apc->mem_res.end = res->end;
apc->mem_res.flags = IORESOURCE_MEM;
apc->pci_controller.pci_ops = &ar724x_pci_ops;
apc->pci_controller.io_resource = &apc->io_res;
apc->pci_controller.mem_resource = &apc->mem_res;
apc->link_up = ar724x_pci_check_link(apc);
if (!apc->link_up)
dev_warn(&pdev->dev, "PCIe link is down\n");
ar724x_pci_irq_init(apc, id);
ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT);
register_pci_controller(&apc->pci_controller);
return 0;
}
static struct platform_driver ar724x_pci_driver = {
.probe = ar724x_pci_probe,
.driver = {
.name = "ar724x-pci",
.owner = THIS_MODULE,
},
};
static int __init ar724x_pci_init(void)
{
return platform_driver_register(&ar724x_pci_driver);
}
postcore_initcall(ar724x_pci_init);
| gpl-2.0 |
Hani-K/Simplicity_Kernel_Exynos5433_LL | drivers/media/i2c/sony-btf-mpx.c | 2093 | 11194 | /*
* Copyright (C) 2005-2006 Micronas USA Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/tuner.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <linux/slab.h>
MODULE_DESCRIPTION("sony-btf-mpx driver");
MODULE_LICENSE("GPL v2");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level 0=off(default) 1=on");
/* #define MPX_DEBUG */
/*
* Note:
*
* AS(IF/MPX) pin: LOW HIGH/OPEN
* IF/MPX address: 0x42/0x40 0x43/0x44
*/
static int force_mpx_mode = -1;
module_param(force_mpx_mode, int, 0644);
struct sony_btf_mpx {
struct v4l2_subdev sd;
int mpxmode;
u32 audmode;
};
static inline struct sony_btf_mpx *to_state(struct v4l2_subdev *sd)
{
return container_of(sd, struct sony_btf_mpx, sd);
}
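/*
* Write one 16-bit MPX register: the message is five bytes on the
* wire -- subdevice id, then the register address and the value,
* each sent high byte first.
*/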
static int mpx_write(struct i2c_client *client, int dev, int addr, int val)
{
u8 buffer[5];
struct i2c_msg msg;
buffer[0] = dev;
buffer[1] = addr >> 8;
buffer[2] = addr & 0xff;
buffer[3] = val >> 8;
buffer[4] = val & 0xff;
msg.addr = client->addr;
msg.flags = 0;
msg.len = 5;
msg.buf = buffer;
i2c_transfer(client->adapter, &msg, 1);
return 0;
}
/*
* MPX register values for the BTF-PG472Z:
*
* FM_ NICAM_ SCART_
* MODUS SOURCE ACB PRESCAL PRESCAL PRESCAL SYSTEM VOLUME
* 10/0030 12/0008 12/0013 12/000E 12/0010 12/0000 10/0020 12/0000
* ---------------------------------------------------------------
* Auto 1003 0020 0100 2603 5000 XXXX 0001 7500
*
* B/G
* Mono 1003 0020 0100 2603 5000 XXXX 0003 7500
* A2 1003 0020 0100 2601 5000 XXXX 0003 7500
* NICAM 1003 0120 0100 2603 5000 XXXX 0008 7500
*
* I
* Mono 1003 0020 0100 2603 7900 XXXX 000A 7500
* NICAM 1003 0120 0100 2603 7900 XXXX 000A 7500
*
* D/K
* Mono 1003 0020 0100 2603 5000 XXXX 0004 7500
* A2-1 1003 0020 0100 2601 5000 XXXX 0004 7500
* A2-2 1003 0020 0100 2601 5000 XXXX 0005 7500
* A2-3 1003 0020 0100 2601 5000 XXXX 0007 7500
* NICAM 1003 0120 0100 2603 5000 XXXX 000B 7500
*
* L/L'
* Mono 0003 0200 0100 7C03 5000 2200 0009 7500
* NICAM 0003 0120 0100 7C03 5000 XXXX 0009 7500
*
* M
* Mono 1003 0200 0100 2B03 5000 2B00 0002 7500
*
* For Asia, replace the 0x26XX in FM_PRESCALE with 0x14XX.
*
* Bilingual selection in A2/NICAM:
*
* High byte of SOURCE Left chan Right chan
* 0x01 MAIN SUB
* 0x03 MAIN MAIN
* 0x04 SUB SUB
*
* Force mono in NICAM by setting the high byte of SOURCE to 0x02 (L/L') or
* 0x00 (all other bands). Force mono in A2 with FMONO_A2:
*
* FMONO_A2
* 10/0022
* --------
* Forced mono ON 07F0
* Forced mono OFF 0190
*/
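/*
* As a sketch of how the table above maps onto mpx_write() calls
* ("10/0022" means subdevice 0x10, register 0x0022), forcing mono
* in an A2 mode is a single register write:
*
* mpx_write(client, 0x10, 0x0022, 0x07f0);
*
* mpx_setup() below issues exactly this write when audmode is mono
* and the selected standard uses A2.
*/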
static const struct {
enum { AUD_MONO, AUD_A2, AUD_NICAM, AUD_NICAM_L } audio_mode;
u16 modus;
u16 source;
u16 acb;
u16 fm_prescale;
u16 nicam_prescale;
u16 scart_prescale;
u16 system;
u16 volume;
} mpx_audio_modes[] = {
/* Auto */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603,
0x5000, 0x0000, 0x0001, 0x7500 },
/* B/G Mono */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603,
0x5000, 0x0000, 0x0003, 0x7500 },
/* B/G A2 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601,
0x5000, 0x0000, 0x0003, 0x7500 },
/* B/G NICAM */ { AUD_NICAM, 0x1003, 0x0120, 0x0100, 0x2603,
0x5000, 0x0000, 0x0008, 0x7500 },
/* I Mono */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603,
0x7900, 0x0000, 0x000A, 0x7500 },
/* I NICAM */ { AUD_NICAM, 0x1003, 0x0120, 0x0100, 0x2603,
0x7900, 0x0000, 0x000A, 0x7500 },
/* D/K Mono */ { AUD_MONO, 0x1003, 0x0020, 0x0100, 0x2603,
0x5000, 0x0000, 0x0004, 0x7500 },
/* D/K A2-1 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601,
0x5000, 0x0000, 0x0004, 0x7500 },
/* D/K A2-2 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601,
0x5000, 0x0000, 0x0005, 0x7500 },
/* D/K A2-3 */ { AUD_A2, 0x1003, 0x0020, 0x0100, 0x2601,
0x5000, 0x0000, 0x0007, 0x7500 },
/* D/K NICAM */ { AUD_NICAM, 0x1003, 0x0120, 0x0100, 0x2603,
0x5000, 0x0000, 0x000B, 0x7500 },
/* L/L' Mono */ { AUD_MONO, 0x0003, 0x0200, 0x0100, 0x7C03,
0x5000, 0x2200, 0x0009, 0x7500 },
/* L/L' NICAM */{ AUD_NICAM_L, 0x0003, 0x0120, 0x0100, 0x7C03,
0x5000, 0x0000, 0x0009, 0x7500 },
};
#define MPX_NUM_MODES ARRAY_SIZE(mpx_audio_modes)
static int mpx_setup(struct sony_btf_mpx *t)
{
struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
u16 source = 0;
u8 buffer[3];
struct i2c_msg msg;
int mode = t->mpxmode;
/* reset MPX */
buffer[0] = 0x00;
buffer[1] = 0x80;
buffer[2] = 0x00;
msg.addr = client->addr;
msg.flags = 0;
msg.len = 3;
msg.buf = buffer;
i2c_transfer(client->adapter, &msg, 1);
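/* then take the MPX back out of reset with a second transfer */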
buffer[1] = 0x00;
i2c_transfer(client->adapter, &msg, 1);
if (t->audmode != V4L2_TUNER_MODE_MONO)
mode++;
if (mpx_audio_modes[mode].audio_mode != AUD_MONO) {
switch (t->audmode) {
case V4L2_TUNER_MODE_MONO:
switch (mpx_audio_modes[mode].audio_mode) {
case AUD_A2:
source = mpx_audio_modes[mode].source;
break;
case AUD_NICAM:
source = 0x0000;
break;
case AUD_NICAM_L:
source = 0x0200;
break;
default:
break;
}
break;
case V4L2_TUNER_MODE_STEREO:
source = mpx_audio_modes[mode].source;
break;
case V4L2_TUNER_MODE_LANG1:
source = 0x0300;
break;
case V4L2_TUNER_MODE_LANG2:
source = 0x0400;
break;
}
source |= mpx_audio_modes[mode].source & 0x00ff;
} else
source = mpx_audio_modes[mode].source;
mpx_write(client, 0x10, 0x0030, mpx_audio_modes[mode].modus);
mpx_write(client, 0x12, 0x0008, source);
mpx_write(client, 0x12, 0x0013, mpx_audio_modes[mode].acb);
mpx_write(client, 0x12, 0x000e,
mpx_audio_modes[mode].fm_prescale);
mpx_write(client, 0x12, 0x0010,
mpx_audio_modes[mode].nicam_prescale);
mpx_write(client, 0x12, 0x000d,
mpx_audio_modes[mode].scart_prescale);
mpx_write(client, 0x10, 0x0020, mpx_audio_modes[mode].system);
mpx_write(client, 0x12, 0x0000, mpx_audio_modes[mode].volume);
if (mpx_audio_modes[mode].audio_mode == AUD_A2)
mpx_write(client, 0x10, 0x0022,
t->audmode == V4L2_TUNER_MODE_MONO ? 0x07f0 : 0x0190);
#ifdef MPX_DEBUG
{
u8 buf1[3], buf2[2];
struct i2c_msg msgs[2];
v4l2_info(client,
"MPX registers: %04x %04x %04x %04x %04x %04x %04x %04x\n",
mpx_audio_modes[mode].modus,
source,
mpx_audio_modes[mode].acb,
mpx_audio_modes[mode].fm_prescale,
mpx_audio_modes[mode].nicam_prescale,
mpx_audio_modes[mode].scart_prescale,
mpx_audio_modes[mode].system,
mpx_audio_modes[mode].volume);
buf1[0] = 0x11;
buf1[1] = 0x00;
buf1[2] = 0x7e;
msgs[0].addr = client->addr;
msgs[0].flags = 0;
msgs[0].len = 3;
msgs[0].buf = buf1;
msgs[1].addr = client->addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = 2;
msgs[1].buf = buf2;
i2c_transfer(client->adapter, msgs, 2);
v4l2_info(client, "MPX system: %02x%02x\n",
buf2[0], buf2[1]);
buf1[0] = 0x11;
buf1[1] = 0x02;
buf1[2] = 0x00;
i2c_transfer(client->adapter, msgs, 2);
v4l2_info(client, "MPX status: %02x%02x\n",
buf2[0], buf2[1]);
}
#endif
return 0;
}
static int sony_btf_mpx_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct sony_btf_mpx *t = to_state(sd);
int default_mpx_mode = 0;
if (std & V4L2_STD_PAL_BG)
default_mpx_mode = 1;
else if (std & V4L2_STD_PAL_I)
default_mpx_mode = 4;
else if (std & V4L2_STD_PAL_DK)
default_mpx_mode = 6;
else if (std & V4L2_STD_SECAM_L)
default_mpx_mode = 11;
if (default_mpx_mode != t->mpxmode) {
t->mpxmode = default_mpx_mode;
mpx_setup(t);
}
return 0;
}
static int sony_btf_mpx_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct sony_btf_mpx *t = to_state(sd);
vt->capability = V4L2_TUNER_CAP_NORM |
V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
V4L2_TUNER_CAP_LANG2;
vt->rxsubchans = V4L2_TUNER_SUB_MONO |
V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_LANG1 |
V4L2_TUNER_SUB_LANG2;
vt->audmode = t->audmode;
return 0;
}
static int sony_btf_mpx_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
{
struct sony_btf_mpx *t = to_state(sd);
if (vt->type != V4L2_TUNER_ANALOG_TV)
return -EINVAL;
if (vt->audmode != t->audmode) {
t->audmode = vt->audmode;
mpx_setup(t);
}
return 0;
}
/* --------------------------------------------------------------------------*/
static const struct v4l2_subdev_core_ops sony_btf_mpx_core_ops = {
.s_std = sony_btf_mpx_s_std,
};
static const struct v4l2_subdev_tuner_ops sony_btf_mpx_tuner_ops = {
.s_tuner = sony_btf_mpx_s_tuner,
.g_tuner = sony_btf_mpx_g_tuner,
};
static const struct v4l2_subdev_ops sony_btf_mpx_ops = {
.core = &sony_btf_mpx_core_ops,
.tuner = &sony_btf_mpx_tuner_ops,
};
/* --------------------------------------------------------------------------*/
static int sony_btf_mpx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct sony_btf_mpx *t;
struct v4l2_subdev *sd;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
return -ENODEV;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
t = kzalloc(sizeof(struct sony_btf_mpx), GFP_KERNEL);
if (t == NULL)
return -ENOMEM;
sd = &t->sd;
v4l2_i2c_subdev_init(sd, client, &sony_btf_mpx_ops);
/* Initialize sony_btf_mpx */
t->mpxmode = 0;
t->audmode = V4L2_TUNER_MODE_STEREO;
return 0;
}
static int sony_btf_mpx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_state(sd));
return 0;
}
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id sony_btf_mpx_id[] = {
{ "sony-btf-mpx", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, sony_btf_mpx_id);
static struct i2c_driver sony_btf_mpx_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "sony-btf-mpx",
},
.probe = sony_btf_mpx_probe,
.remove = sony_btf_mpx_remove,
.id_table = sony_btf_mpx_id,
};
module_i2c_driver(sony_btf_mpx_driver);
| gpl-2.0 |
zhaochengw/ef40s_jb_kernel | drivers/usb/gadget/omap_udc.c | 2349 | 82149 | /*
* omap_udc.c -- for OMAP full speed udc; most chips support OTG.
*
* Copyright (C) 2004 Texas Instruments, Inc.
* Copyright (C) 2004-2005 David Brownell
*
* OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
#undef VERBOSE
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/mach-types.h>
#include <plat/dma.h>
#include <plat/usb.h>
#include "omap_udc.h"
#undef USB_TRACE
/* bulk DMA seems to be behaving for both IN and OUT */
#define USE_DMA
/* ISO too */
#define USE_ISO
#define DRIVER_DESC "OMAP UDC driver"
#define DRIVER_VERSION "4 October 2004"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#define OMAP2_DMA_CH(ch) (((ch) - 1) << 1)
#define OMAP24XX_DMA(name, ch) (OMAP24XX_DMA_##name + OMAP2_DMA_CH(ch))
/*
* The OMAP UDC needs _very_ early endpoint setup: before enabling the
* D+ pullup to allow enumeration. That's too early for the gadget
* framework to use from usb_endpoint_enable(), which happens after
* enumeration as part of activating an interface. (But if we add an
* optional new "UDC not yet running" state to the gadget driver model,
* even just during driver binding, the endpoint autoconfig logic is the
* natural spot to manufacture new endpoints.)
*
* So instead of using endpoint enable calls to control the hardware setup,
* this driver defines a "fifo mode" parameter. It's used during driver
* initialization to choose among a set of pre-defined endpoint configs.
* See omap_udc_setup() for available modes, or to add others. That code
* lives in an init section, so use this driver as a module if you need
* to change the fifo mode after the kernel boots.
*
* Gadget drivers normally ignore endpoints they don't care about, and
* won't include them in configuration descriptors. That means only
* misbehaving hosts would even notice they exist.
*/
#ifdef USE_ISO
static unsigned fifo_mode = 3;
#else
static unsigned fifo_mode = 0;
#endif
/* "modprobe omap_udc fifo_mode=42", or else as a kernel
* boot parameter "omap_udc:fifo_mode=42"
*/
module_param (fifo_mode, uint, 0);
MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
#ifdef USE_DMA
static unsigned use_dma = 1;
/* "modprobe omap_udc use_dma=y", or else as a kernel
* boot parameter "omap_udc:use_dma=y"
*/
module_param (use_dma, bool, 0);
MODULE_PARM_DESC (use_dma, "enable/disable DMA");
#else /* !USE_DMA */
/* save a bit of code */
#define use_dma 0
#endif /* !USE_DMA */
static const char driver_name [] = "omap_udc";
static const char driver_desc [] = DRIVER_DESC;
/*-------------------------------------------------------------------------*/
/* there's a notion of "current endpoint" for modifying endpoint
* state, and PIO access to its FIFO.
*/
static void use_ep(struct omap_ep *ep, u16 select)
{
u16 num = ep->bEndpointAddress & 0x0f;
if (ep->bEndpointAddress & USB_DIR_IN)
num |= UDC_EP_DIR;
omap_writew(num | select, UDC_EP_NUM);
/* when select, MUST deselect later !! */
}
static inline void deselect_ep(void)
{
u16 w;
w = omap_readw(UDC_EP_NUM);
w &= ~UDC_EP_SEL;
omap_writew(w, UDC_EP_NUM);
/* 6 wait states before TX will happen */
}
static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
/*-------------------------------------------------------------------------*/
static int omap_ep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
struct omap_udc *udc;
unsigned long flags;
u16 maxp;
/* catch various bogus parameters */
if (!_ep || !desc || ep->desc
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->maxpacket < le16_to_cpu
(desc->wMaxPacketSize)) {
DBG("%s, bad ep or descriptor\n", __func__);
return -EINVAL;
}
maxp = le16_to_cpu (desc->wMaxPacketSize);
if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
&& maxp != ep->maxpacket)
|| le16_to_cpu(desc->wMaxPacketSize) > ep->maxpacket
|| !desc->wMaxPacketSize) {
DBG("%s, bad %s maxpacket\n", __func__, _ep->name);
return -ERANGE;
}
#ifdef USE_ISO
if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
&& desc->bInterval != 1)) {
/* hardware wants period = 1; USB allows 2^(Interval-1) */
DBG("%s, unsupported ISO period %dms\n", _ep->name,
1 << (desc->bInterval - 1));
return -EDOM;
}
#else
if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
DBG("%s, ISO nyet\n", _ep->name);
return -EDOM;
}
#endif
/* xfer types must match, except that interrupt ~= bulk */
if (ep->bmAttributes != desc->bmAttributes
&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
DBG("%s, %s type mismatch\n", __func__, _ep->name);
return -EINVAL;
}
udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
DBG("%s, bogus device state\n", __func__);
return -ESHUTDOWN;
}
spin_lock_irqsave(&udc->lock, flags);
ep->desc = desc;
ep->irqs = 0;
ep->stopped = 0;
ep->ep.maxpacket = maxp;
/* set endpoint to initial state */
ep->dma_channel = 0;
ep->has_dma = 0;
ep->lch = -1;
use_ep(ep, UDC_EP_SEL);
omap_writew(udc->clr_halt, UDC_CTRL);
ep->ackwait = 0;
deselect_ep();
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
list_add(&ep->iso, &udc->iso);
/* maybe assign a DMA channel to this endpoint */
if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
/* FIXME ISO can dma, but prefers first channel */
dma_channel_claim(ep, 0);
/* PIO OUT may RX packets */
if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
&& !ep->has_dma
&& !(ep->bEndpointAddress & USB_DIR_IN)) {
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
}
spin_unlock_irqrestore(&udc->lock, flags);
VDBG("%s enabled\n", _ep->name);
return 0;
}
static void nuke(struct omap_ep *, int status);
static int omap_ep_disable(struct usb_ep *_ep)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
unsigned long flags;
if (!_ep || !ep->desc) {
DBG("%s, %s not enabled\n", __func__,
_ep ? ep->ep.name : NULL);
return -EINVAL;
}
spin_lock_irqsave(&ep->udc->lock, flags);
ep->desc = NULL;
nuke (ep, -ESHUTDOWN);
ep->ep.maxpacket = ep->maxpacket;
ep->has_dma = 0;
omap_writew(UDC_SET_HALT, UDC_CTRL);
list_del_init(&ep->iso);
del_timer(&ep->timer);
spin_unlock_irqrestore(&ep->udc->lock, flags);
VDBG("%s disabled\n", _ep->name);
return 0;
}
/*-------------------------------------------------------------------------*/
static struct usb_request *
omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct omap_req *req;
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
return NULL;
req->req.dma = DMA_ADDR_INVALID;
INIT_LIST_HEAD(&req->queue);
return &req->req;
}
static void
omap_free_request(struct usb_ep *ep, struct usb_request *_req)
{
if (_req)
kfree(container_of(_req, struct omap_req, req));
}
/*-------------------------------------------------------------------------*/
static void
done(struct omap_ep *ep, struct omap_req *req, int status)
{
unsigned stopped = ep->stopped;
list_del_init(&req->queue);
if (req->req.status == -EINPROGRESS)
req->req.status = status;
else
status = req->req.status;
if (use_dma && ep->has_dma) {
if (req->mapped) {
dma_unmap_single(ep->udc->gadget.dev.parent,
req->req.dma, req->req.length,
(ep->bEndpointAddress & USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
req->req.dma = DMA_ADDR_INVALID;
req->mapped = 0;
} else
dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
req->req.dma, req->req.length,
(ep->bEndpointAddress & USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
}
#ifndef USB_TRACE
if (status && status != -ESHUTDOWN)
#endif
VDBG("complete %s req %p stat %d len %u/%u\n",
ep->ep.name, &req->req, status,
req->req.actual, req->req.length);
/* don't modify queue heads during completion callback */
ep->stopped = 1;
spin_unlock(&ep->udc->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&ep->udc->lock);
ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
#define UDC_FIFO_FULL (UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
#define UDC_FIFO_UNWRITABLE (UDC_EP_HALTED | UDC_FIFO_FULL)
#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
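/* PIO copies use 16-bit FIFO accesses while the buffer is
* even-aligned; unaligned buffers and any odd trailing byte fall
* back to byte-wide accesses (see write_packet/read_packet below)
*/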
static inline int
write_packet(u8 *buf, struct omap_req *req, unsigned max)
{
unsigned len;
u16 *wp;
len = min(req->req.length - req->req.actual, max);
req->req.actual += len;
max = len;
if (likely((((int)buf) & 1) == 0)) {
wp = (u16 *)buf;
while (max >= 2) {
omap_writew(*wp++, UDC_DATA);
max -= 2;
}
buf = (u8 *)wp;
}
while (max--)
omap_writeb(*buf++, UDC_DATA);
return len;
}
// FIXME change r/w fifo calling convention
// return: 0 = still running, 1 = completed, negative = errno
static int write_fifo(struct omap_ep *ep, struct omap_req *req)
{
u8 *buf;
unsigned count;
int is_last;
u16 ep_stat;
buf = req->req.buf + req->req.actual;
prefetch(buf);
/* PIO-IN isn't double buffered except for iso */
ep_stat = omap_readw(UDC_STAT_FLG);
if (ep_stat & UDC_FIFO_UNWRITABLE)
return 0;
count = ep->ep.maxpacket;
count = write_packet(buf, req, count);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1;
/* last packet is often short (sometimes a zlp) */
if (count != ep->ep.maxpacket)
is_last = 1;
else if (req->req.length == req->req.actual
&& !req->req.zero)
is_last = 1;
else
is_last = 0;
/* NOTE: requests complete when all IN data is in a
* FIFO (or sometimes later, if a zlp was needed).
* Use usb_ep_fifo_status() where needed.
*/
if (is_last)
done(ep, req, 0);
return is_last;
}
static inline int
read_packet(u8 *buf, struct omap_req *req, unsigned avail)
{
unsigned len;
u16 *wp;
len = min(req->req.length - req->req.actual, avail);
req->req.actual += len;
avail = len;
if (likely((((int)buf) & 1) == 0)) {
wp = (u16 *)buf;
while (avail >= 2) {
*wp++ = omap_readw(UDC_DATA);
avail -= 2;
}
buf = (u8 *)wp;
}
while (avail--)
*buf++ = omap_readb(UDC_DATA);
return len;
}
// return: 0 = still running, 1 = queue empty, negative = errno
static int read_fifo(struct omap_ep *ep, struct omap_req *req)
{
u8 *buf;
unsigned count, avail;
int is_last;
buf = req->req.buf + req->req.actual;
prefetchw(buf);
for (;;) {
u16 ep_stat = omap_readw(UDC_STAT_FLG);
is_last = 0;
if (ep_stat & FIFO_EMPTY) {
if (!ep->double_buf)
break;
ep->fnf = 1;
}
if (ep_stat & UDC_EP_HALTED)
break;
if (ep_stat & UDC_FIFO_FULL)
avail = ep->ep.maxpacket;
else {
avail = omap_readw(UDC_RXFSTAT);
ep->fnf = ep->double_buf;
}
count = read_packet(buf, req, avail);
/* partial packet reads may not be errors */
if (count < ep->ep.maxpacket) {
is_last = 1;
/* overflowed this request? flush extra data */
if (count != avail) {
req->req.status = -EOVERFLOW;
avail -= count;
while (avail--)
omap_readw(UDC_DATA);
}
} else if (req->req.length == req->req.actual)
is_last = 1;
else
is_last = 0;
if (!ep->bEndpointAddress)
break;
if (is_last)
done(ep, req, 0);
break;
}
return is_last;
}
/*-------------------------------------------------------------------------*/
static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
{
dma_addr_t end;
/* IN-DMA needs this on fault/cancel paths, but 15xx misreports
* the last transfer's bytecount by more than a FIFO's worth.
*/
if (cpu_is_omap15xx())
return 0;
end = omap_get_dma_src_pos(ep->lch);
if (end == ep->dma_counter)
return 0;
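/* the DMA position is only meaningful modulo 64K here: rebuild
* the high half from the start address and allow for one wrap
*/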
end |= start & (0xffff << 16);
if (end < start)
end += 0x10000;
return end - start;
}
static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
{
dma_addr_t end;
end = omap_get_dma_dst_pos(ep->lch);
if (end == ep->dma_counter)
return 0;
end |= start & (0xffff << 16);
if (cpu_is_omap15xx())
end++;
if (end < start)
end += 0x10000;
return end - start;
}
/* Each USB transfer request using DMA maps to one or more DMA transfers.
* When DMA completion isn't request completion, the UDC continues with
* the next DMA transfer for that USB transfer.
*/
static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
{
u16 txdma_ctrl, w;
unsigned length = req->req.length - req->req.actual;
const int sync_mode = cpu_is_omap15xx()
? OMAP_DMA_SYNC_FRAME
: OMAP_DMA_SYNC_ELEMENT;
int dma_trigger = 0;
if (cpu_is_omap24xx())
dma_trigger = OMAP24XX_DMA(USB_W2FC_TX0, ep->dma_channel);
/* measure length in either bytes or packets */
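/* the byte-counted path uses 8-bit DMA and sets UDC_TXN_EOT so a
* short final packet terminates the transfer; the packet-counted
* path uses 16-bit DMA, one frame per packet, moving at most
* UDC_TXN_TSC + 1 packets per DMA transfer
*/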
if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC)
|| (cpu_is_omap24xx() && length < ep->maxpacket)
|| (cpu_is_omap15xx() && length < ep->maxpacket)) {
txdma_ctrl = UDC_TXN_EOT | length;
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
length, 1, sync_mode, dma_trigger, 0);
} else {
length = min(length / ep->maxpacket,
(unsigned) UDC_TXN_TSC + 1);
txdma_ctrl = length;
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
ep->ep.maxpacket >> 1, length, sync_mode,
dma_trigger, 0);
length *= ep->maxpacket;
}
omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
0, 0);
omap_start_dma(ep->lch);
ep->dma_counter = omap_get_dma_src_pos(ep->lch);
w = omap_readw(UDC_DMA_IRQ_EN);
w |= UDC_TX_DONE_IE(ep->dma_channel);
omap_writew(w, UDC_DMA_IRQ_EN);
omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel));
req->dma_bytes = length;
}
static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
{
u16 w;
if (status == 0) {
req->req.actual += req->dma_bytes;
/* return if this request needs to send data or zlp */
if (req->req.actual < req->req.length)
return;
if (req->req.zero
&& req->dma_bytes != 0
&& (req->req.actual % ep->maxpacket) == 0)
return;
} else
req->req.actual += dma_src_len(ep, req->req.dma
+ req->req.actual);
/* tx completion */
omap_stop_dma(ep->lch);
w = omap_readw(UDC_DMA_IRQ_EN);
w &= ~UDC_TX_DONE_IE(ep->dma_channel);
omap_writew(w, UDC_DMA_IRQ_EN);
done(ep, req, status);
}
static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
{
unsigned packets = req->req.length - req->req.actual;
int dma_trigger = 0;
u16 w;
if (cpu_is_omap24xx())
dma_trigger = OMAP24XX_DMA(USB_W2FC_RX0, ep->dma_channel);
/* NOTE: we filtered out "short reads" before, so we know
* the buffer has only whole numbers of packets.
* The exception is MODE SELECT(6), which sends 24 bytes of data
* in OMAP24XX DMA mode.
*/
if (cpu_is_omap24xx() && packets < ep->maxpacket) {
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
packets, 1, OMAP_DMA_SYNC_ELEMENT,
dma_trigger, 0);
req->dma_bytes = packets;
} else {
/* set up this DMA transfer, enable the fifo, start */
packets /= ep->ep.maxpacket;
packets = min(packets, (unsigned)UDC_RXN_TC + 1);
req->dma_bytes = packets * ep->ep.maxpacket;
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
ep->ep.maxpacket >> 1, packets,
OMAP_DMA_SYNC_ELEMENT,
dma_trigger, 0);
}
omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
0, 0);
ep->dma_counter = omap_get_dma_dst_pos(ep->lch);
omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel));
w = omap_readw(UDC_DMA_IRQ_EN);
w |= UDC_RX_EOT_IE(ep->dma_channel);
omap_writew(w, UDC_DMA_IRQ_EN);
omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_start_dma(ep->lch);
}
static void
finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
{
u16 count, w;
if (status == 0)
ep->dma_counter = (u16) (req->req.dma + req->req.actual);
count = dma_dest_len(ep, req->req.dma + req->req.actual);
count += req->req.actual;
if (one)
count--;
if (count <= req->req.length)
req->req.actual = count;
if (count != req->dma_bytes || status)
omap_stop_dma(ep->lch);
/* if this wasn't short, request may need another transfer */
else if (req->req.actual < req->req.length)
return;
/* rx completion */
w = omap_readw(UDC_DMA_IRQ_EN);
w &= ~UDC_RX_EOT_IE(ep->dma_channel);
omap_writew(w, UDC_DMA_IRQ_EN);
done(ep, req, status);
}
static void dma_irq(struct omap_udc *udc, u16 irq_src)
{
u16 dman_stat = omap_readw(UDC_DMAN_STAT);
struct omap_ep *ep;
struct omap_req *req;
/* IN dma: tx to host */
if (irq_src & UDC_TXN_DONE) {
ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
ep->irqs++;
/* can see TXN_DONE after dma abort */
if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
finish_in_dma(ep, req, 0);
}
omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);
if (!list_empty (&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
next_in_dma(ep, req);
}
}
/* OUT dma: rx from host */
if (irq_src & UDC_RXN_EOT) {
ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
ep->irqs++;
/* can see RXN_EOT after dma abort */
if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
}
omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);
if (!list_empty (&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
next_out_dma(ep, req);
}
}
if (irq_src & UDC_RXN_CNT) {
ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
ep->irqs++;
/* omap15xx does this unasked... */
VDBG("%s, RX_CNT irq?\n", ep->ep.name);
omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
}
}
static void dma_error(int lch, u16 ch_status, void *data)
{
struct omap_ep *ep = data;
/* if ch_status & OMAP_DMA_DROP_IRQ ... */
/* if ch_status & OMAP1_DMA_TOUT_IRQ ... */
ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);
/* complete current transfer ... */
}
static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
{
u16 reg;
int status, restart, is_in;
int dma_channel;
is_in = ep->bEndpointAddress & USB_DIR_IN;
if (is_in)
reg = omap_readw(UDC_TXDMA_CFG);
else
reg = omap_readw(UDC_RXDMA_CFG);
reg |= UDC_DMA_REQ; /* "pulse" activated */
ep->dma_channel = 0;
ep->lch = -1;
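/* TX/RXDMA_CFG holds one endpoint-number nibble per DMA channel;
* scan for a free nibble, trying channel 3 first so that channel 1
* stays free for ISO use
*/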
if (channel == 0 || channel > 3) {
if ((reg & 0x0f00) == 0)
channel = 3;
else if ((reg & 0x00f0) == 0)
channel = 2;
else if ((reg & 0x000f) == 0) /* preferred for ISO */
channel = 1;
else {
status = -EMLINK;
goto just_restart;
}
}
reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
ep->dma_channel = channel;
if (is_in) {
if (cpu_is_omap24xx())
dma_channel = OMAP24XX_DMA(USB_W2FC_TX0, channel);
else
dma_channel = OMAP_DMA_USB_W2FC_TX0 - 1 + channel;
status = omap_request_dma(dma_channel,
ep->ep.name, dma_error, ep, &ep->lch);
if (status == 0) {
omap_writew(reg, UDC_TXDMA_CFG);
/* EMIFF or SDRC */
omap_set_dma_src_burst_mode(ep->lch,
OMAP_DMA_DATA_BURST_4);
omap_set_dma_src_data_pack(ep->lch, 1);
/* TIPB */
omap_set_dma_dest_params(ep->lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
UDC_DATA_DMA,
0, 0);
}
} else {
if (cpu_is_omap24xx())
dma_channel = OMAP24XX_DMA(USB_W2FC_RX0, channel);
else
dma_channel = OMAP_DMA_USB_W2FC_RX0 - 1 + channel;
status = omap_request_dma(dma_channel,
ep->ep.name, dma_error, ep, &ep->lch);
if (status == 0) {
omap_writew(reg, UDC_RXDMA_CFG);
/* TIPB */
omap_set_dma_src_params(ep->lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
UDC_DATA_DMA,
0, 0);
/* EMIFF or SDRC */
omap_set_dma_dest_burst_mode(ep->lch,
OMAP_DMA_DATA_BURST_4);
omap_set_dma_dest_data_pack(ep->lch, 1);
}
}
if (status)
ep->dma_channel = 0;
else {
ep->has_dma = 1;
omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
/* channel type P: hw synch (fifo) */
if (cpu_class_is_omap1() && !cpu_is_omap15xx())
omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
}
just_restart:
/* restart any queue, even if the claim failed */
restart = !ep->stopped && !list_empty(&ep->queue);
if (status)
DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
restart ? " (restart)" : "");
else
DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
is_in ? 't' : 'r',
ep->dma_channel - 1, ep->lch,
restart ? " (restart)" : "");
if (restart) {
struct omap_req *req;
req = container_of(ep->queue.next, struct omap_req, queue);
if (ep->has_dma)
(is_in ? next_in_dma : next_out_dma)(ep, req);
else {
use_ep(ep, UDC_EP_SEL);
(is_in ? write_fifo : read_fifo)(ep, req);
deselect_ep();
if (!is_in) {
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
}
/* IN: 6 wait states before it'll tx */
}
}
}
static void dma_channel_release(struct omap_ep *ep)
{
int shift = 4 * (ep->dma_channel - 1);
u16 mask = 0x0f << shift;
struct omap_req *req;
int active;
/* abort any active usb transfer request */
if (!list_empty(&ep->queue))
req = container_of(ep->queue.next, struct omap_req, queue);
else
req = NULL;
active = omap_get_dma_active_status(ep->lch);
DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
active ? "active" : "idle",
(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
ep->dma_channel - 1, req);
/* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
* OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
*/
/* wait till current packet DMA finishes, and fifo empties */
if (ep->bEndpointAddress & USB_DIR_IN) {
omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
UDC_TXDMA_CFG);
if (req) {
finish_in_dma(ep, req, -ECONNRESET);
/* clear FIFO; hosts probably won't empty it */
use_ep(ep, UDC_EP_SEL);
omap_writew(UDC_CLR_EP, UDC_CTRL);
deselect_ep();
}
while (omap_readw(UDC_TXDMA_CFG) & mask)
udelay(10);
} else {
omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
UDC_RXDMA_CFG);
/* dma empties the fifo */
while (omap_readw(UDC_RXDMA_CFG) & mask)
udelay(10);
if (req)
finish_out_dma(ep, req, -ECONNRESET, 0);
}
omap_free_dma(ep->lch);
ep->dma_channel = 0;
ep->lch = -1;
/* has_dma still set, till endpoint is fully quiesced */
}
/*-------------------------------------------------------------------------*/
static int
omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
struct omap_req *req = container_of(_req, struct omap_req, req);
struct omap_udc *udc;
unsigned long flags;
int is_iso = 0;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
|| !list_empty(&req->queue)) {
DBG("%s, bad params\n", __func__);
return -EINVAL;
}
if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
DBG("%s, bad ep\n", __func__);
return -EINVAL;
}
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
is_iso = 1;
}
/* this isn't bogus, but OMAP DMA isn't the only hardware to
* have a hard time with partial packet reads... reject it.
* OMAP2, however, can handle such small packets.
*/
if (use_dma
&& ep->has_dma
&& ep->bEndpointAddress != 0
&& (ep->bEndpointAddress & USB_DIR_IN) == 0
&& !cpu_class_is_omap2()
&& (req->req.length % ep->ep.maxpacket) != 0) {
DBG("%s, no partial packet OUT reads\n", __func__);
return -EMSGSIZE;
}
udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
if (use_dma && ep->has_dma) {
if (req->req.dma == DMA_ADDR_INVALID) {
req->req.dma = dma_map_single(
ep->udc->gadget.dev.parent,
req->req.buf,
req->req.length,
(ep->bEndpointAddress & USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
req->mapped = 1;
} else {
dma_sync_single_for_device(
ep->udc->gadget.dev.parent,
req->req.dma, req->req.length,
(ep->bEndpointAddress & USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
req->mapped = 0;
}
}
VDBG("%s queue req %p, len %d buf %p\n",
ep->ep.name, _req, _req->length, _req->buf);
spin_lock_irqsave(&udc->lock, flags);
req->req.status = -EINPROGRESS;
req->req.actual = 0;
/* maybe kickstart non-iso i/o queues */
if (is_iso) {
u16 w;
w = omap_readw(UDC_IRQ_EN);
w |= UDC_SOF_IE;
omap_writew(w, UDC_IRQ_EN);
} else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
int is_in;
if (ep->bEndpointAddress == 0) {
if (!udc->ep0_pending || !list_empty (&ep->queue)) {
spin_unlock_irqrestore(&udc->lock, flags);
return -EL2HLT;
}
/* empty DATA stage? */
is_in = udc->ep0_in;
if (!req->req.length) {
/* chip became CONFIGURED or ADDRESSED
* earlier; drivers may already have queued
* requests to non-control endpoints
*/
if (udc->ep0_set_config) {
u16 irq_en = omap_readw(UDC_IRQ_EN);
irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
if (!udc->ep0_reset_config)
irq_en |= UDC_EPN_RX_IE
| UDC_EPN_TX_IE;
omap_writew(irq_en, UDC_IRQ_EN);
}
/* STATUS for zero length DATA stages is
* always an IN ... even for IN transfers,
* a weird case which seems to stall OMAP.
*/
omap_writew(UDC_EP_SEL | UDC_EP_DIR, UDC_EP_NUM);
omap_writew(UDC_CLR_EP, UDC_CTRL);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
/* cleanup */
udc->ep0_pending = 0;
done(ep, req, 0);
req = NULL;
/* non-empty DATA stage */
} else if (is_in) {
omap_writew(UDC_EP_SEL | UDC_EP_DIR, UDC_EP_NUM);
} else {
if (udc->ep0_setup)
goto irq_wait;
omap_writew(UDC_EP_SEL, UDC_EP_NUM);
}
} else {
is_in = ep->bEndpointAddress & USB_DIR_IN;
if (!ep->has_dma)
use_ep(ep, UDC_EP_SEL);
/* if ISO: SOF IRQs must be enabled/disabled! */
}
if (ep->has_dma)
(is_in ? next_in_dma : next_out_dma)(ep, req);
else if (req) {
if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
req = NULL;
deselect_ep();
if (!is_in) {
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
}
/* IN: 6 wait states before it'll tx */
}
}
irq_wait:
/* irq handler advances the queue */
if (req != NULL)
list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
struct omap_req *req;
unsigned long flags;
if (!_ep || !_req)
return -EINVAL;
spin_lock_irqsave(&ep->udc->lock, flags);
/* make sure it's actually queued on this endpoint */
list_for_each_entry (req, &ep->queue, queue) {
if (&req->req == _req)
break;
}
if (&req->req != _req) {
spin_unlock_irqrestore(&ep->udc->lock, flags);
return -EINVAL;
}
if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
int channel = ep->dma_channel;
/* releasing the channel cancels the request,
* reclaiming the channel restarts the queue
*/
dma_channel_release(ep);
dma_channel_claim(ep, channel);
} else
done(ep, req, -ECONNRESET);
spin_unlock_irqrestore(&ep->udc->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
static int omap_ep_set_halt(struct usb_ep *_ep, int value)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
unsigned long flags;
int status = -EOPNOTSUPP;
spin_lock_irqsave(&ep->udc->lock, flags);
/* just use protocol stalls for ep0; real halts are annoying */
if (ep->bEndpointAddress == 0) {
if (!ep->udc->ep0_pending)
status = -EINVAL;
else if (value) {
if (ep->udc->ep0_set_config) {
WARNING("error changing config?\n");
omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
}
omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
ep->udc->ep0_pending = 0;
status = 0;
} else /* NOP */
status = 0;
/* otherwise, all active non-ISO endpoints can halt */
} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {
/* IN endpoints must already be idle */
if ((ep->bEndpointAddress & USB_DIR_IN)
&& !list_empty(&ep->queue)) {
status = -EAGAIN;
goto done;
}
if (value) {
int channel;
if (use_dma && ep->dma_channel
&& !list_empty(&ep->queue)) {
channel = ep->dma_channel;
dma_channel_release(ep);
} else
channel = 0;
use_ep(ep, UDC_EP_SEL);
if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
omap_writew(UDC_SET_HALT, UDC_CTRL);
status = 0;
} else
status = -EAGAIN;
deselect_ep();
if (channel)
dma_channel_claim(ep, channel);
} else {
use_ep(ep, 0);
omap_writew(ep->udc->clr_halt, UDC_CTRL);
ep->ackwait = 0;
if (!(ep->bEndpointAddress & USB_DIR_IN)) {
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
}
}
}
done:
VDBG("%s %s halt stat %d\n", ep->ep.name,
value ? "set" : "clear", status);
spin_unlock_irqrestore(&ep->udc->lock, flags);
return status;
}
static struct usb_ep_ops omap_ep_ops = {
.enable = omap_ep_enable,
.disable = omap_ep_disable,
.alloc_request = omap_alloc_request,
.free_request = omap_free_request,
.queue = omap_ep_queue,
.dequeue = omap_ep_dequeue,
.set_halt = omap_ep_set_halt,
// fifo_status ... report bytes in fifo
// fifo_flush ... flush fifo
};
/*-------------------------------------------------------------------------*/
static int omap_get_frame(struct usb_gadget *gadget)
{
u16 sof = omap_readw(UDC_SOF);
return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
}
static int omap_wakeup(struct usb_gadget *gadget)
{
struct omap_udc *udc;
unsigned long flags;
int retval = -EHOSTUNREACH;
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
if (udc->devstat & UDC_SUS) {
/* NOTE: OTG spec erratum says that OTG devices may
* issue wakeups without host enable.
*/
if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
DBG("remote wakeup...\n");
omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
retval = 0;
}
/* NOTE: non-OTG systems may use SRP TOO... */
} else if (!(udc->devstat & UDC_ATT)) {
if (udc->transceiver)
retval = otg_start_srp(udc->transceiver);
}
spin_unlock_irqrestore(&udc->lock, flags);
return retval;
}
static int
omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
struct omap_udc *udc;
unsigned long flags;
u16 syscon1;
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
syscon1 = omap_readw(UDC_SYSCON1);
if (is_selfpowered)
syscon1 |= UDC_SELF_PWR;
else
syscon1 &= ~UDC_SELF_PWR;
omap_writew(syscon1, UDC_SYSCON1);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int can_pullup(struct omap_udc *udc)
{
return udc->driver && udc->softconnect && udc->vbus_active;
}
static void pullup_enable(struct omap_udc *udc)
{
u16 w;
w = omap_readw(UDC_SYSCON1);
w |= UDC_PULLUP_EN;
omap_writew(w, UDC_SYSCON1);
if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
u32 l;
l = omap_readl(OTG_CTRL);
l |= OTG_BSESSVLD;
omap_writel(l, OTG_CTRL);
}
omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
}
static void pullup_disable(struct omap_udc *udc)
{
u16 w;
if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
u32 l;
l = omap_readl(OTG_CTRL);
l &= ~OTG_BSESSVLD;
omap_writel(l, OTG_CTRL);
}
omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
w = omap_readw(UDC_SYSCON1);
w &= ~UDC_PULLUP_EN;
omap_writew(w, UDC_SYSCON1);
}
static struct omap_udc *udc;
static void omap_udc_enable_clock(int enable)
{
if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL)
return;
if (enable) {
clk_enable(udc->dc_clk);
clk_enable(udc->hhc_clk);
udelay(100);
} else {
clk_disable(udc->hhc_clk);
clk_disable(udc->dc_clk);
}
}
/*
* Called by whatever detects VBUS sessions: external transceiver
* driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
*/
static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
{
struct omap_udc *udc;
unsigned long flags;
u32 l;
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
VDBG("VBUS %s\n", is_active ? "on" : "off");
udc->vbus_active = (is_active != 0);
if (cpu_is_omap15xx()) {
/* "software" detect, ignored if !VBUS_MODE_1510 */
l = omap_readl(FUNC_MUX_CTRL_0);
if (is_active)
l |= VBUS_CTRL_1510;
else
l &= ~VBUS_CTRL_1510;
omap_writel(l, FUNC_MUX_CTRL_0);
}
if (udc->dc_clk != NULL && is_active) {
if (!udc->clk_requested) {
omap_udc_enable_clock(1);
udc->clk_requested = 1;
}
}
if (can_pullup(udc))
pullup_enable(udc);
else
pullup_disable(udc);
if (udc->dc_clk != NULL && !is_active) {
if (udc->clk_requested) {
omap_udc_enable_clock(0);
udc->clk_requested = 0;
}
}
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
struct omap_udc *udc;
udc = container_of(gadget, struct omap_udc, gadget);
if (udc->transceiver)
return otg_set_power(udc->transceiver, mA);
return -EOPNOTSUPP;
}
static int omap_pullup(struct usb_gadget *gadget, int is_on)
{
struct omap_udc *udc;
unsigned long flags;
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
udc->softconnect = (is_on != 0);
if (can_pullup(udc))
pullup_enable(udc);
else
pullup_disable(udc);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static struct usb_gadget_ops omap_gadget_ops = {
.get_frame = omap_get_frame,
.wakeup = omap_wakeup,
.set_selfpowered = omap_set_selfpowered,
.vbus_session = omap_vbus_session,
.vbus_draw = omap_vbus_draw,
.pullup = omap_pullup,
};
/*-------------------------------------------------------------------------*/
/* dequeue ALL requests; caller holds udc->lock */
static void nuke(struct omap_ep *ep, int status)
{
struct omap_req *req;
ep->stopped = 1;
if (use_dma && ep->dma_channel)
dma_channel_release(ep);
use_ep(ep, 0);
omap_writew(UDC_CLR_EP, UDC_CTRL);
if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
omap_writew(UDC_SET_HALT, UDC_CTRL);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct omap_req, queue);
done(ep, req, status);
}
}
/* caller holds udc->lock */
static void udc_quiesce(struct omap_udc *udc)
{
struct omap_ep *ep;
udc->gadget.speed = USB_SPEED_UNKNOWN;
nuke(&udc->ep[0], -ESHUTDOWN);
list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
nuke(ep, -ESHUTDOWN);
}
/*-------------------------------------------------------------------------*/
static void update_otg(struct omap_udc *udc)
{
u16 devstat;
if (!gadget_is_otg(&udc->gadget))
return;
if (omap_readl(OTG_CTRL) & OTG_ID)
devstat = omap_readw(UDC_DEVSTAT);
else
devstat = 0;
udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);
/* Enable HNP early, avoiding races on suspend irq path.
* ASSUMES OTG state machine B_BUS_REQ input is true.
*/
if (udc->gadget.b_hnp_enable) {
u32 l;
l = omap_readl(OTG_CTRL);
l |= OTG_B_HNPEN | OTG_B_BUSREQ;
l &= ~OTG_PULLUP;
omap_writel(l, OTG_CTRL);
}
}
static void ep0_irq(struct omap_udc *udc, u16 irq_src)
{
struct omap_ep *ep0 = &udc->ep[0];
struct omap_req *req = NULL;
ep0->irqs++;
/* Clear any pending requests and then scrub any rx/tx state
* before starting to handle the SETUP request.
*/
if (irq_src & UDC_SETUP) {
u16 ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);
nuke(ep0, 0);
if (ack) {
omap_writew(ack, UDC_IRQ_SRC);
irq_src = UDC_SETUP;
}
}
/* IN/OUT packets mean we're in the DATA or STATUS stage.
* This driver only uses protocol stalls (ep0 never halts),
* and if we got this far the gadget driver already had a
* chance to stall. We try to be forgiving of host oddities.
*
* NOTE: the last chance gadget drivers have to stall control
* requests is during their request completion callback.
*/
if (!list_empty(&ep0->queue))
req = container_of(ep0->queue.next, struct omap_req, queue);
/* IN == TX to host */
if (irq_src & UDC_EP0_TX) {
int stat;
omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
stat = omap_readw(UDC_STAT_FLG);
if (stat & UDC_ACK) {
if (udc->ep0_in) {
/* write next IN packet from response,
* or set up the status stage.
*/
if (req)
stat = write_fifo(ep0, req);
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
if (!req && udc->ep0_pending) {
omap_writew(UDC_EP_SEL, UDC_EP_NUM);
omap_writew(UDC_CLR_EP, UDC_CTRL);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_writew(0, UDC_EP_NUM);
udc->ep0_pending = 0;
} /* else: 6 wait states before it'll tx */
} else {
/* ack status stage of OUT transfer */
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
if (req)
done(ep0, req, 0);
}
req = NULL;
} else if (stat & UDC_STALL) {
omap_writew(UDC_CLR_HALT, UDC_CTRL);
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
} else {
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
}
}
/* OUT == RX from host */
if (irq_src & UDC_EP0_RX) {
int stat;
omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
omap_writew(UDC_EP_SEL, UDC_EP_NUM);
stat = omap_readw(UDC_STAT_FLG);
if (stat & UDC_ACK) {
if (!udc->ep0_in) {
stat = 0;
/* read next OUT packet of request, maybe
* reactivating the fifo; stall on errors.
*/
if (!req || (stat = read_fifo(ep0, req)) < 0) {
omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
udc->ep0_pending = 0;
stat = 0;
} else if (stat == 0)
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_writew(0, UDC_EP_NUM);
/* activate status stage */
if (stat == 1) {
done(ep0, req, 0);
/* that may have STALLed ep0... */
omap_writew(UDC_EP_SEL | UDC_EP_DIR,
UDC_EP_NUM);
omap_writew(UDC_CLR_EP, UDC_CTRL);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
udc->ep0_pending = 0;
}
} else {
/* ack status stage of IN transfer */
omap_writew(0, UDC_EP_NUM);
if (req)
done(ep0, req, 0);
}
} else if (stat & UDC_STALL) {
omap_writew(UDC_CLR_HALT, UDC_CTRL);
omap_writew(0, UDC_EP_NUM);
} else {
omap_writew(0, UDC_EP_NUM);
}
}
/* SETUP starts all control transfers */
if (irq_src & UDC_SETUP) {
union u {
u16 word[4];
struct usb_ctrlrequest r;
} u;
int status = -EINVAL;
struct omap_ep *ep;
/* read the (latest) SETUP message */
do {
omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
/* two bytes at a time */
u.word[0] = omap_readw(UDC_DATA);
u.word[1] = omap_readw(UDC_DATA);
u.word[2] = omap_readw(UDC_DATA);
u.word[3] = omap_readw(UDC_DATA);
omap_writew(0, UDC_EP_NUM);
} while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);
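/* the loop above re-reads whenever another SETUP arrived while we
* were reading, so only the latest request is acted upon
*/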
#define w_value le16_to_cpu(u.r.wValue)
#define w_index le16_to_cpu(u.r.wIndex)
#define w_length le16_to_cpu(u.r.wLength)
/* Delegate almost all control requests to the gadget driver,
* except for a handful of ch9 status/feature requests that
* hardware doesn't autodecode _and_ the gadget API hides.
*/
udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
udc->ep0_set_config = 0;
udc->ep0_pending = 1;
ep0->stopped = 0;
ep0->ackwait = 0;
switch (u.r.bRequest) {
case USB_REQ_SET_CONFIGURATION:
/* udc needs to know when ep != 0 is valid */
if (u.r.bRequestType != USB_RECIP_DEVICE)
goto delegate;
if (w_length != 0)
goto do_stall;
udc->ep0_set_config = 1;
udc->ep0_reset_config = (w_value == 0);
VDBG("set config %d\n", w_value);
/* update udc NOW since gadget driver may start
* queueing requests immediately; clear config
* later if it fails the request.
*/
if (udc->ep0_reset_config)
omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
else
omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
update_otg(udc);
goto delegate;
case USB_REQ_CLEAR_FEATURE:
/* clear endpoint halt */
if (u.r.bRequestType != USB_RECIP_ENDPOINT)
goto delegate;
if (w_value != USB_ENDPOINT_HALT
|| w_length != 0)
goto do_stall;
ep = &udc->ep[w_index & 0xf];
if (ep != ep0) {
if (w_index & USB_DIR_IN)
ep += 16;
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
|| !ep->desc)
goto do_stall;
use_ep(ep, 0);
omap_writew(udc->clr_halt, UDC_CTRL);
ep->ackwait = 0;
if (!(ep->bEndpointAddress & USB_DIR_IN)) {
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
}
/* NOTE: assumes the host behaves sanely,
* only clearing real halts. Else we may
* need to kill pending transfers and then
* restart the queue... very messy for DMA!
*/
}
VDBG("%s halt cleared by host\n", ep->name);
goto ep0out_status_stage;
case USB_REQ_SET_FEATURE:
/* set endpoint halt */
if (u.r.bRequestType != USB_RECIP_ENDPOINT)
goto delegate;
if (w_value != USB_ENDPOINT_HALT
|| w_length != 0)
goto do_stall;
ep = &udc->ep[w_index & 0xf];
if (w_index & USB_DIR_IN)
ep += 16;
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
|| ep == ep0 || !ep->desc)
goto do_stall;
if (use_dma && ep->has_dma) {
/* this has rude side-effects (aborts) and
* can't really work if DMA-IN is active
*/
DBG("%s host set_halt, NYET \n", ep->name);
goto do_stall;
}
use_ep(ep, 0);
/* can't halt if fifo isn't empty... */
omap_writew(UDC_CLR_EP, UDC_CTRL);
omap_writew(UDC_SET_HALT, UDC_CTRL);
VDBG("%s halted by host\n", ep->name);
ep0out_status_stage:
status = 0;
omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
omap_writew(UDC_CLR_EP, UDC_CTRL);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
udc->ep0_pending = 0;
break;
case USB_REQ_GET_STATUS:
/* USB_ENDPOINT_HALT status? */
if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
goto intf_status;
/* ep0 never stalls */
if (!(w_index & 0xf))
goto zero_status;
/* only active endpoints count */
ep = &udc->ep[w_index & 0xf];
if (w_index & USB_DIR_IN)
ep += 16;
if (!ep->desc)
goto do_stall;
/* iso never stalls */
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
goto zero_status;
/* FIXME don't assume non-halted endpoints!! */
ERR("%s status, can't report\n", ep->ep.name);
goto do_stall;
intf_status:
/* return interface status. if we were pedantic,
* we'd detect non-existent interfaces, and stall.
*/
if (u.r.bRequestType
!= (USB_DIR_IN|USB_RECIP_INTERFACE))
goto delegate;
zero_status:
/* return two zero bytes */
omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
omap_writew(0, UDC_DATA);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
omap_writew(UDC_EP_DIR, UDC_EP_NUM);
status = 0;
VDBG("GET_STATUS, interface %d\n", w_index);
/* next, status stage */
break;
default:
delegate:
/* activate the ep0out fifo right away */
if (!udc->ep0_in && w_length) {
omap_writew(0, UDC_EP_NUM);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
}
/* gadget drivers see class/vendor specific requests,
* {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
* and more
*/
VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
u.r.bRequestType, u.r.bRequest,
w_value, w_index, w_length);
#undef w_value
#undef w_index
#undef w_length
/* The gadget driver may return an error here,
* causing an immediate protocol stall.
*
* Else it must issue a response, either queueing a
* response buffer for the DATA stage, or halting ep0
* (causing a protocol stall, not a real halt). A
* zero length buffer means no DATA stage.
*
* It's fine to issue that response after the setup()
* call returns, and this IRQ was handled.
*/
udc->ep0_setup = 1;
spin_unlock(&udc->lock);
status = udc->driver->setup(&udc->gadget, &u.r);
spin_lock(&udc->lock);
udc->ep0_setup = 0;
}
if (status < 0) {
do_stall:
VDBG("req %02x.%02x protocol STALL; stat %d\n",
u.r.bRequestType, u.r.bRequest, status);
if (udc->ep0_set_config) {
if (udc->ep0_reset_config)
WARNING("error resetting config?\n");
else
omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
}
omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
udc->ep0_pending = 0;
}
}
}
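#if 0
/* Illustrative sketch only, compiled out: the rough shape of the
 * gadget driver's setup() callback invoked above.  Every name here
 * is hypothetical, not part of this driver.
 */
static struct usb_request *example_req;	/* preallocated on ep0 */
static int example_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* a negative return makes the UDC protocol-stall ep0 */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR)
		return -EOPNOTSUPP;
	/* otherwise queue a response; length 0 means "no DATA
	 * stage", which moves ep0 straight to the status stage
	 */
	example_req->length = 0;
	example_req->zero = 0;
	return usb_ep_queue(gadget->ep0, example_req, GFP_ATOMIC);
}
#endif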
/*-------------------------------------------------------------------------*/
#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)
static void devstate_irq(struct omap_udc *udc, u16 irq_src)
{
u16 devstat, change;
devstat = omap_readw(UDC_DEVSTAT);
change = devstat ^ udc->devstat;
udc->devstat = devstat;
if (change & (UDC_USB_RESET|UDC_ATT)) {
udc_quiesce(udc);
if (change & UDC_ATT) {
/* driver for any external transceiver will
* have called omap_vbus_session() already
*/
if (devstat & UDC_ATT) {
udc->gadget.speed = USB_SPEED_FULL;
VDBG("connect\n");
if (!udc->transceiver)
pullup_enable(udc);
// if (driver->connect) call it
} else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
udc->gadget.speed = USB_SPEED_UNKNOWN;
if (!udc->transceiver)
pullup_disable(udc);
DBG("disconnect, gadget %s\n",
udc->driver->driver.name);
if (udc->driver->disconnect) {
spin_unlock(&udc->lock);
udc->driver->disconnect(&udc->gadget);
spin_lock(&udc->lock);
}
}
change &= ~UDC_ATT;
}
if (change & UDC_USB_RESET) {
if (devstat & UDC_USB_RESET) {
VDBG("RESET=1\n");
} else {
udc->gadget.speed = USB_SPEED_FULL;
INFO("USB reset done, gadget %s\n",
udc->driver->driver.name);
/* ep0 traffic is legal from now on */
omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
UDC_IRQ_EN);
}
change &= ~UDC_USB_RESET;
}
}
if (change & UDC_SUS) {
if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
// FIXME tell isp1301 to suspend/resume (?)
if (devstat & UDC_SUS) {
VDBG("suspend\n");
update_otg(udc);
/* HNP could be under way already */
if (udc->gadget.speed == USB_SPEED_FULL
&& udc->driver->suspend) {
spin_unlock(&udc->lock);
udc->driver->suspend(&udc->gadget);
spin_lock(&udc->lock);
}
if (udc->transceiver)
otg_set_suspend(udc->transceiver, 1);
} else {
VDBG("resume\n");
if (udc->transceiver)
otg_set_suspend(udc->transceiver, 0);
if (udc->gadget.speed == USB_SPEED_FULL
&& udc->driver->resume) {
spin_unlock(&udc->lock);
udc->driver->resume(&udc->gadget);
spin_lock(&udc->lock);
}
}
}
change &= ~UDC_SUS;
}
if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
update_otg(udc);
change &= ~OTG_FLAGS;
}
change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
if (change)
VDBG("devstat %03x, ignore change %03x\n",
devstat, change);
omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
}
static irqreturn_t omap_udc_irq(int irq, void *_udc)
{
struct omap_udc *udc = _udc;
u16 irq_src;
irqreturn_t status = IRQ_NONE;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
irq_src = omap_readw(UDC_IRQ_SRC);
/* Device state change (usb ch9 stuff) */
if (irq_src & UDC_DS_CHG) {
devstate_irq(_udc, irq_src);
status = IRQ_HANDLED;
irq_src &= ~UDC_DS_CHG;
}
/* EP0 control transfers */
if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
ep0_irq(_udc, irq_src);
status = IRQ_HANDLED;
irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
}
/* DMA transfer completion */
if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
dma_irq(_udc, irq_src);
status = IRQ_HANDLED;
irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
}
irq_src &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
if (irq_src)
DBG("udc_irq, unhandled %03x\n", irq_src);
spin_unlock_irqrestore(&udc->lock, flags);
return status;
}
/* workaround for seemingly-lost IRQs for RX ACKs... */
#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
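/* For a double-buffered endpoint, "neither full nor empty" means
 * exactly one of the two FIFO buffers holds a packet, so there is
 * still data to drain.
 */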
static void pio_out_timer(unsigned long _ep)
{
struct omap_ep *ep = (void *) _ep;
unsigned long flags;
u16 stat_flg;
spin_lock_irqsave(&ep->udc->lock, flags);
if (!list_empty(&ep->queue) && ep->ackwait) {
use_ep(ep, UDC_EP_SEL);
stat_flg = omap_readw(UDC_STAT_FLG);
if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
|| (ep->double_buf && HALF_FULL(stat_flg)))) {
struct omap_req *req;
VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
req = container_of(ep->queue.next,
struct omap_req, queue);
(void) read_fifo(ep, req);
omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
} else
deselect_ep();
}
mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
spin_unlock_irqrestore(&ep->udc->lock, flags);
}
static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
{
u16 epn_stat, irq_src;
irqreturn_t status = IRQ_NONE;
struct omap_ep *ep;
int epnum;
struct omap_udc *udc = _dev;
struct omap_req *req;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
epn_stat = omap_readw(UDC_EPN_STAT);
irq_src = omap_readw(UDC_IRQ_SRC);
/* handle OUT first, to avoid some wasteful NAKs */
if (irq_src & UDC_EPN_RX) {
epnum = (epn_stat >> 8) & 0x0f;
omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
status = IRQ_HANDLED;
ep = &udc->ep[epnum];
ep->irqs++;
omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
ep->fnf = 0;
if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
ep->ackwait--;
if (!list_empty(&ep->queue)) {
int stat;
req = container_of(ep->queue.next,
struct omap_req, queue);
stat = read_fifo(ep, req);
if (!ep->double_buf)
ep->fnf = 1;
}
}
/* min 6 clock delay before clearing EP_SEL ... */
epn_stat = omap_readw(UDC_EPN_STAT);
epn_stat = omap_readw(UDC_EPN_STAT);
omap_writew(epnum, UDC_EP_NUM);
/* enabling fifo _after_ clearing ACK, contrary to docs,
* reduces lossage; timer still needed though (sigh).
*/
if (ep->fnf) {
omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
ep->ackwait = 1 + ep->double_buf;
}
mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
}
/* then IN transfers */
else if (irq_src & UDC_EPN_TX) {
epnum = epn_stat & 0x0f;
omap_writew(UDC_EPN_TX, UDC_IRQ_SRC);
status = IRQ_HANDLED;
ep = &udc->ep[16 + epnum];
ep->irqs++;
omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM);
if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
ep->ackwait = 0;
if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
(void) write_fifo(ep, req);
}
}
/* min 6 clock delay before clearing EP_SEL ... */
epn_stat = omap_readw(UDC_EPN_STAT);
epn_stat = omap_readw(UDC_EPN_STAT);
omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM);
/* then 6 clocks before it'd tx */
}
spin_unlock_irqrestore(&udc->lock, flags);
return status;
}
#ifdef USE_ISO
static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
{
struct omap_udc *udc = _dev;
struct omap_ep *ep;
int pending = 0;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
/* handle all non-DMA ISO transfers */
list_for_each_entry(ep, &udc->iso, iso) {
u16 stat;
struct omap_req *req;
if (ep->has_dma || list_empty(&ep->queue))
continue;
req = list_entry(ep->queue.next, struct omap_req, queue);
use_ep(ep, UDC_EP_SEL);
stat = omap_readw(UDC_STAT_FLG);
/* NOTE: like the other controller drivers, this isn't
* currently reporting lost or damaged frames.
*/
if (ep->bEndpointAddress & USB_DIR_IN) {
if (stat & UDC_MISS_IN)
/* done(ep, req, -EPROTO) */;
else
write_fifo(ep, req);
} else {
int status = 0;
if (stat & UDC_NO_RXPACKET)
status = -EREMOTEIO;
else if (stat & UDC_ISO_ERR)
status = -EILSEQ;
else if (stat & UDC_DATA_FLUSH)
status = -ENOSR;
if (status)
/* done(ep, req, status) */;
else
read_fifo(ep, req);
}
deselect_ep();
/* 6 wait states before next EP */
ep->irqs++;
if (!list_empty(&ep->queue))
pending = 1;
}
if (!pending) {
u16 w;
w = omap_readw(UDC_IRQ_EN);
w &= ~UDC_SOF_IE;
omap_writew(w, UDC_IRQ_EN);
}
omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);
spin_unlock_irqrestore(&udc->lock, flags);
return IRQ_HANDLED;
}
#endif
/*-------------------------------------------------------------------------*/
static inline int machine_without_vbus_sense(void)
{
return (machine_is_omap_innovator()
|| machine_is_omap_osk()
|| machine_is_omap_apollon()
#ifndef CONFIG_MACH_OMAP_H4_OTG
|| machine_is_omap_h4()
#endif
|| machine_is_sx1()
|| cpu_is_omap7xx() /* No known omap7xx boards with vbus sense */
);
}
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
int status = -ENODEV;
struct omap_ep *ep;
unsigned long flags;
/* basic sanity tests */
if (!udc)
return -ENODEV;
if (!driver
// FIXME if otg, check: driver->is_otg
|| driver->speed < USB_SPEED_FULL
|| !bind || !driver->setup)
return -EINVAL;
spin_lock_irqsave(&udc->lock, flags);
if (udc->driver) {
spin_unlock_irqrestore(&udc->lock, flags);
return -EBUSY;
}
/* reset state */
list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
ep->irqs = 0;
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
continue;
use_ep(ep, 0);
omap_writew(UDC_SET_HALT, UDC_CTRL);
}
udc->ep0_pending = 0;
udc->ep[0].irqs = 0;
udc->softconnect = 1;
/* hook up the driver */
driver->driver.bus = NULL;
udc->driver = driver;
udc->gadget.dev.driver = &driver->driver;
spin_unlock_irqrestore(&udc->lock, flags);
if (udc->dc_clk != NULL)
omap_udc_enable_clock(1);
status = bind(&udc->gadget);
if (status) {
DBG("bind to %s --> %d\n", driver->driver.name, status);
udc->gadget.dev.driver = NULL;
udc->driver = NULL;
goto done;
}
DBG("bound to driver %s\n", driver->driver.name);
omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
/* connect to bus through transceiver */
if (udc->transceiver) {
status = otg_set_peripheral(udc->transceiver, &udc->gadget);
if (status < 0) {
ERR("can't bind to transceiver\n");
if (driver->unbind) {
driver->unbind(&udc->gadget);
udc->gadget.dev.driver = NULL;
udc->driver = NULL;
}
goto done;
}
} else {
if (can_pullup(udc))
pullup_enable(udc);
else
pullup_disable(udc);
}
/* boards that don't have VBUS sensing can't autogate the 48MHz
* clock, so they can't enter deep sleep while a gadget driver
* is active.
*/
if (machine_without_vbus_sense())
omap_vbus_session(&udc->gadget, 1);
done:
if (udc->dc_clk != NULL)
omap_udc_enable_clock(0);
return status;
}
EXPORT_SYMBOL(usb_gadget_probe_driver);
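#if 0
/* Illustrative sketch only, compiled out: how a gadget driver of
 * this vintage binds to the UDC.  "example_driver", "example_bind"
 * and "example_setup" are hypothetical names.
 */
static int example_bind(struct usb_gadget *gadget)
{
	/* claim endpoints, allocate per-gadget state; 0 on success */
	return 0;
}
static struct usb_gadget_driver example_driver = {
	.speed		= USB_SPEED_FULL,	/* minimum this UDC accepts */
	.setup		= example_setup,
	.driver.name	= "example",
};
static int __init example_init(void)
{
	return usb_gadget_probe_driver(&example_driver, example_bind);
}
#endif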
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
unsigned long flags;
if (!udc)
return -ENODEV;
if (!driver || driver != udc->driver || !driver->unbind)
return -EINVAL;
if (udc->dc_clk != NULL)
omap_udc_enable_clock(1);
if (machine_without_vbus_sense())
omap_vbus_session(&udc->gadget, 0);
if (udc->transceiver)
(void) otg_set_peripheral(udc->transceiver, NULL);
else
pullup_disable(udc);
spin_lock_irqsave(&udc->lock, flags);
udc_quiesce(udc);
spin_unlock_irqrestore(&udc->lock, flags);
driver->unbind(&udc->gadget);
udc->gadget.dev.driver = NULL;
udc->driver = NULL;
if (udc->dc_clk != NULL)
omap_udc_enable_clock(0);
DBG("unregistered driver '%s'\n", driver->driver.name);
return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/seq_file.h>
static const char proc_filename[] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
{
u16 stat_flg;
struct omap_req *req;
char buf[20];
use_ep(ep, 0);
if (use_dma && ep->has_dma)
snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
ep->dma_channel - 1, ep->lch);
else
buf[0] = 0;
stat_flg = omap_readw(UDC_STAT_FLG);
seq_printf(s,
"\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
ep->name, buf,
ep->double_buf ? "dbuf " : "",
({char *s; switch (ep->ackwait) {
case 0: s = ""; break;
case 1: s = "(ackw) "; break;
case 2: s = "(ackw2) "; break;
default: s = "(?) "; break;
} s;}),
ep->irqs, stat_flg,
(stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
(stat_flg & UDC_MISS_IN) ? "miss_in " : "",
(stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
(stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
(stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
(stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
(stat_flg & UDC_EP_HALTED) ? "HALT " : "",
(stat_flg & UDC_STALL) ? "STALL " : "",
(stat_flg & UDC_NAK) ? "NAK " : "",
(stat_flg & UDC_ACK) ? "ACK " : "",
(stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
(stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
(stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");
if (list_empty(&ep->queue))
seq_printf(s, "\t(queue empty)\n");
else
list_for_each_entry(req, &ep->queue, queue) {
unsigned length = req->req.actual;
if (use_dma && buf[0]) {
length += ((ep->bEndpointAddress & USB_DIR_IN)
? dma_src_len : dma_dest_len)
(ep, req->req.dma + length);
buf[0] = 0;
}
seq_printf(s, "\treq %p len %d/%d buf %p\n",
&req->req, length,
req->req.length, req->req.buf);
}
}
static char *trx_mode(unsigned m, int enabled)
{
switch (m) {
case 0: return enabled ? "*6wire" : "unused";
case 1: return "4wire";
case 2: return "3wire";
case 3: return "6wire";
default: return "unknown";
}
}
static int proc_otg_show(struct seq_file *s)
{
u32 tmp;
u32 trans = 0;
char *ctrl_name = "(UNKNOWN)";
/* XXX This needs major revision for OMAP2+ */
tmp = omap_readl(OTG_REV);
if (cpu_class_is_omap1()) {
ctrl_name = "tranceiver_ctrl";
trans = omap_readw(USB_TRANSCEIVER_CTRL);
}
seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
tmp >> 4, tmp & 0xf, ctrl_name, trans);
tmp = omap_readw(OTG_SYSCON_1);
seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
FOURBITS "\n", tmp,
trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
(USB0_TRX_MODE(tmp) == 0 && !cpu_is_omap1710())
? "internal"
: trx_mode(USB0_TRX_MODE(tmp), 1),
(tmp & OTG_IDLE_EN) ? " !otg" : "",
(tmp & HST_IDLE_EN) ? " !host" : "",
(tmp & DEV_IDLE_EN) ? " !dev" : "",
(tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
tmp = omap_readl(OTG_SYSCON_2);
seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
" b_ase_brst=%d hmc=%d\n", tmp,
(tmp & OTG_EN) ? " otg_en" : "",
(tmp & USBX_SYNCHRO) ? " synchro" : "",
// much more SRP stuff
(tmp & SRP_DATA) ? " srp_data" : "",
(tmp & SRP_VBUS) ? " srp_vbus" : "",
(tmp & OTG_PADEN) ? " otg_paden" : "",
(tmp & HMC_PADEN) ? " hmc_paden" : "",
(tmp & UHOST_EN) ? " uhost_en" : "",
(tmp & HMC_TLLSPEED) ? " tllspeed" : "",
(tmp & HMC_TLLATTACH) ? " tllattach" : "",
B_ASE_BRST(tmp),
OTG_HMC(tmp));
tmp = omap_readl(OTG_CTRL);
seq_printf(s, "otg_ctrl %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
(tmp & OTG_ASESSVLD) ? " asess" : "",
(tmp & OTG_BSESSEND) ? " bsess_end" : "",
(tmp & OTG_BSESSVLD) ? " bsess" : "",
(tmp & OTG_VBUSVLD) ? " vbus" : "",
(tmp & OTG_ID) ? " id" : "",
(tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
(tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
(tmp & OTG_A_BUSREQ) ? " a_bus" : "",
(tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
(tmp & OTG_B_BUSREQ) ? " b_bus" : "",
(tmp & OTG_BUSDROP) ? " busdrop" : "",
(tmp & OTG_PULLDOWN) ? " down" : "",
(tmp & OTG_PULLUP) ? " up" : "",
(tmp & OTG_DRV_VBUS) ? " drv" : "",
(tmp & OTG_PD_VBUS) ? " pd_vb" : "",
(tmp & OTG_PU_VBUS) ? " pu_vb" : "",
(tmp & OTG_PU_ID) ? " pu_id" : ""
);
tmp = omap_readw(OTG_IRQ_EN);
seq_printf(s, "otg_irq_en %04x" "\n", tmp);
tmp = omap_readw(OTG_IRQ_SRC);
seq_printf(s, "otg_irq_src %04x" "\n", tmp);
tmp = omap_readw(OTG_OUTCTRL);
seq_printf(s, "otg_outctrl %04x" "\n", tmp);
tmp = omap_readw(OTG_TEST);
seq_printf(s, "otg_test %04x" "\n", tmp);
return 0;
}
static int proc_udc_show(struct seq_file *s, void *_)
{
u32 tmp;
struct omap_ep *ep;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
seq_printf(s, "%s, version: " DRIVER_VERSION
#ifdef USE_ISO
" (iso)"
#endif
"%s\n",
driver_desc,
use_dma ? " (dma)" : "");
tmp = omap_readw(UDC_REV) & 0xff;
seq_printf(s,
"UDC rev %d.%d, fifo mode %d, gadget %s\n"
"hmc %d, transceiver %s\n",
tmp >> 4, tmp & 0xf,
fifo_mode,
udc->driver ? udc->driver->driver.name : "(none)",
HMC,
udc->transceiver
? udc->transceiver->label
: ((cpu_is_omap1710() || cpu_is_omap24xx())
? "external" : "(none)"));
if (cpu_class_is_omap1()) {
seq_printf(s, "ULPD control %04x req %04x status %04x\n",
omap_readw(ULPD_CLOCK_CTRL),
omap_readw(ULPD_SOFT_REQ),
omap_readw(ULPD_STATUS_REQ));
}
/* OTG controller registers */
if (!cpu_is_omap15xx())
proc_otg_show(s);
tmp = omap_readw(UDC_SYSCON1);
seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp,
(tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
(tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
(tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
(tmp & UDC_NAK_EN) ? " nak" : "",
(tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
(tmp & UDC_SELF_PWR) ? " self_pwr" : "",
(tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
(tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
// syscon2 is write-only
/* UDC controller registers */
if (!(tmp & UDC_PULLUP_EN)) {
seq_printf(s, "(suspended)\n");
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
tmp = omap_readw(UDC_DEVSTAT);
seq_printf(s, "devstat %04x" EIGHTBITS "%s%s\n", tmp,
(tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
(tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
(tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
(tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
(tmp & UDC_USB_RESET) ? " usb_reset" : "",
(tmp & UDC_SUS) ? " SUS" : "",
(tmp & UDC_CFG) ? " CFG" : "",
(tmp & UDC_ADD) ? " ADD" : "",
(tmp & UDC_DEF) ? " DEF" : "",
(tmp & UDC_ATT) ? " ATT" : "");
seq_printf(s, "sof %04x\n", omap_readw(UDC_SOF));
tmp = omap_readw(UDC_IRQ_EN);
seq_printf(s, "irq_en %04x" FOURBITS "%s\n", tmp,
(tmp & UDC_SOF_IE) ? " sof" : "",
(tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
(tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
(tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
(tmp & UDC_EP0_IE) ? " ep0" : "");
tmp = omap_readw(UDC_IRQ_SRC);
seq_printf(s, "irq_src %04x" EIGHTBITS "%s%s\n", tmp,
(tmp & UDC_TXN_DONE) ? " txn_done" : "",
(tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
(tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
(tmp & UDC_IRQ_SOF) ? " sof" : "",
(tmp & UDC_EPN_RX) ? " epn_rx" : "",
(tmp & UDC_EPN_TX) ? " epn_tx" : "",
(tmp & UDC_DS_CHG) ? " ds_chg" : "",
(tmp & UDC_SETUP) ? " setup" : "",
(tmp & UDC_EP0_RX) ? " ep0out" : "",
(tmp & UDC_EP0_TX) ? " ep0in" : "");
if (use_dma) {
unsigned i;
tmp = omap_readw(UDC_DMA_IRQ_EN);
seq_printf(s, "dma_irq_en %04x%s" EIGHTBITS "\n", tmp,
(tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
(tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
(tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",
(tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
(tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
(tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",
(tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
(tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
(tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");
tmp = omap_readw(UDC_RXDMA_CFG);
seq_printf(s, "rxdma_cfg %04x\n", tmp);
if (tmp) {
for (i = 0; i < 3; i++) {
if ((tmp & (0x0f << (i * 4))) == 0)
continue;
seq_printf(s, "rxdma[%d] %04x\n", i,
omap_readw(UDC_RXDMA(i + 1)));
}
}
tmp = omap_readw(UDC_TXDMA_CFG);
seq_printf(s, "txdma_cfg %04x\n", tmp);
if (tmp) {
for (i = 0; i < 3; i++) {
if (!(tmp & (0x0f << (i * 4))))
continue;
seq_printf(s, "txdma[%d] %04x\n", i,
omap_readw(UDC_TXDMA(i + 1)));
}
}
}
tmp = omap_readw(UDC_DEVSTAT);
if (tmp & UDC_ATT) {
proc_ep_show(s, &udc->ep[0]);
if (tmp & UDC_ADD) {
list_for_each_entry(ep, &udc->gadget.ep_list,
ep.ep_list) {
if (ep->desc)
proc_ep_show(s, ep);
}
}
}
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int proc_udc_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_udc_show, NULL);
}
static const struct file_operations proc_ops = {
.owner = THIS_MODULE,
.open = proc_udc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void create_proc_file(void)
{
proc_create(proc_filename, 0, NULL, &proc_ops);
}
static void remove_proc_file(void)
{
remove_proc_entry(proc_filename, NULL);
}
#else
static inline void create_proc_file(void) {}
static inline void remove_proc_file(void) {}
#endif
/*-------------------------------------------------------------------------*/
/* Before this controller can enumerate, we need to pick an endpoint
* configuration, or "fifo_mode" That involves allocating 2KB of packet
* buffer space among the endpoints we'll be operating.
*
* NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when
* UDC_SYSCON_1.CFG_LOCK is set can now work. We won't use that
* capability yet though.
*/
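/* Worked example, assuming the fifo_mode 0 table below: the SETUP
 * packet occupies bytes 0-7, ep0 takes 64 bytes at offset 8, each
 * double-buffered bulk endpoint then takes 2 x 64 bytes, and the
 * interrupt endpoint 16, i.e. 344 bytes in all, the rest unused.
 */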
static unsigned __init
omap_ep_setup(char *name, u8 addr, u8 type,
unsigned buf, unsigned maxp, int dbuf)
{
struct omap_ep *ep;
u16 epn_rxtx = 0;
/* OUT endpoints first, then IN */
ep = &udc->ep[addr & 0xf];
if (addr & USB_DIR_IN)
ep += 16;
/* in case of ep init table bugs */
BUG_ON(ep->name[0]);
/* chip setup ... bit values are same for IN, OUT */
if (type == USB_ENDPOINT_XFER_ISOC) {
switch (maxp) {
case 8: epn_rxtx = 0 << 12; break;
case 16: epn_rxtx = 1 << 12; break;
case 32: epn_rxtx = 2 << 12; break;
case 64: epn_rxtx = 3 << 12; break;
case 128: epn_rxtx = 4 << 12; break;
case 256: epn_rxtx = 5 << 12; break;
case 512: epn_rxtx = 6 << 12; break;
default: BUG();
}
epn_rxtx |= UDC_EPN_RX_ISO;
dbuf = 1;
} else {
/* double-buffering "not supported" on 15xx,
* and ignored for PIO-IN on newer chips
* (for more reliable behavior)
*/
if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx())
dbuf = 0;
switch (maxp) {
case 8: epn_rxtx = 0 << 12; break;
case 16: epn_rxtx = 1 << 12; break;
case 32: epn_rxtx = 2 << 12; break;
case 64: epn_rxtx = 3 << 12; break;
default: BUG();
}
if (dbuf && addr)
epn_rxtx |= UDC_EPN_RX_DB;
init_timer(&ep->timer);
ep->timer.function = pio_out_timer;
ep->timer.data = (unsigned long) ep;
}
if (addr)
epn_rxtx |= UDC_EPN_RX_VALID;
BUG_ON(buf & 0x07);
epn_rxtx |= buf >> 3;
DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);
if (addr & USB_DIR_IN)
omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf));
else
omap_writew(epn_rxtx, UDC_EP_RX(addr));
/* next endpoint's buffer starts after this one's */
buf += maxp;
if (dbuf)
buf += maxp;
BUG_ON(buf > 2048);
/* set up driver data structures */
BUG_ON(strlen(name) >= sizeof ep->name);
strlcpy(ep->name, name, sizeof ep->name);
INIT_LIST_HEAD(&ep->queue);
INIT_LIST_HEAD(&ep->iso);
ep->bEndpointAddress = addr;
ep->bmAttributes = type;
ep->double_buf = dbuf;
ep->udc = udc;
ep->ep.name = ep->name;
ep->ep.ops = &omap_ep_ops;
ep->ep.maxpacket = ep->maxpacket = maxp;
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
return buf;
}
static void omap_udc_release(struct device *dev)
{
complete(udc->done);
kfree(udc);
udc = NULL;
}
static int __init
omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
{
unsigned tmp, buf;
/* abolish any previous hardware state */
omap_writew(0, UDC_SYSCON1);
omap_writew(0, UDC_IRQ_EN);
omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
omap_writew(0, UDC_DMA_IRQ_EN);
omap_writew(0, UDC_RXDMA_CFG);
omap_writew(0, UDC_TXDMA_CFG);
/* UDC_PULLUP_EN gates the chip clock */
// OTG_SYSCON_1 |= DEV_IDLE_EN;
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
return -ENOMEM;
spin_lock_init(&udc->lock);
udc->gadget.ops = &omap_gadget_ops;
udc->gadget.ep0 = &udc->ep[0].ep;
INIT_LIST_HEAD(&udc->gadget.ep_list);
INIT_LIST_HEAD(&udc->iso);
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->gadget.name = driver_name;
device_initialize(&udc->gadget.dev);
dev_set_name(&udc->gadget.dev, "gadget");
udc->gadget.dev.release = omap_udc_release;
udc->gadget.dev.parent = &odev->dev;
if (use_dma)
udc->gadget.dev.dma_mask = odev->dev.dma_mask;
udc->transceiver = xceiv;
/* ep0 is special; put it right after the SETUP buffer */
buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
8 /* after SETUP */, 64 /* maxpacket */, 0);
list_del_init(&udc->ep[0].ep.ep_list);
/* initially disable all non-ep0 endpoints */
for (tmp = 1; tmp < 15; tmp++) {
omap_writew(0, UDC_EP_RX(tmp));
omap_writew(0, UDC_EP_TX(tmp));
}
#define OMAP_BULK_EP(name,addr) \
buf = omap_ep_setup(name "-bulk", addr, \
USB_ENDPOINT_XFER_BULK, buf, 64, 1);
#define OMAP_INT_EP(name,addr, maxp) \
buf = omap_ep_setup(name "-int", addr, \
USB_ENDPOINT_XFER_INT, buf, maxp, 0);
#define OMAP_ISO_EP(name,addr, maxp) \
buf = omap_ep_setup(name "-iso", addr, \
USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);
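/* For example, OMAP_BULK_EP("ep1in", USB_DIR_IN | 1) expands to:
 * buf = omap_ep_setup("ep1in-bulk", 0x81, USB_ENDPOINT_XFER_BULK,
 *                     buf, 64, 1);
 */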
switch (fifo_mode) {
case 0:
OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
break;
case 1:
OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
OMAP_BULK_EP("ep3in", USB_DIR_IN | 3);
OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4);
OMAP_INT_EP("ep10in", USB_DIR_IN | 10, 16);
OMAP_BULK_EP("ep5in", USB_DIR_IN | 5);
OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
OMAP_INT_EP("ep11in", USB_DIR_IN | 11, 16);
OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6);
OMAP_INT_EP("ep12in", USB_DIR_IN | 12, 16);
OMAP_BULK_EP("ep7in", USB_DIR_IN | 7);
OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
OMAP_INT_EP("ep13in", USB_DIR_IN | 13, 16);
OMAP_INT_EP("ep13out", USB_DIR_OUT | 13, 16);
OMAP_BULK_EP("ep8in", USB_DIR_IN | 8);
OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8);
OMAP_INT_EP("ep14in", USB_DIR_IN | 14, 16);
OMAP_INT_EP("ep14out", USB_DIR_OUT | 14, 16);
OMAP_BULK_EP("ep15in", USB_DIR_IN | 15);
OMAP_BULK_EP("ep15out", USB_DIR_OUT | 15);
break;
#ifdef USE_ISO
case 2: /* mixed iso/bulk */
OMAP_ISO_EP("ep1in", USB_DIR_IN | 1, 256);
OMAP_ISO_EP("ep2out", USB_DIR_OUT | 2, 256);
OMAP_ISO_EP("ep3in", USB_DIR_IN | 3, 128);
OMAP_ISO_EP("ep4out", USB_DIR_OUT | 4, 128);
OMAP_INT_EP("ep5in", USB_DIR_IN | 5, 16);
OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
OMAP_INT_EP("ep8in", USB_DIR_IN | 8, 16);
break;
case 3: /* mixed bulk/iso */
OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
OMAP_BULK_EP("ep4in", USB_DIR_IN | 4);
OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
OMAP_INT_EP("ep6in", USB_DIR_IN | 6, 16);
OMAP_ISO_EP("ep7in", USB_DIR_IN | 7, 256);
OMAP_ISO_EP("ep8out", USB_DIR_OUT | 8, 256);
OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
break;
#endif
/* add more modes as needed */
default:
ERR("unsupported fifo_mode #%d\n", fifo_mode);
return -ENODEV;
}
omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1);
INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
return 0;
}
static int __init omap_udc_probe(struct platform_device *pdev)
{
int status = -ENODEV;
int hmc;
struct otg_transceiver *xceiv = NULL;
const char *type = NULL;
struct omap_usb_config *config = pdev->dev.platform_data;
struct clk *dc_clk;
struct clk *hhc_clk;
/* NOTE: "knows" the order of the resources! */
if (!request_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1,
driver_name)) {
DBG("request_mem_region failed\n");
return -EBUSY;
}
if (cpu_is_omap16xx()) {
dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
clk_enable(dc_clk);
clk_enable(hhc_clk);
udelay(100);
}
if (cpu_is_omap24xx()) {
dc_clk = clk_get(&pdev->dev, "usb_fck");
hhc_clk = clk_get(&pdev->dev, "usb_l4_ick");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
clk_enable(dc_clk);
clk_enable(hhc_clk);
udelay(100);
}
if (cpu_is_omap7xx()) {
dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
/* can't use omap_udc_enable_clock yet */
clk_enable(dc_clk);
clk_enable(hhc_clk);
udelay(100);
}
INFO("OMAP UDC rev %d.%d%s\n",
omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
config->otg ? ", Mini-AB" : "");
/* use the mode given to us by board init code */
if (cpu_is_omap15xx()) {
hmc = HMC_1510;
type = "(unknown)";
if (machine_without_vbus_sense()) {
/* just set up software VBUS detect, and then
* later rig it so we always report VBUS.
* FIXME without really sensing VBUS, we can't
* know when to turn PULLUP_EN on/off; and that
* means we always "need" the 48MHz clock.
*/
u32 tmp = omap_readl(FUNC_MUX_CTRL_0);
tmp &= ~VBUS_CTRL_1510;
omap_writel(tmp, FUNC_MUX_CTRL_0);
tmp |= VBUS_MODE_1510;
tmp &= ~VBUS_CTRL_1510;
omap_writel(tmp, FUNC_MUX_CTRL_0);
}
} else {
/* The transceiver may package some GPIO logic or handle
* loopback and/or transceiverless setup; if we find one,
* use it. Except for OTG, we don't _need_ to talk to one;
* but not having one probably means no VBUS detection.
*/
xceiv = otg_get_transceiver();
if (xceiv)
type = xceiv->label;
else if (config->otg) {
DBG("OTG requires external transceiver!\n");
goto cleanup0;
}
hmc = HMC_1610;
if (cpu_is_omap24xx()) {
/* this could be transceiverless in one of the
* "we don't need to know" modes.
*/
type = "external";
goto known;
}
switch (hmc) {
case 0: /* POWERUP DEFAULT == 0 */
case 4:
case 12:
case 20:
if (!cpu_is_omap1710()) {
type = "integrated";
break;
}
/* FALL THROUGH */
case 3:
case 11:
case 16:
case 19:
case 25:
if (!xceiv) {
DBG("external transceiver not registered!\n");
type = "unknown";
}
break;
case 21: /* internal loopback */
type = "loopback";
break;
case 14: /* transceiverless */
if (cpu_is_omap1710())
goto bad_on_1710;
/* FALL THROUGH */
case 13:
case 15:
type = "no";
break;
default:
bad_on_1710:
ERR("unrecognized UDC HMC mode %d\n", hmc);
goto cleanup0;
}
}
known:
INFO("hmc mode %d, %s transceiver\n", hmc, type);
/* a "gadget" abstracts/virtualizes the controller */
status = omap_udc_setup(pdev, xceiv);
if (status) {
goto cleanup0;
}
xceiv = NULL;
// "udc" is now valid
pullup_disable(udc);
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
udc->gadget.is_otg = (config->otg != 0);
#endif
/* starting with omap1710 es2.0, clear toggle is a separate bit */
if (omap_readw(UDC_REV) >= 0x61)
udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE;
else
udc->clr_halt = UDC_RESET_EP;
/* USB general purpose IRQ: ep0, state changes, dma, etc */
status = request_irq(pdev->resource[1].start, omap_udc_irq,
IRQF_SAMPLE_RANDOM, driver_name, udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[1].start, status);
goto cleanup1;
}
/* USB "non-iso" IRQ (PIO for all but ep0) */
status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[2].start, status);
goto cleanup2;
}
#ifdef USE_ISO
status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
IRQF_DISABLED, "omap_udc iso", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[3].start, status);
goto cleanup3;
}
#endif
if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
udc->dc_clk = dc_clk;
udc->hhc_clk = hhc_clk;
clk_disable(hhc_clk);
clk_disable(dc_clk);
}
if (cpu_is_omap24xx()) {
udc->dc_clk = dc_clk;
udc->hhc_clk = hhc_clk;
/* FIXME OMAP2 don't release hhc & dc clock */
#if 0
clk_disable(hhc_clk);
clk_disable(dc_clk);
#endif
}
create_proc_file();
status = device_add(&udc->gadget.dev);
if (!status)
return status;
/* If fail, fall through */
#ifdef USE_ISO
cleanup3:
free_irq(pdev->resource[2].start, udc);
#endif
cleanup2:
free_irq(pdev->resource[1].start, udc);
cleanup1:
kfree(udc);
udc = NULL;
cleanup0:
if (xceiv)
otg_put_transceiver(xceiv);
if (cpu_is_omap16xx() || cpu_is_omap24xx() || cpu_is_omap7xx()) {
clk_disable(hhc_clk);
clk_disable(dc_clk);
clk_put(hhc_clk);
clk_put(dc_clk);
}
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
return status;
}
static int __exit omap_udc_remove(struct platform_device *pdev)
{
DECLARE_COMPLETION_ONSTACK(done);
if (!udc)
return -ENODEV;
if (udc->driver)
return -EBUSY;
udc->done = &done;
pullup_disable(udc);
if (udc->transceiver) {
otg_put_transceiver(udc->transceiver);
udc->transceiver = NULL;
}
omap_writew(0, UDC_SYSCON1);
remove_proc_file();
#ifdef USE_ISO
free_irq(pdev->resource[3].start, udc);
#endif
free_irq(pdev->resource[2].start, udc);
free_irq(pdev->resource[1].start, udc);
if (udc->dc_clk) {
if (udc->clk_requested)
omap_udc_enable_clock(0);
clk_put(udc->hhc_clk);
clk_put(udc->dc_clk);
}
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
device_unregister(&udc->gadget.dev);
wait_for_completion(&done);
return 0;
}
/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
* system is forced into deep sleep
*
* REVISIT we should probably reject suspend requests when there's a host
* session active, rather than disconnecting, at least on boards that can
* report VBUS irqs (UDC_DEVSTAT.UDC_ATT). And in any case, we need to
* make host resumes and VBUS detection trigger OMAP wakeup events; that
* may involve talking to an external transceiver (e.g. isp1301).
*/
static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
{
u32 devstat;
devstat = omap_readw(UDC_DEVSTAT);
/* we're requesting the 48 MHz clock if the pullup is enabled
* (== we're attached to the host) and we're not suspended,
* which would prevent entry to deep sleep...
*/
if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) {
WARNING("session active; suspend requires disconnect\n");
omap_pullup(&udc->gadget, 0);
}
return 0;
}
static int omap_udc_resume(struct platform_device *dev)
{
DBG("resume + wakeup/SRP\n");
omap_pullup(&udc->gadget, 1);
/* maybe the host would enumerate us if we nudged it */
msleep(100);
return omap_wakeup(&udc->gadget);
}
/*-------------------------------------------------------------------------*/
static struct platform_driver udc_driver = {
.remove = __exit_p(omap_udc_remove),
.suspend = omap_udc_suspend,
.resume = omap_udc_resume,
.driver = {
.owner = THIS_MODULE,
.name = (char *) driver_name,
},
};
static int __init udc_init(void)
{
/* Disable DMA for omap7xx -- it doesn't work right. */
if (cpu_is_omap7xx())
use_dma = 0;
INFO("%s, version: " DRIVER_VERSION
#ifdef USE_ISO
" (iso)"
#endif
"%s\n", driver_desc,
use_dma ? " (dma)" : "");
return platform_driver_probe(&udc_driver, omap_udc_probe);
}
module_init(udc_init);
static void __exit udc_exit(void)
{
platform_driver_unregister(&udc_driver);
}
module_exit(udc_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_udc");
| gpl-2.0 |
neobuddy89/vibrant_fluid_kernel | arch/mips/lasat/interrupt.c | 2349 | 3456 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Routines for generic manipulation of the interrupts found on the
* Lasat boards.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/lasatint.h>
#include <irq.h>
static volatile int *lasat_int_status;
static volatile int *lasat_int_mask;
static volatile int lasat_int_mask_shift;
void disable_lasat_irq(struct irq_data *d)
{
unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;
*lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift;
}
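/* Note the precedence above: the complement binds before the shift,
 * so with a nonzero mask shift the low bits of the mask word are
 * cleared as well.
 */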
void enable_lasat_irq(struct irq_data *d)
{
unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;
*lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
}
static struct irq_chip lasat_irq_type = {
.name = "Lasat",
.irq_mask = disable_lasat_irq,
.irq_unmask = enable_lasat_irq,
};
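/*
 * ls1bit32() below is a compact binary search for the least
 * significant set bit: each step shifts away an empty low half of
 * the remaining window.  For example, ls1bit32(0x18) returns 3.
 */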
static inline int ls1bit32(unsigned int x)
{
int b = 31, s;
s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
s = 1; if (x << 1 == 0) s = 0; b -= s;
return b;
}
static unsigned long (*get_int_status)(void);
static unsigned long get_int_status_100(void)
{
return *lasat_int_status & *lasat_int_mask;
}
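/* On the 200-series, the mask and status halves share one register;
 * get_int_status_200() below ANDs the status half with a
 * shifted-down copy of the mask half, leaving only enabled, pending
 * sources.
 */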
static unsigned long get_int_status_200(void)
{
unsigned long int_status;
int_status = *lasat_int_status;
int_status &= (int_status >> LASATINT_MASK_SHIFT_200) & 0xffff;
return int_status;
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned long int_status;
unsigned int cause = read_c0_cause();
int irq;
if (cause & CAUSEF_IP7) { /* R4000 count / compare IRQ */
do_IRQ(7);
return;
}
int_status = get_int_status();
/* if int_status == 0, then the interrupt has already been cleared */
if (int_status) {
irq = LASAT_IRQ_BASE + ls1bit32(int_status);
do_IRQ(irq);
}
}
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
};
void __init arch_init_irq(void)
{
int i;
if (IS_LASAT_200()) {
lasat_int_status = (void *)LASAT_INT_STATUS_REG_200;
lasat_int_mask = (void *)LASAT_INT_MASK_REG_200;
lasat_int_mask_shift = LASATINT_MASK_SHIFT_200;
get_int_status = get_int_status_200;
*lasat_int_mask &= 0xffff;
} else {
lasat_int_status = (void *)LASAT_INT_STATUS_REG_100;
lasat_int_mask = (void *)LASAT_INT_MASK_REG_100;
lasat_int_mask_shift = LASATINT_MASK_SHIFT_100;
get_int_status = get_int_status_100;
*lasat_int_mask = 0;
}
mips_cpu_irq_init();
for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++)
irq_set_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
setup_irq(LASAT_CASCADE_IRQ, &cascade);
}
| gpl-2.0 |
Abhinav1997/kernel_z3 | drivers/video/msm/lcdc_auo_wvga.c | 3629 | 9083 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/pwm.h>
#ifdef CONFIG_SPI_QUP
#include <linux/spi/spi.h>
#else
#include <mach/gpio.h>
#endif
#include "msm_fb.h"
#define MAX_BACKLIGHT_LEVEL 15
#define PANEL_CMD_BACKLIGHT_LEVEL 0x6A18
#define PANEL_CMD_FORMAT 0x3A00
#define PANEL_CMD_RGBCTRL 0x3B00
#define PANEL_CMD_BCTRL 0x5300
#define PANEL_CMD_PWM_EN 0x6A17
#define PANEL_CMD_SLEEP_OUT 0x1100
#define PANEL_CMD_DISP_ON 0x2900
#define PANEL_CMD_DISP_OFF 0x2800
#define PANEL_CMD_SLEEP_IN 0x1000
#define LCDC_AUO_PANEL_NAME "lcdc_auo_wvga"
#ifdef CONFIG_SPI_QUP
#define LCDC_AUO_SPI_DEVICE_NAME "lcdc_auo_nt35582"
static struct spi_device *lcdc_spi_client;
#else
static int spi_cs;
static int spi_sclk;
static int spi_mosi;
#endif
struct auo_state_type {
boolean display_on;
int bl_level;
};
static struct auo_state_type auo_state = { .bl_level = 10 };
static struct msm_panel_common_pdata *lcdc_auo_pdata;
#ifndef CONFIG_SPI_QUP
static void auo_spi_write_byte(u8 data)
{
uint32 bit;
int bnum;
bnum = 8; /* 8 data bits */
bit = 0x80;
while (bnum--) {
gpio_set_value(spi_sclk, 0); /* clk low */
gpio_set_value(spi_mosi, (data & bit) ? 1 : 0);
udelay(1);
gpio_set_value(spi_sclk, 1); /* clk high */
udelay(1);
bit >>= 1;
}
gpio_set_value(spi_mosi, 0);
}
static void auo_spi_read_byte(u16 cmd_16, u8 *data)
{
int bnum;
u8 cmd_hi = (u8)(cmd_16 >> 8);
u8 cmd_low = (u8)(cmd_16);
/* Chip Select - low */
gpio_set_value(spi_cs, 0);
udelay(2);
/* command byte first */
auo_spi_write_byte(0x20);
udelay(2);
auo_spi_write_byte(cmd_hi);
udelay(2);
auo_spi_write_byte(0x00);
udelay(2);
auo_spi_write_byte(cmd_low);
udelay(2);
auo_spi_write_byte(0xc0);
udelay(2);
gpio_direction_input(spi_mosi);
/* followed by data bytes */
bnum = 1 * 8; /* number of bits */
*data = 0;
while (bnum) {
gpio_set_value(spi_sclk, 0); /* clk low */
udelay(1);
*data <<= 1;
*data |= gpio_get_value(spi_mosi) ? 1 : 0;
gpio_set_value(spi_sclk, 1); /* clk high */
udelay(1);
--bnum;
if ((bnum % 8) == 0)
++data;
}
gpio_direction_output(spi_mosi, 0);
/* Chip Select - high */
udelay(2);
gpio_set_value(spi_cs, 1);
}
#endif
static int auo_serigo(u8 *input_data, int input_len)
{
#ifdef CONFIG_SPI_QUP
int rc;
struct spi_message m;
struct spi_transfer t;
if (!lcdc_spi_client) {
pr_err("%s lcdc_spi_client is NULL\n", __func__);
return -EINVAL;
}
memset(&t, 0, sizeof t);
t.tx_buf = input_data;
t.len = input_len;
t.bits_per_word = 16;
spi_setup(lcdc_spi_client);
spi_message_init(&m);
spi_message_add_tail(&t, &m);
rc = spi_sync(lcdc_spi_client, &m);
return rc;
#else
int i;
/* Chip Select - low */
gpio_set_value(spi_cs, 0);
udelay(2);
for (i = 0; i < input_len; ++i) {
auo_spi_write_byte(input_data[i]);
udelay(2);
}
/* Chip Select - high */
gpio_set_value(spi_cs, 1);
return 0;
#endif
}
#ifndef CONFIG_SPI_QUP
static void auo_spi_init(void)
{
spi_sclk = *(lcdc_auo_pdata->gpio_num);
spi_cs = *(lcdc_auo_pdata->gpio_num + 1);
spi_mosi = *(lcdc_auo_pdata->gpio_num + 2);
/* Set the output so that we don't disturb the slave device */
gpio_set_value(spi_sclk, 1);
gpio_set_value(spi_mosi, 0);
/* Set the Chip Select deasserted (active low) */
gpio_set_value(spi_cs, 1);
}
#endif
static struct work_struct disp_on_delayed_work;
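/* 3-wire framing as used below (inferred from this code, not from a
 * datasheet): 0x20 prefixes the command high byte, 0x00 the command
 * low byte, 0x40 a parameter byte, and 0xc0 requests a read.
 */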
static void auo_write_cmd(u16 cmd)
{
u8 local_data[4];
local_data[0] = 0x20;
local_data[1] = (u8)(cmd >> 8);
local_data[2] = 0;
local_data[3] = (u8)cmd;
auo_serigo(local_data, 4);
}
static void auo_write_cmd_1param(u16 cmd, u8 para1)
{
u8 local_data[6];
local_data[0] = 0x20;
local_data[1] = (u8)(cmd >> 8);
local_data[2] = 0;
local_data[3] = (u8)cmd;
local_data[4] = 0x40;
local_data[5] = para1;
auo_serigo(local_data, 6);
}
static void lcdc_auo_set_backlight(struct msm_fb_data_type *mfd)
{
int bl_level;
bl_level = mfd->bl_level;
if (auo_state.display_on) {
auo_write_cmd_1param(PANEL_CMD_BACKLIGHT_LEVEL,
bl_level * 255 / MAX_BACKLIGHT_LEVEL);
auo_state.bl_level = bl_level;
}
}
static void auo_disp_on_delayed_work(struct work_struct *work_ptr)
{
/* 0x1100: Sleep Out */
auo_write_cmd(PANEL_CMD_SLEEP_OUT);
msleep(180);
/* SET_PIXEL_FORMAT: Set how many bits per pixel are used (3A00h)*/
auo_write_cmd_1param(PANEL_CMD_FORMAT, 0x66); /* 18 bits */
/* RGBCTRL: RGB Interface Signal Control (3B00h) */
auo_write_cmd_1param(PANEL_CMD_RGBCTRL, 0x2B);
/* Display ON command */
auo_write_cmd(PANEL_CMD_DISP_ON);
msleep(20);
/*Backlight on */
auo_write_cmd_1param(PANEL_CMD_BCTRL, 0x24); /*BCTRL, BL */
auo_write_cmd_1param(PANEL_CMD_PWM_EN, 0x01); /*Enable PWM Level */
msleep(20);
}
static void auo_disp_on(void)
{
if (!auo_state.display_on) {
INIT_WORK(&disp_on_delayed_work, auo_disp_on_delayed_work);
#ifdef CONFIG_SPI_QUP
if (lcdc_spi_client)
#endif
schedule_work(&disp_on_delayed_work);
auo_state.display_on = TRUE;
}
}
static int lcdc_auo_panel_on(struct platform_device *pdev)
{
pr_info("%s\n", __func__);
if (!auo_state.display_on) {
#ifndef CONFIG_SPI_QUP
lcdc_auo_pdata->panel_config_gpio(1);
auo_spi_init();
#endif
auo_disp_on();
}
return 0;
}
static int lcdc_auo_panel_off(struct platform_device *pdev)
{
pr_info("%s\n", __func__);
if (auo_state.display_on) {
/* 0x2800: Display Off */
auo_write_cmd(PANEL_CMD_DISP_OFF);
msleep(120);
/* 0x1000: Sleep In */
auo_write_cmd(PANEL_CMD_SLEEP_IN);
msleep(120);
auo_state.display_on = FALSE;
}
return 0;
}
static int auo_probe(struct platform_device *pdev)
{
pr_info("%s: id=%d\n", __func__, pdev->id);
if (pdev->id == 0) {
lcdc_auo_pdata = pdev->dev.platform_data;
return 0;
}
msm_fb_add_device(pdev);
return 0;
}
#ifdef CONFIG_SPI_QUP
static int __devinit lcdc_auo_spi_probe(struct spi_device *spi)
{
pr_info("%s\n", __func__);
lcdc_spi_client = spi;
lcdc_spi_client->bits_per_word = 32;
if (auo_state.display_on)
schedule_work(&disp_on_delayed_work);
return 0;
}
static int __devexit lcdc_auo_spi_remove(struct spi_device *spi)
{
lcdc_spi_client = NULL;
return 0;
}
static struct spi_driver lcdc_auo_spi_driver = {
.driver.name = LCDC_AUO_SPI_DEVICE_NAME,
.driver.owner = THIS_MODULE,
.probe = lcdc_auo_spi_probe,
.remove = __devexit_p(lcdc_auo_spi_remove),
};
#endif
static struct platform_driver this_driver = {
.probe = auo_probe,
.driver.name = LCDC_AUO_PANEL_NAME,
};
static struct msm_fb_panel_data auo_panel_data = {
.on = lcdc_auo_panel_on,
.off = lcdc_auo_panel_off,
.set_backlight = lcdc_auo_set_backlight,
};
static struct platform_device this_device = {
.name = LCDC_AUO_PANEL_NAME,
.id = 1,
.dev.platform_data = &auo_panel_data,
};
static int __init lcdc_auo_panel_init(void)
{
int ret;
struct msm_panel_info *pinfo;
if (msm_fb_detect_client(LCDC_AUO_PANEL_NAME)) {
pr_err("%s: detect failed\n", __func__);
return 0;
}
ret = platform_driver_register(&this_driver);
if (ret) {
pr_err("%s: driver register failed, rc=%d\n", __func__, ret);
return ret;
}
pinfo = &auo_panel_data.panel_info;
pinfo->xres = 480;
pinfo->yres = 800;
pinfo->type = LCDC_PANEL;
pinfo->pdest = DISPLAY_1;
pinfo->wait_cycle = 0;
pinfo->bpp = 18;
pinfo->fb_num = 2;
pinfo->clk_rate = 25600000;
pinfo->bl_max = MAX_BACKLIGHT_LEVEL;
pinfo->bl_min = 1;
pinfo->lcdc.h_back_porch = 16-2; /* HBP-HLW */
pinfo->lcdc.h_front_porch = 16;
pinfo->lcdc.h_pulse_width = 2;
pinfo->lcdc.v_back_porch = 3-2; /* VBP-VLW */
pinfo->lcdc.v_front_porch = 28;
pinfo->lcdc.v_pulse_width = 2;
pinfo->lcdc.border_clr = 0;
pinfo->lcdc.underflow_clr = 0xff;
pinfo->lcdc.hsync_skew = 0;
ret = platform_device_register(&this_device);
if (ret) {
pr_err("%s: device register failed, rc=%d\n", __func__, ret);
goto fail_driver;
}
#ifdef CONFIG_SPI_QUP
ret = spi_register_driver(&lcdc_auo_spi_driver);
if (ret) {
pr_err("%s: spi register failed: rc=%d\n", __func__, ret);
goto fail_device;
}
pr_info("%s: SUCCESS (SPI)\n", __func__);
#else
pr_info("%s: SUCCESS (BitBang)\n", __func__);
#endif
return ret;
#ifdef CONFIG_SPI_QUP
fail_device:
platform_device_unregister(&this_device);
#endif
fail_driver:
platform_driver_unregister(&this_driver);
return ret;
}
module_init(lcdc_auo_panel_init);
static void __exit lcdc_auo_panel_exit(void)
{
pr_info("%s\n", __func__);
platform_device_unregister(&this_device);
platform_driver_unregister(&this_driver);
#ifdef CONFIG_SPI_QUP
spi_unregister_driver(&lcdc_auo_spi_driver);
#endif
}
module_exit(lcdc_auo_panel_exit);
| gpl-2.0 |
SOKP/kernel_motorola_msm8610 | drivers/staging/android/trace_persistent.c | 4397 | 5567 | /*
* Copyright (C) 2012 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/persistent_ram.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "../../../kernel/trace/trace.h"
struct persistent_trace_record {
unsigned long ip;
unsigned long parent_ip;
};
#define REC_SIZE sizeof(struct persistent_trace_record)
static struct persistent_ram_zone *persistent_trace;
static int persistent_trace_enabled;
static struct trace_array *persistent_trace_array;
static struct ftrace_ops trace_ops;
static int persistent_tracer_init(struct trace_array *tr)
{
persistent_trace_array = tr;
tr->cpu = get_cpu();
put_cpu();
tracing_start_cmdline_record();
persistent_trace_enabled = 0;
smp_wmb();
register_ftrace_function(&trace_ops);
smp_wmb();
persistent_trace_enabled = 1;
return 0;
}
static void persistent_trace_reset(struct trace_array *tr)
{
persistent_trace_enabled = 0;
smp_wmb();
unregister_ftrace_function(&trace_ops);
tracing_stop_cmdline_record();
}
static void persistent_trace_start(struct trace_array *tr)
{
tracing_reset_online_cpus(tr);
}
static void persistent_trace_call(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = persistent_trace_array;
struct trace_array_cpu *data;
long disabled;
struct persistent_trace_record rec;
unsigned long flags;
int cpu;
smp_rmb();
if (unlikely(!persistent_trace_enabled))
return;
if (unlikely(oops_in_progress))
return;
/*
* Need to use raw, since this must be called before the
* recursive protection is performed.
*/
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
rec.ip = ip;
rec.parent_ip = parent_ip;
rec.ip |= cpu;
persistent_ram_write(persistent_trace, &rec, sizeof(rec));
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
}
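/* Function addresses are at least 4-byte aligned, so the two low
 * bits of rec.ip (set via "rec.ip |= cpu" above) are free to carry
 * the CPU number; the seq_file reader recovers it as rec->ip & 3.
 */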
static struct ftrace_ops trace_ops __read_mostly = {
.func = persistent_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL,
};
static struct tracer persistent_tracer __read_mostly = {
.name = "persistent",
.init = persistent_tracer_init,
.reset = persistent_trace_reset,
.start = persistent_trace_start,
.wait_pipe = poll_wait_pipe,
};
struct persistent_trace_seq_data {
const void *ptr;
size_t off;
size_t size;
};
void *persistent_trace_seq_start(struct seq_file *s, loff_t *pos)
{
struct persistent_trace_seq_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
data->ptr = persistent_ram_old(persistent_trace);
data->size = persistent_ram_old_size(persistent_trace);
data->off = data->size % REC_SIZE;
data->off += *pos * REC_SIZE;
if (data->off + REC_SIZE > data->size) {
kfree(data);
return NULL;
}
return data;
}
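/* Starting at (size % REC_SIZE) skips the torn partial record that a
 * wrapped ring buffer leaves at its head, so every record handed to
 * show() sits on a REC_SIZE boundary.
 */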
void persistent_trace_seq_stop(struct seq_file *s, void *v)
{
kfree(v);
}
void *persistent_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct persistent_trace_seq_data *data = v;
data->off += REC_SIZE;
if (data->off + REC_SIZE > data->size)
return NULL;
(*pos)++;
return data;
}
int persistent_trace_seq_show(struct seq_file *s, void *v)
{
struct persistent_trace_seq_data *data = v;
struct persistent_trace_record *rec;
rec = (struct persistent_trace_record *)(data->ptr + data->off);
seq_printf(s, "%ld %08lx %08lx %pf <- %pF\n",
rec->ip & 3, rec->ip, rec->parent_ip,
(void *)rec->ip, (void *)rec->parent_ip);
return 0;
}
static const struct seq_operations persistent_trace_seq_ops = {
.start = persistent_trace_seq_start,
.next = persistent_trace_seq_next,
.stop = persistent_trace_seq_stop,
.show = persistent_trace_seq_show,
};
static int persistent_trace_old_open(struct inode *inode, struct file *file)
{
return seq_open(file, &persistent_trace_seq_ops);
}
static const struct file_operations persistent_trace_old_fops = {
.open = persistent_trace_old_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __devinit persistent_trace_probe(struct platform_device *pdev)
{
struct dentry *d;
int ret;
persistent_trace = persistent_ram_init_ringbuffer(&pdev->dev, false);
if (IS_ERR(persistent_trace)) {
pr_err("persistent_trace: failed to init ringbuffer: %ld\n",
PTR_ERR(persistent_trace));
return PTR_ERR(persistent_trace);
}
ret = register_tracer(&persistent_tracer);
if (ret)
pr_err("persistent_trace: failed to register tracer");
if (persistent_ram_old_size(persistent_trace) > 0) {
d = debugfs_create_file("persistent_trace", S_IRUGO, NULL,
NULL, &persistent_trace_old_fops);
if (IS_ERR_OR_NULL(d))
pr_err("persistent_trace: failed to create old file\n");
}
return 0;
}
static struct platform_driver persistent_trace_driver = {
.probe = persistent_trace_probe,
.driver = {
.name = "persistent_trace",
},
};
static int __init persistent_trace_init(void)
{
return platform_driver_register(&persistent_trace_driver);
}
core_initcall(persistent_trace_init);
| gpl-2.0 |
C-Aniruddh/PixCMKernel | arch/arm/mach-s5pc100/setup-spi.c | 4909 | 1547 | /* linux/arch/arm/mach-s5pc100/setup-spi.c
*
* Copyright (C) 2011 Samsung Electronics Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <plat/gpio-cfg.h>
#include <plat/s3c64xx-spi.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
.high_speed = 1,
.tx_st_done = 21,
};
int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
{
s3c_gpio_cfgall_range(S5PC100_GPB(0), 3,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
return 0;
}
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI1
struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
.high_speed = 1,
.tx_st_done = 21,
};
int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
{
s3c_gpio_cfgall_range(S5PC100_GPB(4), 3,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
return 0;
}
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI2
struct s3c64xx_spi_info s3c64xx_spi2_pdata __initdata = {
.fifo_lvl_mask = 0x7f,
.rx_lvl_offset = 13,
.high_speed = 1,
.tx_st_done = 21,
};
int s3c64xx_spi2_cfg_gpio(struct platform_device *dev)
{
s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(3));
s3c_gpio_setpull(S5PC100_GPG3(0), S3C_GPIO_PULL_UP);
s3c_gpio_cfgall_range(S5PC100_GPB(2), 2,
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
return 0;
}
#endif
| gpl-2.0 |
rukin5197/android_kernel_htc_msm7x30 | arch/frv/mm/kmap.c | 4909 | 1319 | /* kmap.c: ioremapping handlers
*
* Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from arch/m68k/mm/kmap.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>
#undef DEBUG
/*****************************************************************************/
/*
* Map some physical address range into the kernel address space.
*/
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
return (void __iomem *)physaddr;
}
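/* No mapping is built here: the physical address is handed back
 * unchanged, i.e. I/O regions are assumed to be directly
 * addressable on this architecture.
 */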
/*
* Unmap a ioremap()ed region again
*/
void iounmap(void volatile __iomem *addr)
{
}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
}
| gpl-2.0 |
jackyh/qt210_kernel | arch/microblaze/pci/xilinx_pci.c | 7981 | 4579 | /*
* PCI support for Xilinx plbv46_pci soft-core which can be used on
* Xilinx Virtex ML410 / ML510 boards.
*
* Copyright 2009 Roderick Colenbrander
* Copyright 2009 Secret Lab Technologies Ltd.
*
* The pci bridge fixup code was copied from ppc4xx_pci.c and was written
* by Benjamin Herrenschmidt.
* Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <asm/io.h>
#define XPLB_PCI_ADDR 0x10c
#define XPLB_PCI_DATA 0x110
#define XPLB_PCI_BUS 0x114
#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
static struct of_device_id xilinx_pci_match[] = {
{ .compatible = "xlnx,plbv46-pci-1.03.a", },
{}
};
/**
* xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
*/
static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
{
struct pci_controller *hose;
int i;
if (dev->devfn || dev->bus->self)
return;
hose = pci_bus_to_host(dev->bus);
if (!hose)
return;
if (!of_match_node(xilinx_pci_match, hose->dn))
return;
/* Hide the PCI host BARs from the kernel as their content doesn't
* fit well in the resource management
*/
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
dev->resource[i].flags = 0;
}
dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
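/*
 * DECLARE_PCI_FIXUP_HEADER with PCI_ANY_ID/PCI_ANY_ID runs the fixup for
 * every PCI device during the early header fixup pass; the devfn and
 * bus->self checks in xilinx_pci_fixup_bridge() above restrict the effect
 * to device 0 on the root bus, and the of_match_node() check restricts it
 * to the Xilinx bridge itself.
 */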
#ifdef DEBUG
/**
* xilinx_pci_exclude_device - Don't do config access for non-root bus
*
* This is a hack. Config access to any bus other than bus 0 does not
* currently work on the ML510 so we prevent it here.
*/
static int
xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
{
return (bus != 0);
}
/**
* xilinx_early_pci_scan - List pci config space for available devices
*
* List PCI devices in the very early boot phase.
*/
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
u32 bus = 0;
u32 val, dev, func, offset;
/* Currently we have only 2 devices connected - up to 32 devices are possible */
for (dev = 0; dev < 2; dev++) {
/* List only the first function number - up to 8 functions are possible */
for (func = 0; func < 1; func++) {
printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
/* read the first 64 standardized bytes */
/* Up to 192 bytes can be a list of capabilities */
for (offset = 0; offset < 64; offset += 4) {
early_read_config_dword(hose, bus,
PCI_DEVFN(dev, func), offset, &val);
if (offset == 0 && val == 0xFFFFFFFF) {
printk(KERN_CONT "\nABSENT");
break;
}
if (!(offset % 0x10))
printk(KERN_CONT "\n%04x: ", offset);
printk(KERN_CONT "%08x ", val);
}
printk(KERN_INFO "\n");
}
}
}
#else
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
}
#endif
/**
* xilinx_pci_init - Find and register a Xilinx PCI host bridge
*/
void __init xilinx_pci_init(void)
{
struct pci_controller *hose;
struct resource r;
void __iomem *pci_reg;
struct device_node *pci_node;
pci_node = of_find_matching_node(NULL, xilinx_pci_match);
if (!pci_node)
return;
if (of_address_to_resource(pci_node, 0, &r)) {
pr_err("xilinx-pci: cannot resolve base address\n");
return;
}
hose = pcibios_alloc_controller(pci_node);
if (!hose) {
pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
return;
}
/* Setup config space */
setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
r.start + XPLB_PCI_DATA,
INDIRECT_TYPE_SET_CFG_TYPE);
/* According to the xilinx plbv46_pci documentation the soft-core starts
* a self-init when the bus master enable bit is set. Without this bit
* set the pci bus can't be scanned.
*/
early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
/* Set the max latency timer to 255 */
early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
/* Set the max bus number to 255, and bus/subbus no's to 0 */
pci_reg = of_iomap(pci_node, 0);
out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
iounmap(pci_reg);
/* Register the host bridge with the linux kernel! */
pci_process_bridge_OF_ranges(hose, pci_node,
INDIRECT_TYPE_SET_CFG_TYPE);
pr_info("xilinx-pci: Registered PCI host bridge\n");
xilinx_early_pci_scan(hose);
}
| gpl-2.0 |
EnJens/kernel_tf201_stock | drivers/i2c/busses/i2c-parport.c | 7981 | 8701 | /* ------------------------------------------------------------------------ *
* i2c-parport.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
Based on older i2c-philips-par.c driver
Copyright (C) 1995-2000 Simon G. Vogl
With some changes from:
Frodo Looijaard <frodol@dds.nl>
Kyösti Mälkki <kmalkki@cc.hut.fi>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* ------------------------------------------------------------------------ */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/parport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c-smbus.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include "i2c-parport.h"
/* ----- Device list ------------------------------------------------------ */
struct i2c_par {
struct pardevice *pdev;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo_data;
struct i2c_smbus_alert_setup alert_data;
struct i2c_client *ara;
struct list_head node;
};
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(adapter_list_lock);
/* ----- Low-level parallel port access ----------------------------------- */
static void port_write_data(struct parport *p, unsigned char d)
{
parport_write_data(p, d);
}
static void port_write_control(struct parport *p, unsigned char d)
{
parport_write_control(p, d);
}
static unsigned char port_read_data(struct parport *p)
{
return parport_read_data(p);
}
static unsigned char port_read_status(struct parport *p)
{
return parport_read_status(p);
}
static unsigned char port_read_control(struct parport *p)
{
return parport_read_control(p);
}
static void (* const port_write[])(struct parport *, unsigned char) = {
port_write_data,
NULL,
port_write_control,
};
static unsigned char (* const port_read[])(struct parport *) = {
port_read_data,
port_read_status,
port_read_control,
};
/* ----- Unified line operation functions --------------------------------- */
static inline void line_set(struct parport *data, int state,
const struct lineop *op)
{
u8 oldval = port_read[op->port](data);
/* Touch only the bit(s) needed */
if ((op->inverted && !state) || (!op->inverted && state))
port_write[op->port](data, oldval | op->val);
else
port_write[op->port](data, oldval & ~op->val);
}
static inline int line_get(struct parport *data,
const struct lineop *op)
{
u8 oldval = port_read[op->port](data);
return ((op->inverted && (oldval & op->val) != op->val)
|| (!op->inverted && (oldval & op->val) == op->val));
}
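/*
 * Illustrative example (the field values are hypothetical): a struct
 * lineop describes which register a signal lives in ('port' indexes the
 * port_read[]/port_write[] arrays above: 0 = data, 1 = status,
 * 2 = control), the bit mask ('val') and the polarity ('inverted').
 * For an SDA line wired to data-register bit 1, non-inverted:
 *
 *	static const struct lineop sda = { .val = 0x02, .port = 0 };
 *
 *	line_set(p, 1, &sda);	// read-modify-write, ORs in bit 1
 *	line_get(p, &sda);	// returns 1 iff bit 1 is currently set
 */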
/* ----- I2C algorithm call-back functions and structures ----------------- */
static void parport_setscl(void *data, int state)
{
line_set((struct parport *) data, state, &adapter_parm[type].setscl);
}
static void parport_setsda(void *data, int state)
{
line_set((struct parport *) data, state, &adapter_parm[type].setsda);
}
static int parport_getscl(void *data)
{
return line_get((struct parport *) data, &adapter_parm[type].getscl);
}
static int parport_getsda(void *data)
{
return line_get((struct parport *) data, &adapter_parm[type].getsda);
}
/* Encapsulate the functions above in the correct structure.
Note that this is only a template, from which the real structures are
copied. The attaching code will set getscl to NULL for adapters that
cannot read SCL back, and will also make the data field point to
the parallel port structure. */
static const struct i2c_algo_bit_data parport_algo_data = {
.setsda = parport_setsda,
.setscl = parport_setscl,
.getsda = parport_getsda,
.getscl = parport_getscl,
.udelay = 10, /* ~50 kbps */
.timeout = HZ,
};
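/*
 * The "~50 kbps" figure follows from i2c-algo-bit's convention that
 * .udelay is the half-period of SCL in microseconds: 10 us half-cycles
 * give a 20 us clock period, i.e. roughly a 50 kHz bit clock. The attach
 * code below raises udelay to 50 (~10 kbps) when SCL cannot be sensed.
 */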
/* ----- I2c and parallel port call-back functions and structures --------- */
void i2c_parport_irq(void *data)
{
struct i2c_par *adapter = data;
struct i2c_client *ara = adapter->ara;
if (ara) {
dev_dbg(&ara->dev, "SMBus alert received\n");
i2c_handle_smbus_alert(ara);
} else
dev_dbg(&adapter->adapter.dev,
"SMBus alert received but no ARA client!\n");
}
static void i2c_parport_attach(struct parport *port)
{
struct i2c_par *adapter;
adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL);
if (adapter == NULL) {
printk(KERN_ERR "i2c-parport: Failed to kzalloc\n");
return;
}
pr_debug("i2c-parport: attaching to %s\n", port->name);
parport_disable_irq(port);
adapter->pdev = parport_register_device(port, "i2c-parport",
NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
if (!adapter->pdev) {
printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
goto err_free;
}
/* Fill the rest of the structure */
adapter->adapter.owner = THIS_MODULE;
adapter->adapter.class = I2C_CLASS_HWMON;
strlcpy(adapter->adapter.name, "Parallel port adapter",
sizeof(adapter->adapter.name));
adapter->algo_data = parport_algo_data;
/* Slow down if we can't sense SCL */
if (!adapter_parm[type].getscl.val) {
adapter->algo_data.getscl = NULL;
adapter->algo_data.udelay = 50; /* ~10 kbps */
}
adapter->algo_data.data = port;
adapter->adapter.algo_data = &adapter->algo_data;
adapter->adapter.dev.parent = port->physport->dev;
if (parport_claim_or_block(adapter->pdev) < 0) {
printk(KERN_ERR "i2c-parport: Could not claim parallel port\n");
goto err_unregister;
}
/* Reset hardware to a sane state (SCL and SDA high) */
parport_setsda(port, 1);
parport_setscl(port, 1);
/* Other init if needed (power on...) */
if (adapter_parm[type].init.val) {
line_set(port, 1, &adapter_parm[type].init);
/* Give powered devices some time to settle */
msleep(100);
}
if (i2c_bit_add_bus(&adapter->adapter) < 0) {
printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
goto err_unregister;
}
/* Setup SMBus alert if supported */
if (adapter_parm[type].smbus_alert) {
adapter->alert_data.alert_edge_triggered = 1;
adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
&adapter->alert_data);
if (adapter->ara)
parport_enable_irq(port);
else
printk(KERN_WARNING "i2c-parport: Failed to register "
"ARA client\n");
}
/* Add the new adapter to the list */
mutex_lock(&adapter_list_lock);
list_add_tail(&adapter->node, &adapter_list);
mutex_unlock(&adapter_list_lock);
return;
err_unregister:
parport_release(adapter->pdev);
parport_unregister_device(adapter->pdev);
err_free:
kfree(adapter);
}
static void i2c_parport_detach(struct parport *port)
{
struct i2c_par *adapter, *_n;
/* Walk the list */
mutex_lock(&adapter_list_lock);
list_for_each_entry_safe(adapter, _n, &adapter_list, node) {
if (adapter->pdev->port == port) {
if (adapter->ara) {
parport_disable_irq(port);
i2c_unregister_device(adapter->ara);
}
i2c_del_adapter(&adapter->adapter);
/* Un-init if needed (power off...) */
if (adapter_parm[type].init.val)
line_set(port, 0, &adapter_parm[type].init);
parport_release(adapter->pdev);
parport_unregister_device(adapter->pdev);
list_del(&adapter->node);
kfree(adapter);
}
}
mutex_unlock(&adapter_list_lock);
}
static struct parport_driver i2c_parport_driver = {
.name = "i2c-parport",
.attach = i2c_parport_attach,
.detach = i2c_parport_detach,
};
/* ----- Module loading, unloading and information ------------------------ */
static int __init i2c_parport_init(void)
{
if (type < 0) {
printk(KERN_WARNING "i2c-parport: adapter type unspecified\n");
return -ENODEV;
}
if (type >= ARRAY_SIZE(adapter_parm)) {
printk(KERN_WARNING "i2c-parport: invalid type (%d)\n", type);
return -ENODEV;
}
return parport_register_driver(&i2c_parport_driver);
}
static void __exit i2c_parport_exit(void)
{
parport_unregister_driver(&i2c_parport_driver);
}
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("I2C bus over parallel port");
MODULE_LICENSE("GPL");
module_init(i2c_parport_init);
module_exit(i2c_parport_exit);
| gpl-2.0 |
friedrich420/N910G-AEL-Kernel-Lollipop-Sources | kernel/power/suspend_time.c | 8749 | 2724 | /*
* debugfs file to track time spent in suspend
*
* Copyright (c) 2011, Google, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
static struct timespec suspend_time_before;
static unsigned int time_in_suspend_bins[32];
#ifdef CONFIG_DEBUG_FS
static int suspend_time_debug_show(struct seq_file *s, void *data)
{
int bin;
seq_printf(s, "time (secs) count\n");
seq_printf(s, "------------------\n");
for (bin = 0; bin < 32; bin++) {
if (time_in_suspend_bins[bin] == 0)
continue;
seq_printf(s, "%4d - %4d %4u\n",
bin ? 1 << (bin - 1) : 0, 1 << bin,
time_in_suspend_bins[bin]);
}
return 0;
}
static int suspend_time_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, suspend_time_debug_show, NULL);
}
static const struct file_operations suspend_time_debug_fops = {
.open = suspend_time_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init suspend_time_debug_init(void)
{
struct dentry *d;
d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
&suspend_time_debug_fops);
if (!d) {
pr_err("Failed to create suspend_time debug file\n");
return -ENOMEM;
}
return 0;
}
late_initcall(suspend_time_debug_init);
#endif
static int suspend_time_syscore_suspend(void)
{
read_persistent_clock(&suspend_time_before);
return 0;
}
static void suspend_time_syscore_resume(void)
{
struct timespec after;
read_persistent_clock(&after);
after = timespec_sub(after, suspend_time_before);
time_in_suspend_bins[fls(after.tv_sec)]++;
pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
after.tv_nsec / NSEC_PER_MSEC);
}
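/*
 * Worked example: the histogram bin is fls(tv_sec), so a 10 second
 * suspend (fls(10) = 4, since 10 = 0b1010) increments
 * time_in_suspend_bins[4], which the debugfs file prints as the
 * "8 - 16" row; sub-second suspends (fls(0) = 0) land in "0 - 1".
 */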
static struct syscore_ops suspend_time_syscore_ops = {
.suspend = suspend_time_syscore_suspend,
.resume = suspend_time_syscore_resume,
};
static int suspend_time_syscore_init(void)
{
register_syscore_ops(&suspend_time_syscore_ops);
return 0;
}
static void suspend_time_syscore_exit(void)
{
unregister_syscore_ops(&suspend_time_syscore_ops);
}
module_init(suspend_time_syscore_init);
module_exit(suspend_time_syscore_exit);
| gpl-2.0 |
lenovo-yt2-dev/android_kernel_lenovo_baytrail | drivers/mtd/maps/l440gx.c | 9773 | 4077 | /*
* BIOS Flash chip on Intel 440GX board.
*
* Bug: this currently does not work under LinuxBIOS.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#define PIIXE_IOBASE_RESOURCE 11
#define WINDOW_ADDR 0xfff00000
#define WINDOW_SIZE 0x00100000
#define BUSWIDTH 1
static u32 iobase;
#define IOBASE iobase
#define TRIBUF_PORT (IOBASE+0x37)
#define VPP_PORT (IOBASE+0x28)
static struct mtd_info *mymtd;
/* Is this really the vpp port? */
static DEFINE_SPINLOCK(l440gx_vpp_lock);
static int l440gx_vpp_refcnt;
static void l440gx_set_vpp(struct map_info *map, int vpp)
{
unsigned long flags;
spin_lock_irqsave(&l440gx_vpp_lock, flags);
if (vpp) {
if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
outl(inl(VPP_PORT) | 1, VPP_PORT);
} else {
if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
outl(inl(VPP_PORT) & ~1, VPP_PORT);
}
spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
}
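/*
 * The refcount makes nested enables safe: if two callers each request
 * VPP with l440gx_set_vpp(map, 1), programming voltage stays applied
 * until both have called l440gx_set_vpp(map, 0); only the 0->1 and
 * 1->0 transitions actually touch the port.
 */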
static struct map_info l440gx_map = {
.name = "L440GX BIOS",
.size = WINDOW_SIZE,
.bankwidth = BUSWIDTH,
.phys = WINDOW_ADDR,
#if 0
/* FIXME verify that this is the
* appropriate code for vpp enable/disable
*/
.set_vpp = l440gx_set_vpp
#endif
};
static int __init init_l440gx(void)
{
struct pci_dev *dev, *pm_dev;
struct resource *pm_iobase;
__u16 word;
dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
pci_dev_put(dev);
if (!dev || !pm_dev) {
printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
pci_dev_put(pm_dev);
return -ENODEV;
}
l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
pci_dev_put(pm_dev);
return -ENOMEM;
}
simple_map_init(&l440gx_map);
printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
/* Setup the pm iobase resource
* This code should move into some kind of generic bridge
* driver but for the moment I'm content with getting the
* allocation correct.
*/
pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
if (!(pm_iobase->flags & IORESOURCE_IO)) {
pm_iobase->name = "pm iobase";
pm_iobase->start = 0;
pm_iobase->end = 63;
pm_iobase->flags = IORESOURCE_IO;
/* Put the current value in the resource */
pci_read_config_dword(pm_dev, 0x40, &iobase);
iobase &= ~1;
pm_iobase->start += iobase & ~1;
pm_iobase->end += iobase & ~1;
pci_dev_put(pm_dev);
/* Allocate the resource region */
if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
pci_dev_put(dev);
pci_dev_put(pm_dev);
printk(KERN_WARNING "Could not allocate pm iobase resource\n");
iounmap(l440gx_map.virt);
return -ENXIO;
}
}
/* Set the iobase */
iobase = pm_iobase->start;
pci_write_config_dword(pm_dev, 0x40, iobase | 1);
/* Set XBCS# */
pci_read_config_word(dev, 0x4e, &word);
word |= 0x4;
pci_write_config_word(dev, 0x4e, word);
/* Supply write voltage to the chip */
l440gx_set_vpp(&l440gx_map, 1);
/* Enable the gate on the WE line */
outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);
printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
mymtd = do_map_probe("jedec_probe", &l440gx_map);
if (!mymtd) {
printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
mymtd = do_map_probe("map_rom", &l440gx_map);
}
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, NULL, 0);
return 0;
}
iounmap(l440gx_map.virt);
return -ENXIO;
}
static void __exit cleanup_l440gx(void)
{
mtd_device_unregister(mymtd);
map_destroy(mymtd);
iounmap(l440gx_map.virt);
}
module_init(init_l440gx);
module_exit(cleanup_l440gx);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
| gpl-2.0 |
STS-Dev-Team/kernel_omap4_xt910s | drivers/ide/ide-disk.c | 10285 | 19740 | /*
* Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
* Copyright (C) 1998-2002 Linux ATA Development
* Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat
* Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
*/
/*
* Mostly written by Mark Lord <mlord@pobox.com>
* and Gadi Oxman <gadio@netvision.net.il>
* and Andre Hedrick <andre@linux-ide.org>
*
* This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/leds.h>
#include <linux/ide.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include "ide-disk.h"
static const u8 ide_rw_cmds[] = {
ATA_CMD_READ_MULTI,
ATA_CMD_WRITE_MULTI,
ATA_CMD_READ_MULTI_EXT,
ATA_CMD_WRITE_MULTI_EXT,
ATA_CMD_PIO_READ,
ATA_CMD_PIO_WRITE,
ATA_CMD_PIO_READ_EXT,
ATA_CMD_PIO_WRITE_EXT,
ATA_CMD_READ,
ATA_CMD_WRITE,
ATA_CMD_READ_EXT,
ATA_CMD_WRITE_EXT,
};
static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
{
u8 index, lba48, write;
lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
if (dma) {
cmd->protocol = ATA_PROT_DMA;
index = 8;
} else {
cmd->protocol = ATA_PROT_PIO;
if (drive->mult_count) {
cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
index = 0;
} else
index = 4;
}
cmd->tf.command = ide_rw_cmds[index + lba48 + write];
}
/*
* __ide_do_rw_disk() issues READ and WRITE commands to a disk,
* using LBA if supported, or CHS otherwise, to address sectors.
*/
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
sector_t block)
{
ide_hwif_t *hwif = drive->hwif;
u16 nsectors = (u16)blk_rq_sectors(rq);
u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
struct ide_cmd cmd;
struct ide_taskfile *tf = &cmd.tf;
ide_startstop_t rc;
if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
if (block + blk_rq_sectors(rq) > 1ULL << 28)
dma = 0;
else
lba48 = 0;
}
memset(&cmd, 0, sizeof(cmd));
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
if (drive->dev_flags & IDE_DFLAG_LBA) {
if (lba48) {
pr_debug("%s: LBA=0x%012llx\n", drive->name,
(unsigned long long)block);
tf->nsect = nsectors & 0xff;
tf->lbal = (u8) block;
tf->lbam = (u8)(block >> 8);
tf->lbah = (u8)(block >> 16);
tf->device = ATA_LBA;
tf = &cmd.hob;
tf->nsect = (nsectors >> 8) & 0xff;
tf->lbal = (u8)(block >> 24);
if (sizeof(block) != 4) {
tf->lbam = (u8)((u64)block >> 32);
tf->lbah = (u8)((u64)block >> 40);
}
cmd.valid.out.hob = IDE_VALID_OUT_HOB;
cmd.valid.in.hob = IDE_VALID_IN_HOB;
cmd.tf_flags |= IDE_TFLAG_LBA48;
} else {
tf->nsect = nsectors & 0xff;
tf->lbal = block;
tf->lbam = block >>= 8;
tf->lbah = block >>= 8;
tf->device = ((block >> 8) & 0xf) | ATA_LBA;
}
} else {
unsigned int sect, head, cyl, track;
track = (int)block / drive->sect;
sect = (int)block % drive->sect + 1;
head = track % drive->head;
cyl = track / drive->head;
pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
tf->nsect = nsectors & 0xff;
tf->lbal = sect;
tf->lbam = cyl;
tf->lbah = cyl >> 8;
tf->device = head;
}
cmd.tf_flags |= IDE_TFLAG_FS;
if (rq_data_dir(rq))
cmd.tf_flags |= IDE_TFLAG_WRITE;
ide_tf_set_cmd(drive, &cmd, dma);
cmd.rq = rq;
if (dma == 0) {
ide_init_sg_cmd(&cmd, nsectors << 9);
ide_map_sg(drive, &cmd);
}
rc = do_rw_taskfile(drive, &cmd);
if (rc == ide_stopped && dma) {
/* fallback to PIO */
cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
ide_tf_set_cmd(drive, &cmd, 0);
ide_init_sg_cmd(&cmd, nsectors << 9);
rc = do_rw_taskfile(drive, &cmd);
}
return rc;
}
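/*
 * Worked CHS example (hypothetical geometry): with drive->sect = 63 and
 * drive->head = 16, block 10000 gives track = 10000 / 63 = 158,
 * sect = 10000 % 63 + 1 = 47, head = 158 % 16 = 14 and
 * cyl = 158 / 16 = 9 - i.e. the request is issued as CHS 9/14/47.
 */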
/*
* 268435455 == 137439 MB or 28bit limit
* 320173056 == 163929 MB or 48bit addressing
* 1073741822 == 549756 MB or 48bit addressing fake drive
*/
static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
sector_t block)
{
ide_hwif_t *hwif = drive->hwif;
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
BUG_ON(rq->cmd_type != REQ_TYPE_FS);
ledtrig_ide_activity();
pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
(unsigned long long)block, blk_rq_sectors(rq),
(unsigned long)rq->buffer);
if (hwif->rw_disk)
hwif->rw_disk(drive, rq);
return __ide_do_rw_disk(drive, rq, block);
}
/*
* Queries for true maximum capacity of the drive.
* Returns maximum LBA address (> 0) of the drive, 0 if failed.
*/
static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
{
struct ide_cmd cmd;
struct ide_taskfile *tf = &cmd.tf;
u64 addr = 0;
memset(&cmd, 0, sizeof(cmd));
if (lba48)
tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
else
tf->command = ATA_CMD_READ_NATIVE_MAX;
tf->device = ATA_LBA;
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
if (lba48) {
cmd.valid.out.hob = IDE_VALID_OUT_HOB;
cmd.valid.in.hob = IDE_VALID_IN_HOB;
cmd.tf_flags = IDE_TFLAG_LBA48;
}
ide_no_data_taskfile(drive, &cmd);
/* if OK, compute maximum address value */
if (!(tf->status & ATA_ERR))
addr = ide_get_lba_addr(&cmd, lba48) + 1;
return addr;
}
/*
* Sets maximum virtual LBA address of the drive.
* Returns new maximum virtual LBA address (> 0) or 0 on failure.
*/
static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
{
struct ide_cmd cmd;
struct ide_taskfile *tf = &cmd.tf;
u64 addr_set = 0;
addr_req--;
memset(&cmd, 0, sizeof(cmd));
tf->lbal = (addr_req >> 0) & 0xff;
tf->lbam = (addr_req >>= 8) & 0xff;
tf->lbah = (addr_req >>= 8) & 0xff;
if (lba48) {
cmd.hob.lbal = (addr_req >>= 8) & 0xff;
cmd.hob.lbam = (addr_req >>= 8) & 0xff;
cmd.hob.lbah = (addr_req >>= 8) & 0xff;
tf->command = ATA_CMD_SET_MAX_EXT;
} else {
tf->device = (addr_req >>= 8) & 0x0f;
tf->command = ATA_CMD_SET_MAX;
}
tf->device |= ATA_LBA;
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
if (lba48) {
cmd.valid.out.hob = IDE_VALID_OUT_HOB;
cmd.valid.in.hob = IDE_VALID_IN_HOB;
cmd.tf_flags = IDE_TFLAG_LBA48;
}
ide_no_data_taskfile(drive, &cmd);
/* if OK, compute maximum address value */
if (!(tf->status & ATA_ERR))
addr_set = ide_get_lba_addr(&cmd, lba48) + 1;
return addr_set;
}
static unsigned long long sectors_to_MB(unsigned long long n)
{
n <<= 9; /* make it bytes */
do_div(n, 1000000); /* make it MB */
return n;
}
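/*
 * Note these are decimal megabytes: e.g. 1 << 28 sectors is
 * 137438953472 bytes, which do_div() reduces to 137438 MB.
 */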
/*
* Some disks report total number of sectors instead of
* maximum sector address. We list them here.
*/
static const struct drive_list_entry hpa_list[] = {
{ "ST340823A", NULL },
{ "ST320413A", NULL },
{ "ST310211A", NULL },
{ NULL, NULL }
};
static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
{
u64 capacity, set_max;
capacity = drive->capacity64;
set_max = idedisk_read_native_max_address(drive, lba48);
if (ide_in_drive_list(drive->id, hpa_list)) {
/*
* Since the list is inclusive with respect to firmware revisions, do this
* extra check and apply the workaround only when needed.
*/
if (set_max == capacity + 1)
set_max--;
}
return set_max;
}
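/*
 * The off-by-one above, spelled out: idedisk_read_native_max_address()
 * returns max LBA + 1, i.e. the native capacity. A drive from hpa_list
 * that wrongly reports its sector count instead of its max address thus
 * yields capacity + 1, which the decrement corrects.
 */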
static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
{
set_max = idedisk_set_max_address(drive, set_max, lba48);
if (set_max)
drive->capacity64 = set_max;
return set_max;
}
static void idedisk_check_hpa(ide_drive_t *drive)
{
u64 capacity, set_max;
int lba48 = ata_id_lba48_enabled(drive->id);
capacity = drive->capacity64;
set_max = ide_disk_hpa_get_native_capacity(drive, lba48);
if (set_max <= capacity)
return;
drive->probed_capacity = set_max;
printk(KERN_INFO "%s: Host Protected Area detected.\n"
"\tcurrent capacity is %llu sectors (%llu MB)\n"
"\tnative capacity is %llu sectors (%llu MB)\n",
drive->name,
capacity, sectors_to_MB(capacity),
set_max, sectors_to_MB(set_max));
if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
return;
set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
if (set_max)
printk(KERN_INFO "%s: Host Protected Area disabled.\n",
drive->name);
}
static int ide_disk_get_capacity(ide_drive_t *drive)
{
u16 *id = drive->id;
int lba;
if (ata_id_lba48_enabled(id)) {
/* drive speaks 48-bit LBA */
lba = 1;
drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
} else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
/* drive speaks 28-bit LBA */
lba = 1;
drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
} else {
/* drive speaks boring old 28-bit CHS */
lba = 0;
drive->capacity64 = drive->cyl * drive->head * drive->sect;
}
drive->probed_capacity = drive->capacity64;
if (lba) {
drive->dev_flags |= IDE_DFLAG_LBA;
/*
* If this device supports the Host Protected Area feature set,
* then we may need to change our opinion about its capacity.
*/
if (ata_id_hpa_enabled(id))
idedisk_check_hpa(drive);
}
/* limit drive capacity to 137GB if LBA48 cannot be used */
if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
drive->capacity64 > 1ULL << 28) {
printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
"%llu sectors (%llu MB)\n",
drive->name, (unsigned long long)drive->capacity64,
sectors_to_MB(drive->capacity64));
drive->probed_capacity = drive->capacity64 = 1ULL << 28;
}
if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
(drive->dev_flags & IDE_DFLAG_LBA48)) {
if (drive->capacity64 > 1ULL << 28) {
printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
" will be used for accessing sectors "
"> %u\n", drive->name, 1 << 28);
} else
drive->dev_flags &= ~IDE_DFLAG_LBA48;
}
return 0;
}
static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
u16 *id = drive->id;
int lba48 = ata_id_lba48_enabled(id);
if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
ata_id_hpa_enabled(id) == 0)
return;
/*
* according to the spec the SET MAX ADDRESS command shall be
* immediately preceded by a READ NATIVE MAX ADDRESS command
*/
if (!ide_disk_hpa_get_native_capacity(drive, lba48))
return;
if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
struct ide_cmd *cmd;
if (!(rq->cmd_flags & REQ_FLUSH))
return BLKPREP_OK;
if (rq->special) {
cmd = rq->special;
memset(cmd, 0, sizeof(*cmd));
} else {
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
}
/* FIXME: map struct ide_taskfile on rq->cmd[] */
BUG_ON(cmd == NULL);
if (ata_id_flush_ext_enabled(drive->id) &&
(drive->capacity64 >= (1UL << 28)))
cmd->tf.command = ATA_CMD_FLUSH_EXT;
else
cmd->tf.command = ATA_CMD_FLUSH;
cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd->tf_flags = IDE_TFLAG_DYN;
cmd->protocol = ATA_PROT_NODATA;
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
rq->special = cmd;
cmd->rq = rq;
return BLKPREP_OK;
}
ide_devset_get(multcount, mult_count);
/*
* This is tightly woven into the driver->do_special path and must not be touched.
* DON'T do it again until a total personality rewrite is committed.
*/
static int set_multcount(ide_drive_t *drive, int arg)
{
struct request *rq;
int error;
if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
return -EINVAL;
if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
error = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
return (drive->mult_count == arg) ? 0 : -EIO;
}
ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
static int set_nowerr(ide_drive_t *drive, int arg)
{
if (arg < 0 || arg > 1)
return -EINVAL;
if (arg)
drive->dev_flags |= IDE_DFLAG_NOWERR;
else
drive->dev_flags &= ~IDE_DFLAG_NOWERR;
drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
return 0;
}
static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
{
struct ide_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.tf.feature = feature;
cmd.tf.nsect = nsect;
cmd.tf.command = ATA_CMD_SET_FEATURES;
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
return ide_no_data_taskfile(drive, &cmd);
}
static void update_flush(ide_drive_t *drive)
{
u16 *id = drive->id;
unsigned flush = 0;
if (drive->dev_flags & IDE_DFLAG_WCACHE) {
unsigned long long capacity;
int barrier;
/*
* We must avoid issuing commands a drive does not
* understand or we may crash it. We check flush cache
* is supported. We also check we have the LBA48 flush
* cache if the drive capacity is too large. By this
* time we have trimmed the drive capacity if LBA48 is
* not available so we don't need to recheck that.
*/
capacity = ide_gd_capacity(drive);
barrier = ata_id_flush_enabled(id) &&
(drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
capacity <= (1ULL << 28) ||
ata_id_flush_ext_enabled(id));
printk(KERN_INFO "%s: cache flushes %ssupported\n",
drive->name, barrier ? "" : "not ");
if (barrier) {
flush = REQ_FLUSH;
blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
}
}
blk_queue_flush(drive->queue, flush);
}
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
static int set_wcache(ide_drive_t *drive, int arg)
{
int err = 1;
if (arg < 0 || arg > 1)
return -EINVAL;
if (ata_id_flush_enabled(drive->id)) {
err = ide_do_setfeature(drive,
arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
if (err == 0) {
if (arg)
drive->dev_flags |= IDE_DFLAG_WCACHE;
else
drive->dev_flags &= ~IDE_DFLAG_WCACHE;
}
}
update_flush(drive);
return err;
}
static int do_idedisk_flushcache(ide_drive_t *drive)
{
struct ide_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
if (ata_id_flush_ext_enabled(drive->id))
cmd.tf.command = ATA_CMD_FLUSH_EXT;
else
cmd.tf.command = ATA_CMD_FLUSH;
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
return ide_no_data_taskfile(drive, &cmd);
}
ide_devset_get(acoustic, acoustic);
static int set_acoustic(ide_drive_t *drive, int arg)
{
if (arg < 0 || arg > 254)
return -EINVAL;
ide_do_setfeature(drive,
arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);
drive->acoustic = arg;
return 0;
}
ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);
/*
* drive->addressing:
* 0: 28-bit
* 1: 48-bit
* 2: 48-bit capable doing 28-bit
*/
static int set_addressing(ide_drive_t *drive, int arg)
{
if (arg < 0 || arg > 2)
return -EINVAL;
if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
ata_id_lba48_enabled(drive->id) == 0))
return -EIO;
if (arg == 2)
arg = 0;
if (arg)
drive->dev_flags |= IDE_DFLAG_LBA48;
else
drive->dev_flags &= ~IDE_DFLAG_LBA48;
return 0;
}
ide_ext_devset_rw(acoustic, acoustic);
ide_ext_devset_rw(address, addressing);
ide_ext_devset_rw(multcount, multcount);
ide_ext_devset_rw(wcache, wcache);
ide_ext_devset_rw_sync(nowerr, nowerr);
static int ide_disk_check(ide_drive_t *drive, const char *s)
{
return 1;
}
static void ide_disk_setup(ide_drive_t *drive)
{
struct ide_disk_obj *idkp = drive->driver_data;
struct request_queue *q = drive->queue;
ide_hwif_t *hwif = drive->hwif;
u16 *id = drive->id;
char *m = (char *)&id[ATA_ID_PROD];
unsigned long long capacity;
ide_proc_register_driver(drive, idkp->driver);
if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
return;
if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
/*
* Removable disks (eg. SYQUEST); ignore 'WD' drives
*/
if (m[0] != 'W' || m[1] != 'D')
drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
}
(void)set_addressing(drive, 1);
if (drive->dev_flags & IDE_DFLAG_LBA48) {
int max_s = 2048;
if (max_s > hwif->rqsize)
max_s = hwif->rqsize;
blk_queue_max_hw_sectors(q, max_s);
}
printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
queue_max_sectors(q) / 2);
if (ata_id_is_ssd(id))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
/* calculate drive capacity, and select LBA if possible */
ide_disk_get_capacity(drive);
/*
* if possible, give fdisk access to more of the drive,
* by correcting bios_cyls:
*/
capacity = ide_gd_capacity(drive);
if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
if (ata_id_lba48_enabled(drive->id)) {
/* compatibility */
drive->bios_sect = 63;
drive->bios_head = 255;
}
if (drive->bios_sect && drive->bios_head) {
unsigned int cap0 = capacity; /* truncate to 32 bits */
unsigned int cylsz, cyl;
if (cap0 != capacity)
drive->bios_cyl = 65535;
else {
cylsz = drive->bios_sect * drive->bios_head;
cyl = cap0 / cylsz;
if (cyl > 65535)
cyl = 65535;
if (cyl > drive->bios_cyl)
drive->bios_cyl = cyl;
}
}
}
printk(KERN_INFO "%s: %llu sectors (%llu MB)",
drive->name, capacity, sectors_to_MB(capacity));
/* Only print cache size when it was specified */
if (id[ATA_ID_BUF_SIZE])
printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);
printk(KERN_CONT ", CHS=%d/%d/%d\n",
drive->bios_cyl, drive->bios_head, drive->bios_sect);
/* write cache enabled? */
if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
drive->dev_flags |= IDE_DFLAG_WCACHE;
set_wcache(drive, 1);
if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
(drive->head == 0 || drive->head > 16)) {
printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
drive->name, drive->head);
drive->dev_flags &= ~IDE_DFLAG_ATTACH;
} else
drive->dev_flags |= IDE_DFLAG_ATTACH;
}
static void ide_disk_flush(ide_drive_t *drive)
{
if (ata_id_flush_enabled(drive->id) == 0 ||
(drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
return;
if (do_idedisk_flushcache(drive))
printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}
static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
{
return 0;
}
static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
int on)
{
struct ide_cmd cmd;
int ret;
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
return 0;
memset(&cmd, 0, sizeof(cmd));
cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
ret = ide_no_data_taskfile(drive, &cmd);
if (ret)
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
return ret;
}
const struct ide_disk_ops ide_ata_disk_ops = {
.check = ide_disk_check,
.unlock_native_capacity = ide_disk_unlock_native_capacity,
.get_capacity = ide_disk_get_capacity,
.setup = ide_disk_setup,
.flush = ide_disk_flush,
.init_media = ide_disk_init_media,
.set_doorlock = ide_disk_set_doorlock,
.do_request = ide_do_rw_disk,
.ioctl = ide_disk_ioctl,
};
| gpl-2.0 |
Split-Screen/android_kernel_asus_grouper | drivers/clocksource/mmio.c | 10797 | 1904 | /*
* Generic MMIO clocksource support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
struct clocksource_mmio {
void __iomem *reg;
struct clocksource clksrc;
};
static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
{
return container_of(c, struct clocksource_mmio, clksrc);
}
cycle_t clocksource_mmio_readl_up(struct clocksource *c)
{
return readl_relaxed(to_mmio_clksrc(c)->reg);
}
cycle_t clocksource_mmio_readl_down(struct clocksource *c)
{
return ~readl_relaxed(to_mmio_clksrc(c)->reg);
}
cycle_t clocksource_mmio_readw_up(struct clocksource *c)
{
return readw_relaxed(to_mmio_clksrc(c)->reg);
}
cycle_t clocksource_mmio_readw_down(struct clocksource *c)
{
return ~(unsigned)readw_relaxed(to_mmio_clksrc(c)->reg);
}
/**
* clocksource_mmio_init - Initialize a simple mmio based clocksource
* @base: Virtual address of the clock readout register
* @name: Name of the clocksource
* @hz: Frequency of the clocksource in Hz
* @rating: Rating of the clocksource
* @bits: Number of valid bits
* @read: One of clocksource_mmio_read*() above
*/
int __init clocksource_mmio_init(void __iomem *base, const char *name,
unsigned long hz, int rating, unsigned bits,
cycle_t (*read)(struct clocksource *))
{
struct clocksource_mmio *cs;
if (bits > 32 || bits < 16)
return -EINVAL;
cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->reg = base;
cs->clksrc.name = name;
cs->clksrc.rating = rating;
cs->clksrc.read = read;
cs->clksrc.mask = CLOCKSOURCE_MASK(bits);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
return clocksource_register_hz(&cs->clksrc, hz);
}
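/*
 * Illustrative usage (names and numbers are hypothetical): a platform
 * with a free-running 32-bit up-counter clocked at 24 MHz might register
 * it from its timer init code with
 *
 *	clocksource_mmio_init(timer_base + 0x10, "plat-timer",
 *			      24000000, 200, 32,
 *			      clocksource_mmio_readl_up);
 *
 * Down-counting or 16-bit timers pass one of the other accessors above
 * instead.
 */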
| gpl-2.0 |
dchadic/linux-cmps107 | drivers/staging/fwserial/fwserial.c | 46 | 78719 | /*
* FireWire Serial driver
*
* Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
#include "fwserial.h"
#define be32_to_u64(hi, lo) ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo))
#define LINUX_VENDOR_ID 0xd00d1eU /* same id used in card root directory */
#define FWSERIAL_VERSION 0x00e81cU /* must be unique within LINUX_VENDOR_ID */
/* configurable options */
static int num_ttys = 4; /* # of std ttys to create per fw_card */
/* - doubles as loopback port index */
static bool auto_connect = true; /* try to VIRT_CABLE to every peer */
static bool create_loop_dev = true; /* create a loopback device for each card */
module_param_named(ttys, num_ttys, int, S_IRUGO | S_IWUSR);
module_param_named(auto, auto_connect, bool, S_IRUGO | S_IWUSR);
module_param_named(loop, create_loop_dev, bool, S_IRUGO | S_IWUSR);
/*
* Threshold below which the tty is woken for writing
* - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because
* even if the writer is woken, n_tty_poll() won't set POLLOUT until
* our fifo is below this level
*/
#define WAKEUP_CHARS 256
/**
* fwserial_list: list of every fw_serial created for each fw_card
* See discussion in fwserial_probe.
*/
static LIST_HEAD(fwserial_list);
static DEFINE_MUTEX(fwserial_list_mutex);
/**
* port_table: array of tty ports allocated to each fw_card
*
* tty ports are allocated during probe when an fw_serial is first
* created for a given fw_card. Ports are allocated in a contiguous block,
* each block consisting of 'num_ports' ports.
*/
static struct fwtty_port *port_table[MAX_TOTAL_PORTS];
static DEFINE_MUTEX(port_table_lock);
static bool port_table_corrupt;
#define FWTTY_INVALID_INDEX MAX_TOTAL_PORTS
#define loop_idx(port) (((port)->index) / num_ports)
#define table_idx(loop) ((loop) * num_ports + num_ttys)
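/*
 * Example with the default module parameters (num_ttys = 4 and the
 * loopback device enabled, so num_ports is presumably 5): card N's
 * standard ttys occupy port_table[5N]..port_table[5N + 3] and its
 * loopback port sits at port_table[5N + 4] = table_idx(N).
 */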
/* total # of tty ports created per fw_card */
static int num_ports;
/* slab used as pool for struct fwtty_transactions */
static struct kmem_cache *fwtty_txn_cache;
struct tty_driver *fwtty_driver;
static struct tty_driver *fwloop_driver;
static struct dentry *fwserial_debugfs;
struct fwtty_transaction;
typedef void (*fwtty_transaction_cb)(struct fw_card *card, int rcode,
void *data, size_t length,
struct fwtty_transaction *txn);
struct fwtty_transaction {
struct fw_transaction fw_txn;
fwtty_transaction_cb callback;
struct fwtty_port *port;
union {
struct dma_pending dma_pended;
};
};
#define to_device(a, b) (a->b)
#define fwtty_err(p, fmt, ...) \
dev_err(to_device(p, device), fmt, ##__VA_ARGS__)
#define fwtty_info(p, fmt, ...) \
dev_info(to_device(p, device), fmt, ##__VA_ARGS__)
#define fwtty_notice(p, fmt, ...) \
dev_notice(to_device(p, device), fmt, ##__VA_ARGS__)
#define fwtty_dbg(p, fmt, ...) \
dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__)
#define fwtty_err_ratelimited(p, fmt, ...) \
dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__)
#ifdef DEBUG
static inline void debug_short_write(struct fwtty_port *port, int c, int n)
{
int avail;
if (n < c) {
spin_lock_bh(&port->lock);
avail = dma_fifo_avail(&port->tx_fifo);
spin_unlock_bh(&port->lock);
fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d\n",
avail, c, n);
}
}
#else
#define debug_short_write(port, c, n)
#endif
static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
int generation, int id);
#ifdef FWTTY_PROFILING
static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
{
spin_lock_bh(&port->lock);
fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
spin_unlock_bh(&port->lock);
}
static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
{
/* for each stat, print sum of 0 to 2^k, then individually */
int k = 4;
unsigned sum;
int j;
char t[10];
snprintf(t, 10, "< %d", 1 << k);
seq_printf(m, "\n%14s %6s", " ", t);
for (j = k + 1; j < DISTRIBUTION_MAX_INDEX; ++j)
seq_printf(m, "%6d", 1 << j);
++k;
for (j = 0, sum = 0; j <= k; ++j)
sum += stats->reads[j];
seq_printf(m, "\n%14s: %6d", "reads", sum);
for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
seq_printf(m, "%6d", stats->reads[j]);
for (j = 0, sum = 0; j <= k; ++j)
sum += stats->writes[j];
seq_printf(m, "\n%14s: %6d", "writes", sum);
for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
seq_printf(m, "%6d", stats->writes[j]);
for (j = 0, sum = 0; j <= k; ++j)
sum += stats->txns[j];
seq_printf(m, "\n%14s: %6d", "txns", sum);
for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
seq_printf(m, "%6d", stats->txns[j]);
for (j = 0, sum = 0; j <= k; ++j)
sum += stats->unthrottle[j];
seq_printf(m, "\n%14s: %6d", "avail @ unthr", sum);
for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
seq_printf(m, "%6d", stats->unthrottle[j]);
}
#else
#define fwtty_profile_fifo(port, stat)
#define fwtty_dump_profile(m, stats)
#endif
/*
* Returns the max receive packet size for the given node
* Devices which are OHCI v1.0 / v1.1 / v1.2-draft or RFC 2734 compliant
* are required by specification to support max_rec of 8 (512 bytes) or more.
*/
static inline int device_max_receive(struct fw_device *fw_device)
{
/* see IEEE 1394-2008 table 8-8 */
return min(2 << fw_device->max_rec, 4096);
}
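/*
 * Worked example: max_rec = 8 gives 2 << 8 = 512 bytes, the spec minimum
 * noted above; the min() clamps more capable devices at 4096 bytes.
 */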
static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
{
switch (rcode) {
case RCODE_SEND_ERROR:
fwtty_err_ratelimited(port, "card busy\n");
break;
case RCODE_ADDRESS_ERROR:
fwtty_err_ratelimited(port, "bad unit addr or write length\n");
break;
case RCODE_DATA_ERROR:
fwtty_err_ratelimited(port, "failed rx\n");
break;
case RCODE_NO_ACK:
fwtty_err_ratelimited(port, "missing ack\n");
break;
case RCODE_BUSY:
fwtty_err_ratelimited(port, "remote busy\n");
break;
default:
fwtty_err_ratelimited(port, "failed tx: %d\n", rcode);
}
}
static void fwtty_txn_constructor(void *this)
{
struct fwtty_transaction *txn = this;
init_timer(&txn->fw_txn.split_timeout_timer);
}
static void fwtty_common_callback(struct fw_card *card, int rcode,
void *payload, size_t len, void *cb_data)
{
struct fwtty_transaction *txn = cb_data;
struct fwtty_port *port = txn->port;
if (port && rcode != RCODE_COMPLETE)
fwtty_log_tx_error(port, rcode);
if (txn->callback)
txn->callback(card, rcode, payload, len, txn);
kmem_cache_free(fwtty_txn_cache, txn);
}
static int fwtty_send_data_async(struct fwtty_peer *peer, int tcode,
unsigned long long addr, void *payload,
size_t len, fwtty_transaction_cb callback,
struct fwtty_port *port)
{
struct fwtty_transaction *txn;
int generation;
txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
if (!txn)
return -ENOMEM;
txn->callback = callback;
txn->port = port;
generation = peer->generation;
smp_rmb();
fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
peer->node_id, generation, peer->speed, addr, payload,
len, fwtty_common_callback, txn);
return 0;
}
static void fwtty_send_txn_async(struct fwtty_peer *peer,
struct fwtty_transaction *txn, int tcode,
unsigned long long addr, void *payload,
size_t len, fwtty_transaction_cb callback,
struct fwtty_port *port)
{
int generation;
txn->callback = callback;
txn->port = port;
generation = peer->generation;
smp_rmb();
fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
peer->node_id, generation, peer->speed, addr, payload,
len, fwtty_common_callback, txn);
}
static void __fwtty_restart_tx(struct fwtty_port *port)
{
int len, avail;
len = dma_fifo_out_level(&port->tx_fifo);
if (len)
schedule_delayed_work(&port->drain, 0);
avail = dma_fifo_avail(&port->tx_fifo);
fwtty_dbg(port, "fifo len: %d avail: %d\n", len, avail);
}
static void fwtty_restart_tx(struct fwtty_port *port)
{
spin_lock_bh(&port->lock);
__fwtty_restart_tx(port);
spin_unlock_bh(&port->lock);
}
/**
* fwtty_update_port_status - decodes & dispatches line status changes
*
* Note: in loopback, the port->lock is being held. Only use functions that
* don't attempt to reclaim the port->lock.
*/
static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
{
unsigned delta;
struct tty_struct *tty;
/* simulated LSR/MSR status from remote */
status &= ~MCTRL_MASK;
delta = (port->mstatus ^ status) & ~MCTRL_MASK;
delta &= ~(status & TIOCM_RNG);
port->mstatus = status;
if (delta & TIOCM_RNG)
++port->icount.rng;
if (delta & TIOCM_DSR)
++port->icount.dsr;
if (delta & TIOCM_CAR)
++port->icount.dcd;
if (delta & TIOCM_CTS)
++port->icount.cts;
fwtty_dbg(port, "status: %x delta: %x\n", status, delta);
if (delta & TIOCM_CAR) {
tty = tty_port_tty_get(&port->port);
if (tty && !C_CLOCAL(tty)) {
if (status & TIOCM_CAR)
wake_up_interruptible(&port->port.open_wait);
else
schedule_work(&port->hangup);
}
tty_kref_put(tty);
}
if (delta & TIOCM_CTS) {
tty = tty_port_tty_get(&port->port);
if (tty && C_CRTSCTS(tty)) {
if (tty->hw_stopped) {
if (status & TIOCM_CTS) {
tty->hw_stopped = 0;
if (port->loopback)
__fwtty_restart_tx(port);
else
fwtty_restart_tx(port);
}
} else {
if (~status & TIOCM_CTS)
tty->hw_stopped = 1;
}
}
tty_kref_put(tty);
} else if (delta & OOB_TX_THROTTLE) {
tty = tty_port_tty_get(&port->port);
if (tty) {
if (tty->hw_stopped) {
if (~status & OOB_TX_THROTTLE) {
tty->hw_stopped = 0;
if (port->loopback)
__fwtty_restart_tx(port);
else
fwtty_restart_tx(port);
}
} else {
if (status & OOB_TX_THROTTLE)
tty->hw_stopped = 1;
}
}
tty_kref_put(tty);
}
if (delta & (UART_LSR_BI << 24)) {
if (status & (UART_LSR_BI << 24)) {
port->break_last = jiffies;
schedule_delayed_work(&port->emit_breaks, 0);
} else {
/* run emit_breaks one last time (if pending) */
mod_delayed_work(system_wq, &port->emit_breaks, 0);
}
}
if (delta & (TIOCM_DSR | TIOCM_CAR | TIOCM_CTS | TIOCM_RNG))
wake_up_interruptible(&port->port.delta_msr_wait);
}
/**
* __fwtty_port_line_status - generate 'line status' for indicated port
*
* This function returns a remote 'MSR' state based on the local 'MCR' state,
* as if a null modem cable was attached. The actual status is a mangling
* of TIOCM_* bits suitable for sending to a peer's status_addr.
*
* Note: caller must be holding port lock
*/
static unsigned __fwtty_port_line_status(struct fwtty_port *port)
{
unsigned status = 0;
/* TODO: add module param to tie RNG to DTR as well */
if (port->mctrl & TIOCM_DTR)
status |= TIOCM_DSR | TIOCM_CAR;
if (port->mctrl & TIOCM_RTS)
status |= TIOCM_CTS;
if (port->mctrl & OOB_RX_THROTTLE)
status |= OOB_TX_THROTTLE;
/* emulate BRK as add'l line status */
if (port->break_ctl)
status |= UART_LSR_BI << 24;
return status;
}
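/*
 * Summary of the null-modem mangling above:
 *
 *	local MCR bit		reported to peer as
 *	TIOCM_DTR		TIOCM_DSR | TIOCM_CAR
 *	TIOCM_RTS		TIOCM_CTS
 *	OOB_RX_THROTTLE		OOB_TX_THROTTLE
 *	break_ctl		UART_LSR_BI in the top status byte
 */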
/**
* __fwtty_write_port_status - send the port line status to peer
*
* Note: caller must be holding the port lock.
*/
static int __fwtty_write_port_status(struct fwtty_port *port)
{
struct fwtty_peer *peer;
int err = -ENOENT;
unsigned status = __fwtty_port_line_status(port);
rcu_read_lock();
peer = rcu_dereference(port->peer);
if (peer) {
err = fwtty_send_data_async(peer, TCODE_WRITE_QUADLET_REQUEST,
peer->status_addr, &status,
sizeof(status), NULL, port);
}
rcu_read_unlock();
return err;
}
/**
* fwtty_write_port_status - same as above but locked by port lock
*/
static int fwtty_write_port_status(struct fwtty_port *port)
{
int err;
spin_lock_bh(&port->lock);
err = __fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
return err;
}
static void fwtty_throttle_port(struct fwtty_port *port)
{
struct tty_struct *tty;
unsigned old;
tty = tty_port_tty_get(&port->port);
if (!tty)
return;
spin_lock_bh(&port->lock);
old = port->mctrl;
port->mctrl |= OOB_RX_THROTTLE;
if (C_CRTSCTS(tty))
port->mctrl &= ~TIOCM_RTS;
if (~old & OOB_RX_THROTTLE)
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
tty_kref_put(tty);
}
/**
* fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup
*
* When the remote has finished tx, and all in-flight rx has been received
* and pushed to the flip buffer, the remote may close its device. This will
* drop DTR on the remote which will drop carrier here. Typically, the tty is
* hung up when carrier is dropped or lost.
*
* However, there is a race between the hang up and the line discipline
* delivering its data to the reader. A hangup will cause the ldisc to flush
* (ie., clear) the read buffer and flip buffer. Because of firewire's
* relatively high throughput, the ldisc frequently lags well behind the driver,
* resulting in lost data (which has already been received and written to
* the flip buffer) when the remote closes its end.
*
* Unfortunately, since the flip buffer offers no direct method for determining
* if it holds data, ensuring the ldisc has delivered all data is problematic.
*/
/* FIXME: drop this workaround when __tty_hangup waits for ldisc completion */
static void fwtty_do_hangup(struct work_struct *work)
{
struct fwtty_port *port = to_port(work, hangup);
struct tty_struct *tty;
schedule_timeout_uninterruptible(msecs_to_jiffies(50));
tty = tty_port_tty_get(&port->port);
if (tty)
tty_vhangup(tty);
tty_kref_put(tty);
}
static void fwtty_emit_breaks(struct work_struct *work)
{
struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
static const char buf[16];
unsigned long now = jiffies;
unsigned long elapsed = now - port->break_last;
int n, t, c, brk = 0;
/* generate breaks at the line rate (but at least 1) */
n = (elapsed * port->cps) / HZ + 1;
port->break_last = now;
fwtty_dbg(port, "sending %d brks\n", n);
while (n) {
t = min(n, 16);
c = tty_insert_flip_string_fixed_flag(&port->port, buf,
TTY_BREAK, t);
n -= c;
brk += c;
if (c < t)
break;
}
tty_flip_buffer_push(&port->port);
if (port->mstatus & (UART_LSR_BI << 24))
schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS);
port->icount.brk += brk;
}
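/*
 * Worked example (hypothetical line settings, and assuming port->cps is
 * characters per second derived from the termios settings): at 9600 baud
 * with 10 bits per character, cps is 960, so 0.5 s after break_last this
 * computes n = (HZ/2 * 960) / HZ + 1 = 481 break characters, pushed to
 * the flip buffer 16 at a time from the zeroed buf[].
 */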
static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
{
int c, n = len;
unsigned lsr;
int err = 0;
fwtty_dbg(port, "%d\n", n);
fwtty_profile_data(port->stats.reads, n);
if (port->write_only) {
n = 0;
goto out;
}
/* disregard break status; breaks are generated by emit_breaks work */
lsr = (port->mstatus >> 24) & ~UART_LSR_BI;
if (port->overrun)
lsr |= UART_LSR_OE;
if (lsr & UART_LSR_OE)
++port->icount.overrun;
lsr &= port->status_mask;
if (lsr & ~port->ignore_mask & UART_LSR_OE) {
if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) {
err = -EIO;
goto out;
}
}
port->overrun = false;
if (lsr & port->ignore_mask & ~UART_LSR_OE) {
/* TODO: don't drop SAK and Magic SysRq here */
n = 0;
goto out;
}
c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
if (c > 0)
tty_flip_buffer_push(&port->port);
n -= c;
if (n) {
port->overrun = true;
err = -EIO;
fwtty_err_ratelimited(port, "flip buffer overrun\n");
} else {
/* throttle the sender if remaining flip buffer space has
* reached high watermark to avoid losing data which may be
* in-flight. Since the AR request context is 32k, that much
* data may have _already_ been acked.
*/
if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
fwtty_throttle_port(port);
}
out:
port->icount.rx += len;
port->stats.lost += n;
return err;
}
/**
* fwtty_port_handler - bus address handler for port reads/writes
* @parameters: fw_address_callback_t as specified by firewire core interface
*
* This handler is responsible for handling inbound read/write dma from remotes.
*/
static void fwtty_port_handler(struct fw_card *card,
struct fw_request *request,
int tcode, int destination, int source,
int generation,
unsigned long long addr,
void *data, size_t len,
void *callback_data)
{
struct fwtty_port *port = callback_data;
struct fwtty_peer *peer;
int err;
int rcode;
/* Only accept rx from the peer virtual-cabled to this port */
rcu_read_lock();
peer = __fwserial_peer_by_node_id(card, generation, source);
rcu_read_unlock();
if (!peer || peer != rcu_access_pointer(port->peer)) {
rcode = RCODE_ADDRESS_ERROR;
fwtty_err_ratelimited(port, "ignoring unauthenticated data\n");
goto respond;
}
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
if (addr != port->rx_handler.offset || len != 4) {
rcode = RCODE_ADDRESS_ERROR;
} else {
fwtty_update_port_status(port, *(unsigned *)data);
rcode = RCODE_COMPLETE;
}
break;
case TCODE_WRITE_BLOCK_REQUEST:
if (addr != port->rx_handler.offset + 4 ||
len > port->rx_handler.length - 4) {
rcode = RCODE_ADDRESS_ERROR;
} else {
err = fwtty_rx(port, data, len);
switch (err) {
case 0:
rcode = RCODE_COMPLETE;
break;
case -EIO:
rcode = RCODE_DATA_ERROR;
break;
default:
rcode = RCODE_CONFLICT_ERROR;
break;
}
}
break;
default:
rcode = RCODE_TYPE_ERROR;
}
respond:
fw_send_response(card, request, rcode);
}
/**
* fwtty_tx_complete - callback for tx dma
* @data: ignored, has no meaning for write txns
* @length: ignored, has no meaning for write txns
*
* The writer must be woken here if the fifo has been emptied because it
* may have slept if chars_in_buffer was != 0
*/
static void fwtty_tx_complete(struct fw_card *card, int rcode,
void *data, size_t length,
struct fwtty_transaction *txn)
{
struct fwtty_port *port = txn->port;
int len;
fwtty_dbg(port, "rcode: %d\n", rcode);
switch (rcode) {
case RCODE_COMPLETE:
spin_lock_bh(&port->lock);
dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
len = dma_fifo_level(&port->tx_fifo);
spin_unlock_bh(&port->lock);
port->icount.tx += txn->dma_pended.len;
break;
default:
/* TODO: implement retries */
spin_lock_bh(&port->lock);
dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
len = dma_fifo_level(&port->tx_fifo);
spin_unlock_bh(&port->lock);
port->stats.dropped += txn->dma_pended.len;
}
if (len < WAKEUP_CHARS)
tty_port_tty_wakeup(&port->port);
}
static int fwtty_tx(struct fwtty_port *port, bool drain)
{
struct fwtty_peer *peer;
struct fwtty_transaction *txn;
struct tty_struct *tty;
int n, len;
tty = tty_port_tty_get(&port->port);
if (!tty)
return -ENOENT;
rcu_read_lock();
peer = rcu_dereference(port->peer);
if (!peer) {
n = -EIO;
goto out;
}
if (test_and_set_bit(IN_TX, &port->flags)) {
n = -EALREADY;
goto out;
}
/* try to write as many dma transactions out as possible */
n = -EAGAIN;
while (!tty->stopped && !tty->hw_stopped &&
!test_bit(STOP_TX, &port->flags)) {
txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
if (!txn) {
n = -ENOMEM;
break;
}
spin_lock_bh(&port->lock);
n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended);
spin_unlock_bh(&port->lock);
fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n);
if (n < 0) {
kmem_cache_free(fwtty_txn_cache, txn);
if (n == -EAGAIN) {
++port->stats.tx_stall;
} else if (n == -ENODATA) {
fwtty_profile_data(port->stats.txns, 0);
} else {
++port->stats.fifo_errs;
fwtty_err_ratelimited(port, "fifo err: %d\n",
n);
}
break;
}
fwtty_profile_data(port->stats.txns, txn->dma_pended.len);
fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
peer->fifo_addr, txn->dma_pended.data,
txn->dma_pended.len, fwtty_tx_complete,
port);
++port->stats.sent;
/*
* Stop tx if the 'last view' of the fifo is empty or if
* this is the writer and there's not enough data to bother
*/
if (n == 0 || (!drain && n < WRITER_MINIMUM))
break;
}
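/*
 * -EAGAIN (pend busy), -ENOMEM (txn alloc failed) and -ENODATA
 * (nothing staged) are transient conditions: re-arm the drain worker
 * below if data remains, backing off a full HZ after an allocation
 * failure.
 */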
if (n >= 0 || n == -EAGAIN || n == -ENOMEM || n == -ENODATA) {
spin_lock_bh(&port->lock);
len = dma_fifo_out_level(&port->tx_fifo);
if (len) {
unsigned long delay = (n == -ENOMEM) ? HZ : 1;
schedule_delayed_work(&port->drain, delay);
}
len = dma_fifo_level(&port->tx_fifo);
spin_unlock_bh(&port->lock);
/* wakeup the writer */
if (drain && len < WAKEUP_CHARS)
tty_wakeup(tty);
}
clear_bit(IN_TX, &port->flags);
wake_up_interruptible(&port->wait_tx);
out:
rcu_read_unlock();
tty_kref_put(tty);
return n;
}
static void fwtty_drain_tx(struct work_struct *work)
{
struct fwtty_port *port = to_port(to_delayed_work(work), drain);
fwtty_tx(port, true);
}
static void fwtty_write_xchar(struct fwtty_port *port, char ch)
{
struct fwtty_peer *peer;
++port->stats.xchars;
fwtty_dbg(port, "%02x\n", ch);
rcu_read_lock();
peer = rcu_dereference(port->peer);
if (peer) {
fwtty_send_data_async(peer, TCODE_WRITE_BLOCK_REQUEST,
peer->fifo_addr, &ch, sizeof(ch),
NULL, port);
}
rcu_read_unlock();
}
struct fwtty_port *fwtty_port_get(unsigned index)
{
struct fwtty_port *port;
if (index >= MAX_TOTAL_PORTS)
return NULL;
mutex_lock(&port_table_lock);
port = port_table[index];
if (port)
kref_get(&port->serial->kref);
mutex_unlock(&port_table_lock);
return port;
}
EXPORT_SYMBOL(fwtty_port_get);
static int fwtty_ports_add(struct fw_serial *serial)
{
int err = -EBUSY;
int i, j;
if (port_table_corrupt)
return err;
mutex_lock(&port_table_lock);
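/*
 * Each card's ports occupy one contiguous block of num_ports slots,
 * so the table is scanned in num_ports-sized strides for a free block.
 */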
for (i = 0; i + num_ports <= MAX_TOTAL_PORTS; i += num_ports) {
if (!port_table[i]) {
for (j = 0; j < num_ports; ++i, ++j) {
serial->ports[j]->index = i;
port_table[i] = serial->ports[j];
}
err = 0;
break;
}
}
mutex_unlock(&port_table_lock);
return err;
}
static void fwserial_destroy(struct kref *kref)
{
struct fw_serial *serial = to_serial(kref, kref);
struct fwtty_port **ports = serial->ports;
int j, i = ports[0]->index;
synchronize_rcu();
mutex_lock(&port_table_lock);
for (j = 0; j < num_ports; ++i, ++j) {
port_table_corrupt |= port_table[i] != ports[j];
WARN_ONCE(port_table_corrupt, "port_table[%d]: %p != ports[%d]: %p",
i, port_table[i], j, ports[j]);
port_table[i] = NULL;
}
mutex_unlock(&port_table_lock);
for (j = 0; j < num_ports; ++j) {
fw_core_remove_address_handler(&ports[j]->rx_handler);
tty_port_destroy(&ports[j]->port);
kfree(ports[j]);
}
kfree(serial);
}
static void fwtty_port_put(struct fwtty_port *port)
{
kref_put(&port->serial->kref, fwserial_destroy);
}
static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on)
{
struct fwtty_port *port = to_port(tty_port, port);
fwtty_dbg(port, "on/off: %d\n", on);
spin_lock_bh(&port->lock);
/* Don't change carrier state if this is a console */
if (!port->port.console) {
if (on)
port->mctrl |= TIOCM_DTR | TIOCM_RTS;
else
port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
}
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
}
/**
* fwtty_port_carrier_raised: required tty_port operation
*
* This port operation is polled after a tty has been opened and is waiting for
* carrier detect -- see drivers/tty/tty_port:tty_port_block_til_ready().
*/
static int fwtty_port_carrier_raised(struct tty_port *tty_port)
{
struct fwtty_port *port = to_port(tty_port, port);
int rc;
rc = (port->mstatus & TIOCM_CAR);
fwtty_dbg(port, "%d\n", rc);
return rc;
}
static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty)
{
unsigned baud, frame;
baud = tty_termios_baud_rate(&tty->termios);
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
/* compute bit count of 2 frames */
frame = 12 + ((C_CSTOPB(tty)) ? 4 : 2) + ((C_PARENB(tty)) ? 2 : 0);
switch (C_CSIZE(tty)) {
case CS5:
frame -= (C_CSTOPB(tty)) ? 1 : 0;
break;
case CS6:
frame += 2;
break;
case CS7:
frame += 4;
break;
case CS8:
frame += 6;
break;
}
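/*
 * Worked example: for 8N1, frame = 12 + 2 + 6 = 20 bits per 2 frames,
 * so cps = (baud * 2) / 20 = baud / 10 -- the familiar
 * 10-bits-per-character rule of thumb.
 */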
port->cps = (baud << 1) / frame;
port->status_mask = UART_LSR_OE;
if (_I_FLAG(tty, BRKINT | PARMRK))
port->status_mask |= UART_LSR_BI;
port->ignore_mask = 0;
if (I_IGNBRK(tty)) {
port->ignore_mask |= UART_LSR_BI;
if (I_IGNPAR(tty))
port->ignore_mask |= UART_LSR_OE;
}
port->write_only = !C_CREAD(tty);
/* turn off echo and newline xlat if loopback */
if (port->loopback) {
tty->termios.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHOKE |
ECHONL | ECHOPRT | ECHOCTL);
tty->termios.c_oflag &= ~ONLCR;
}
return baud;
}
static int fwtty_port_activate(struct tty_port *tty_port,
struct tty_struct *tty)
{
struct fwtty_port *port = to_port(tty_port, port);
unsigned baud;
int err;
set_bit(TTY_IO_ERROR, &tty->flags);
err = dma_fifo_alloc(&port->tx_fifo, FWTTY_PORT_TXFIFO_LEN,
cache_line_size(),
port->max_payload,
FWTTY_PORT_MAX_PEND_DMA,
GFP_KERNEL);
if (err)
return err;
spin_lock_bh(&port->lock);
baud = set_termios(port, tty);
/* if console, don't change carrier state */
if (!port->port.console) {
port->mctrl = 0;
if (baud != 0)
port->mctrl = TIOCM_DTR | TIOCM_RTS;
}
if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS)
tty->hw_stopped = 1;
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
clear_bit(TTY_IO_ERROR, &tty->flags);
return 0;
}
/**
* fwtty_port_shutdown
*
* Note: the tty port core ensures this is not the console and
* manages TTY_IO_ERROR properly
*/
static void fwtty_port_shutdown(struct tty_port *tty_port)
{
struct fwtty_port *port = to_port(tty_port, port);
/* TODO: cancel outstanding transactions */
cancel_delayed_work_sync(&port->emit_breaks);
cancel_delayed_work_sync(&port->drain);
spin_lock_bh(&port->lock);
port->flags = 0;
port->break_ctl = 0;
port->overrun = 0;
__fwtty_write_port_status(port);
dma_fifo_free(&port->tx_fifo);
spin_unlock_bh(&port->lock);
}
static int fwtty_open(struct tty_struct *tty, struct file *fp)
{
struct fwtty_port *port = tty->driver_data;
return tty_port_open(&port->port, tty, fp);
}
static void fwtty_close(struct tty_struct *tty, struct file *fp)
{
struct fwtty_port *port = tty->driver_data;
tty_port_close(&port->port, tty, fp);
}
static void fwtty_hangup(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
tty_port_hangup(&port->port);
}
static void fwtty_cleanup(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
tty->driver_data = NULL;
fwtty_port_put(port);
}
static int fwtty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct fwtty_port *port = fwtty_port_get(tty->index);
int err;
err = tty_standard_install(driver, tty);
if (!err)
tty->driver_data = port;
else
fwtty_port_put(port);
return err;
}
static int fwloop_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct fwtty_port *port = fwtty_port_get(table_idx(tty->index));
int err;
err = tty_standard_install(driver, tty);
if (!err)
tty->driver_data = port;
else
fwtty_port_put(port);
return err;
}
static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct fwtty_port *port = tty->driver_data;
int n, len;
fwtty_dbg(port, "%d\n", c);
fwtty_profile_data(port->stats.writes, c);
spin_lock_bh(&port->lock);
n = dma_fifo_in(&port->tx_fifo, buf, c);
len = dma_fifo_out_level(&port->tx_fifo);
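/*
 * Below DRAIN_THRESHOLD, defer to the delayed drain worker so small
 * writes coalesce into fewer, larger transactions; larger backlogs
 * are transmitted immediately once the lock is dropped.
 */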
if (len < DRAIN_THRESHOLD)
schedule_delayed_work(&port->drain, 1);
spin_unlock_bh(&port->lock);
if (len >= DRAIN_THRESHOLD)
fwtty_tx(port, false);
debug_short_write(port, c, n);
return (n < 0) ? 0 : n;
}
static int fwtty_write_room(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
int n;
spin_lock_bh(&port->lock);
n = dma_fifo_avail(&port->tx_fifo);
spin_unlock_bh(&port->lock);
fwtty_dbg(port, "%d\n", n);
return n;
}
static int fwtty_chars_in_buffer(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
int n;
spin_lock_bh(&port->lock);
n = dma_fifo_level(&port->tx_fifo);
spin_unlock_bh(&port->lock);
fwtty_dbg(port, "%d\n", n);
return n;
}
static void fwtty_send_xchar(struct tty_struct *tty, char ch)
{
struct fwtty_port *port = tty->driver_data;
fwtty_dbg(port, "%02x\n", ch);
fwtty_write_xchar(port, ch);
}
static void fwtty_throttle(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
/*
* Ignore throttling (but not unthrottling).
* It only makes sense to throttle when data will no longer be
* accepted by the tty flip buffer. For example, it is
* possible for received data to overflow the tty buffer long
* before the line discipline ever has a chance to throttle the driver.
* Additionally, the driver may have already completed the I/O
* but the tty buffer is still emptying, so the line discipline is
* throttling and unthrottling nothing.
*/
++port->stats.throttled;
}
static void fwtty_unthrottle(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
fwtty_dbg(port, "CRTSCTS: %d\n", C_CRTSCTS(tty) != 0);
fwtty_profile_fifo(port, port->stats.unthrottle);
spin_lock_bh(&port->lock);
port->mctrl &= ~OOB_RX_THROTTLE;
if (C_CRTSCTS(tty))
port->mctrl |= TIOCM_RTS;
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
}
static int check_msr_delta(struct fwtty_port *port, unsigned long mask,
struct async_icount *prev)
{
struct async_icount now;
int delta;
now = port->icount;
delta = ((mask & TIOCM_RNG && prev->rng != now.rng) ||
(mask & TIOCM_DSR && prev->dsr != now.dsr) ||
(mask & TIOCM_CAR && prev->dcd != now.dcd) ||
(mask & TIOCM_CTS && prev->cts != now.cts));
*prev = now;
return delta;
}
static int wait_msr_change(struct fwtty_port *port, unsigned long mask)
{
struct async_icount prev;
prev = port->icount;
return wait_event_interruptible(port->port.delta_msr_wait,
check_msr_delta(port, mask, &prev));
}
static int get_serial_info(struct fwtty_port *port,
struct serial_struct __user *info)
{
struct serial_struct tmp;
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_UNKNOWN;
tmp.line = port->port.tty->index;
tmp.flags = port->port.flags;
tmp.xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
tmp.baud_base = 400000000;
tmp.close_delay = port->port.close_delay;
return (copy_to_user(info, &tmp, sizeof(*info))) ? -EFAULT : 0;
}
static int set_serial_info(struct fwtty_port *port,
struct serial_struct __user *info)
{
struct serial_struct tmp;
if (copy_from_user(&tmp, info, sizeof(tmp)))
return -EFAULT;
if (tmp.irq != 0 || tmp.port != 0 || tmp.custom_divisor != 0 ||
tmp.baud_base != 400000000)
return -EPERM;
if (!capable(CAP_SYS_ADMIN)) {
if (((tmp.flags & ~ASYNC_USR_MASK) !=
(port->port.flags & ~ASYNC_USR_MASK)))
return -EPERM;
} else {
port->port.close_delay = tmp.close_delay * HZ / 100;
}
return 0;
}
static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
unsigned long arg)
{
struct fwtty_port *port = tty->driver_data;
int err;
switch (cmd) {
case TIOCGSERIAL:
mutex_lock(&port->port.mutex);
err = get_serial_info(port, (void __user *)arg);
mutex_unlock(&port->port.mutex);
break;
case TIOCSSERIAL:
mutex_lock(&port->port.mutex);
err = set_serial_info(port, (void __user *)arg);
mutex_unlock(&port->port.mutex);
break;
case TIOCMIWAIT:
err = wait_msr_change(port, arg);
break;
default:
err = -ENOIOCTLCMD;
}
return err;
}
static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct fwtty_port *port = tty->driver_data;
unsigned baud;
spin_lock_bh(&port->lock);
baud = set_termios(port, tty);
if ((baud == 0) && (old->c_cflag & CBAUD)) {
port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
} else if ((baud != 0) && !(old->c_cflag & CBAUD)) {
if (C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
port->mctrl |= TIOCM_DTR | TIOCM_RTS;
else
port->mctrl |= TIOCM_DTR;
}
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
if (old->c_cflag & CRTSCTS) {
if (!C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
fwtty_restart_tx(port);
}
} else if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS) {
tty->hw_stopped = 1;
}
}
/**
* fwtty_break_ctl - start/stop sending breaks
*
* Signals the remote to start or stop generating simulated breaks.
* First, stop dequeueing from the fifo and wait for writer/drain to leave tx
* before signalling the break line status. This guarantees any pending rx will
* be queued to the line discipline before break is simulated on the remote.
* Conversely, turning off break_ctl requires signalling the line status change,
* then enabling tx.
*/
static int fwtty_break_ctl(struct tty_struct *tty, int state)
{
struct fwtty_port *port = tty->driver_data;
long ret;
fwtty_dbg(port, "%d\n", state);
if (state == -1) {
set_bit(STOP_TX, &port->flags);
ret = wait_event_interruptible_timeout(port->wait_tx,
!test_bit(IN_TX, &port->flags),
10);
if (ret == 0 || ret == -ERESTARTSYS) {
clear_bit(STOP_TX, &port->flags);
fwtty_restart_tx(port);
return -EINTR;
}
}
spin_lock_bh(&port->lock);
port->break_ctl = (state == -1);
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
if (state == 0) {
spin_lock_bh(&port->lock);
dma_fifo_reset(&port->tx_fifo);
clear_bit(STOP_TX, &port->flags);
spin_unlock_bh(&port->lock);
}
return 0;
}
static int fwtty_tiocmget(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
unsigned tiocm;
spin_lock_bh(&port->lock);
tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
spin_unlock_bh(&port->lock);
fwtty_dbg(port, "%x\n", tiocm);
return tiocm;
}
static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear)
{
struct fwtty_port *port = tty->driver_data;
fwtty_dbg(port, "set: %x clear: %x\n", set, clear);
/* TODO: simulate loopback if TIOCM_LOOP set */
spin_lock_bh(&port->lock);
port->mctrl &= ~(clear & MCTRL_MASK & 0xffff);
port->mctrl |= set & MCTRL_MASK & 0xffff;
__fwtty_write_port_status(port);
spin_unlock_bh(&port->lock);
return 0;
}
static int fwtty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct fwtty_port *port = tty->driver_data;
struct stats stats;
memcpy(&stats, &port->stats, sizeof(stats));
if (port->port.console)
(*port->fwcon_ops->stats)(&stats, port->con_data);
icount->cts = port->icount.cts;
icount->dsr = port->icount.dsr;
icount->rng = port->icount.rng;
icount->dcd = port->icount.dcd;
icount->rx = port->icount.rx;
icount->tx = port->icount.tx + stats.xchars;
icount->frame = port->icount.frame;
icount->overrun = port->icount.overrun;
icount->parity = port->icount.parity;
icount->brk = port->icount.brk;
icount->buf_overrun = port->icount.overrun;
return 0;
}
static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port)
{
struct stats stats;
memcpy(&stats, &port->stats, sizeof(stats));
if (port->port.console)
(*port->fwcon_ops->stats)(&stats, port->con_data);
seq_printf(m, " addr:%012llx tx:%d rx:%d", port->rx_handler.offset,
port->icount.tx + stats.xchars, port->icount.rx);
seq_printf(m, " cts:%d dsr:%d rng:%d dcd:%d", port->icount.cts,
port->icount.dsr, port->icount.rng, port->icount.dcd);
seq_printf(m, " fe:%d oe:%d pe:%d brk:%d", port->icount.frame,
port->icount.overrun, port->icount.parity, port->icount.brk);
}
static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
{
struct stats stats;
memcpy(&stats, &port->stats, sizeof(stats));
if (port->port.console)
(*port->fwcon_ops->stats)(&stats, port->con_data);
seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
stats.tx_stall, stats.fifo_errs, stats.lost);
seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled);
if (port->port.console) {
seq_puts(m, "\n ");
(*port->fwcon_ops->proc_show)(m, port->con_data);
}
fwtty_dump_profile(m, &port->stats);
}
static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
{
int generation = peer->generation;
smp_rmb();
seq_printf(m, " %s:", dev_name(&peer->unit->device));
seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
peer->max_payload, (unsigned long long) peer->guid);
seq_printf(m, " mgmt:%012llx", (unsigned long long) peer->mgmt_addr);
seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr);
seq_putc(m, '\n');
}
static int fwtty_proc_show(struct seq_file *m, void *v)
{
struct fwtty_port *port;
int i;
seq_puts(m, "fwserinfo: 1.0 driver: 1.0\n");
for (i = 0; i < MAX_TOTAL_PORTS && (port = fwtty_port_get(i)); ++i) {
seq_printf(m, "%2d:", i);
if (capable(CAP_SYS_ADMIN))
fwtty_proc_show_port(m, port);
fwtty_port_put(port);
seq_puts(m, "\n");
}
return 0;
}
static int fwtty_debugfs_stats_show(struct seq_file *m, void *v)
{
struct fw_serial *serial = m->private;
struct fwtty_port *port;
int i;
for (i = 0; i < num_ports; ++i) {
port = fwtty_port_get(serial->ports[i]->index);
if (port) {
seq_printf(m, "%2d:", port->index);
fwtty_proc_show_port(m, port);
fwtty_debugfs_show_port(m, port);
fwtty_port_put(port);
seq_puts(m, "\n");
}
}
return 0;
}
static int fwtty_debugfs_peers_show(struct seq_file *m, void *v)
{
struct fw_serial *serial = m->private;
struct fwtty_peer *peer;
rcu_read_lock();
seq_printf(m, "card: %s guid: %016llx\n",
dev_name(serial->card->device),
(unsigned long long) serial->card->guid);
list_for_each_entry_rcu(peer, &serial->peer_list, list)
fwtty_debugfs_show_peer(m, peer);
rcu_read_unlock();
return 0;
}
static int fwtty_proc_open(struct inode *inode, struct file *fp)
{
return single_open(fp, fwtty_proc_show, NULL);
}
static int fwtty_stats_open(struct inode *inode, struct file *fp)
{
return single_open(fp, fwtty_debugfs_stats_show, inode->i_private);
}
static int fwtty_peers_open(struct inode *inode, struct file *fp)
{
return single_open(fp, fwtty_debugfs_peers_show, inode->i_private);
}
static const struct file_operations fwtty_stats_fops = {
.owner = THIS_MODULE,
.open = fwtty_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations fwtty_peers_fops = {
.owner = THIS_MODULE,
.open = fwtty_peers_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations fwtty_proc_fops = {
.owner = THIS_MODULE,
.open = fwtty_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct tty_port_operations fwtty_port_ops = {
.dtr_rts = fwtty_port_dtr_rts,
.carrier_raised = fwtty_port_carrier_raised,
.shutdown = fwtty_port_shutdown,
.activate = fwtty_port_activate,
};
static const struct tty_operations fwtty_ops = {
.open = fwtty_open,
.close = fwtty_close,
.hangup = fwtty_hangup,
.cleanup = fwtty_cleanup,
.install = fwtty_install,
.write = fwtty_write,
.write_room = fwtty_write_room,
.chars_in_buffer = fwtty_chars_in_buffer,
.send_xchar = fwtty_send_xchar,
.throttle = fwtty_throttle,
.unthrottle = fwtty_unthrottle,
.ioctl = fwtty_ioctl,
.set_termios = fwtty_set_termios,
.break_ctl = fwtty_break_ctl,
.tiocmget = fwtty_tiocmget,
.tiocmset = fwtty_tiocmset,
.get_icount = fwtty_get_icount,
.proc_fops = &fwtty_proc_fops,
};
static const struct tty_operations fwloop_ops = {
.open = fwtty_open,
.close = fwtty_close,
.hangup = fwtty_hangup,
.cleanup = fwtty_cleanup,
.install = fwloop_install,
.write = fwtty_write,
.write_room = fwtty_write_room,
.chars_in_buffer = fwtty_chars_in_buffer,
.send_xchar = fwtty_send_xchar,
.throttle = fwtty_throttle,
.unthrottle = fwtty_unthrottle,
.ioctl = fwtty_ioctl,
.set_termios = fwtty_set_termios,
.break_ctl = fwtty_break_ctl,
.tiocmget = fwtty_tiocmget,
.tiocmset = fwtty_tiocmset,
.get_icount = fwtty_get_icount,
};
static inline int mgmt_pkt_expected_len(__be16 code)
{
static const struct fwserial_mgmt_pkt pkt;
switch (be16_to_cpu(code)) {
case FWSC_VIRT_CABLE_PLUG:
return sizeof(pkt.hdr) + sizeof(pkt.plug_req);
case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */
return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
case FWSC_VIRT_CABLE_UNPLUG:
case FWSC_VIRT_CABLE_UNPLUG_RSP:
case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK:
case FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK:
return sizeof(pkt.hdr);
default:
return -1;
}
}
static inline void fill_plug_params(struct virt_plug_params *params,
struct fwtty_port *port)
{
u64 status_addr = port->rx_handler.offset;
u64 fifo_addr = port->rx_handler.offset + 4;
size_t fifo_len = port->rx_handler.length - 4;
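/*
 * Each bus address travels as two big-endian quadlets (hi:lo);
 * the receiver reassembles them with be32_to_u64() in
 * fwserial_virt_plug_complete().
 */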
params->status_hi = cpu_to_be32(status_addr >> 32);
params->status_lo = cpu_to_be32(status_addr);
params->fifo_hi = cpu_to_be32(fifo_addr >> 32);
params->fifo_lo = cpu_to_be32(fifo_addr);
params->fifo_len = cpu_to_be32(fifo_len);
}
static inline void fill_plug_req(struct fwserial_mgmt_pkt *pkt,
struct fwtty_port *port)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG);
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
fill_plug_params(&pkt->plug_req, port);
}
static inline void fill_plug_rsp_ok(struct fwserial_mgmt_pkt *pkt,
struct fwtty_port *port)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP);
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
fill_plug_params(&pkt->plug_rsp, port);
}
static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK);
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
}
static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG);
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
}
static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
}
static inline void fill_unplug_rsp_ok(struct fwserial_mgmt_pkt *pkt)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP);
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
}
static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
struct virt_plug_params *params)
{
struct fwtty_port *port = peer->port;
peer->status_addr = be32_to_u64(params->status_hi, params->status_lo);
peer->fifo_addr = be32_to_u64(params->fifo_hi, params->fifo_lo);
peer->fifo_len = be32_to_cpu(params->fifo_len);
peer_set_state(peer, FWPS_ATTACHED);
/* reconfigure tx_fifo optimally for this peer */
spin_lock_bh(&port->lock);
port->max_payload = min(peer->max_payload, peer->fifo_len);
dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
spin_unlock_bh(&peer->port->lock);
if (port->port.console && port->fwcon_ops->notify != NULL)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
(unsigned long long)peer->guid, dev_name(port->device));
}
static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
struct fwserial_mgmt_pkt *pkt)
{
int generation;
int rcode, tries = 5;
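/*
 * Retry transient failures (busy peer, send error, or a bus
 * generation change mid-transaction) up to the try limit; any other
 * rcode is treated as final.
 */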
do {
generation = peer->generation;
smp_rmb();
rcode = fw_run_transaction(peer->serial->card,
TCODE_WRITE_BLOCK_REQUEST,
peer->node_id,
generation, peer->speed,
peer->mgmt_addr,
pkt, be16_to_cpu(pkt->hdr.len));
if (rcode == RCODE_BUSY || rcode == RCODE_SEND_ERROR ||
rcode == RCODE_GENERATION) {
fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode);
continue;
} else {
break;
}
} while (--tries > 0);
return rcode;
}
/**
* fwserial_claim_port - attempt to claim port @ index for peer
*
* Returns ptr to claimed port or error code (as ERR_PTR())
* Can sleep - must be called from process context
*/
static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer,
int index)
{
struct fwtty_port *port;
if (index < 0 || index >= num_ports)
return ERR_PTR(-EINVAL);
/* must guarantee that previous port releases have completed */
synchronize_rcu();
port = peer->serial->ports[index];
spin_lock_bh(&port->lock);
if (!rcu_access_pointer(port->peer))
rcu_assign_pointer(port->peer, peer);
else
port = ERR_PTR(-EBUSY);
spin_unlock_bh(&port->lock);
return port;
}
/**
* fwserial_find_port - find avail port and claim for peer
*
* Returns ptr to claimed port or NULL if none avail
* Can sleep - must be called from process context
*/
static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer)
{
struct fwtty_port **ports = peer->serial->ports;
int i;
/* must guarantee that previous port releases have completed */
synchronize_rcu();
/* TODO: implement optional GUID-to-specific port # matching */
/* find an unattached port (but not the loopback port, if present) */
for (i = 0; i < num_ttys; ++i) {
spin_lock_bh(&ports[i]->lock);
if (!ports[i]->peer) {
/* claim port */
rcu_assign_pointer(ports[i]->peer, peer);
spin_unlock_bh(&ports[i]->lock);
return ports[i];
}
spin_unlock_bh(&ports[i]->lock);
}
return NULL;
}
static void fwserial_release_port(struct fwtty_port *port, bool reset)
{
/* drop carrier (and all other line status) */
if (reset)
fwtty_update_port_status(port, 0);
spin_lock_bh(&port->lock);
/* reset dma fifo max transmission size back to S100 */
port->max_payload = link_speed_to_max_payload(SCODE_100);
dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
RCU_INIT_POINTER(port->peer, NULL);
spin_unlock_bh(&port->lock);
if (port->port.console && port->fwcon_ops->notify != NULL)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
}
static void fwserial_plug_timeout(unsigned long data)
{
struct fwtty_peer *peer = (struct fwtty_peer *)data;
struct fwtty_port *port;
spin_lock_bh(&peer->lock);
if (peer->state != FWPS_PLUG_PENDING) {
spin_unlock_bh(&peer->lock);
return;
}
port = peer_revert_state(peer);
spin_unlock_bh(&peer->lock);
if (port)
fwserial_release_port(port, false);
}
/**
* fwserial_connect_peer - initiate virtual cable with peer
*
* Returns 0 if VIRT_CABLE_PLUG request was successfully sent,
* otherwise error code. Must be called from process context.
*/
static int fwserial_connect_peer(struct fwtty_peer *peer)
{
struct fwtty_port *port;
struct fwserial_mgmt_pkt *pkt;
int err, rcode;
pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
if (!pkt)
return -ENOMEM;
port = fwserial_find_port(peer);
if (!port) {
fwtty_err(&peer->unit, "avail ports in use\n");
err = -EBUSY;
goto free_pkt;
}
spin_lock_bh(&peer->lock);
/* only initiate VIRT_CABLE_PLUG if peer is currently not attached */
if (peer->state != FWPS_NOT_ATTACHED) {
err = -EBUSY;
goto release_port;
}
peer->port = port;
peer_set_state(peer, FWPS_PLUG_PENDING);
fill_plug_req(pkt, peer->port);
setup_timer(&peer->timer, fwserial_plug_timeout, (unsigned long)peer);
mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
spin_unlock_bh(&peer->lock);
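/*
 * The mgmt transaction can sleep, so it is issued with peer->lock
 * dropped; state is re-checked under the lock afterwards because the
 * plug timeout or the peer's response may have run in the meantime.
 */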
rcode = fwserial_send_mgmt_sync(peer, pkt);
spin_lock_bh(&peer->lock);
if (peer->state == FWPS_PLUG_PENDING && rcode != RCODE_COMPLETE) {
if (rcode == RCODE_CONFLICT_ERROR)
err = -EAGAIN;
else
err = -EIO;
goto cancel_timer;
}
spin_unlock_bh(&peer->lock);
kfree(pkt);
return 0;
cancel_timer:
del_timer(&peer->timer);
peer_revert_state(peer);
release_port:
spin_unlock_bh(&peer->lock);
fwserial_release_port(port, false);
free_pkt:
kfree(pkt);
return err;
}
/**
* fwserial_close_port -
* HUP the tty (if the tty exists) and unregister the tty device.
* Only used by the unit driver upon unit removal to disconnect and
* cleanup all attached ports
*
* The port reference is put by fwtty_cleanup (if a reference was
* ever taken).
*/
static void fwserial_close_port(struct tty_driver *driver,
struct fwtty_port *port)
{
struct tty_struct *tty;
mutex_lock(&port->port.mutex);
tty = tty_port_tty_get(&port->port);
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
mutex_unlock(&port->port.mutex);
if (driver == fwloop_driver)
tty_unregister_device(driver, loop_idx(port));
else
tty_unregister_device(driver, port->index);
}
/**
* fwserial_lookup - finds first fw_serial associated with card
* @card: fw_card to match
*
* NB: caller must be holding fwserial_list_mutex
*/
static struct fw_serial *fwserial_lookup(struct fw_card *card)
{
struct fw_serial *serial;
list_for_each_entry(serial, &fwserial_list, list) {
if (card == serial->card)
return serial;
}
return NULL;
}
/**
* __fwserial_lookup_rcu - finds first fw_serial associated with card
* @card: fw_card to match
*
* NB: caller must be inside rcu_read_lock() section
*/
static struct fw_serial *__fwserial_lookup_rcu(struct fw_card *card)
{
struct fw_serial *serial;
list_for_each_entry_rcu(serial, &fwserial_list, list) {
if (card == serial->card)
return serial;
}
return NULL;
}
/**
* __fwserial_peer_by_node_id - finds a peer matching the given generation + id
*
* If a matching peer could not be found for the specified generation/node id,
* this could be because:
* a) the generation has changed and one of the nodes hasn't updated yet
* b) the remote node has created its remote unit device before this
* local node has created its corresponding remote unit device
* In either case, the remote node should retry
*
* Note: caller must be in rcu_read_lock() section
*/
static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
int generation, int id)
{
struct fw_serial *serial;
struct fwtty_peer *peer;
serial = __fwserial_lookup_rcu(card);
if (!serial) {
/*
* Something is very wrong - there should be a matching
* fw_serial structure for every fw_card. Maybe the remote node
* has created its remote unit device before this driver has
* been probed for any unit devices...
*/
fwtty_err(card, "unknown card (guid %016llx)\n",
(unsigned long long) card->guid);
return NULL;
}
list_for_each_entry_rcu(peer, &serial->peer_list, list) {
int g = peer->generation;
smp_rmb();
if (generation == g && id == peer->node_id)
return peer;
}
return NULL;
}
#ifdef DEBUG
static void __dump_peer_list(struct fw_card *card)
{
struct fw_serial *serial;
struct fwtty_peer *peer;
serial = __fwserial_lookup_rcu(card);
if (!serial)
return;
list_for_each_entry_rcu(peer, &serial->peer_list, list) {
int g = peer->generation;
smp_rmb();
fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
g, peer->node_id, (unsigned long long) peer->guid);
}
}
#else
#define __dump_peer_list(s)
#endif
static void fwserial_auto_connect(struct work_struct *work)
{
struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect);
int err;
err = fwserial_connect_peer(peer);
if (err == -EAGAIN && ++peer->connect_retries < MAX_CONNECT_RETRIES)
schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
}
static void fwserial_peer_workfn(struct work_struct *work)
{
struct fwtty_peer *peer = to_peer(work, work);
peer->workfn(work);
}
/**
* fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
* @serial: aggregate representing the specific fw_card to add the peer to
* @unit: 'peer' to create and add to peer_list of serial
*
* Adds a 'peer' (ie, a local or remote 'serial' unit device) to the list of
* peers for a specific fw_card. Optionally, auto-attach this peer to an
* available tty port. This function is called either directly or indirectly
* as a result of a 'serial' unit device being created & probed.
*
* Note: this function is serialized with fwserial_remove_peer() by the
* fwserial_list_mutex held in fwserial_probe().
*
* A 1:1 correspondence between an fw_unit and an fwtty_peer is maintained
* via the dev_set_drvdata() for the device of the fw_unit.
*/
static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
{
struct device *dev = &unit->device;
struct fw_device *parent = fw_parent_device(unit);
struct fwtty_peer *peer;
struct fw_csr_iterator ci;
int key, val;
int generation;
peer = kzalloc(sizeof(*peer), GFP_KERNEL);
if (!peer)
return -ENOMEM;
peer_set_state(peer, FWPS_NOT_ATTACHED);
dev_set_drvdata(dev, peer);
peer->unit = unit;
peer->guid = (u64)parent->config_rom[3] << 32 | parent->config_rom[4];
peer->speed = parent->max_speed;
peer->max_payload = min(device_max_receive(parent),
link_speed_to_max_payload(peer->speed));
generation = parent->generation;
smp_rmb();
peer->node_id = parent->node_id;
smp_wmb();
peer->generation = generation;
/* retrieve the mgmt bus addr from the unit directory */
fw_csr_iterator_init(&ci, unit->directory);
while (fw_csr_iterator_next(&ci, &key, &val)) {
if (key == (CSR_OFFSET | CSR_DEPENDENT_INFO)) {
peer->mgmt_addr = CSR_REGISTER_BASE + 4 * val;
break;
}
}
if (peer->mgmt_addr == 0ULL) {
/*
* No mgmt address effectively disables VIRT_CABLE_PLUG -
* this peer will not be able to attach to a remote
*/
peer_set_state(peer, FWPS_NO_MGMT_ADDR);
}
spin_lock_init(&peer->lock);
peer->port = NULL;
init_timer(&peer->timer);
INIT_WORK(&peer->work, fwserial_peer_workfn);
INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
/* associate peer with specific fw_card */
peer->serial = serial;
list_add_rcu(&peer->list, &serial->peer_list);
fwtty_info(&peer->unit, "peer added (guid:%016llx)\n",
(unsigned long long)peer->guid);
/* identify the local unit & virt cable to loopback port */
if (parent->is_local) {
serial->self = peer;
if (create_loop_dev) {
struct fwtty_port *port;
port = fwserial_claim_port(peer, num_ttys);
if (!IS_ERR(port)) {
struct virt_plug_params params;
spin_lock_bh(&peer->lock);
peer->port = port;
fill_plug_params(&params, port);
fwserial_virt_plug_complete(peer, &params);
spin_unlock_bh(&peer->lock);
fwtty_write_port_status(port);
}
}
} else if (auto_connect) {
/* auto-attach to remote units only (if policy allows) */
schedule_delayed_work(&peer->connect, 1);
}
return 0;
}
/**
* fwserial_remove_peer - remove a 'serial' unit device as a 'peer'
*
* Remove a 'peer' from its list of peers. This function is only
* called by fwserial_remove() on bus removal of the unit device.
*
* Note: this function is serialized with fwserial_add_peer() by the
* fwserial_list_mutex held in fwserial_remove().
*/
static void fwserial_remove_peer(struct fwtty_peer *peer)
{
struct fwtty_port *port;
spin_lock_bh(&peer->lock);
peer_set_state(peer, FWPS_GONE);
spin_unlock_bh(&peer->lock);
cancel_delayed_work_sync(&peer->connect);
cancel_work_sync(&peer->work);
spin_lock_bh(&peer->lock);
/* if this unit is the local unit, clear link */
if (peer == peer->serial->self)
peer->serial->self = NULL;
/* cancel the request timeout timer (if running) */
del_timer(&peer->timer);
port = peer->port;
peer->port = NULL;
list_del_rcu(&peer->list);
fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n",
(unsigned long long)peer->guid);
spin_unlock_bh(&peer->lock);
if (port)
fwserial_release_port(port, true);
synchronize_rcu();
kfree(peer);
}
/**
* fwserial_create - init everything to create TTYs for a specific fw_card
* @unit: fw_unit for first 'serial' unit device probed for this fw_card
*
* This function inits the aggregate structure (an fw_serial instance)
* used to manage the TTY ports registered by a specific fw_card. Also, the
* unit device is added as the first 'peer'.
*
* This unit device may represent a local unit device (as specified by the
* config ROM unit directory) or it may represent a remote unit device
* (as specified by the reading of the remote node's config ROM).
*
* Returns 0 to indicate "ownership" of the unit device, or a negative errno
* value to indicate which error.
*/
static int fwserial_create(struct fw_unit *unit)
{
struct fw_device *parent = fw_parent_device(unit);
struct fw_card *card = parent->card;
struct fw_serial *serial;
struct fwtty_port *port;
struct device *tty_dev;
int i, j;
int err;
serial = kzalloc(sizeof(*serial), GFP_KERNEL);
if (!serial)
return -ENOMEM;
kref_init(&serial->kref);
serial->card = card;
INIT_LIST_HEAD(&serial->peer_list);
for (i = 0; i < num_ports; ++i) {
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
err = -ENOMEM;
goto free_ports;
}
tty_port_init(&port->port);
port->index = FWTTY_INVALID_INDEX;
port->port.ops = &fwtty_port_ops;
port->serial = serial;
tty_buffer_set_limit(&port->port, 128 * 1024);
spin_lock_init(&port->lock);
INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx);
INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks);
INIT_WORK(&port->hangup, fwtty_do_hangup);
init_waitqueue_head(&port->wait_tx);
port->max_payload = link_speed_to_max_payload(SCODE_100);
dma_fifo_init(&port->tx_fifo);
RCU_INIT_POINTER(port->peer, NULL);
serial->ports[i] = port;
/* get unique bus addr region for port's status & recv fifo */
port->rx_handler.length = FWTTY_PORT_RXFIFO_LEN + 4;
port->rx_handler.address_callback = fwtty_port_handler;
port->rx_handler.callback_data = port;
/*
* XXX: use custom memory region above cpu physical memory addrs
* this will ease porting to 64-bit firewire adapters
*/
err = fw_core_add_address_handler(&port->rx_handler,
&fw_high_memory_region);
if (err) {
kfree(port);
goto free_ports;
}
}
/* preserve i for error cleanup */
err = fwtty_ports_add(serial);
if (err) {
fwtty_err(&unit, "no space in port table\n");
goto free_ports;
}
for (j = 0; j < num_ttys; ++j) {
tty_dev = tty_port_register_device(&serial->ports[j]->port,
fwtty_driver,
serial->ports[j]->index,
card->device);
if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev);
fwtty_err(&unit, "register tty device error (%d)\n",
err);
goto unregister_ttys;
}
serial->ports[j]->device = tty_dev;
}
/* preserve j for error cleanup */
if (create_loop_dev) {
struct device *loop_dev;
loop_dev = tty_port_register_device(&serial->ports[j]->port,
fwloop_driver,
loop_idx(serial->ports[j]),
card->device);
if (IS_ERR(loop_dev)) {
err = PTR_ERR(loop_dev);
fwtty_err(&unit, "create loop device failed (%d)\n",
err);
goto unregister_ttys;
}
serial->ports[j]->device = loop_dev;
serial->ports[j]->loopback = true;
}
if (!IS_ERR_OR_NULL(fwserial_debugfs)) {
serial->debugfs = debugfs_create_dir(dev_name(&unit->device),
fwserial_debugfs);
if (!IS_ERR_OR_NULL(serial->debugfs)) {
debugfs_create_file("peers", 0444, serial->debugfs,
serial, &fwtty_peers_fops);
debugfs_create_file("stats", 0444, serial->debugfs,
serial, &fwtty_stats_fops);
}
}
list_add_rcu(&serial->list, &fwserial_list);
fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
dev_name(card->device), (unsigned long long) card->guid);
err = fwserial_add_peer(serial, unit);
if (!err)
return 0;
fwtty_err(&unit, "unable to add peer unit device (%d)\n", err);
/* fall-through to error processing */
debugfs_remove_recursive(serial->debugfs);
list_del_rcu(&serial->list);
if (create_loop_dev)
tty_unregister_device(fwloop_driver,
loop_idx(serial->ports[j]));
unregister_ttys:
for (--j; j >= 0; --j)
tty_unregister_device(fwtty_driver, serial->ports[j]->index);
kref_put(&serial->kref, fwserial_destroy);
return err;
free_ports:
for (--i; i >= 0; --i) {
tty_port_destroy(&serial->ports[i]->port);
kfree(serial->ports[i]);
}
kfree(serial);
return err;
}
/**
* fwserial_probe: bus probe function for firewire 'serial' unit devices
*
* A 'serial' unit device is created and probed as a result of:
* - declaring an ieee1394 bus id table for 'devices' matching a fabricated
* 'serial' unit specifier id
* - adding a unit directory to the config ROM(s) for a 'serial' unit
*
* The firewire core registers unit devices by enumerating unit directories
* of a node's config ROM after reading the config ROM when a new node is
* added to the bus topology after a bus reset.
*
* The practical implications of this are:
* - this probe is called for both local and remote nodes that have a 'serial'
* unit directory in their config ROM (that matches the specifiers in
* fwserial_id_table).
* - no specific order is enforced for local vs. remote unit devices
*
* This unit driver copes with the lack of specific order in the same way the
* firewire net driver does -- each probe, for either a local or remote unit
* device, is treated as a 'peer' (has a struct fwtty_peer instance) and the
* first peer created for a given fw_card (tracked by the global fwserial_list)
* creates the underlying TTYs (aggregated in a fw_serial instance).
*
* NB: an early attempt to differentiate local & remote unit devices by creating
* peers only for remote units and fw_serial instances (with their
* associated TTY devices) only for local units was discarded. Managing
* the peer lifetimes on device removal proved too complicated.
*
* fwserial_probe/fwserial_remove are effectively serialized by the
* fwserial_list_mutex. This is necessary because the addition of the first peer
* for a given fw_card will trigger the creation of the fw_serial for that
* fw_card, which must not simultaneously contend with the removal of the
* last peer for a given fw_card triggering the destruction of the same
* fw_serial for the same fw_card.
*/
static int fwserial_probe(struct fw_unit *unit,
const struct ieee1394_device_id *id)
{
struct fw_serial *serial;
int err;
mutex_lock(&fwserial_list_mutex);
serial = fwserial_lookup(fw_parent_device(unit)->card);
if (!serial)
err = fwserial_create(unit);
else
err = fwserial_add_peer(serial, unit);
mutex_unlock(&fwserial_list_mutex);
return err;
}
/**
* fwserial_remove: bus removal function for firewire 'serial' unit devices
*
* The corresponding 'peer' for this unit device is removed from the list of
* peers for the associated fw_serial (which has a 1:1 correspondence with a
* specific fw_card). If this is the last peer being removed, then trigger
* the destruction of the underlying TTYs.
*/
static void fwserial_remove(struct fw_unit *unit)
{
struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
struct fw_serial *serial = peer->serial;
int i;
mutex_lock(&fwserial_list_mutex);
fwserial_remove_peer(peer);
if (list_empty(&serial->peer_list)) {
/* unlink from the fwserial_list here */
list_del_rcu(&serial->list);
debugfs_remove_recursive(serial->debugfs);
for (i = 0; i < num_ttys; ++i)
fwserial_close_port(fwtty_driver, serial->ports[i]);
if (create_loop_dev)
fwserial_close_port(fwloop_driver, serial->ports[i]);
kref_put(&serial->kref, fwserial_destroy);
}
mutex_unlock(&fwserial_list_mutex);
}
/**
* fwserial_update: bus update function for 'firewire' serial unit devices
*
* Updates the new node_id and bus generation for this peer. Note that locking
* is unnecessary; but careful memory barrier usage is important to enforce the
* load and store order of generation & node_id.
*
* The fw-core orders the write of node_id before generation in the parent
* fw_device to ensure that a stale node_id cannot be used with a current
* bus generation. So the generation value must be read before the node_id.
*
* In turn, this orders the write of node_id before generation in the peer to
* also ensure a stale node_id cannot be used with a current bus generation.
*/
static void fwserial_update(struct fw_unit *unit)
{
struct fw_device *parent = fw_parent_device(unit);
struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
int generation;
generation = parent->generation;
smp_rmb();
peer->node_id = parent->node_id;
smp_wmb();
peer->generation = generation;
}
static const struct ieee1394_device_id fwserial_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = LINUX_VENDOR_ID,
.version = FWSERIAL_VERSION,
},
{ }
};
static struct fw_driver fwserial_driver = {
.driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = fwserial_probe,
.update = fwserial_update,
.remove = fwserial_remove,
.id_table = fwserial_id_table,
};
#define FW_UNIT_SPECIFIER(id) ((CSR_SPECIFIER_ID << 24) | (id))
#define FW_UNIT_VERSION(ver) ((CSR_VERSION << 24) | (ver))
#define FW_UNIT_ADDRESS(ofs) (((CSR_OFFSET | CSR_DEPENDENT_INFO) << 24) \
| (((ofs) - CSR_REGISTER_BASE) >> 2))
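/*
 * Example: FW_UNIT_ADDRESS(CSR_REGISTER_BASE + 0x1e0000) stores the
 * quadlet offset 0x1e0000 >> 2; fwserial_add_peer() reverses this
 * with mgmt_addr = CSR_REGISTER_BASE + 4 * val.
 */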
/* XXX: config ROM definitions could be improved with semi-automated offset
* and length calculation
*/
#define FW_ROM_LEN(quads) ((quads) << 16)
#define FW_ROM_DESCRIPTOR(ofs) (((CSR_LEAF | CSR_DESCRIPTOR) << 24) | (ofs))
struct fwserial_unit_directory_data {
u32 len_crc;
u32 unit_specifier;
u32 unit_sw_version;
u32 unit_addr_offset;
u32 desc1_ofs;
u32 desc1_len_crc;
u32 desc1_data[5];
} __packed;
static struct fwserial_unit_directory_data fwserial_unit_directory_data = {
.len_crc = FW_ROM_LEN(4),
.unit_specifier = FW_UNIT_SPECIFIER(LINUX_VENDOR_ID),
.unit_sw_version = FW_UNIT_VERSION(FWSERIAL_VERSION),
.desc1_ofs = FW_ROM_DESCRIPTOR(1),
.desc1_len_crc = FW_ROM_LEN(5),
.desc1_data = {
0x00000000, /* type = text */
0x00000000, /* enc = ASCII, lang EN */
0x4c696e75, /* 'Linux TTY' */
0x78205454,
0x59000000,
},
};
static struct fw_descriptor fwserial_unit_directory = {
.length = sizeof(fwserial_unit_directory_data) / sizeof(u32),
.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
.data = (u32 *)&fwserial_unit_directory_data,
};
/*
* The management address is in the unit space region but above other known
* address users (to keep wild writes from causing havoc)
*/
static const struct fw_address_region fwserial_mgmt_addr_region = {
.start = CSR_REGISTER_BASE + 0x1e0000ULL,
.end = 0x1000000000000ULL,
};
static struct fw_address_handler fwserial_mgmt_addr_handler;
/**
* fwserial_handle_plug_req - handle VIRT_CABLE_PLUG request work
* @work: ptr to peer->work
*
* Attempts to complete the VIRT_CABLE_PLUG handshake sequence for this peer.
*
* This checks for a collided request -- i.e., that a VIRT_CABLE_PLUG request
* was already sent to this peer. If so, the collision is resolved by comparing
* guid values; the loser sends the plug response.
*
* Note: if an error prevents a response, don't do anything -- the
* remote will timeout its request.
*/
static void fwserial_handle_plug_req(struct work_struct *work)
{
struct fwtty_peer *peer = to_peer(work, work);
struct virt_plug_params *plug_req = &peer->work_params.plug_req;
struct fwtty_port *port;
struct fwserial_mgmt_pkt *pkt;
int rcode;
pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
if (!pkt)
return;
port = fwserial_find_port(peer);
spin_lock_bh(&peer->lock);
switch (peer->state) {
case FWPS_NOT_ATTACHED:
if (!port) {
fwtty_err(&peer->unit, "no more ports avail\n");
fill_plug_rsp_nack(pkt);
} else {
peer->port = port;
fill_plug_rsp_ok(pkt, peer->port);
peer_set_state(peer, FWPS_PLUG_RESPONDING);
/* don't release claimed port */
port = NULL;
}
break;
case FWPS_PLUG_PENDING:
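/* Collision: both sides sent VIRT_CABLE_PLUG. The higher guid wins
 * and keeps its own request pending; the loser responds below.
 */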
if (peer->serial->card->guid > peer->guid)
goto cleanup;
/* We lost - hijack the already-claimed port and send ok */
del_timer(&peer->timer);
fill_plug_rsp_ok(pkt, peer->port);
peer_set_state(peer, FWPS_PLUG_RESPONDING);
break;
default:
fill_plug_rsp_nack(pkt);
}
spin_unlock_bh(&peer->lock);
if (port)
fwserial_release_port(port, false);
rcode = fwserial_send_mgmt_sync(peer, pkt);
spin_lock_bh(&peer->lock);
if (peer->state == FWPS_PLUG_RESPONDING) {
if (rcode == RCODE_COMPLETE) {
struct fwtty_port *tmp = peer->port;
fwserial_virt_plug_complete(peer, plug_req);
spin_unlock_bh(&peer->lock);
fwtty_write_port_status(tmp);
spin_lock_bh(&peer->lock);
} else {
fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode);
port = peer_revert_state(peer);
}
}
cleanup:
spin_unlock_bh(&peer->lock);
if (port)
fwserial_release_port(port, false);
kfree(pkt);
}
static void fwserial_handle_unplug_req(struct work_struct *work)
{
struct fwtty_peer *peer = to_peer(work, work);
struct fwtty_port *port = NULL;
struct fwserial_mgmt_pkt *pkt;
int rcode;
pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
if (!pkt)
return;
spin_lock_bh(&peer->lock);
switch (peer->state) {
case FWPS_ATTACHED:
fill_unplug_rsp_ok(pkt);
peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
break;
case FWPS_UNPLUG_PENDING:
if (peer->serial->card->guid > peer->guid)
goto cleanup;
/* We lost - send unplug rsp */
del_timer(&peer->timer);
fill_unplug_rsp_ok(pkt);
peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
break;
default:
fill_unplug_rsp_nack(pkt);
}
spin_unlock_bh(&peer->lock);
rcode = fwserial_send_mgmt_sync(peer, pkt);
spin_lock_bh(&peer->lock);
if (peer->state == FWPS_UNPLUG_RESPONDING) {
if (rcode != RCODE_COMPLETE)
fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n",
rcode);
port = peer_revert_state(peer);
}
cleanup:
spin_unlock_bh(&peer->lock);
if (port)
fwserial_release_port(port, true);
kfree(pkt);
}
static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
struct fwserial_mgmt_pkt *pkt,
unsigned long long addr,
size_t len)
{
struct fwtty_port *port = NULL;
bool reset = false;
int rcode;
if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr))
return RCODE_ADDRESS_ERROR;
if (len != be16_to_cpu(pkt->hdr.len) ||
len != mgmt_pkt_expected_len(pkt->hdr.code))
return RCODE_DATA_ERROR;
spin_lock_bh(&peer->lock);
if (peer->state == FWPS_GONE) {
/*
* This should never happen - it would mean that the
* remote unit that just wrote this transaction was
* already removed from the bus -- and the removal was
* processed before we received this transaction.
*/
fwtty_err(&peer->unit, "peer already removed\n");
spin_unlock_bh(&peer->lock);
return RCODE_ADDRESS_ERROR;
}
rcode = RCODE_COMPLETE;
fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx\n", pkt->hdr.code);
switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) {
case FWSC_VIRT_CABLE_PLUG:
if (work_pending(&peer->work)) {
fwtty_err(&peer->unit, "plug req: busy\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
peer->work_params.plug_req = pkt->plug_req;
peer->workfn = fwserial_handle_plug_req;
queue_work(system_unbound_wq, &peer->work);
}
break;
case FWSC_VIRT_CABLE_PLUG_RSP:
if (peer->state != FWPS_PLUG_PENDING) {
rcode = RCODE_CONFLICT_ERROR;
} else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) {
fwtty_notice(&peer->unit, "NACK plug rsp\n");
port = peer_revert_state(peer);
} else {
struct fwtty_port *tmp = peer->port;
fwserial_virt_plug_complete(peer, &pkt->plug_rsp);
spin_unlock_bh(&peer->lock);
fwtty_write_port_status(tmp);
spin_lock_bh(&peer->lock);
}
break;
case FWSC_VIRT_CABLE_UNPLUG:
if (work_pending(&peer->work)) {
fwtty_err(&peer->unit, "unplug req: busy\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
peer->workfn = fwserial_handle_unplug_req;
queue_work(system_unbound_wq, &peer->work);
}
break;
case FWSC_VIRT_CABLE_UNPLUG_RSP:
if (peer->state != FWPS_UNPLUG_PENDING) {
rcode = RCODE_CONFLICT_ERROR;
} else {
if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
fwtty_notice(&peer->unit, "NACK unplug?\n");
port = peer_revert_state(peer);
reset = true;
}
break;
default:
fwtty_err(&peer->unit, "unknown mgmt code %d\n",
be16_to_cpu(pkt->hdr.code));
rcode = RCODE_DATA_ERROR;
}
spin_unlock_bh(&peer->lock);
if (port)
fwserial_release_port(port, reset);
return rcode;
}
/**
* fwserial_mgmt_handler: bus address handler for mgmt requests
* @parameters: fw_address_callback_t as specified by firewire core interface
*
* This handler is responsible for handling virtual cable requests from remotes
* for all cards.
*/
static void fwserial_mgmt_handler(struct fw_card *card,
struct fw_request *request,
int tcode, int destination, int source,
int generation,
unsigned long long addr,
void *data, size_t len,
void *callback_data)
{
struct fwserial_mgmt_pkt *pkt = data;
struct fwtty_peer *peer;
int rcode;
rcu_read_lock();
peer = __fwserial_peer_by_node_id(card, generation, source);
if (!peer) {
fwtty_dbg(card, "peer(%d:%x) not found\n", generation, source);
__dump_peer_list(card);
rcode = RCODE_CONFLICT_ERROR;
} else {
switch (tcode) {
case TCODE_WRITE_BLOCK_REQUEST:
rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len);
break;
default:
rcode = RCODE_TYPE_ERROR;
}
}
rcu_read_unlock();
fw_send_response(card, request, rcode);
}
static int __init fwserial_init(void)
{
int err, num_loops = !!(create_loop_dev);
/* XXX: placeholder for a "firewire" debugfs node */
fwserial_debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
/* num_ttys/num_ports must not be set above the static alloc avail */
if (num_ttys + num_loops > MAX_CARD_PORTS)
num_ttys = MAX_CARD_PORTS - num_loops;
num_ports = num_ttys + num_loops;
fwtty_driver = tty_alloc_driver(MAX_TOTAL_PORTS, TTY_DRIVER_REAL_RAW
| TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(fwtty_driver)) {
err = PTR_ERR(fwtty_driver);
goto remove_debugfs;
}
fwtty_driver->driver_name = KBUILD_MODNAME;
fwtty_driver->name = tty_dev_name;
fwtty_driver->major = 0;
fwtty_driver->minor_start = 0;
fwtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
fwtty_driver->subtype = SERIAL_TYPE_NORMAL;
fwtty_driver->init_termios = tty_std_termios;
fwtty_driver->init_termios.c_cflag |= CLOCAL;
tty_set_operations(fwtty_driver, &fwtty_ops);
err = tty_register_driver(fwtty_driver);
if (err) {
pr_err("register tty driver failed (%d)\n", err);
goto put_tty;
}
if (create_loop_dev) {
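/* one loopback minor per card: MAX_TOTAL_PORTS / num_ports cards max */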
fwloop_driver = tty_alloc_driver(MAX_TOTAL_PORTS / num_ports,
TTY_DRIVER_REAL_RAW
| TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(fwloop_driver)) {
err = PTR_ERR(fwloop_driver);
goto unregister_driver;
}
fwloop_driver->driver_name = KBUILD_MODNAME "_loop";
fwloop_driver->name = loop_dev_name;
fwloop_driver->major = 0;
fwloop_driver->minor_start = 0;
fwloop_driver->type = TTY_DRIVER_TYPE_SERIAL;
fwloop_driver->subtype = SERIAL_TYPE_NORMAL;
fwloop_driver->init_termios = tty_std_termios;
fwloop_driver->init_termios.c_cflag |= CLOCAL;
tty_set_operations(fwloop_driver, &fwloop_ops);
err = tty_register_driver(fwloop_driver);
if (err) {
pr_err("register loop driver failed (%d)\n", err);
goto put_loop;
}
}
fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
sizeof(struct fwtty_transaction),
0, 0, fwtty_txn_constructor);
if (!fwtty_txn_cache) {
err = -ENOMEM;
goto unregister_loop;
}
/*
* Ideally, this address handler would be registered per local node
* (rather than the same handler for all local nodes). However,
* since the firewire core requires the config rom descriptor *before*
* the local unit device(s) are created, a single management handler
* must suffice for all local serial units.
*/
fwserial_mgmt_addr_handler.length = sizeof(struct fwserial_mgmt_pkt);
fwserial_mgmt_addr_handler.address_callback = fwserial_mgmt_handler;
err = fw_core_add_address_handler(&fwserial_mgmt_addr_handler,
&fwserial_mgmt_addr_region);
if (err) {
pr_err("add management handler failed (%d)\n", err);
goto destroy_cache;
}
fwserial_unit_directory_data.unit_addr_offset =
FW_UNIT_ADDRESS(fwserial_mgmt_addr_handler.offset);
err = fw_core_add_descriptor(&fwserial_unit_directory);
if (err) {
pr_err("add unit descriptor failed (%d)\n", err);
goto remove_handler;
}
err = driver_register(&fwserial_driver.driver);
if (err) {
pr_err("register fwserial driver failed (%d)\n", err);
goto remove_descriptor;
}
return 0;
remove_descriptor:
fw_core_remove_descriptor(&fwserial_unit_directory);
remove_handler:
fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
destroy_cache:
kmem_cache_destroy(fwtty_txn_cache);
unregister_loop:
if (create_loop_dev)
tty_unregister_driver(fwloop_driver);
put_loop:
if (create_loop_dev)
put_tty_driver(fwloop_driver);
unregister_driver:
tty_unregister_driver(fwtty_driver);
put_tty:
put_tty_driver(fwtty_driver);
remove_debugfs:
debugfs_remove_recursive(fwserial_debugfs);
return err;
}
static void __exit fwserial_exit(void)
{
driver_unregister(&fwserial_driver.driver);
fw_core_remove_descriptor(&fwserial_unit_directory);
fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
kmem_cache_destroy(fwtty_txn_cache);
if (create_loop_dev) {
tty_unregister_driver(fwloop_driver);
put_tty_driver(fwloop_driver);
}
tty_unregister_driver(fwtty_driver);
put_tty_driver(fwtty_driver);
debugfs_remove_recursive(fwserial_debugfs);
}
module_init(fwserial_init);
module_exit(fwserial_exit);
MODULE_AUTHOR("Peter Hurley (peter@hurleysoftware.com)");
MODULE_DESCRIPTION("FireWire Serial TTY Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwserial_id_table);
MODULE_PARM_DESC(ttys, "Number of ttys to create for each local firewire node");
MODULE_PARM_DESC(auto, "Auto-connect a tty to each firewire node discovered");
MODULE_PARM_DESC(loop, "Create a loopback device, fwloop<n>, with ttys");
| gpl-2.0 |
jumpstarter-io/gdb | gdb/testsuite/gdb.trace/unavailable-dwarf-piece.c | 46 | 1664 | /* This testcase is part of GDB, the GNU debugger.
Copyright 2013-2015 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
struct s
{
unsigned char a;
unsigned char b;
unsigned char c;
};
struct t
{
/* First, a complete byte. */
unsigned char a;
/* Next, 8 single bits. */
unsigned char b : 1;
unsigned char c : 1;
unsigned char d : 1;
unsigned char e : 1;
unsigned char f : 1;
unsigned char g : 1;
unsigned char h : 1;
unsigned char i : 1;
/* Now another byte. */
unsigned char j;
};
void
end (void)
{
/* Nothing. */
}
void
dummy (void)
{
/* Nothing. */
}
int
foo (struct s x, struct s y, struct s z)
{
dummy ();
asm (".global foo_end_lbl\nfoo_end_lbl:");
return 0;
}
int
bar (struct t x, struct t y, struct t z)
{
dummy ();
asm (".global bar_end_lbl\nbar_end_lbl:");
return 0;
}
int
main (void)
{
struct s v = { 0, 1, 2 };
struct t w = { 5, 0, 1, 0, 1, 0, 1, 0, 1, 7 };
int ans;
ans = foo (v, v, v);
end ();
ans = bar (w, w, w);
end ();
return ans;
}
| gpl-2.0 |
AveryOS/binutils | gdb/testsuite/gdb.base/call-sc.c | 46 | 1522 | /* This testcase is part of GDB, the GNU debugger.
Copyright 2004-2015 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* Useful abbreviations. */
typedef void t;
typedef char tc;
typedef short ts;
typedef int ti;
typedef long tl;
typedef long long tll;
typedef float tf;
typedef double td;
typedef long double tld;
typedef enum { e = '1' } te;
/* Force the type of each field. */
#ifndef T
typedef t T;
#endif
T foo = '1', L;
T fun()
{
return foo;
}
void Fun(T foo)
{
L = foo;
}
void zed ()
{
L = 'Z';
}
int main()
{
int i;
Fun(foo);
/* An infinite loop that first clears all the variables and then
calls the function. This "hack" is to make re-testing easier -
"advance fun" is guaranteed to have always been preceded by a
global variable clearing zed call. */
zed ();
while (1)
{
L = fun ();
zed ();
}
return 0;
}
| gpl-2.0 |
bju2000/android_kernel_samsung_slteskt | net/ipv6/ping.c | 46 | 5994 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* "Ping" sockets
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Based on ipv4/ping.c code.
*
* Authors: Lorenzo Colitti (IPv6 support)
* Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6),
* Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32)
*
*/
#include <linux/export.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/transp_v6.h>
#include <net/ping.h>
struct proto pingv6_prot = {
.name = "PINGv6",
.owner = THIS_MODULE,
.init = ping_init_sock,
.close = ping_close,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = ping_v6_sendmsg,
.recvmsg = ping_recvmsg,
.bind = ping_bind,
.backlog_rcv = ping_queue_rcv_skb,
.hash = ping_hash,
.unhash = ping_unhash,
.get_port = ping_get_port,
.obj_size = sizeof(struct raw6_sock),
};
EXPORT_SYMBOL_GPL(pingv6_prot);
static struct inet_protosw pingv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMPV6,
.prot = &pingv6_prot,
.ops = &inet6_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
};
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
return -EAFNOSUPPORT;
}
int dummy_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
return -EAFNOSUPPORT;
}
int dummy_icmpv6_err_convert(u8 type, u8 code, int *err)
{
return -EAFNOSUPPORT;
}
void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload) {}
int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict)
{
return 0;
}
int __init pingv6_init(void)
{
pingv6_ops.ipv6_recv_error = ipv6_recv_error;
pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
return inet6_register_protosw(&pingv6_protosw);
}
/* This never gets called because it's not possible to unload the ipv6 module,
* but just in case.
*/
void pingv6_exit(void)
{
pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
pingv6_ops.ip6_datagram_recv_ctl = dummy_datagram_recv_ctl;
pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
inet6_unregister_protosw(&pingv6_protosw);
}
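/*
 * Userspace usage sketch (illustrative; it assumes the caller's gid falls
 * inside the net.ipv4.ping_group_range sysctl): an unprivileged ICMPv6
 * echo socket is created with
 *
 *     int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);
 *
 * after which sendto()/sendmsg() on it lands in ping_v6_sendmsg() below;
 * the echo identifier is taken from the local "port" the socket is bound to.
 */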
int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct icmp6hdr user_icmph;
int addr_type;
struct in6_addr *daddr;
int iif = 0;
struct flowi6 fl6;
int err;
int hlimit;
struct dst_entry *dst;
struct rt6_info *rt;
struct pingfakehdr pfh;
pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph,
sizeof(user_icmph));
if (err)
return err;
if (msg->msg_name) {
struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
u->sin6_family != AF_INET6) {
return -EINVAL;
}
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != u->sin6_scope_id) {
return -EINVAL;
}
daddr = &(u->sin6_addr);
iif = u->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &np->daddr;
}
if (!iif)
iif = sk->sk_bound_dev_if;
addr_type = ipv6_addr_type(daddr);
if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
return -EINVAL;
if (addr_type & IPV6_ADDR_MAPPED)
return -EINVAL;
/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
fl6.flowi6_uid = sock_i_uid(sk);
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1);
if (IS_ERR(dst))
return PTR_ERR(dst);
rt = (struct rt6_info *) dst;
np = inet6_sk(sk);
if (!np)
return -EBADF;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
pfh.icmph.type = user_icmph.icmp6_type;
pfh.icmph.code = user_icmph.icmp6_code;
pfh.icmph.checksum = 0;
pfh.icmph.un.echo.id = inet->inet_sport;
pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
pfh.iov = msg->msg_iov;
pfh.wcheck = 0;
pfh.family = AF_INET6;
if (ipv6_addr_is_multicast(&fl6.daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
lock_sock(sk);
err = ip6_append_data(sk, ping_getfrag, &pfh, len,
0, hlimit,
np->tclass, NULL, &fl6, rt,
MSG_DONTWAIT, np->dontfrag);
if (err) {
ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6,
(struct icmp6hdr *) &pfh.icmph,
len);
}
release_sock(sk);
if (err)
return err;
return len;
}
| gpl-2.0 |
keenet/kernel_ubuntu_aurora2 | drivers/amlogic/input/touchscreen/guitar_update.c | 46 | 28822 | /*
*
* Copyright (C) 2011 Goodix, Inc.
*
* Author: Scott
* Date: 2012.01.05
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <mach/gpio.h>
//#include <plat/gpio-cfg.h>
#include <linux/irq.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "gt82x.h"
//#ifdef AUTO_UPDATE_GUITAR
//**************************** firmware update module parameters ******************************************
#define SEARCH_FILE_TIMES 100
#define UPDATE_FILE_PATH_2 "/data/goodix/_goodix_update_.bin"
#define UPDATE_FILE_PATH_1 "/sdcard/goodix/_goodix_update_.bin"
#define PACK_SIZE 64 //update file package size
#define BIT_NVRAM_STROE 0
#define BIT_NVRAM_RECALL 1
#define BIT_NVRAM_LOCK 2
#define REG_NVRCS_H 0X12
#define REG_NVRCS_L 0X01
#define READ_FW_MSG_ADDR_H 0x0F
#define READ_FW_MSG_ADDR_L 0x7C
#define UPDATE_FW_MSG_ADDR_H 0x40
#define UPDATE_FW_MSG_ADDR_L 0x50
#define READ_MSK_VER_ADDR_H 0xC0
#define READ_MSK_VER_ADDR_L 0x09
#define FW_HEAD_LENGTH 30
#define FILE_HEAD_LENGTH 100
#define IGNORE_LENGTH 100
#define FW_MSG_LENGTH 7
#define UPDATE_DATA_LENGTH 5000
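/*
 * A sketch of the update-file layout as inferred from the constants above
 * and the reads in load_update_file() below; illustrative only, not taken
 * from Goodix documentation:
 *
 *   bytes [0 .. 99]    IGNORE_LENGTH: skipped entirely
 *   bytes [100 .. 199] FILE_HEAD_LENGTH: "Guitar" magic, one pad byte,
 *                      then the total number of masks
 *   per mask:          FW_HEAD_LENGTH (30) bytes of st_fw_head, followed
 *                      by UPDATE_DATA_LENGTH (5000) bytes of firmware data
 */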
static int guitar_update_proc(void*);
static u8 get_ic_fw_msg(struct goodix_ts_data *);
static int guitar_update_mode(struct goodix_ts_data *);
extern int goodix_init_panel(struct goodix_ts_data *, u8);
#pragma pack(1)
typedef struct
{
u8 type; //product type//
u16 version; //firmware version//
u8 msk_ver[4]; //mask version//
u8 st_addr[2]; //start address for burning//
u16 lenth; //firmware length//
u8 chk_sum[3];
u8 force_update[6];//force-update flag; "GOODIX" forces the update//
}st_fw_head;
#pragma pack()
typedef struct
{
u8 force_update;
u8 fw_flag;
loff_t gt_loc;
struct file *file;
st_fw_head ic_fw_msg;
mm_segment_t old_fs;
}st_update_msg;
st_update_msg update_msg;
//******************************************************************************
static int i2c_read_bytes(struct i2c_client *client, uint8_t *buf, int len)
{
struct i2c_msg msgs[2];
int ret=-1;
//send the write address
msgs[0].flags=!I2C_M_RD; //write message
msgs[0].addr=client->addr;
msgs[0].len=2;
msgs[0].buf=&buf[0];
//receive the data
msgs[1].flags=I2C_M_RD;//read message
msgs[1].addr=client->addr;
msgs[1].len=len-2;
msgs[1].buf=&buf[2];
ret=i2c_transfer(client->adapter,msgs, 2);
return ret;
}
/*******************************************************
Function:
Write data to the slave device.
Parameters:
client: the i2c device, including the device address
buf[0]~buf[1]: the first two bytes hold the write address
buf[2]~buf[len]: the data buffer
len: the data length
return:
the number of i2c messages executed
*******************************************************/
/*Function as i2c_master_send */
static int i2c_write_bytes(struct i2c_client *client,uint8_t *data,int len)
{
struct i2c_msg msg;
int ret=-1;
//send the device address
msg.flags=!I2C_M_RD;//write message
msg.addr=client->addr;
msg.len=len;
msg.buf=data;
ret=i2c_transfer(client->adapter,&msg, 1);
return ret;
}
/*******************************************************
Function:
Send the trailing (end) command.
ts: the client private data structure
return:
the result code; 0 means it executed normally
*******************************************************/
static int i2c_end_cmd(struct goodix_ts_data *ts)
{
int ret;
u8 end_cmd_data[2]={0x80, 0x00};
ret=i2c_write_bytes(ts->client,end_cmd_data,2);
return ret;//*/
}
static u8 is_equal( u8 *src , u8 *dst , int len )
{
int i;
for( i = 0 ; i < len ; i++ )
{
if (src[i] != dst[i])
{
return false;
}
}
return true;
}
static u8 get_ic_msg(struct goodix_ts_data *ts, u16 addr, u8* msg, s32 len)
{
s32 i = 0;
msg[0] = addr >> 8 & 0xff;
msg[1] = addr & 0xff;
for (i = 0; i < 5; i++)
{
if (i2c_read_bytes(ts->client, msg, ADDR_LENGTH + len) > 0)
{
break;
}
}
i2c_end_cmd(ts);
if (i >= 5)
{
DEBUG_UPDATE("Read data from 0x%02x%02x failed!\n", msg[0], msg[1]);
return fail;
}
return success;
}
static u8 clear_mix_flag(struct goodix_ts_data *ts)
{
s32 i = 0;
u8 buf[3];
buf[0] = 0x14;
buf[1] = 0x00;
buf[2] = 0x80;
for (i = 0; i < 5; i++)
{
if (i2c_write_bytes(ts->client, buf, 3) > 0)
{
break;
}
}
i2c_end_cmd(ts);
if (i >= 5)
{
DEBUG_UPDATE("Clear mix flag failed!\n");
return fail;
}
return success;
}
static u8 get_ic_fw_msg(struct goodix_ts_data *ts)
{
s32 ret = 0;
s32 i = 0;
u8 buf[32];
if (fail == clear_mix_flag(ts))
{
return fail;
}
//Get the mask version in rom of IC
if (fail == get_ic_msg(ts, READ_MSK_VER_ADDR_H << 8 | READ_MSK_VER_ADDR_L, buf, 4))
{
DEBUG_UPDATE("Read mask version failed!\n");
return fail;
}
memcpy(update_msg.ic_fw_msg.msk_ver, &buf[ADDR_LENGTH], 4);
DEBUG_UPDATE("IC The mask version in rom is %c%c%c%c.\n",
update_msg.ic_fw_msg.msk_ver[0],update_msg.ic_fw_msg.msk_ver[1],
update_msg.ic_fw_msg.msk_ver[2],update_msg.ic_fw_msg.msk_ver[3]);
#if 1
//Get the firmware msg in IC, include firmware version and checksum flag
for (i = 0; i < 2; i++)
{
if (fail == get_ic_msg(ts, READ_FW_MSG_ADDR_H<< 8 | READ_FW_MSG_ADDR_L, buf, 4))
{
DEBUG_UPDATE("Get firmware msg in IC error.\n");
return fail;
}
update_msg.force_update = buf[ADDR_LENGTH];
if (i == 0 && update_msg.force_update == 0xAA)
{
DEBUG_UPDATE("The check sum in ic is error.\n");
DEBUG_UPDATE("IC will be reset.\n");
DEBUG_UPDATE("If the check sum is still error,\n ");
DEBUG_UPDATE("The IC will be updated by force.\n");
guitar_reset(ts, 10);
continue;
//msleep(100);
}
break;
}
//ic_fw_msg.type = buf[ADDR_LENGTH + 1];
update_msg.ic_fw_msg.version = buf[ADDR_LENGTH + 2] << 8 | buf[ADDR_LENGTH + 3];
DEBUG_UPDATE("IC VID:0x%x\n", (int)update_msg.ic_fw_msg.version);
DEBUG_UPDATE("IC force update:%x\n", update_msg.force_update);
#endif
//Cuts the frequency
buf[0] = 0x15;
buf[1] = 0x22;
buf[2] = 0x18;
ret = i2c_write_bytes(ts->client, buf, 3);
if (ret <= 0)
{
return fail;
}
i2c_end_cmd(ts);
//Get the pid at 0x4011 in nvram
if (fail == get_ic_msg(ts, 0x4011, buf, 1))
{
DEBUG_UPDATE("Read pid failed!\n");
return fail;
}
update_msg.ic_fw_msg.type = buf[ADDR_LENGTH];
DEBUG_UPDATE("IC PID:%x\n", update_msg.ic_fw_msg.type);
// guitar_reset(ts, 10);
return success;
}
/*
* Steps of reset guitar
*1. Drive the INT pin low, delay 5ms
*2. Pull the RESET pin low for 100ms, then switch it to floating input
*3. Address the GUITAR over I2C
*4. Delay 100ms, then read 0xff (repeat steps 3-4 up to 80 times until success)
*5. If 0xff equals 0x55, return success; otherwise fail
*/
static int guitar_update_mode( struct goodix_ts_data *ts )
{
int ret = 1;
u8 retry;
unsigned char inbuf[3] = {0,0xff,0};
// step 1
guitar_enter_update_mode(ts);
msleep(5);
//step 2
guitar_reset(ts, 100);
for(retry=0;retry < 80; retry++)
{
//step 3
ret =i2c_write_bytes(ts->client, inbuf, 0); //Test I2C connection.
if (ret > 0)
{
DEBUG_UPDATE("<Set update mode>I2C is OK!\n");
//step 4
msleep(100);
ret =i2c_read_bytes(ts->client, inbuf, 3);
if (ret > 0)
{
DEBUG_UPDATE("The value of 0x00ff is 0x%02x\n", inbuf[2]);
//step 5
if(inbuf[2] == 0x55)
{
return success;
}
}
}
msleep(10);
}
DEBUG_UPDATE(KERN_INFO"Detect address %0X\n", ts->client->addr);
return fail;
}
u8 load_update_file(struct goodix_ts_data *ts, st_fw_head* fw_head, u8* data, u8* path)
{
u8 mask_num = 0;
int ret = 0;
int i = 0;
u8 buf[FW_HEAD_LENGTH];
if (path)
{
update_msg.file = filp_open(path, O_RDWR, 0666);
if (IS_ERR(update_msg.file))
{
DEBUG_UPDATE("Open update file(%s) error!\n", path);
return fail;
}
}
else
{
//Begin to search update file
for (i = 0; i < SEARCH_FILE_TIMES; i++)
{
update_msg.file = filp_open(UPDATE_FILE_PATH_1, O_RDWR, 0666);
if (IS_ERR(update_msg.file))
{
update_msg.file = filp_open(UPDATE_FILE_PATH_2, O_RDWR, 0666);//O_RDWR
if (IS_ERR(update_msg.file))
{
DEBUG_UPDATE("%3d:Searching file...\n", i);
msleep(3000);
continue;
}
else
{
break;
}
}
else
{
break;
}
}
if (i >= 100)
{
DEBUG_UPDATE("Can't find update file.\n");
return fail;
}
DEBUG_UPDATE("Find the update file.\n");
}
update_msg.old_fs = get_fs();
set_fs(KERNEL_DS);
update_msg.file->f_pos = IGNORE_LENGTH;
//Make sure the file is the right file.(By compare the "Guitar" flag)
ret = update_msg.file->f_op->read(update_msg.file, (char*)&buf, 6, &update_msg.file->f_pos);
if (ret < 0)
{
DEBUG_UPDATE("Read \"Guitar\" flag error.\n");
goto load_failed;
}
if (false == is_equal(buf, "Guitar", 6))
{
DEBUG_UPDATE("The flag is %s.Not equal!\n"
"The update file is incorrect!\n", buf);
goto load_failed;
}
DEBUG_UPDATE("The file flag is :%s.\n", buf);
//Get the total number of masks
update_msg.file->f_pos++; //ignore one byte.
ret = update_msg.file->f_op->read(update_msg.file, &mask_num, 1, &update_msg.file->f_pos);
if (ret < 0)
{
DEBUG_UPDATE("Didn't get the mask number from the file.\n");
goto load_failed;
}
DEBUG_UPDATE("FILE The total number of masks is:%d.\n", mask_num);
update_msg.file->f_pos = FILE_HEAD_LENGTH + IGNORE_LENGTH;
//Get the correct nvram data
//The correct conditions:
//1. the product id is the same
//2. the mask id is the same
//3. the nvram version in update file is greater than the nvram version in ic
//or force update flag is marked or the check sum in ic is wrong
update_msg.gt_loc = -1;
for ( i = 0; i < mask_num; i++)
{
ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, FW_HEAD_LENGTH, &update_msg.file->f_pos);
if (ret < 0)
{
DEBUG_UPDATE("Read update file head error.\n");
goto load_failed;
}
memcpy(fw_head, buf, sizeof(st_fw_head));
fw_head->version = buf[1] << 8 | buf[2];
fw_head->lenth = buf[9] << 8 | buf[10];
DEBUG_UPDATE("No.%d firmware\n", i);
DEBUG_UPDATE("FILE PID:%x\n", fw_head->type);
DEBUG_UPDATE("FILE VID:0x%x\n", fw_head->version);
DEBUG_UPDATE("FILE mask version:%c%c%c%c.\n", fw_head->msk_ver[0],
fw_head->msk_ver[1],fw_head->msk_ver[2],fw_head->msk_ver[3]);
DEBUG_UPDATE("FILE start address:0x%02x%02x.\n", fw_head->st_addr[0], fw_head->st_addr[1]);
DEBUG_UPDATE("FILE length:%d\n", (int)fw_head->lenth);
DEBUG_UPDATE("FILE force update flag:%s\n", fw_head->force_update);
DEBUG_UPDATE("FILE chksum:0x%02x%02x%02x\n", fw_head->chk_sum[0],
fw_head->chk_sum[1], fw_head->chk_sum[2]);
//First two conditions
if (is_equal(fw_head->msk_ver, update_msg.ic_fw_msg.msk_ver, sizeof(update_msg.ic_fw_msg.msk_ver))
&& update_msg.ic_fw_msg.type == fw_head->type)
{
DEBUG_UPDATE("Get the same mask version and same pid.\n");
//The third condition
if (fw_head->version > update_msg.ic_fw_msg.version
|| is_equal(fw_head->force_update, "GOODIX", 6)
|| update_msg.force_update == 0xAA)
{
// DEBUG_UPDATE("FILE read position:%d\n", file->f_pos);
// file->f_pos = FW_HEAD_LENGTH + FILE_HEAD_LENGTH + IGNORE_LENGTH;
if (is_equal(fw_head->force_update, "GOODIX", 6))
{
update_msg.gt_loc = update_msg.file->f_pos - FW_HEAD_LENGTH + sizeof(st_fw_head) - sizeof(fw_head->force_update);
}
ret = update_msg.file->f_op->read(update_msg.file, (char*)data, fw_head->lenth, &update_msg.file->f_pos);
if (ret <= 0)
{
DEBUG_UPDATE("Read firmware data in file error.\n");
goto load_failed;
}
// DEBUG_ARRAY(data, 512);
// set_fs(ts->old_fs);
// filp_close(ts->file, NULL);
DEBUG_UPDATE("Load data from file successfully.\n");
return success;
}
DEBUG_UPDATE("Don't meet the third condition.\n");
goto load_failed;
}
update_msg.file->f_pos += UPDATE_DATA_LENGTH;
}
load_failed:
set_fs(update_msg.old_fs);
filp_close(update_msg.file, NULL);
return fail;
}
static u8 guitar_nvram_store( struct goodix_ts_data *ts )
{
int ret;
int i;
u8 inbuf[3] = {REG_NVRCS_H,REG_NVRCS_L, 0x18};
ret = i2c_read_bytes(ts->client, inbuf, 3);
if ( ret < 0 )
{
return fail;
}
if ((inbuf[2] & BIT_NVRAM_LOCK ) == BIT_NVRAM_LOCK)
{
return fail;
}
inbuf[2] = 0x18;
inbuf[2] |= (1<<BIT_NVRAM_STROE); //store command
for ( i = 0 ; i < 300 ; i++ )
{
ret = i2c_write_bytes( ts->client, inbuf, 3 );
if ( ret > 0 )
return success;
}
return fail;
}
static u8 guitar_nvram_recall( struct goodix_ts_data *ts )
{
int ret;
u8 inbuf[3] = {REG_NVRCS_H,REG_NVRCS_L,0};
ret = i2c_read_bytes( ts->client, inbuf, 3 );
if ( ret < 0 )
{
return fail;
}
if ( ( inbuf[2]&BIT_NVRAM_LOCK) == BIT_NVRAM_LOCK )
{
return fail;
}
inbuf[2] = ( 1 << BIT_NVRAM_RECALL ); //recall command
ret = i2c_write_bytes( ts->client , inbuf, 3);
if (ret <= 0)
{
return fail;
}
return success;
}
static u8 guitar_update_nvram(struct goodix_ts_data *ts, st_fw_head* fw_head, u8 *nvram)
{
int length = 0;
int ret = 0;
int write_bytes = 0;
int retry = 0;
int i = 0;
int comp = 0;
u16 st_addr = 0;
u8 w_buf[PACK_SIZE + ADDR_LENGTH];
u8 r_buf[PACK_SIZE + ADDR_LENGTH];
if (fw_head->lenth > PACK_SIZE)
{
write_bytes = PACK_SIZE;
}
else
{
write_bytes = fw_head->lenth;
}
clear_mix_flag(ts);
st_addr = (fw_head->st_addr[0] << 8) | (fw_head->st_addr[1]&0xff);
memcpy(&w_buf[2], &nvram[length], write_bytes);
DEBUG_UPDATE("Total length:%d\n", (int)fw_head->lenth);
while(length < fw_head->lenth)
{
w_buf[0] = st_addr >> 8;
w_buf[1] = st_addr & 0xff;
DEBUG_UPDATE("Write address:0x%02x%02x\tlength:%d\n", w_buf[0], w_buf[1], write_bytes);
ret = i2c_write_bytes(ts->client, w_buf, ADDR_LENGTH + write_bytes);
if (ret <= 0)
{
if (retry++ > 10)
{
DEBUG_UPDATE("Write the same address 10 times.Give up!\n");
return fail;
}
DEBUG_UPDATE("Write error![guitar_update_nvram]\n");
continue;
}
else
{
// DEBUG_UPDATE("w_buf:\n");
// DEBUG_ARRAY(w_buf, ADDR_LENGTH + write_bytes);
/* r_buf[0] = 0x14;
r_buf[1] = 0x00;
r_buf[2] = 0x80;
i2c_write_bytes(ts->client, r_buf, 3);
r_buf[0] = 0x14;
r_buf[1] = 0x00;
i2c_read_bytes(ts->client, r_buf, 3);
DEBUG_UPDATE("I2CCS:0x%x\n", r_buf[2]);//*/
r_buf[0] = w_buf[0];
r_buf[1] = w_buf[1];
for (i = 0; i < 10; i++)
{
ret = i2c_read_bytes(ts->client, r_buf, ADDR_LENGTH + write_bytes);
if (ret <= 0)
{
continue;
}
break;
}
if (i >= 10)
{
DEBUG_UPDATE("Read error! Can't check the nvram data.\n");
return fail;
}
// DEBUG_UPDATE("r_buf:\n");
// DEBUG_ARRAY(r_buf, ADDR_LENGTH + write_bytes);
#if 0
if (fail == guitar_nvram_store(ts))
{
DEBUG_UPDATE("Store nvram failed.\n");
//continue;
}
return fail;
#endif
if (false == is_equal(r_buf, w_buf, ADDR_LENGTH + write_bytes))
{
if (comp ++ > 10)
{
DEBUG_UPDATE("Compare error!\n");
return fail;
}
DEBUG_UPDATE("Updating nvram: Not equal!\n");
DEBUG_UPDATE("r_buf:\n");
DEBUG_ARRAY(r_buf, ADDR_LENGTH + write_bytes);
DEBUG_UPDATE("w_buf:\n");
// DEBUG_ARRAY(w_buf, ADDR_LENGTH + write_bytes);
continue;
//return fail;
}
}
comp = 0;
retry = 0;
length += PACK_SIZE;
st_addr += PACK_SIZE;
if ((length + PACK_SIZE) > fw_head->lenth)
{
write_bytes = fw_head->lenth - length;
}
memcpy(&w_buf[2], &nvram[length], write_bytes);
}
return success;
}
static u8 guitar_update_firmware(struct goodix_ts_data *ts, st_fw_head* fw_head, u8 *nvram)
{
int retry;
int ret;
u32 status = 0;
u8 buf[32];
//Cuts the frequency
buf[0] = 0x15;
buf[1] = 0x22;
buf[2] = 0x18;
ret = i2c_write_bytes(ts->client, buf, 3);
if (ret <= 0)
{
return fail;
}
get_ic_msg(ts, 0x1522, buf, 1);
DEBUG_UPDATE("IC OSC_CAL:0x%02x.\n", buf[2]);
for (retry = 0; retry < 10; retry++)
{
//Write the 1st part (pid and vid)
/* if (!(status & 0x01))
{
buf[0] = UPDATE_FW_MSG_ADDR_H;
buf[1] = UPDATE_FW_MSG_ADDR_L;
buf[2] = fw_head->type;
buf[3] = fw_head->version >> 8;
buf[4] = fw_head->version & 0xff;
ret = i2c_write_bytes(ts->client, buf, 5);
if (ret <= 0)
{
continue;
}
else
{
DEBUG_UPDATE("Update pid and vid successfully!\n");
status |= 0x01;
msleep(1);
}
}
*/
//Write the 2nd part (nvram)
if (!(status & 0x02))
{
if (fail == guitar_update_nvram(ts, fw_head, nvram))
{
continue;
}
else
{
DEBUG_UPDATE("Update nvram successfully!\n");
status |= 0x02;
msleep(1);
}
}
//Write the 3rd part (check sum)
if (1)
{
buf[0] = 0x4f;
buf[1] = 0xf3;
memcpy(&buf[2], fw_head->chk_sum, sizeof(fw_head->chk_sum));
ret = i2c_write_bytes(ts->client, buf, 5);
if (ret <= 0)
{
continue;
}
else
{
DEBUG_UPDATE("Update check sum successfully!\n");
break;
}
}
}
if (retry >= 10)
{
return fail;
}
else
{
for (retry = 0; retry < 10; retry++)
{
buf[0] = 0x00;
buf[1] = 0xff;
buf[2] = 0x44;
ret = i2c_write_bytes(ts->client, buf, 3);
if (ret > 0)
{
break;
}
}
if (retry >= 10)
{
DEBUG_UPDATE("Write address at 0x00ff error!\n");
return fail;
}
msleep(10);
}
for (retry = 0; retry < 30; retry++)
{
msleep(1);
if (fail == get_ic_msg(ts, 0x00ff, buf, 1))
{
DEBUG_UPDATE("Read address at 0x00ff error!\t retry:%d\n", retry);
continue;
}
if (0xcc == buf[ADDR_LENGTH])
{
return success;
}
else
{
DEBUG_UPDATE("The value of 0x00ff: 0x%02x!\t retry:%d\n", buf[ADDR_LENGTH], retry);
continue;
}
}
DEBUG_UPDATE("The value of 0x00ff error.\n");
return fail;
}
static int guitar_update_proc(void *v_ts)
{
s32 ret;
u32 retry = 100;
u32 i = 0;
struct goodix_ts_data* ts = NULL;
u8* data = NULL;
u8* ic_nvram = NULL;
st_fw_head fw_head;
u8 buf[32];
ts = (struct goodix_ts_data*)v_ts;
data = kzalloc(UPDATE_DATA_LENGTH, GFP_KERNEL);
if (NULL == data)
{
DEBUG_UPDATE("data failed apply for memory.\n");
return fail;
}
ic_nvram = kzalloc(UPDATE_DATA_LENGTH, GFP_KERNEL);
if (NULL == ic_nvram)
{
DEBUG_UPDATE("ic_nvram failed apply for memory.\n");
goto app_mem_failed;
}
DEBUG_UPDATE("Apply for memory successfully.memory size: %d.\n", UPDATE_DATA_LENGTH);
msleep(1000);
DEBUG_UPDATE("Updating...\n");
if (fail == load_update_file(ts, &fw_head, &data[2], NULL))
{
DEBUG_UPDATE("Load file data failed!\n");
goto load_failed;
}
DEBUG_UPDATE("Load file data successfully!\n");
if(!ts->irq_is_disable)
{
disable_irq(ts->client->irq);
}
ts->irq_is_disable = 2;
for (i = 0; i < 5; i++)
{
if (fail == guitar_update_mode(ts))
{
DEBUG_UPDATE("Next try![Enter update mode]\n");
continue;
}
else
{
DEBUG_UPDATE("Set update mode successfully.\n");
break;
}
}
if (i >= 5)
{
DEBUG_UPDATE("Set update mode failed.\n");
return fail;
}
retry = 0;
while(retry++ < 5)
{
if (fail == guitar_update_firmware(ts, &fw_head, &data[2]))
{
DEBUG_UPDATE("Update firmware failed.\n");
continue;
}
DEBUG_UPDATE("Update firmware successfully.\n");
//while(1) // simulation store operation failed
if (fail == guitar_nvram_store(ts))
{
DEBUG_UPDATE("Store nvram failed.\n");
continue;
}
msleep(100);
if (fail == get_ic_msg(ts, 0x1201, buf, 1))
{
DEBUG_UPDATE("Read NVRCS failed.(Store)\n");
continue;
}
if (buf[ADDR_LENGTH] & 0x01)
{
DEBUG_UPDATE("Check NVRCS(0x%02x) failed.(Store)\n", buf[ADDR_LENGTH]);
continue;
}
DEBUG_UPDATE("Store nvram successfully.\n");
if (fail == guitar_nvram_recall(ts))
{
DEBUG_UPDATE("Recall nvram failed.\n");
continue;
}
msleep(5);
if (fail == get_ic_msg(ts, 0x1201, buf, 1))
{
DEBUG_UPDATE("Read NVRCS failed.(Recall)\n");
continue;
}
if (buf[ADDR_LENGTH] & 0x02)
{
DEBUG_UPDATE("Check NVRCS(0x%02x) failed.(Recall)\n", buf[ADDR_LENGTH]);
continue;
}
DEBUG_UPDATE("Recall nvram successfully.\n");
ic_nvram[0] = fw_head.st_addr[0];
ic_nvram[1] = fw_head.st_addr[1];
for ( i = 0; i < 10; i++)
{
ret = i2c_read_bytes(ts->client, ic_nvram, ADDR_LENGTH + fw_head.lenth);
if (ret <= 0)
{
continue;
}
break;
}
if (i >= 10)
{
DEBUG_UPDATE("Read nvram failed!\n");
continue;
}
DEBUG_UPDATE("Read nvram successfully!\n");
if (false == is_equal(&data[2], &ic_nvram[2], fw_head.lenth))
{
DEBUG_UPDATE("Nvram not equal!\n");
continue;
}
DEBUG_UPDATE("Check nvram by byte successfully!\n");
if (update_msg.gt_loc > 0)
{
DEBUG_UPDATE("Location:%d, Ret:%d.\n", (s32)update_msg.gt_loc, (s32)ret);
memset(buf, 0, sizeof(buf));
ret = update_msg.file->f_op->write(update_msg.file, buf, 6, &update_msg.gt_loc);
if (ret < 0)
{
DEBUG_UPDATE("Didn't clear the focre update flag in file.\n");
}
else
{
DEBUG_UPDATE("Clear the focre update flag in file.Location:%d, Ret:%d.\n", (s32)update_msg.gt_loc, (s32)ret);
}
}
DEBUG_UPDATE("Update successfully!\n");
break;
}
set_fs(update_msg.old_fs);
filp_close(update_msg.file, NULL);
guitar_leave_update_mode(ts);
DEBUG_UPDATE("Leave update mode!\n");
//Reset guitar
DEBUG_UPDATE("Reset IC and send config!\n");
guitar_reset(ts, 10);
for (i = 0; i < 3; i++)
{
if (fail == goodix_init_panel(ts, 1))
{
msleep(10);
continue;
}
break;
}
if (i >= 3)
{
DEBUG_UPDATE("Send config data failed.\n");
}
msleep(10);
ts->irq_is_disable = 0;
enable_irq(ts->client->irq);
load_failed:
kfree(ic_nvram);
app_mem_failed:
kfree(data);
if (retry < 5)
{
return success;
}
DEBUG_UPDATE("Update failed!\n");
return fail;
}
s32 init_update_proc(struct goodix_ts_data *ts)
{
u8 flag = 0;
struct task_struct *thread = NULL;
s32 retry = 0;
DEBUG_MSG("Ready to run update thread.\n");
update_msg.fw_flag = get_ic_fw_msg(ts);
if (fail == update_msg.fw_flag)
{
DEBUG_UPDATE("Try get ic msg in update mode.\n");
for (retry = 0; retry < 5; retry++)
{
if (success == guitar_update_mode(ts))
{
break;
}
}
if (retry >= 5)
{
update_msg.fw_flag = fail;
}
else
{
DEBUG_UPDATE("Get ic msg in update mode.\n");
update_msg.fw_flag = get_ic_fw_msg(ts);
update_msg.ic_fw_msg.version = 0xfff0;
if (update_msg.force_update == 0xAA)
{
flag = 0xff;
}
}
guitar_leave_update_mode(ts);
}
else
{
guitar_reset(ts, 10);
}
if (success == update_msg.fw_flag)
{
update_msg.gt_loc = -1;
thread = kthread_run(guitar_update_proc, (void*)ts, "guitar_update");
if (IS_ERR(thread))
{
dev_err(&ts->client->dev, " failed to create update thread\n");
}
if (0xff == flag)
{
return 0xff;
}
}
return success;
}
//#endif //endif AUTO_UPDATE_GUITAR
//******************************End of firmware update support*******************************
| gpl-2.0 |
aguirrem/at91-qemu | hw/i8254.c | 46 | 13649 | /*
* QEMU 8253/8254 interval timer emulation
*
* Copyright (c) 2003-2004 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "hw.h"
#include "pc.h"
#include "isa.h"
#include "qemu-timer.h"
//#define DEBUG_PIT
#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4
typedef struct PITChannelState {
int count; /* can be 65536 */
uint16_t latched_count;
uint8_t count_latched;
uint8_t status_latched;
uint8_t status;
uint8_t read_state;
uint8_t write_state;
uint8_t write_latch;
uint8_t rw_mode;
uint8_t mode;
uint8_t bcd; /* not supported */
uint8_t gate; /* timer start */
int64_t count_load_time;
/* irq handling */
int64_t next_transition_time;
QEMUTimer *irq_timer;
qemu_irq irq;
} PITChannelState;
struct PITState {
PITChannelState channels[3];
};
static PITState pit_state;
static void pit_irq_timer_update(PITChannelState *s, int64_t current_time);
static int pit_get_count(PITChannelState *s)
{
uint64_t d;
int counter;
d = muldiv64(qemu_get_clock(vm_clock) - s->count_load_time, PIT_FREQ, ticks_per_sec);
switch(s->mode) {
case 0:
case 1:
case 4:
case 5:
counter = (s->count - d) & 0xffff;
break;
case 3:
/* XXX: may be incorrect for odd counts */
counter = s->count - ((2 * d) % s->count);
break;
default:
counter = s->count - (d % s->count);
break;
}
return counter;
}
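/*
 * Worked example (illustrative): with a nanosecond vm_clock
 * (ticks_per_sec == 10^9), 1 ms after the reload
 * d = muldiv64(10^6, PIT_FREQ, 10^9) ~= 1193 PIT ticks, so a channel in
 * mode 2 loaded with count == 10000 reads back
 * counter = 10000 - (1193 % 10000) = 8807.
 */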
/* get pit output bit */
static int pit_get_out1(PITChannelState *s, int64_t current_time)
{
uint64_t d;
int out;
d = muldiv64(current_time - s->count_load_time, PIT_FREQ, ticks_per_sec);
switch(s->mode) {
default:
case 0:
out = (d >= s->count);
break;
case 1:
out = (d < s->count);
break;
case 2:
if ((d % s->count) == 0 && d != 0)
out = 1;
else
out = 0;
break;
case 3:
out = (d % s->count) < ((s->count + 1) >> 1);
break;
case 4:
case 5:
out = (d == s->count);
break;
}
return out;
}
int pit_get_out(PITState *pit, int channel, int64_t current_time)
{
PITChannelState *s = &pit->channels[channel];
return pit_get_out1(s, current_time);
}
/* return -1 if no transition will occur. */
static int64_t pit_get_next_transition_time(PITChannelState *s,
int64_t current_time)
{
uint64_t d, next_time, base;
int period2;
d = muldiv64(current_time - s->count_load_time, PIT_FREQ, ticks_per_sec);
switch(s->mode) {
default:
case 0:
case 1:
if (d < s->count)
next_time = s->count;
else
return -1;
break;
case 2:
base = (d / s->count) * s->count;
if ((d - base) == 0 && d != 0)
next_time = base + s->count;
else
next_time = base + s->count + 1;
break;
case 3:
base = (d / s->count) * s->count;
period2 = ((s->count + 1) >> 1);
if ((d - base) < period2)
next_time = base + period2;
else
next_time = base + s->count;
break;
case 4:
case 5:
if (d < s->count)
next_time = s->count;
else if (d == s->count)
next_time = s->count + 1;
else
return -1;
break;
}
/* convert to timer units */
next_time = s->count_load_time + muldiv64(next_time, ticks_per_sec, PIT_FREQ);
/* fix potential rounding problems */
/* XXX: better solution: use a clock at PIT_FREQ Hz */
if (next_time <= current_time)
next_time = current_time + 1;
return next_time;
}
/* val must be 0 or 1 */
void pit_set_gate(PITState *pit, int channel, int val)
{
PITChannelState *s = &pit->channels[channel];
switch(s->mode) {
default:
case 0:
case 4:
/* XXX: just disable/enable counting */
break;
case 1:
case 5:
if (s->gate < val) {
/* restart counting on rising edge */
s->count_load_time = qemu_get_clock(vm_clock);
pit_irq_timer_update(s, s->count_load_time);
}
break;
case 2:
case 3:
if (s->gate < val) {
/* restart counting on rising edge */
s->count_load_time = qemu_get_clock(vm_clock);
pit_irq_timer_update(s, s->count_load_time);
}
/* XXX: disable/enable counting */
break;
}
s->gate = val;
}
int pit_get_gate(PITState *pit, int channel)
{
PITChannelState *s = &pit->channels[channel];
return s->gate;
}
int pit_get_initial_count(PITState *pit, int channel)
{
PITChannelState *s = &pit->channels[channel];
return s->count;
}
int pit_get_mode(PITState *pit, int channel)
{
PITChannelState *s = &pit->channels[channel];
return s->mode;
}
static inline void pit_load_count(PITChannelState *s, int val)
{
if (val == 0)
val = 0x10000;
s->count_load_time = qemu_get_clock(vm_clock);
s->count = val;
pit_irq_timer_update(s, s->count_load_time);
}
/* if already latched, do not latch again */
static void pit_latch_count(PITChannelState *s)
{
if (!s->count_latched) {
s->latched_count = pit_get_count(s);
s->count_latched = s->rw_mode;
}
}
static void pit_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
PITState *pit = opaque;
int channel, access;
PITChannelState *s;
addr &= 3;
if (addr == 3) {
channel = val >> 6;
if (channel == 3) {
/* read back command */
for(channel = 0; channel < 3; channel++) {
s = &pit->channels[channel];
if (val & (2 << channel)) {
if (!(val & 0x20)) {
pit_latch_count(s);
}
if (!(val & 0x10) && !s->status_latched) {
/* status latch */
/* XXX: add BCD and null count */
s->status = (pit_get_out1(s, qemu_get_clock(vm_clock)) << 7) |
(s->rw_mode << 4) |
(s->mode << 1) |
s->bcd;
s->status_latched = 1;
}
}
}
} else {
s = &pit->channels[channel];
access = (val >> 4) & 3;
if (access == 0) {
pit_latch_count(s);
} else {
s->rw_mode = access;
s->read_state = access;
s->write_state = access;
s->mode = (val >> 1) & 7;
s->bcd = val & 1;
/* XXX: update irq timer ? */
}
}
} else {
s = &pit->channels[addr];
switch(s->write_state) {
default:
case RW_STATE_LSB:
pit_load_count(s, val);
break;
case RW_STATE_MSB:
pit_load_count(s, val << 8);
break;
case RW_STATE_WORD0:
s->write_latch = val;
s->write_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
pit_load_count(s, s->write_latch | (val << 8));
s->write_state = RW_STATE_WORD0;
break;
}
}
}
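/*
 * Guest-side programming sketch (the standard 8254 sequence, shown only to
 * illustrate how the writes above are decoded): a 100 Hz rate generator on
 * channel 0 uses divisor PIT_FREQ / 100 ~= 11932 (0x2e9c).
 *
 *     outb(0x34, 0x43);   // control word: channel 0, lobyte/hibyte, mode 2
 *     outb(0x9c, 0x40);   // low byte  -> latched in RW_STATE_WORD0
 *     outb(0x2e, 0x40);   // high byte -> RW_STATE_WORD1, pit_load_count()
 */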
static uint32_t pit_ioport_read(void *opaque, uint32_t addr)
{
PITState *pit = opaque;
int ret, count;
PITChannelState *s;
addr &= 3;
s = &pit->channels[addr];
if (s->status_latched) {
s->status_latched = 0;
ret = s->status;
} else if (s->count_latched) {
switch(s->count_latched) {
default:
case RW_STATE_LSB:
ret = s->latched_count & 0xff;
s->count_latched = 0;
break;
case RW_STATE_MSB:
ret = s->latched_count >> 8;
s->count_latched = 0;
break;
case RW_STATE_WORD0:
ret = s->latched_count & 0xff;
s->count_latched = RW_STATE_MSB;
break;
}
} else {
switch(s->read_state) {
default:
case RW_STATE_LSB:
count = pit_get_count(s);
ret = count & 0xff;
break;
case RW_STATE_MSB:
count = pit_get_count(s);
ret = (count >> 8) & 0xff;
break;
case RW_STATE_WORD0:
count = pit_get_count(s);
ret = count & 0xff;
s->read_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
count = pit_get_count(s);
ret = (count >> 8) & 0xff;
s->read_state = RW_STATE_WORD0;
break;
}
}
return ret;
}
static void pit_irq_timer_update(PITChannelState *s, int64_t current_time)
{
int64_t expire_time;
int irq_level;
if (!s->irq_timer)
return;
expire_time = pit_get_next_transition_time(s, current_time);
irq_level = pit_get_out1(s, current_time);
qemu_set_irq(s->irq, irq_level);
#ifdef DEBUG_PIT
printf("irq_level=%d next_delay=%f\n",
irq_level,
(double)(expire_time - current_time) / ticks_per_sec);
#endif
s->next_transition_time = expire_time;
if (expire_time != -1)
qemu_mod_timer(s->irq_timer, expire_time);
else
qemu_del_timer(s->irq_timer);
}
static void pit_irq_timer(void *opaque)
{
PITChannelState *s = opaque;
pit_irq_timer_update(s, s->next_transition_time);
}
static void pit_save(QEMUFile *f, void *opaque)
{
PITState *pit = opaque;
PITChannelState *s;
int i;
for(i = 0; i < 3; i++) {
s = &pit->channels[i];
qemu_put_be32(f, s->count);
qemu_put_be16s(f, &s->latched_count);
qemu_put_8s(f, &s->count_latched);
qemu_put_8s(f, &s->status_latched);
qemu_put_8s(f, &s->status);
qemu_put_8s(f, &s->read_state);
qemu_put_8s(f, &s->write_state);
qemu_put_8s(f, &s->write_latch);
qemu_put_8s(f, &s->rw_mode);
qemu_put_8s(f, &s->mode);
qemu_put_8s(f, &s->bcd);
qemu_put_8s(f, &s->gate);
qemu_put_be64(f, s->count_load_time);
if (s->irq_timer) {
qemu_put_be64(f, s->next_transition_time);
qemu_put_timer(f, s->irq_timer);
}
}
}
static int pit_load(QEMUFile *f, void *opaque, int version_id)
{
PITState *pit = opaque;
PITChannelState *s;
int i;
if (version_id != 1)
return -EINVAL;
for(i = 0; i < 3; i++) {
s = &pit->channels[i];
s->count=qemu_get_be32(f);
qemu_get_be16s(f, &s->latched_count);
qemu_get_8s(f, &s->count_latched);
qemu_get_8s(f, &s->status_latched);
qemu_get_8s(f, &s->status);
qemu_get_8s(f, &s->read_state);
qemu_get_8s(f, &s->write_state);
qemu_get_8s(f, &s->write_latch);
qemu_get_8s(f, &s->rw_mode);
qemu_get_8s(f, &s->mode);
qemu_get_8s(f, &s->bcd);
qemu_get_8s(f, &s->gate);
s->count_load_time=qemu_get_be64(f);
if (s->irq_timer) {
s->next_transition_time=qemu_get_be64(f);
qemu_get_timer(f, s->irq_timer);
}
}
return 0;
}
static void pit_reset(void *opaque)
{
PITState *pit = opaque;
PITChannelState *s;
int i;
for(i = 0;i < 3; i++) {
s = &pit->channels[i];
s->mode = 3;
s->gate = (i != 2);
pit_load_count(s, 0);
}
}
/* When HPET is operating in legacy mode, i8254 timer0 is disabled */
void hpet_pit_disable(void) {
PITChannelState *s;
s = &pit_state.channels[0];
if (s->irq_timer)
qemu_del_timer(s->irq_timer);
}
/* When HPET is reset or leaving legacy mode, it must reenable i8254
* timer 0
*/
void hpet_pit_enable(void)
{
PITState *pit = &pit_state;
PITChannelState *s;
s = &pit->channels[0];
s->mode = 3;
s->gate = 1;
pit_load_count(s, 0);
}
PITState *pit_init(int base, qemu_irq irq)
{
PITState *pit = &pit_state;
PITChannelState *s;
s = &pit->channels[0];
/* the timer 0 is connected to an IRQ */
s->irq_timer = qemu_new_timer(vm_clock, pit_irq_timer, s);
s->irq = irq;
register_savevm("i8254", base, 1, pit_save, pit_load, pit);
qemu_register_reset(pit_reset, pit);
register_ioport_write(base, 4, 1, pit_ioport_write, pit);
register_ioport_read(base, 3, 1, pit_ioport_read, pit);
pit_reset(pit);
return pit;
}
| gpl-2.0 |
mohlerm/hotspot | src/os/linux/vm/decoder_linux.cpp | 46 | 1896 | /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "prims/jvm.h"
#include "utilities/decoder_elf.hpp"
#include <cxxabi.h>
bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
int status;
char* result;
size_t size = (size_t)buflen;
#ifdef PPC64
// On PPC64 ElfDecoder::decode() may return a dot (.) prefixed name
// (see elfFuncDescTable.hpp for details)
if (symbol && *symbol == '.') symbol += 1;
#endif
// Don't pass buf to __cxa_demangle. In case of the 'buf' is too small,
// __cxa_demangle will call system "realloc" for additional memory, which
// may use different malloc/realloc mechanism that allocates 'buf'.
if ((result = abi::__cxa_demangle(symbol, NULL, NULL, &status)) != NULL) {
jio_snprintf(buf, buflen, "%s", result);
// call c library's free
::free(result);
return true;
}
return false;
}
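// Usage sketch (illustrative; "decoder" stands for some ElfDecoder
// instance): for the Itanium-ABI mangled name "_ZN3foo3barEv",
//
//     char buf[256];
//     decoder.demangle("_ZN3foo3barEv", buf, sizeof(buf));
//
// leaves "foo::bar()" in buf and returns true; for a plain C symbol
// __cxa_demangle fails and the function returns false, so callers fall
// back to the raw symbol name.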
| gpl-2.0 |
aospan/linux-netup-1.4 | mm/mlock.c | 46 | 21670 | /*
* linux/mm/mlock.c
*
* (C) Copyright 1995 Linus Torvalds
* (C) Copyright 2002 Christoph Hellwig
*/
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include "internal.h"
bool can_do_mlock(void)
{
if (rlimit(RLIMIT_MEMLOCK) != 0)
return true;
if (capable(CAP_IPC_LOCK))
return true;
return false;
}
EXPORT_SYMBOL(can_do_mlock);
/*
* Mlocked pages are marked with PageMlocked() flag for efficient testing
* in vmscan and, possibly, the fault path; and to support semi-accurate
* statistics.
*
* An mlocked page [PageMlocked(page)] is unevictable. As such, it will
* be placed on the LRU "unevictable" list, rather than the [in]active lists.
* The unevictable list is an LRU sibling list to the [in]active lists.
* PageUnevictable is set to indicate the unevictable state.
*
* When lazy mlocking via vmscan, it is important to ensure that the
* vma's VM_LOCKED status is not concurrently being modified, otherwise we
* may have mlocked a page that is being munlocked. So lazy mlock must take
* the mmap_sem for read, and verify that the vma really is locked
* (see mm/rmap.c).
*/
/*
* LRU accounting for clear_page_mlock()
*/
void clear_page_mlock(struct page *page)
{
if (!TestClearPageMlocked(page))
return;
mod_zone_page_state(page_zone(page), NR_MLOCK,
-hpage_nr_pages(page));
count_vm_event(UNEVICTABLE_PGCLEARED);
if (!isolate_lru_page(page)) {
putback_lru_page(page);
} else {
/*
* We lost the race. the page already moved to evictable list.
*/
if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
}
}
/*
* Mark page as mlocked if not already.
* If page on LRU, isolate and putback to move to unevictable list.
*/
void mlock_vma_page(struct page *page)
{
/* Serialize with page migration */
BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
if (!TestSetPageMlocked(page)) {
mod_zone_page_state(page_zone(page), NR_MLOCK,
hpage_nr_pages(page));
count_vm_event(UNEVICTABLE_PGMLOCKED);
if (!isolate_lru_page(page))
putback_lru_page(page);
}
}
/*
* Isolate a page from LRU with optional get_page() pin.
* Assumes lru_lock already held and page already pinned.
*/
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
if (PageLRU(page)) {
struct lruvec *lruvec;
lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
if (getpage)
get_page(page);
ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_lru(page));
return true;
}
return false;
}
/*
* Finish munlock after successful page isolation
*
* Page must be locked. This is a wrapper for try_to_munlock()
* and putback_lru_page() with munlock accounting.
*/
static void __munlock_isolated_page(struct page *page)
{
int ret = SWAP_AGAIN;
/*
* Optimization: if the page was mapped just once, that's our mapping
* and we don't need to check all the other vmas.
*/
if (page_mapcount(page) > 1)
ret = try_to_munlock(page);
/* Did try_to_munlock() succeed or punt? */
if (ret != SWAP_MLOCK)
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
putback_lru_page(page);
}
/*
* Accounting for page isolation fail during munlock
*
* Performs accounting when page isolation fails in munlock. There is nothing
* else to do because it means some other task has already removed the page
* from the LRU. putback_lru_page() will take care of removing the page from
* the unevictable list, if necessary. vmscan [page_referenced()] will move
* the page back to the unevictable list if some other vma has it mlocked.
*/
static void __munlock_isolation_failed(struct page *page)
{
if (PageUnevictable(page))
__count_vm_event(UNEVICTABLE_PGSTRANDED);
else
__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}
/**
* munlock_vma_page - munlock a vma page
* @page - page to be unlocked, either a normal page or THP page head
*
* returns the size of the page as a page mask (0 for normal page,
* HPAGE_PMD_NR - 1 for THP head page)
*
* called from munlock()/munmap() path with page supposedly on the LRU.
* When we munlock a page, because the vma where we found the page is being
* munlock()ed or munmap()ed, we want to check whether other vmas hold the
* page locked so that we can leave it on the unevictable lru list and not
* bother vmscan with it. However, to walk the page's rmap list in
* try_to_munlock() we must isolate the page from the LRU. If some other
* task has removed the page from the LRU, we won't be able to do that.
* So we clear the PageMlocked as we might not get another chance. If we
* can't isolate the page, we leave it for putback_lru_page() and vmscan
* [page_referenced()/try_to_unmap()] to deal with.
*/
unsigned int munlock_vma_page(struct page *page)
{
int nr_pages;
struct zone *zone = page_zone(page);
/* For try_to_munlock() and to serialize with page migration */
BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(PageTail(page), page);
/*
* Serialize with any parallel __split_huge_page_refcount() which
* might otherwise copy PageMlocked to part of the tail pages before
* we clear it in the head page. It also stabilizes hpage_nr_pages().
*/
spin_lock_irq(&zone->lru_lock);
nr_pages = hpage_nr_pages(page);
if (!TestClearPageMlocked(page))
goto unlock_out;
__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
if (__munlock_isolate_lru_page(page, true)) {
spin_unlock_irq(&zone->lru_lock);
__munlock_isolated_page(page);
goto out;
}
__munlock_isolation_failed(page);
unlock_out:
spin_unlock_irq(&zone->lru_lock);
out:
return nr_pages - 1;
}
/*
* convert get_user_pages() return value to posix mlock() error
*/
static int __mlock_posix_error_return(long retval)
{
if (retval == -EFAULT)
retval = -ENOMEM;
else if (retval == -ENOMEM)
retval = -EAGAIN;
return retval;
}
/*
* Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
*
* The fast path is available only for evictable pages with single mapping.
* Then we can bypass the per-cpu pvec and get better performance.
* when mapcount > 1 we need try_to_munlock() which can fail.
* when !page_evictable(), we need the full redo logic of putback_lru_page to
* avoid leaving evictable page in unevictable list.
*
* In case of success, @page is added to @pvec and @pgrescued is incremented
* in case that the page was previously unevictable. @page is also unlocked.
*/
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
int *pgrescued)
{
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (page_mapcount(page) <= 1 && page_evictable(page)) {
pagevec_add(pvec, page);
if (TestClearPageUnevictable(page))
(*pgrescued)++;
unlock_page(page);
return true;
}
return false;
}
/*
* Putback multiple evictable pages to the LRU
*
* Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
* the pages might have meanwhile become unevictable but that is OK.
*/
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
/*
*__pagevec_lru_add() calls release_pages() so we don't call
* put_page() explicitly
*/
__pagevec_lru_add(pvec);
count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}
/*
* Munlock a batch of pages from the same zone
*
* The work is split to two main phases. First phase clears the Mlocked flag
* and attempts to isolate the pages, all under a single zone lru lock.
* The second phase finishes the munlock only for pages where isolation
* succeeded.
*
* Note that the pagevec may be modified during the process.
*/
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
int i;
int nr = pagevec_count(pvec);
int delta_munlocked;
struct pagevec pvec_putback;
int pgrescued = 0;
pagevec_init(&pvec_putback, 0);
/* Phase 1: page isolation */
spin_lock_irq(&zone->lru_lock);
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
if (TestClearPageMlocked(page)) {
/*
* We already have pin from follow_page_mask()
* so we can spare the get_page() here.
*/
if (__munlock_isolate_lru_page(page, false))
continue;
else
__munlock_isolation_failed(page);
}
/*
* We won't be munlocking this page in the next phase
* but we still need to release the follow_page_mask()
* pin. We cannot do it under lru_lock however. If it's
* the last pin, __page_cache_release() would deadlock.
*/
pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
}
delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(&zone->lru_lock);
/* Now we can release pins of pages that we are not munlocking */
pagevec_release(&pvec_putback);
/* Phase 2: page munlock */
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
if (page) {
lock_page(page);
if (!__putback_lru_fast_prepare(page, &pvec_putback,
&pgrescued)) {
/*
* Slow path. We don't want to lose the last
* pin before unlock_page()
*/
get_page(page); /* for putback_lru_page() */
__munlock_isolated_page(page);
unlock_page(page);
put_page(page); /* from follow_page_mask() */
}
}
}
/*
* Phase 3: page putback for pages that qualified for the fast path
* This will also call put_page() to return pin from follow_page_mask()
*/
if (pagevec_count(&pvec_putback))
__putback_lru_fast(&pvec_putback, pgrescued);
}
/*
* Fill up pagevec for __munlock_pagevec using pte walk
*
* The function expects that the struct page corresponding to @start address is
* a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
*
* The rest of @pvec is filled by subsequent pages within the same pmd and same
* zone, as long as the pte's are present and vm_normal_page() succeeds. These
* pages also get pinned.
*
* Returns the address of the next page that should be scanned. This equals
* @start + PAGE_SIZE when no page could be added by the pte walk.
*/
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
struct vm_area_struct *vma, int zoneid, unsigned long start,
unsigned long end)
{
pte_t *pte;
spinlock_t *ptl;
/*
* Initialize pte walk starting at the already pinned page where we
* are sure that there is a pte, as it was pinned under the same
* mmap_sem write op.
*/
pte = get_locked_pte(vma->vm_mm, start, &ptl);
/* Make sure we do not cross the page table boundary */
end = pgd_addr_end(start, end);
end = pud_addr_end(start, end);
end = pmd_addr_end(start, end);
/* The page next to the pinned page is the first we will try to get */
start += PAGE_SIZE;
while (start < end) {
struct page *page = NULL;
pte++;
if (pte_present(*pte))
page = vm_normal_page(vma, start, *pte);
/*
* Break if page could not be obtained or the page's node+zone does not
* match
*/
if (!page || page_zone_id(page) != zoneid)
break;
/*
* Do not use pagevec for PTE-mapped THP,
* munlock_vma_pages_range() will handle them.
*/
if (PageTransCompound(page))
break;
get_page(page);
/*
* Increase the address that will be returned *before* the
* eventual break due to pvec becoming full by adding the page
*/
start += PAGE_SIZE;
if (pagevec_add(pvec, page) == 0)
break;
}
pte_unmap_unlock(pte, ptl);
return start;
}
/*
* munlock_vma_pages_range() - munlock all pages in the vma range.
* @vma - vma containing range to be munlock()ed.
* @start - start address in @vma of the range
* @end - end of range in @vma.
*
* For mremap(), munmap() and exit().
*
* Called with @vma VM_LOCKED.
*
* Returns with VM_LOCKED cleared. Callers must be prepared to
* deal with this.
*
* We don't save and restore VM_LOCKED here because pages are
* still on lru. In unmap path, pages might be scanned by reclaim
* and re-mlocked by try_to_{munlock|unmap} before we unmap and
* free them. This will result in freeing mlocked pages.
*/
void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
while (start < end) {
struct page *page;
unsigned int page_mask;
unsigned long page_increm;
struct pagevec pvec;
struct zone *zone;
int zoneid;
pagevec_init(&pvec, 0);
/*
* Although FOLL_DUMP is intended for get_dump_page(),
* it just so happens that its special treatment of the
* ZERO_PAGE (returning an error instead of doing get_page)
* suits munlock very well (and if somehow an abnormal page
* has sneaked into the range, we won't oops here: great).
*/
page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
&page_mask);
if (page && !IS_ERR(page)) {
if (PageTransTail(page)) {
VM_BUG_ON_PAGE(PageMlocked(page), page);
put_page(page); /* follow_page_mask() */
} else if (PageTransHuge(page)) {
lock_page(page);
/*
* Any THP page found by follow_page_mask() may
* have gotten split before reaching
* munlock_vma_page(), so we need to recompute
* the page_mask here.
*/
page_mask = munlock_vma_page(page);
unlock_page(page);
put_page(page); /* follow_page_mask() */
} else {
/*
* Non-huge pages are handled in batches via
* pagevec. The pin from follow_page_mask()
* prevents them from collapsing by THP.
*/
pagevec_add(&pvec, page);
zone = page_zone(page);
zoneid = page_zone_id(page);
/*
* Try to fill the rest of pagevec using fast
* pte walk. This will also update start to
* the next page to process. Then munlock the
* pagevec.
*/
start = __munlock_pagevec_fill(&pvec, vma,
zoneid, start, end);
__munlock_pagevec(&pvec, zone);
goto next;
}
}
page_increm = 1 + page_mask;
start += page_increm * PAGE_SIZE;
next:
cond_resched();
}
}
/*
* mlock_fixup - handle mlock[all]/munlock[all] requests.
*
* Filters out "special" vmas -- VM_LOCKED never gets set for these, and
* munlock is a no-op. However, for some special vmas, we go ahead and
* populate the ptes.
*
* For vmas that pass the filters, merge/split as appropriate.
*/
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
unsigned long start, unsigned long end, vm_flags_t newflags)
{
struct mm_struct *mm = vma->vm_mm;
pgoff_t pgoff;
int nr_pages;
int ret = 0;
int lock = !!(newflags & VM_LOCKED);
if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
goto out;
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
vma->vm_file, pgoff, vma_policy(vma),
vma->vm_userfaultfd_ctx);
if (*prev) {
vma = *prev;
goto success;
}
if (start != vma->vm_start) {
ret = split_vma(mm, vma, start, 1);
if (ret)
goto out;
}
if (end != vma->vm_end) {
ret = split_vma(mm, vma, end, 0);
if (ret)
goto out;
}
success:
/*
* Keep track of amount of locked VM.
*/
nr_pages = (end - start) >> PAGE_SHIFT;
if (!lock)
nr_pages = -nr_pages;
mm->locked_vm += nr_pages;
/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one() unmaps a page just after we
	 * set VM_LOCKED; populate_vma_page_range() will bring it back.
*/
if (lock)
vma->vm_flags = newflags;
else
munlock_vma_pages_range(vma, start, end);
out:
*prev = vma;
return ret;
}
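/*
 * Editor's note (illustrative sketch, not in the original source): when
 * the locked range sits strictly inside one VMA and vma_merge() finds no
 * neighbour to absorb it, mlock_fixup() splits twice. E.g. for a VMA
 * covering [0x1000, 0x9000) and mlock(0x3000, 0x2000):
 *
 *	split_vma(mm, vma, 0x3000, 1)  -> [0x1000,0x3000) [0x3000,0x9000)
 *	split_vma(mm, vma, 0x5000, 0)  -> ... [0x3000,0x5000) [0x5000,0x9000)
 *
 * and only the middle VMA receives the new VM_LOCKED flags.
 */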
static int apply_vma_lock_flags(unsigned long start, size_t len,
vm_flags_t flags)
{
unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
int error;
VM_BUG_ON(offset_in_page(start));
VM_BUG_ON(len != PAGE_ALIGN(len));
end = start + len;
if (end < start)
return -EINVAL;
if (end == start)
return 0;
vma = find_vma(current->mm, start);
if (!vma || vma->vm_start > start)
return -ENOMEM;
prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
for (nstart = start ; ; ) {
vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
newflags |= flags;
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
tmp = vma->vm_end;
if (tmp > end)
tmp = end;
error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
if (error)
break;
nstart = tmp;
if (nstart < prev->vm_end)
nstart = prev->vm_end;
if (nstart >= end)
break;
vma = prev->vm_next;
if (!vma || vma->vm_start != nstart) {
error = -ENOMEM;
break;
}
}
return error;
}
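/*
 * Editor's note: the loop above requires the VMAs covering [start, end)
 * to be contiguous. Any hole makes vma->vm_start != nstart and the walk
 * fails with -ENOMEM, which matches the POSIX mlock() behaviour for
 * ranges containing unmapped addresses.
 */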
static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
unsigned long locked;
unsigned long lock_limit;
int error = -ENOMEM;
if (!can_do_mlock())
return -EPERM;
lru_add_drain_all(); /* flush pagevec */
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
locked = len >> PAGE_SHIFT;
	if (down_write_killable(&current->mm->mmap_sem))
return -EINTR;
locked += current->mm->locked_vm;
/* check against resource limits */
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = apply_vma_lock_flags(start, len, flags);
	up_write(&current->mm->mmap_sem);
if (error)
return error;
error = __mm_populate(start, len, 0);
if (error)
return __mlock_posix_error_return(error);
return 0;
}
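/*
 * Editor's note (worked example, assuming 4 KiB pages and the common
 * 64 KiB RLIMIT_MEMLOCK default; both are configuration-dependent):
 *
 *	lock_limit = 65536 >> PAGE_SHIFT = 16 pages
 *	mlock(addr, 128 KiB)  -> locked = 32 pages
 *
 * so with nothing else locked the "locked <= lock_limit" check fails
 * (32 > 16) and an unprivileged caller gets -ENOMEM unless it holds
 * CAP_IPC_LOCK.
 */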
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
return do_mlock(start, len, VM_LOCKED);
}
SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
vm_flags_t vm_flags = VM_LOCKED;
if (flags & ~MLOCK_ONFAULT)
return -EINVAL;
if (flags & MLOCK_ONFAULT)
vm_flags |= VM_LOCKONFAULT;
return do_mlock(start, len, vm_flags);
}
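/*
 * Editor's note (hypothetical userspace sketch, not part of this file):
 * MLOCK_ONFAULT locks pages as they are faulted in rather than
 * populating them up front. glibc >= 2.27 provides an mlock2() wrapper:
 *
 *	#include <sys/mman.h>
 *
 *	if (mlock2(buf, len, MLOCK_ONFAULT) != 0)
 *		perror("mlock2");
 *
 * Pages are charged against mm->locked_vm immediately (see mlock_fixup()
 * above) but faulted in lazily.
 */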
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
	if (down_write_killable(&current->mm->mmap_sem))
return -EINTR;
ret = apply_vma_lock_flags(start, len, 0);
	up_write(&current->mm->mmap_sem);
return ret;
}
/*
* Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
* and translate into the appropriate modifications to mm->def_flags and/or the
* flags for all current VMAs.
*
* There are a couple of subtleties with this. If mlockall() is called multiple
* times with different flags, the values do not necessarily stack. If mlockall
* is called once including the MCL_FUTURE flag and then a second time without
* it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
*/
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
vm_flags_t to_add = 0;
current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
if (flags & MCL_FUTURE) {
current->mm->def_flags |= VM_LOCKED;
if (flags & MCL_ONFAULT)
current->mm->def_flags |= VM_LOCKONFAULT;
if (!(flags & MCL_CURRENT))
goto out;
}
if (flags & MCL_CURRENT) {
to_add |= VM_LOCKED;
if (flags & MCL_ONFAULT)
to_add |= VM_LOCKONFAULT;
}
	for (vma = current->mm->mmap; vma; vma = prev->vm_next) {
vm_flags_t newflags;
newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
newflags |= to_add;
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
cond_resched_rcu_qs();
}
out:
return 0;
}
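/*
 * Editor's note: as the comment above says, the flags do not stack.
 * E.g. mlockall(MCL_CURRENT | MCL_FUTURE) followed later by
 * mlockall(MCL_CURRENT) clears VM_LOCKED from mm->def_flags again, so
 * mappings created after the second call are no longer locked
 * automatically; only the VMAs present at that point keep VM_LOCKED.
 */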
SYSCALL_DEFINE1(mlockall, int, flags)
{
unsigned long lock_limit;
int ret;
if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
return -EINVAL;
if (!can_do_mlock())
return -EPERM;
if (flags & MCL_CURRENT)
lru_add_drain_all(); /* flush pagevec */
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
	if (down_write_killable(&current->mm->mmap_sem))
return -EINTR;
ret = -ENOMEM;
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = apply_mlockall_flags(flags);
	up_write(&current->mm->mmap_sem);
if (!ret && (flags & MCL_CURRENT))
mm_populate(0, TASK_SIZE);
return ret;
}
SYSCALL_DEFINE0(munlockall)
{
int ret;
	if (down_write_killable(&current->mm->mmap_sem))
return -EINTR;
ret = apply_mlockall_flags(0);
	up_write(&current->mm->mmap_sem);
return ret;
}
/*
 * Objects with a different lifetime than processes (SHM_LOCK and SHM_HUGETLB
* shm segments) get accounted against the user_struct instead.
*/
static DEFINE_SPINLOCK(shmlock_user_lock);
int user_shm_lock(size_t size, struct user_struct *user)
{
unsigned long lock_limit, locked;
int allowed = 0;
locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
lock_limit = rlimit(RLIMIT_MEMLOCK);
if (lock_limit == RLIM_INFINITY)
allowed = 1;
lock_limit >>= PAGE_SHIFT;
spin_lock(&shmlock_user_lock);
if (!allowed &&
locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
goto out;
get_uid(user);
user->locked_shm += locked;
allowed = 1;
out:
spin_unlock(&shmlock_user_lock);
return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
spin_lock(&shmlock_user_lock);
user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
spin_unlock(&shmlock_user_lock);
free_uid(user);
}
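/*
 * Editor's note: both paths round the segment size up to whole pages,
 * e.g. size = 5000 with 4 KiB pages charges (5000 + 4095) >> 12 = 2
 * pages against user->locked_shm, so lock and unlock stay symmetric
 * for the same size.
 */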
| gpl-2.0 |
Lukiqq/GT-I9100-Galaxian-ICS-Kernel | net/bluetooth/hci_event.c | 302 | 73936 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static int enable_le;
/* Handle HCI Event packets */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 0);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
hci_conn_check_pending(hdev);
}
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 0);
hci_conn_check_pending(hdev);
}
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_role_discovery *rp = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
if (conn) {
if (rp->role)
conn->link_mode &= ~HCI_LM_MASTER;
else
conn->link_mode |= HCI_LM_MASTER;
}
hci_dev_unlock(hdev);
}
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_link_policy *rp = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
if (conn)
conn->link_policy = __le16_to_cpu(rp->policy);
hci_dev_unlock(hdev);
}
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_write_link_policy *rp = (void *) skb->data;
struct hci_conn *conn;
void *sent;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
if (!sent)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
if (conn)
conn->link_policy = get_unaligned_le16(sent + 2);
hci_dev_unlock(hdev);
}
static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hdev->link_policy = __le16_to_cpu(rp->policy);
}
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
if (!sent)
return;
if (!status)
hdev->link_policy = get_unaligned_le16(sent);
hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
clear_bit(HCI_RESET, &hdev->flags);
hci_req_complete(hdev, HCI_OP_RESET, status);
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
return;
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_set_local_name_complete(hdev->id, sent, status);
if (status)
return;
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_name *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
if (!sent)
return;
if (!status) {
__u8 param = *((__u8 *) sent);
if (param == AUTH_ENABLED)
set_bit(HCI_AUTH, &hdev->flags);
else
clear_bit(HCI_AUTH, &hdev->flags);
}
hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
if (!sent)
return;
if (!status) {
__u8 param = *((__u8 *) sent);
if (param)
set_bit(HCI_ENCRYPT, &hdev->flags);
else
clear_bit(HCI_ENCRYPT, &hdev->flags);
}
hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
if (!sent)
return;
if (!status) {
__u8 param = *((__u8 *) sent);
int old_pscan, old_iscan;
old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
if (param & SCAN_INQUIRY) {
set_bit(HCI_ISCAN, &hdev->flags);
if (!old_iscan)
mgmt_discoverable(hdev->id, 1);
} else if (old_iscan)
mgmt_discoverable(hdev->id, 0);
if (param & SCAN_PAGE) {
set_bit(HCI_PSCAN, &hdev->flags);
if (!old_pscan)
mgmt_connectable(hdev->id, 1);
} else if (old_pscan)
mgmt_connectable(hdev->id, 0);
}
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
memcpy(hdev->dev_class, rp->dev_class, 3);
BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
return;
memcpy(hdev->dev_class, sent, 3);
}
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_voice_setting *rp = (void *) skb->data;
__u16 setting;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
setting = __le16_to_cpu(rp->voice_setting);
if (hdev->voice_setting == setting)
return;
hdev->voice_setting = setting;
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
if (hdev->notify) {
tasklet_disable(&hdev->tx_task);
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
tasklet_enable(&hdev->tx_task);
}
}
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
__u16 setting;
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
if (!sent)
return;
setting = get_unaligned_le16(sent);
if (hdev->voice_setting == setting)
return;
hdev->voice_setting = setting;
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
if (hdev->notify) {
tasklet_disable(&hdev->tx_task);
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
tasklet_enable(&hdev->tx_task);
}
}
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hdev->ssp_mode = rp->mode;
}
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
return;
hdev->ssp_mode = *((__u8 *) sent);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
if (hdev->features[6] & LMP_EXT_INQ)
return 2;
if (hdev->features[3] & LMP_RSSI_INQ)
return 1;
if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
hdev->lmp_subver == 0x0757)
return 1;
if (hdev->manufacturer == 15) {
if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
return 1;
if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
return 1;
if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
return 1;
}
if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
hdev->lmp_subver == 0x1805)
return 1;
return 0;
}
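/*
 * Editor's note: the value returned here feeds HCI_OP_WRITE_INQUIRY_MODE
 * below. Per the Bluetooth core specification the modes are 0x00 =
 * standard inquiry result, 0x01 = inquiry result with RSSI and 0x02 =
 * extended inquiry result; the manufacturer/revision checks appear to
 * whitelist controllers that support RSSI results without advertising
 * LMP_RSSI_INQ.
 */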
static void hci_setup_inquiry_mode(struct hci_dev *hdev)
{
u8 mode;
mode = hci_get_inquiry_mode(hdev);
hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_dev *hdev)
{
/* The second byte is 0xff instead of 0x9f (two reserved bits
* disabled) since a Broadcom 1.2 dongle doesn't respond to the
* command otherwise */
u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices */
if (hdev->lmp_ver <= 1)
return;
events[4] |= 0x01; /* Flow Specification Complete */
events[4] |= 0x02; /* Inquiry Result with RSSI */
events[4] |= 0x04; /* Read Remote Extended Features Complete */
events[5] |= 0x08; /* Synchronous Connection Complete */
events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
if (hdev->features[5] & LMP_PAUSE_ENC)
events[5] |= 0x80; /* Encryption Key Refresh Complete */
if (hdev->features[6] & LMP_EXT_INQ)
events[5] |= 0x40; /* Extended Inquiry Result */
if (hdev->features[6] & LMP_NO_FLUSH)
events[7] |= 0x01; /* Enhanced Flush Complete */
if (hdev->features[7] & LMP_LSTO)
events[6] |= 0x80; /* Link Supervision Timeout Changed */
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
events[6] |= 0x01; /* IO Capability Request */
events[6] |= 0x02; /* IO Capability Response */
events[6] |= 0x04; /* User Confirmation Request */
events[6] |= 0x08; /* User Passkey Request */
events[6] |= 0x10; /* Remote OOB Data Request */
events[6] |= 0x20; /* Simple Pairing Complete */
events[7] |= 0x04; /* User Passkey Notification */
events[7] |= 0x08; /* Keypress Notification */
events[7] |= 0x10; /* Remote Host Supported
* Features Notification */
}
if (hdev->features[4] & LMP_LE)
events[7] |= 0x20; /* LE Meta-Event */
hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
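/*
 * Editor's note: each events[n] bit b enables bit 8 * n + b of the HCI
 * Set Event Mask command, so for example:
 *
 *	events[4] |= 0x02;	bit 33, Inquiry Result with RSSI
 *	events[7] |= 0x20;	bit 61, LE Meta Event
 *
 * which is how the per-feature assignments above map onto the mask sent
 * at the end of the function.
 */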
static void hci_set_le_support(struct hci_dev *hdev)
{
struct hci_cp_write_le_host_supported cp;
memset(&cp, 0, sizeof(cp));
if (enable_le) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
}
static void hci_setup(struct hci_dev *hdev)
{
hci_setup_event_mask(hdev);
if (hdev->lmp_ver > 1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
u8 mode = 0x01;
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
}
if (hdev->features[3] & LMP_RSSI_INQ)
hci_setup_inquiry_mode(hdev);
if (hdev->features[7] & LMP_INQ_TX_PWR)
hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
if (hdev->features[7] & LMP_EXTFEATURES) {
struct hci_cp_read_local_ext_features cp;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
sizeof(cp), &cp);
}
if (hdev->features[4] & LMP_LE)
hci_set_le_support(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
hdev->lmp_ver = rp->lmp_ver;
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
hdev->manufacturer,
hdev->hci_ver, hdev->hci_rev);
if (test_bit(HCI_INIT, &hdev->flags))
hci_setup(hdev);
}
static void hci_setup_link_policy(struct hci_dev *hdev)
{
u16 link_policy = 0;
if (hdev->features[0] & LMP_RSWITCH)
link_policy |= HCI_LP_RSWITCH;
if (hdev->features[0] & LMP_HOLD)
link_policy |= HCI_LP_HOLD;
if (hdev->features[0] & LMP_SNIFF)
link_policy |= HCI_LP_SNIFF;
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;
link_policy = cpu_to_le16(link_policy);
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
sizeof(link_policy), &link_policy);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_commands *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
goto done;
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
hci_setup_link_policy(hdev);
done:
hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_features *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
memcpy(hdev->features, rp->features, 8);
/* Adjust default settings according to features
* supported by device. */
if (hdev->features[0] & LMP_3SLOT)
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
if (hdev->features[0] & LMP_5SLOT)
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
if (hdev->features[1] & LMP_HV2) {
hdev->pkt_type |= (HCI_HV2);
hdev->esco_type |= (ESCO_HV2);
}
if (hdev->features[1] & LMP_HV3) {
hdev->pkt_type |= (HCI_HV3);
hdev->esco_type |= (ESCO_HV3);
}
if (hdev->features[3] & LMP_ESCO)
hdev->esco_type |= (ESCO_EV3);
if (hdev->features[4] & LMP_EV4)
hdev->esco_type |= (ESCO_EV4);
if (hdev->features[4] & LMP_EV5)
hdev->esco_type |= (ESCO_EV5);
if (hdev->features[5] & LMP_EDR_ESCO_2M)
hdev->esco_type |= (ESCO_2EV3);
if (hdev->features[5] & LMP_EDR_ESCO_3M)
hdev->esco_type |= (ESCO_3EV3);
if (hdev->features[5] & LMP_EDR_3S_ESCO)
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
}
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
memcpy(hdev->extfeatures, rp->features, 8);
hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
hdev->sco_mtu = rp->sco_mtu;
hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
hdev->sco_mtu = 64;
hdev->sco_pkts = 8;
}
hdev->acl_cnt = hdev->acl_pkts;
hdev->sco_cnt = hdev->sco_pkts;
BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
hdev->acl_mtu, hdev->acl_pkts,
hdev->sco_mtu, hdev->sco_pkts);
}
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_bd_addr *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (!rp->status)
bacpy(&hdev->bdaddr, &rp->bdaddr);
hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}
static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}
static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
}
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_pin_code_reply *rp = (void *) skb->data;
struct hci_cp_pin_code_reply *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
if (rp->status != 0)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
if (!cp)
return;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (conn)
conn->pin_length = cp->pin_len;
}
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
rp->status);
}
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
hdev->le_pkts = rp->le_max_pkt;
hdev->le_cnt = hdev->le_pkts;
BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
rp->status);
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
rp->status);
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
rp->randomizer, rp->status);
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_le_set_scan_enable *cp;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!cp)
return;
hci_dev_lock(hdev);
if (cp->enable == 0x01) {
del_timer(&hdev->adv_timer);
hci_adv_entries_clear(hdev);
} else if (cp->enable == 0x00) {
mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
}
hci_dev_unlock(hdev);
}
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}
static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_read_local_ext_features cp;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%x", hdev->name, status);
if (status)
return;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
}
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
BT_DBG("%s status 0x%x", hdev->name, status);
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
return;
}
if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 1);
}
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_create_conn *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
if (status != 0x0c || conn->attempt > 2) {
conn->state = BT_CLOSED;
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
} else
conn->state = BT_CONNECT2;
}
} else {
if (!conn) {
conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
if (conn) {
conn->out = 1;
conn->link_mode |= HCI_LM_MASTER;
} else
BT_ERR("No memory for new connection");
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_add_sco *cp;
struct hci_conn *acl, *sco;
__u16 handle;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
if (!cp)
return;
handle = __le16_to_cpu(cp->handle);
BT_DBG("%s handle %d", hdev->name, handle);
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
if (acl) {
sco = acl->link;
if (sco) {
sco->state = BT_CLOSED;
hci_proto_connect_cfm(sco, status);
hci_conn_del(sco);
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_auth_requested *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_set_conn_encrypt *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
}
}
hci_dev_unlock(hdev);
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
if (conn->pending_sec_level == BT_SECURITY_SDP)
return 0;
/* Only request authentication for SSP connections or non-SSP
* devices with sec_level HIGH */
if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
conn->pending_sec_level != BT_SECURITY_HIGH)
return 0;
return 1;
}
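/*
 * Editor's note: in summary, this returns 1 (request authentication)
 * only for an outgoing connection still in BT_CONFIG whose pending
 * security level is above SDP, and then only when both sides support
 * SSP or the caller asked for BT_SECURITY_HIGH on a legacy link.
 */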
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_remote_name_req *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
	/* If successful, wait for the name req complete event before
	 * checking whether authentication is needed */
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (!conn)
goto unlock;
if (!hci_outgoing_auth_needed(hdev, conn))
goto unlock;
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
unlock:
hci_dev_unlock(hdev);
}
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_read_remote_features *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_read_remote_ext_features *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_setup_sync_conn *cp;
struct hci_conn *acl, *sco;
__u16 handle;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
if (!cp)
return;
handle = __le16_to_cpu(cp->handle);
BT_DBG("%s handle %d", hdev->name, handle);
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
if (acl) {
sco = acl->link;
if (sco) {
sco->state = BT_CLOSED;
hci_proto_connect_cfm(sco, status);
hci_conn_del(sco);
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_sniff_mode *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, status);
}
hci_dev_unlock(hdev);
}
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_exit_sniff_mode *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
if (!status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, status);
}
hci_dev_unlock(hdev);
}
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_le_create_conn *cp;
struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
if (!cp)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
} else {
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
if (conn) {
conn->dst_type = cp->peer_addr_type;
conn->out = 1;
} else {
BT_ERR("No memory for new connection");
}
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
BT_DBG("%s status 0x%x", hdev->name, status);
}
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status %d", hdev->name, status);
if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 0);
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
}
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
struct inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
hci_dev_lock(hdev);
if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 1);
}
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
data.pscan_mode = info->pscan_mode;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
data.rssi = 0x00;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
NULL);
}
hci_dev_unlock(hdev);
}
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
if (ev->link_type != SCO_LINK)
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
conn->type = SCO_LINK;
}
if (!ev->status) {
conn->handle = __le16_to_cpu(ev->handle);
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
mgmt_connected(hdev->id, &ev->bdaddr);
} else
conn->state = BT_CONNECTED;
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
conn->link_mode |= HCI_LM_AUTH;
if (test_bit(HCI_ENCRYPT, &hdev->flags))
conn->link_mode |= HCI_LM_ENCRYPT;
/* Get remote features */
if (conn->type == ACL_LINK) {
struct hci_cp_read_remote_features cp;
cp.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
sizeof(cp), &cp);
}
/* Set packet type for incoming connection */
if (!conn->out && hdev->hci_ver < 3) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
sizeof(cp), &cp);
}
} else {
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK)
mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
}
if (conn->type == ACL_LINK)
hci_sco_setup(conn, ev->status);
if (ev->status) {
hci_proto_connect_cfm(conn, ev->status);
hci_conn_del(conn);
} else if (ev->link_type != ACL_LINK)
hci_proto_connect_cfm(conn, ev->status);
unlock:
hci_dev_unlock(hdev);
hci_conn_check_pending(hdev);
}
static inline bool is_sco_active(struct hci_dev *hdev)
{
if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
(hci_conn_hash_lookup_state(hdev, ESCO_LINK,
BT_CONNECTED)))
return true;
return false;
}
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
int mask = hdev->link_mode;
BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
batostr(&ev->bdaddr), ev->link_type);
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
if ((mask & HCI_LM_ACCEPT) &&
!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
hci_dev_lock(hdev);
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
/* pkt_type not yet used for incoming connections */
conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
return;
}
}
memcpy(conn->dev_class, ev->dev_class, 3);
conn->state = BT_CONNECT;
hci_dev_unlock(hdev);
if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
|| is_sco_active(hdev)))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
sizeof(cp), &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
cp.max_latency = cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
}
} else {
/* Connection rejected */
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.reason = 0x0f;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
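/*
 * Editor's note on the magic numbers above, per the HCI parameter and
 * error-code tables in the core specification: 0x00001f40 requests
 * 8000 bytes/s (64 kbit/s CVSD voice) in each direction, max_latency
 * 0xffff and retrans_effort 0xff both mean "don't care", and reject
 * reason 0x0f is "Connection Rejected due to Unacceptable BD_ADDR".
 */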
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_disconn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
if (ev->status) {
mgmt_disconnect_failed(hdev->id);
return;
}
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK || conn->type == LE_LINK)
mgmt_disconnected(hdev->id, &conn->dst);
hci_proto_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_auth_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
	/* SS_BLUETOOTH(gudam.ryu) 2012. 03. 02 - Fix OPP send failure
	 * when the device was unpaired on the remote end */
if (ev->status == 0x06 && hdev->ssp_mode > 0 &&
conn->ssp_mode > 0) {
struct hci_cp_auth_requested cp;
hci_remove_link_key(hdev, &conn->dst);
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
hci_dev_unlock(hdev);
BT_DBG("Pin or key missing !!!");
return;
}
/* SS_Bluetooth(gudam.ryu) End */
if (!ev->status) {
if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
BT_INFO("re-auth of legacy device is not possible.");
} else {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
}
} else {
mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
conn->disc_timeout = HCI_DISCONN_TIMEOUT/200; /* 0.01 sec */
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
if (conn->state == BT_CONFIG) {
if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = ev->handle;
cp.encrypt = 0x01;
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
&cp);
} else {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
}
} else {
hci_auth_cfm(conn, ev->status);
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_put(conn);
}
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
if (!ev->status) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = ev->handle;
cp.encrypt = 0x01;
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
&cp);
} else {
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
hci_encrypt_cfm(conn, ev->status, 0x00);
}
}
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_name *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_conn_check_pending(hdev);
hci_dev_lock(hdev);
if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
if (!hci_outgoing_auth_needed(hdev, conn))
goto unlock;
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_encrypt_change *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
if (!ev->status) {
if (ev->encrypt) {
/* Encryption implies authentication */
conn->link_mode |= HCI_LM_AUTH;
conn->link_mode |= HCI_LM_ENCRYPT;
conn->sec_level = conn->pending_sec_level;
} else
conn->link_mode &= ~HCI_LM_ENCRYPT;
}
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
if (conn->state == BT_CONFIG) {
if (!ev->status)
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
} else
hci_encrypt_cfm(conn, ev->status, ev->encrypt);
}
hci_dev_unlock(hdev);
}
static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
if (!ev->status)
conn->link_mode |= HCI_LM_SECURE;
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
hci_key_change_cfm(conn, ev->status);
}
hci_dev_unlock(hdev);
}
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_features *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
if (!ev->status)
memcpy(conn->features, ev->features, 8);
if (conn->state != BT_CONFIG)
goto unlock;
if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
struct hci_cp_read_remote_ext_features cp;
cp.handle = ev->handle;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
sizeof(cp), &cp);
goto unlock;
}
if (!ev->status) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
}
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
__u16 opcode;
skb_pull(skb, sizeof(*ev));
opcode = __le16_to_cpu(ev->opcode);
switch (opcode) {
case HCI_OP_INQUIRY_CANCEL:
hci_cc_inquiry_cancel(hdev, skb);
break;
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
case HCI_OP_REMOTE_NAME_REQ_CANCEL:
hci_cc_remote_name_req_cancel(hdev, skb);
break;
case HCI_OP_ROLE_DISCOVERY:
hci_cc_role_discovery(hdev, skb);
break;
case HCI_OP_READ_LINK_POLICY:
hci_cc_read_link_policy(hdev, skb);
break;
case HCI_OP_WRITE_LINK_POLICY:
hci_cc_write_link_policy(hdev, skb);
break;
case HCI_OP_READ_DEF_LINK_POLICY:
hci_cc_read_def_link_policy(hdev, skb);
break;
case HCI_OP_WRITE_DEF_LINK_POLICY:
hci_cc_write_def_link_policy(hdev, skb);
break;
case HCI_OP_RESET:
hci_cc_reset(hdev, skb);
break;
case HCI_OP_WRITE_LOCAL_NAME:
hci_cc_write_local_name(hdev, skb);
break;
case HCI_OP_READ_LOCAL_NAME:
hci_cc_read_local_name(hdev, skb);
break;
case HCI_OP_WRITE_AUTH_ENABLE:
hci_cc_write_auth_enable(hdev, skb);
break;
case HCI_OP_WRITE_ENCRYPT_MODE:
hci_cc_write_encrypt_mode(hdev, skb);
break;
case HCI_OP_WRITE_SCAN_ENABLE:
hci_cc_write_scan_enable(hdev, skb);
break;
case HCI_OP_READ_CLASS_OF_DEV:
hci_cc_read_class_of_dev(hdev, skb);
break;
case HCI_OP_WRITE_CLASS_OF_DEV:
hci_cc_write_class_of_dev(hdev, skb);
break;
case HCI_OP_READ_VOICE_SETTING:
hci_cc_read_voice_setting(hdev, skb);
break;
case HCI_OP_WRITE_VOICE_SETTING:
hci_cc_write_voice_setting(hdev, skb);
break;
case HCI_OP_HOST_BUFFER_SIZE:
hci_cc_host_buffer_size(hdev, skb);
break;
case HCI_OP_READ_SSP_MODE:
hci_cc_read_ssp_mode(hdev, skb);
break;
case HCI_OP_WRITE_SSP_MODE:
hci_cc_write_ssp_mode(hdev, skb);
break;
case HCI_OP_READ_LOCAL_VERSION:
hci_cc_read_local_version(hdev, skb);
break;
case HCI_OP_READ_LOCAL_COMMANDS:
hci_cc_read_local_commands(hdev, skb);
break;
case HCI_OP_READ_LOCAL_FEATURES:
hci_cc_read_local_features(hdev, skb);
break;
case HCI_OP_READ_LOCAL_EXT_FEATURES:
hci_cc_read_local_ext_features(hdev, skb);
break;
case HCI_OP_READ_BUFFER_SIZE:
hci_cc_read_buffer_size(hdev, skb);
break;
case HCI_OP_READ_BD_ADDR:
hci_cc_read_bd_addr(hdev, skb);
break;
case HCI_OP_WRITE_CA_TIMEOUT:
hci_cc_write_ca_timeout(hdev, skb);
break;
case HCI_OP_DELETE_STORED_LINK_KEY:
hci_cc_delete_stored_link_key(hdev, skb);
break;
case HCI_OP_SET_EVENT_MASK:
hci_cc_set_event_mask(hdev, skb);
break;
case HCI_OP_WRITE_INQUIRY_MODE:
hci_cc_write_inquiry_mode(hdev, skb);
break;
case HCI_OP_READ_INQ_RSP_TX_POWER:
hci_cc_read_inq_rsp_tx_power(hdev, skb);
break;
case HCI_OP_SET_EVENT_FLT:
hci_cc_set_event_flt(hdev, skb);
break;
case HCI_OP_PIN_CODE_REPLY:
hci_cc_pin_code_reply(hdev, skb);
break;
case HCI_OP_PIN_CODE_NEG_REPLY:
hci_cc_pin_code_neg_reply(hdev, skb);
break;
case HCI_OP_READ_LOCAL_OOB_DATA:
hci_cc_read_local_oob_data_reply(hdev, skb);
break;
case HCI_OP_LE_READ_BUFFER_SIZE:
hci_cc_le_read_buffer_size(hdev, skb);
break;
case HCI_OP_USER_CONFIRM_REPLY:
hci_cc_user_confirm_reply(hdev, skb);
break;
case HCI_OP_USER_CONFIRM_NEG_REPLY:
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
case HCI_OP_LE_SET_SCAN_ENABLE:
hci_cc_le_set_scan_enable(hdev, skb);
break;
case HCI_OP_LE_LTK_REPLY:
hci_cc_le_ltk_reply(hdev, skb);
break;
case HCI_OP_LE_LTK_NEG_REPLY:
hci_cc_le_ltk_neg_reply(hdev, skb);
break;
case HCI_OP_WRITE_LE_HOST_SUPPORTED:
hci_cc_write_le_host_supported(hdev, skb);
break;
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
}
if (ev->opcode != HCI_OP_NOP)
del_timer(&hdev->cmd_timer);
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
tasklet_schedule(&hdev->cmd_task);
}
}
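/*
 * Editor's note: ev->ncmd reports how many HCI command packets the
 * controller can accept now. The code conservatively resets the credit
 * to 1 instead of ev->ncmd, keeping at most one command in flight, and
 * the cmd_task then drains hdev->cmd_q.
 */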
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
__u16 opcode;
skb_pull(skb, sizeof(*ev));
opcode = __le16_to_cpu(ev->opcode);
switch (opcode) {
case HCI_OP_INQUIRY:
hci_cs_inquiry(hdev, ev->status);
break;
case HCI_OP_CREATE_CONN:
hci_cs_create_conn(hdev, ev->status);
break;
case HCI_OP_ADD_SCO:
hci_cs_add_sco(hdev, ev->status);
break;
case HCI_OP_AUTH_REQUESTED:
hci_cs_auth_requested(hdev, ev->status);
break;
case HCI_OP_SET_CONN_ENCRYPT:
hci_cs_set_conn_encrypt(hdev, ev->status);
break;
case HCI_OP_REMOTE_NAME_REQ:
hci_cs_remote_name_req(hdev, ev->status);
break;
case HCI_OP_READ_REMOTE_FEATURES:
hci_cs_read_remote_features(hdev, ev->status);
break;
case HCI_OP_READ_REMOTE_EXT_FEATURES:
hci_cs_read_remote_ext_features(hdev, ev->status);
break;
case HCI_OP_SETUP_SYNC_CONN:
hci_cs_setup_sync_conn(hdev, ev->status);
break;
case HCI_OP_SNIFF_MODE:
hci_cs_sniff_mode(hdev, ev->status);
break;
case HCI_OP_EXIT_SNIFF_MODE:
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
case HCI_OP_DISCONNECT:
if (ev->status != 0)
mgmt_disconnect_failed(hdev->id);
break;
case HCI_OP_LE_CREATE_CONN:
hci_cs_le_create_conn(hdev, ev->status);
break;
case HCI_OP_LE_START_ENC:
hci_cs_le_start_enc(hdev, ev->status);
break;
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
}
if (ev->opcode != HCI_OP_NOP)
del_timer(&hdev->cmd_timer);
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
tasklet_schedule(&hdev->cmd_task);
}
}
static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_role_change *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
if (!ev->status) {
if (ev->role)
conn->link_mode &= ~HCI_LM_MASTER;
else
conn->link_mode |= HCI_LM_MASTER;
}
clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
hci_role_switch_cfm(conn, ev->status, ev->role);
}
hci_dev_unlock(hdev);
}
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
__le16 *ptr;
int i;
skb_pull(skb, sizeof(*ev));
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
if (skb->len < ev->num_hndl * 4) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
tasklet_disable(&hdev->tx_task);
for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
struct hci_conn *conn;
__u16 handle, count;
handle = get_unaligned_le16(ptr++);
count = get_unaligned_le16(ptr++);
conn = hci_conn_hash_lookup_handle(hdev, handle);
if (conn) {
conn->sent -= count;
if (conn->type == ACL_LINK) {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
} else if (conn->type == LE_LINK) {
if (hdev->le_pkts) {
hdev->le_cnt += count;
if (hdev->le_cnt > hdev->le_pkts)
hdev->le_cnt = hdev->le_pkts;
} else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
}
} else {
hdev->sco_cnt += count;
if (hdev->sco_cnt > hdev->sco_pkts)
hdev->sco_cnt = hdev->sco_pkts;
}
}
}
tasklet_schedule(&hdev->tx_task);
tasklet_enable(&hdev->tx_task);
}
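/*
 * Editor's note: after the event header the payload is num_hndl entries
 * of 4 bytes each, a little-endian connection handle followed by a
 * little-endian completed-packet count, hence the
 * skb->len < ev->num_hndl * 4 sanity check and the two
 * get_unaligned_le16() reads per iteration.
 */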
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
conn->mode = ev->mode;
conn->interval = __le16_to_cpu(ev->interval);
if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
if (conn->mode == HCI_CM_ACTIVE)
conn->power_save = 1;
else
conn->power_save = 0;
}
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, ev->status);
}
hci_dev_unlock(hdev);
}
static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pin_code_req *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn && conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
hci_conn_put(conn);
}
if (!test_bit(HCI_PAIRABLE, &hdev->flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
else if (test_bit(HCI_MGMT, &hdev->flags)) {
u8 secure;
		if (conn && conn->pending_sec_level == BT_SECURITY_HIGH)
secure = 1;
else
secure = 0;
mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
}
hci_dev_unlock(hdev);
}
static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_req *ev = (void *) skb->data;
struct hci_cp_link_key_reply cp;
struct hci_conn *conn;
struct link_key *key;
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
return;
hci_dev_lock(hdev);
key = hci_find_link_key(hdev, &ev->bdaddr);
if (!key) {
BT_DBG("%s link key not found for %s", hdev->name,
batostr(&ev->bdaddr));
goto not_found;
}
BT_DBG("%s found key type %u for %s", hdev->name, key->type,
batostr(&ev->bdaddr));
if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
key->type == HCI_LK_DEBUG_COMBINATION) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
if (key->type == HCI_LK_UNAUTH_COMBINATION &&
conn->auth_type != 0xff &&
(conn->auth_type & 0x01)) {
BT_DBG("%s ignoring unauthenticated key", hdev->name);
goto not_found;
}
if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
conn->pending_sec_level == BT_SECURITY_HIGH) {
BT_DBG("%s ignoring key unauthenticated for high \
security", hdev->name);
goto not_found;
}
conn->key_type = key->type;
conn->pin_length = key->pin_len;
}
bacpy(&cp.bdaddr, &ev->bdaddr);
memcpy(cp.link_key, key->val, 16);
hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
hci_dev_unlock(hdev);
return;
not_found:
hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
hci_dev_unlock(hdev);
}
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
u8 pin_len = 0;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
pin_len = conn->pin_length;
if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
conn->key_type = ev->key_type;
hci_conn_put(conn);
}
if (test_bit(HCI_LINK_KEYS, &hdev->flags))
hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
ev->key_type, pin_len);
hci_dev_unlock(hdev);
}
static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_clock_offset *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn && !ev->status) {
struct inquiry_entry *ie;
ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
if (ie) {
ie->data.clock_offset = ev->clock_offset;
ie->timestamp = jiffies;
}
}
hci_dev_unlock(hdev);
}
static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pkt_type_change *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn && !ev->status)
conn->pkt_type = __le16_to_cpu(ev->pkt_type);
hci_dev_unlock(hdev);
}
static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
struct inquiry_entry *ie;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie) {
ie->data.pscan_rep_mode = ev->pscan_rep_mode;
ie->timestamp = jiffies;
}
hci_dev_unlock(hdev);
}
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
hci_dev_lock(hdev);
if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 1);
}
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
data.pscan_mode = info->pscan_mode;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
mgmt_device_found(hdev->id, &info->bdaddr,
info->dev_class, info->rssi,
NULL);
}
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
data.pscan_mode = 0x00;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
mgmt_device_found(hdev->id, &info->bdaddr,
info->dev_class, info->rssi,
NULL);
}
}
hci_dev_unlock(hdev);
}
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_ext_features *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
if (!ev->status && ev->page == 0x01) {
struct inquiry_entry *ie;
ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
if (ie)
ie->data.ssp_mode = (ev->features[0] & 0x01);
conn->ssp_mode = (ev->features[0] & 0x01);
}
if (conn->state != BT_CONFIG)
goto unlock;
if (!ev->status) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
}
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
if (ev->link_type == ESCO_LINK)
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
conn->type = SCO_LINK;
}
switch (ev->status) {
case 0x00:
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
break;
case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
hci_setup_sync(conn, conn->link->handle);
goto unlock;
}
/* fall through */
default:
conn->state = BT_CLOSED;
break;
}
hci_proto_connect_cfm(conn, ev->status);
if (ev->status)
hci_conn_del(conn);
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_sniff_subrate *ev = (void *) skb->data;
BT_DBG("%s status %d", hdev->name, ev->status);
}
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 1);
}
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
data.pscan_mode = 0x00;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x01;
hci_inquiry_cache_update(hdev, &data);
mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
info->rssi, info->data);
}
hci_dev_unlock(hdev);
}
static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
/* If remote requests dedicated bonding follow that lead */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
/* If both remote and local IO capabilities allow MITM
* protection then require it, otherwise don't */
if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
return 0x02;
else
return 0x03;
}
/* If remote requests no-bonding follow that lead */
if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
return conn->remote_auth | (conn->auth_type & 0x01);
return conn->auth_type;
}
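/* Worked examples of the mapping above (illustrative, not from the
* original source; capability value 0x03 is NoInputNoOutput):
*   remote_auth 0x03, remote_cap 0x01, io_capability 0x01 -> 0x03
*     (dedicated bonding with MITM, both sides can support it)
*   remote_auth 0x02, remote_cap 0x03                     -> 0x02
*     (dedicated bonding, MITM impossible with NoInputNoOutput)
*   remote_auth 0x00, auth_type 0x01                      -> 0x01
*     (no bonding requested, but the local MITM bit is preserved)
*/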
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
hci_conn_hold(conn);
if (!test_bit(HCI_MGMT, &hdev->flags))
goto unlock;
if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.capability = conn->io_capability;
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
hci_find_remote_oob_data(hdev, &conn->dst))
cp.oob_data = 0x01;
else
cp.oob_data = 0x00;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
sizeof(cp), &cp);
} else {
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.reason = 0x18; /* Pairing not allowed */
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
sizeof(cp), &cp);
}
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_reply *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
conn->remote_cap = ev->capability;
conn->remote_oob = ev->oob_data;
conn->remote_auth = ev->authentication;
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
int loc_mitm, rem_mitm, confirm_hint = 0;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
if (!test_bit(HCI_MGMT, &hdev->flags))
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
loc_mitm = (conn->auth_type & 0x01);
rem_mitm = (conn->remote_auth & 0x01);
/* If we require MITM but the remote device can't provide that
* (it has NoInputNoOutput) then reject the confirmation
* request. The only exception is when we're dedicated bonding
* initiators (connect_cfm_cb set) since then we always have the MITM
* bit set. */
if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
BT_DBG("Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
}
/* If neither side requires MITM protection, auto-accept */
if ((!loc_mitm || conn->remote_cap == 0x03) &&
(!rem_mitm || conn->io_capability == 0x03)) {
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
* confirm_hint set to 1). */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
BT_DBG("Confirming auto-accept as acceptor");
confirm_hint = 1;
goto confirm;
}
BT_DBG("Auto-accept of user confirmation with %ums delay",
hdev->auto_accept_delay);
if (hdev->auto_accept_delay > 0) {
int delay = msecs_to_jiffies(hdev->auto_accept_delay);
mod_timer(&conn->auto_accept_timer, jiffies + delay);
goto unlock;
}
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
}
confirm:
mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
confirm_hint);
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
/* To avoid duplicate auth_failed events to user space we check
* the HCI_CONN_AUTH_PEND flag which will be set if we
* initiated the authentication. A traditional auth_complete
* event is always produced for the initiator and is also mapped
* to the mgmt_auth_failed event */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) {
mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
conn->out = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT/200; /* 0.01 sec */
}
hci_conn_put(conn);
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_host_features *ev = (void *) skb->data;
struct inquiry_entry *ie;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
ie->data.ssp_mode = (ev->features[0] & 0x01);
hci_dev_unlock(hdev);
}
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
struct oob_data *data;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
if (!test_bit(HCI_MGMT, &hdev->flags))
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
if (data) {
struct hci_cp_remote_oob_data_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
memcpy(cp.hash, data->hash, sizeof(cp.hash));
memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
&cp);
} else {
struct hci_cp_remote_oob_data_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
&cp);
}
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
return;
}
conn->dst_type = ev->bdaddr_type;
}
if (ev->status) {
mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
mgmt_connected(hdev->id, &ev->bdaddr);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
hci_proto_connect_cfm(conn, ev->status);
unlock:
hci_dev_unlock(hdev);
}
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_le_advertising_info *ev;
u8 num_reports;
num_reports = skb->data[0];
ev = (void *) &skb->data[1];
hci_dev_lock(hdev);
hci_add_adv_entry(hdev, ev);
while (--num_reports) {
ev = (void *) (ev->data + ev->length + 1);
hci_add_adv_entry(hdev, ev);
}
hci_dev_unlock(hdev);
}
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_le_ltk_req *ev = (void *) skb->data;
struct hci_cp_le_ltk_reply cp;
struct hci_cp_le_ltk_neg_reply neg;
struct hci_conn *conn;
struct link_key *ltk;
BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn == NULL)
goto not_found;
ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
if (ltk == NULL)
goto not_found;
memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
cp.handle = cpu_to_le16(conn->handle);
conn->pin_length = ltk->pin_len;
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
hci_dev_unlock(hdev);
return;
not_found:
neg.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
hci_dev_unlock(hdev);
}
static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
skb_pull(skb, sizeof(*le_ev));
switch (le_ev->subevent) {
case HCI_EV_LE_CONN_COMPLETE:
hci_le_conn_complete_evt(hdev, skb);
break;
case HCI_EV_LE_ADVERTISING_REPORT:
hci_le_adv_report_evt(hdev, skb);
break;
case HCI_EV_LE_LTK_REQ:
hci_le_ltk_request_evt(hdev, skb);
break;
default:
break;
}
}
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
__u8 event = hdr->evt;
skb_pull(skb, HCI_EVENT_HDR_SIZE);
switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
break;
case HCI_EV_INQUIRY_RESULT:
hci_inquiry_result_evt(hdev, skb);
break;
case HCI_EV_CONN_COMPLETE:
hci_conn_complete_evt(hdev, skb);
break;
case HCI_EV_CONN_REQUEST:
hci_conn_request_evt(hdev, skb);
break;
case HCI_EV_DISCONN_COMPLETE:
hci_disconn_complete_evt(hdev, skb);
break;
case HCI_EV_AUTH_COMPLETE:
hci_auth_complete_evt(hdev, skb);
break;
case HCI_EV_REMOTE_NAME:
hci_remote_name_evt(hdev, skb);
break;
case HCI_EV_ENCRYPT_CHANGE:
hci_encrypt_change_evt(hdev, skb);
break;
case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
hci_change_link_key_complete_evt(hdev, skb);
break;
case HCI_EV_REMOTE_FEATURES:
hci_remote_features_evt(hdev, skb);
break;
case HCI_EV_REMOTE_VERSION:
hci_remote_version_evt(hdev, skb);
break;
case HCI_EV_QOS_SETUP_COMPLETE:
hci_qos_setup_complete_evt(hdev, skb);
break;
case HCI_EV_CMD_COMPLETE:
hci_cmd_complete_evt(hdev, skb);
break;
case HCI_EV_CMD_STATUS:
hci_cmd_status_evt(hdev, skb);
break;
case HCI_EV_ROLE_CHANGE:
hci_role_change_evt(hdev, skb);
break;
case HCI_EV_NUM_COMP_PKTS:
hci_num_comp_pkts_evt(hdev, skb);
break;
case HCI_EV_MODE_CHANGE:
hci_mode_change_evt(hdev, skb);
break;
case HCI_EV_PIN_CODE_REQ:
hci_pin_code_request_evt(hdev, skb);
break;
case HCI_EV_LINK_KEY_REQ:
hci_link_key_request_evt(hdev, skb);
break;
case HCI_EV_LINK_KEY_NOTIFY:
hci_link_key_notify_evt(hdev, skb);
break;
case HCI_EV_CLOCK_OFFSET:
hci_clock_offset_evt(hdev, skb);
break;
case HCI_EV_PKT_TYPE_CHANGE:
hci_pkt_type_change_evt(hdev, skb);
break;
case HCI_EV_PSCAN_REP_MODE:
hci_pscan_rep_mode_evt(hdev, skb);
break;
case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
hci_inquiry_result_with_rssi_evt(hdev, skb);
break;
case HCI_EV_REMOTE_EXT_FEATURES:
hci_remote_ext_features_evt(hdev, skb);
break;
case HCI_EV_SYNC_CONN_COMPLETE:
hci_sync_conn_complete_evt(hdev, skb);
break;
case HCI_EV_SYNC_CONN_CHANGED:
hci_sync_conn_changed_evt(hdev, skb);
break;
case HCI_EV_SNIFF_SUBRATE:
hci_sniff_subrate_evt(hdev, skb);
break;
case HCI_EV_EXTENDED_INQUIRY_RESULT:
hci_extended_inquiry_result_evt(hdev, skb);
break;
case HCI_EV_IO_CAPA_REQUEST:
hci_io_capa_request_evt(hdev, skb);
break;
case HCI_EV_IO_CAPA_REPLY:
hci_io_capa_reply_evt(hdev, skb);
break;
case HCI_EV_USER_CONFIRM_REQUEST:
hci_user_confirm_request_evt(hdev, skb);
break;
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;
case HCI_EV_REMOTE_HOST_FEATURES:
hci_remote_host_features_evt(hdev, skb);
break;
case HCI_EV_LE_META:
hci_le_meta_evt(hdev, skb);
break;
case HCI_EV_REMOTE_OOB_DATA_REQUEST:
hci_remote_oob_data_request_evt(hdev, skb);
break;
default:
BT_DBG("%s event 0x%x", hdev->name, event);
break;
}
kfree_skb(skb);
hdev->stat.evt_rx++;
}
/* Generate internal stack event */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
struct hci_event_hdr *hdr;
struct hci_ev_stack_internal *ev;
struct sk_buff *skb;
skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
if (!skb)
return;
hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
hdr->evt = HCI_EV_STACK_INTERNAL;
hdr->plen = sizeof(*ev) + dlen;
ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
ev->type = type;
memcpy(ev->data, data, dlen);
bt_cb(skb)->incoming = 1;
__net_timestamp(skb);
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
skb->dev = (void *) hdev;
hci_send_to_sock(hdev, skb, NULL);
kfree_skb(skb);
}
module_param(enable_le, bool, 0444);
MODULE_PARM_DESC(enable_le, "Enable LE support");
| gpl-2.0 |
klquicksall/HTC-MSM-8X60-ICS | arch/arm/mm/dma-mapping.c | 558 | 16893 | /*
* linux/arch/arm/mm/dma-mapping.c
*
* Copyright (C) 2000-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* DMA uncached mapping support.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = ISA_DMA_THRESHOLD;
if (dev) {
mask = dev->coherent_dma_mask;
/*
* Sanity check the DMA mask - it must be non-zero, and
* must be able to be satisfied by a DMA allocation.
*/
if (mask == 0) {
dev_warn(dev, "coherent DMA mask is unset\n");
return 0;
}
if ((~mask) & ISA_DMA_THRESHOLD) {
dev_warn(dev, "coherent DMA mask %#llx is smaller "
"than system GFP_DMA mask %#llx\n",
mask, (unsigned long long)ISA_DMA_THRESHOLD);
return 0;
}
}
return mask;
}
/*
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
*/
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
void *ptr;
u64 mask = get_coherent_dma_mask(dev);
#ifdef CONFIG_DMA_API_DEBUG
u64 limit = (mask + 1) & ~mask;
if (limit && size >= limit) {
dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
size, mask);
return NULL;
}
#endif
if (!mask)
return NULL;
if (mask < 0xffffffffULL)
gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
return NULL;
/*
* Now split the huge page and free the excess pages
*/
split_page(page, order);
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
ptr = page_address(page);
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
return page;
}
/*
* Free a DMA buffer. 'size' must be page aligned.
*/
static void __dma_free_buffer(struct page *page, size_t size)
{
struct page *e = page + (size >> PAGE_SHIFT);
while (page < e) {
__free_page(page);
page++;
}
}
#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
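/* Sizing example (illustrative; assumes the common ARM layout where
* PGDIR_SHIFT is 21, i.e. one 2MiB section per entry): with
* CONSISTENT_DMA_SIZE = SZ_16M, NUM_CONSISTENT_PTES = 16M >> 21 = 8,
* so eight pte tables cover the whole region and CONSISTENT_PTE_INDEX()
* selects one of them from a consistent-area virtual address.
*/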
#include "vmregion.h"
static struct arm_vmregion_head consistent_head = {
.vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
.vm_start = CONSISTENT_BASE,
.vm_end = CONSISTENT_END,
};
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
/*
* Initialise the consistent memory allocation.
*/
static int __init consistent_init(void)
{
int ret = 0;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i = 0;
u32 base = CONSISTENT_BASE;
do {
pgd = pgd_offset(&init_mm, base);
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
printk(KERN_ERR "%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
printk(KERN_ERR "%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
printk(KERN_ERR "%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
consistent_pte[i++] = pte;
base += (1 << PGDIR_SHIFT);
} while (base < CONSISTENT_END);
return ret;
}
core_initcall(consistent_init);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
struct arm_vmregion *c;
size_t align;
int bit;
if (!consistent_pte[0]) {
printk(KERN_ERR "%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
/*
* Align the virtual region allocation - maximum alignment is
* a section size, minimum is a page size. This helps reduce
* fragmentation of the DMA space, and also prevents allocations
* smaller than a section from crossing a section boundary.
*/
bit = fls(size - 1);
if (bit > SECTION_SHIFT)
bit = SECTION_SHIFT;
align = 1 << bit;
/*
* Allocate a virtual address in the consistent mapping region.
*/
c = arm_vmregion_alloc(&consistent_head, align, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
pte_t *pte;
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
c->vm_pages = page;
do {
BUG_ON(!pte_none(*pte));
set_pte_ext(pte, mk_pte(page, prot), 0);
page++;
pte++;
off++;
if (off >= PTRS_PER_PTE) {
off = 0;
pte = consistent_pte[++idx];
}
} while (size -= PAGE_SIZE);
dsb();
return (void *)c->vm_start;
}
return NULL;
}
static void __dma_free_remap(void *cpu_addr, size_t size)
{
struct arm_vmregion *c;
unsigned long addr;
pte_t *ptep;
int idx;
u32 off;
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
if (!c) {
printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
}
if ((c->vm_end - c->vm_start) != size) {
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
}
idx = CONSISTENT_PTE_INDEX(c->vm_start);
off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
ptep = consistent_pte[idx] + off;
addr = c->vm_start;
do {
pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
ptep++;
addr += PAGE_SIZE;
off++;
if (off >= PTRS_PER_PTE) {
off = 0;
ptep = consistent_pte[++idx];
}
if (pte_none(pte) || !pte_present(pte))
printk(KERN_CRIT "%s: bad page in kernel page table\n",
__func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
arm_vmregion_free(&consistent_head, c);
}
#else /* !CONFIG_MMU */
#define __dma_alloc_remap(page, size, gfp, prot) page_address(page)
#define __dma_free_remap(addr, size) do { } while (0)
#endif /* CONFIG_MMU */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
pgprot_t prot)
{
struct page *page;
void *addr;
*handle = ~0;
size = PAGE_ALIGN(size);
page = __dma_alloc_buffer(dev, size, gfp);
if (!page)
return NULL;
if (!arch_is_coherent())
addr = __dma_alloc_remap(page, size, gfp, prot);
else
addr = page_address(page);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
else
__dma_free_buffer(page, size);
return addr;
}
/*
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
return __dma_alloc(dev, size, handle, gfp,
pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
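/* Typical caller pattern (illustrative sketch only; "mydev" and the
* buffer size are made up):
*
*	dma_addr_t bus;
*	void *cpu = dma_alloc_coherent(&mydev->dev, PAGE_SIZE, &bus,
*				       GFP_KERNEL);
*	if (!cpu)
*		return -ENOMEM;
*	... program the device with "bus", access the buffer via "cpu" ...
*	dma_free_coherent(&mydev->dev, PAGE_SIZE, cpu, bus);
*/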
/*
* Allocate a writecombining region, in much the same way as
* dma_alloc_coherent above.
*/
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
unsigned long user_size, kern_size;
struct arm_vmregion *c;
user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
if (c) {
unsigned long off = vma->vm_pgoff;
kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
if (off < kern_size &&
user_size <= (kern_size - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
page_to_pfn(c->vm_pages) + off,
user_size << PAGE_SHIFT,
vma->vm_page_prot);
}
}
#endif /* CONFIG_MMU */
return ret;
}
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
WARN_ON(irqs_disabled());
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
if (!arch_is_coherent())
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);
/*
* Make an area consistent for devices.
* Note: Drivers should NOT use this function directly, as it will break
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
unsigned long paddr;
BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
#endif
dmac_map_area(kaddr, size, dir);
#ifdef CONFIG_OUTER_CACHE
paddr = __pa(kaddr);
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
} else {
outer_clean_range(paddr, paddr + size);
}
#endif
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
/* FIXME: non-speculating: not required */
/* don't bother invalidating if DMA to device */
if (dir != DMA_TO_DEVICE) {
unsigned long paddr = __pa(kaddr);
outer_inv_range(paddr, paddr + size);
}
#endif
dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
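/* As the note above says, drivers reach these helpers only through the
* dma_sync_* API. An illustrative streaming-DMA round trip (sketch, not
* taken from this file):
*
*	dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
*	... device DMAs into the buffer ...
*	dma_sync_single_for_cpu(dev, h, len, DMA_FROM_DEVICE);
*	... CPU reads buf ...
*	dma_sync_single_for_device(dev, h, len, DMA_FROM_DEVICE);
*	... device may DMA again ...
*	dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
*/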
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
{
/*
* A single sg entry may refer to multiple physically contiguous
* pages. But we still need to process highmem pages individually.
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
size_t left = size;
do {
size_t len = left;
void *vaddr;
if (PageHighMem(page)) {
if (len + offset > PAGE_SIZE) {
if (offset >= PAGE_SIZE) {
page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
}
len = PAGE_SIZE - offset;
}
vaddr = kmap_high_get(page);
if (vaddr) {
vaddr += offset;
op(vaddr, len, dir);
kunmap_high(page);
} else if (cache_is_vipt()) {
/* unmapped pages might still be cached */
vaddr = kmap_atomic(page);
op(vaddr + offset, len, dir);
kunmap_atomic(vaddr);
}
} else {
vaddr = page_address(page) + offset;
op(vaddr, len, dir);
}
offset = 0;
page++;
left -= len;
} while (left);
}
void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
dma_cache_maint_page(page, off, size, dir, dmac_map_area);
paddr = page_to_phys(page) + off;
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
} else {
outer_clean_range(paddr, paddr + size);
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
/* FIXME: non-speculating: not required */
/* don't bother invalidating if DMA to device */
if (dir != DMA_TO_DEVICE)
outer_inv_range(paddr, paddr + size);
dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
/*
* Mark the D-cache clean for this page to avoid extra flushing.
*/
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
* dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the dma_map_single interface.
* Here the scatter gather list elements are each tagged with the
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}.
*
* Device ownership issues as mentioned for dma_map_single are the same
* here.
*/
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
struct scatterlist *s;
int i, j;
BUG_ON(!valid_dma_direction(dir));
for_each_sg(sg, s, nents, i) {
s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
return 0;
}
EXPORT_SYMBOL(dma_map_sg);
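/* Illustrative caller (sketch; "program_desc" and "hw" are made-up
* names and declarations are elided): map the table, hand the tagged
* addresses to the hardware, then unmap with the same nents that was
* passed to dma_map_sg():
*
*	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
*	if (count == 0)
*		return -ENOMEM;
*	for_each_sg(sgl, s, count, i)
*		program_desc(hw, sg_dma_address(s), sg_dma_len(s));
*	...
*	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
*/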
/**
* dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
debug_dma_unmap_sg(dev, sg, nents, dir);
for_each_sg(sg, s, nents, i)
__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);
/**
* dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
sg_dma_len(s), dir))
continue;
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
* dma_sync_sg_for_device
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
sg_dma_len(s), dir))
continue;
__dma_page_cpu_to_dev(sg_page(s), s->offset,
s->length, dir);
}
debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
static int __init dma_debug_do_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
fs_initcall(dma_debug_do_init);
| gpl-2.0 |
freebsdmax/gsmart1315_kernel | drivers/hid/hid-tmff.c | 558 | 7194 | /*
* Force feedback support for various HID compliant devices by ThrustMaster:
* ThrustMaster FireStorm Dual Power 2
* and possibly others whose device ids haven't been added.
*
* Modified to support ThrustMaster devices by Zinx Verituse
* on 2003-01-25 from the Logitech force feedback driver,
* which is by Johann Deneux.
*
* Copyright (c) 2003 Zinx Verituse <zinx@epicsol.org>
* Copyright (c) 2002 Johann Deneux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/usb.h>
#include "hid-ids.h"
static const signed short ff_rumble[] = {
FF_RUMBLE,
-1
};
static const signed short ff_joystick[] = {
FF_CONSTANT,
-1
};
#ifdef CONFIG_THRUSTMASTER_FF
#include "usbhid/usbhid.h"
/* Usages for thrustmaster devices I know about */
#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb)
struct tmff_device {
struct hid_report *report;
struct hid_field *ff_field;
};
/* Changes values from 0 to 0xffff into values from minimum to maximum */
static inline int tmff_scale_u16(unsigned int in, int minimum, int maximum)
{
int ret;
ret = (in * (maximum - minimum) / 0xffff) + minimum;
if (ret < minimum)
return minimum;
if (ret > maximum)
return maximum;
return ret;
}
/* Changes values from -0x80 to 0x7f into values from minimum to maximum */
static inline int tmff_scale_s8(int in, int minimum, int maximum)
{
int ret;
ret = (((in + 0x80) * (maximum - minimum)) / 0xff) + minimum;
if (ret < minimum)
return minimum;
if (ret > maximum)
return maximum;
return ret;
}
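/* Worked examples (illustrative): for a logical range of [-10, 10],
*	tmff_scale_u16(0x0000, -10, 10) == -10
*	tmff_scale_u16(0x8000, -10, 10) ==   0
*	tmff_scale_u16(0xffff, -10, 10) ==  10
* and for the signed 8-bit variant
*	tmff_scale_s8(-128, -10, 10) == -10
*	tmff_scale_s8(   0, -10, 10) ==   0
*	tmff_scale_s8( 127, -10, 10) ==  10
*/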
static int tmff_play(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
struct tmff_device *tmff = data;
struct hid_field *ff_field = tmff->ff_field;
int x, y;
int left, right; /* Rumbling */
switch (effect->type) {
case FF_CONSTANT:
x = tmff_scale_s8(effect->u.ramp.start_level,
ff_field->logical_minimum,
ff_field->logical_maximum);
y = tmff_scale_s8(effect->u.ramp.end_level,
ff_field->logical_minimum,
ff_field->logical_maximum);
dbg_hid("(x, y)=(%04x, %04x)\n", x, y);
ff_field->value[0] = x;
ff_field->value[1] = y;
usbhid_submit_report(hid, tmff->report, USB_DIR_OUT);
break;
case FF_RUMBLE:
left = tmff_scale_u16(effect->u.rumble.weak_magnitude,
ff_field->logical_minimum,
ff_field->logical_maximum);
right = tmff_scale_u16(effect->u.rumble.strong_magnitude,
ff_field->logical_minimum,
ff_field->logical_maximum);
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
ff_field->value[0] = left;
ff_field->value[1] = right;
usbhid_submit_report(hid, tmff->report, USB_DIR_OUT);
break;
}
return 0;
}
static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
{
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
int error;
int i;
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;
/* Find the report to use */
report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
list_for_each_entry(report, report_list, list) {
int fieldnum;
for (fieldnum = 0; fieldnum < report->maxfield; ++fieldnum) {
struct hid_field *field = report->field[fieldnum];
if (field->maxusage <= 0)
continue;
switch (field->usage[0].hid) {
case THRUSTMASTER_USAGE_FF:
if (field->report_count < 2) {
dev_warn(&hid->dev, "ignoring FF field "
"with report_count < 2\n");
continue;
}
if (field->logical_maximum ==
field->logical_minimum) {
dev_warn(&hid->dev, "ignoring FF field "
"with logical_maximum "
"== logical_minimum\n");
continue;
}
if (tmff->report && tmff->report != report) {
dev_warn(&hid->dev, "ignoring FF field "
"in other report\n");
continue;
}
if (tmff->ff_field && tmff->ff_field != field) {
dev_warn(&hid->dev, "ignoring "
"duplicate FF field\n");
continue;
}
tmff->report = report;
tmff->ff_field = field;
for (i = 0; ff_bits[i] >= 0; i++)
set_bit(ff_bits[i], input_dev->ffbit);
break;
default:
dev_warn(&hid->dev, "ignoring unknown output "
"usage %08x\n",
field->usage[0].hid);
continue;
}
}
}
if (!tmff->report) {
dev_err(&hid->dev, "can't find FF field in output reports\n");
error = -ENODEV;
goto fail;
}
error = input_ff_create_memless(input_dev, tmff, tmff_play);
if (error)
goto fail;
dev_info(&hid->dev, "force feedback for ThrustMaster devices by Zinx "
"Verituse <zinx@epicsol.org>");
return 0;
fail:
kfree(tmff);
return error;
}
#else
static inline int tmff_init(struct hid_device *hid, const signed short *ff_bits)
{
return 0;
}
#endif
static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
ret = hid_parse(hdev);
if (ret) {
dev_err(&hdev->dev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
dev_err(&hdev->dev, "hw start failed\n");
goto err;
}
tmff_init(hdev, (void *)id->driver_data);
return 0;
err:
return ret;
}
static const struct hid_device_id tm_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300),
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */
.driver_data = (unsigned long)ff_joystick },
{ }
};
MODULE_DEVICE_TABLE(hid, tm_devices);
static struct hid_driver tm_driver = {
.name = "thrustmaster",
.id_table = tm_devices,
.probe = tm_probe,
};
static int __init tm_init(void)
{
return hid_register_driver(&tm_driver);
}
static void __exit tm_exit(void)
{
hid_unregister_driver(&tm_driver);
}
module_init(tm_init);
module_exit(tm_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
leemchaehoon/linux_m | drivers/platform/goldfish/goldfish_pipe.c | 814 | 17723 | /*
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* This source file contains the implementation of a special device driver
* that intends to provide a *very* fast communication channel between the
* guest system and the QEMU emulator.
*
* Usage from the guest is simply the following (error handling simplified):
*
* int fd = open("/dev/qemu_pipe",O_RDWR);
* .... write() or read() through the pipe.
*
* This driver doesn't deal with the exact protocol used during the session.
* It is intended to be as simple as something like:
*
* // do this _just_ after opening the fd to connect to a specific
* // emulator service.
* const char* msg = "<pipename>";
* if (write(fd, msg, strlen(msg)+1) < 0) {
* ... could not connect to <pipename> service
* close(fd);
* }
*
* // after this, simply read() and write() to communicate with the
* // service. Exact protocol details left as an exercise to the reader.
*
* This driver is very fast because it doesn't copy any data through
* intermediate buffers, since the emulator is capable of translating
* guest user addresses into host ones.
*
* Note that we must however ensure that each user page involved in the
* exchange is properly mapped during a transfer.
*/
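/* A slightly fuller guest-side sketch (illustrative; "pipe:ping" is a
* hypothetical service name and error handling is still simplified):
*
*	int fd = open("/dev/qemu_pipe", O_RDWR);
*	const char *name = "pipe:ping";
*	char reply[64];
*	if (write(fd, name, strlen(name) + 1) < 0) {
*		close(fd);	// could not connect to the service
*		return -1;
*	}
*	write(fd, "hello", 5);		// service-specific payload
*	read(fd, reply, sizeof(reply));	// service-specific reply
*	close(fd);
*/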
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
/*
* IMPORTANT: The following constants must match the ones used and defined
* in external/qemu/hw/goldfish_pipe.c in the Android source tree.
*/
/* pipe device registers */
#define PIPE_REG_COMMAND 0x00 /* write: value = command */
#define PIPE_REG_STATUS 0x04 /* read */
#define PIPE_REG_CHANNEL 0x08 /* read/write: channel id */
#define PIPE_REG_CHANNEL_HIGH 0x30 /* read/write: channel id */
#define PIPE_REG_SIZE 0x0c /* read/write: buffer size */
#define PIPE_REG_ADDRESS 0x10 /* write: physical address */
#define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */
#define PIPE_REG_WAKES 0x14 /* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN 1 /* open new channel */
#define CMD_CLOSE 2 /* close channel (from guest) */
#define CMD_POLL 3 /* poll read/write status */
/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN (1 << 0)
#define PIPE_POLL_OUT (1 << 1)
#define PIPE_POLL_HUP (1 << 2)
/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
is possible */
/* The following commands are related to read operations, they must be
* listed in the same order than the corresponding write ones, since we
* will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
* in goldfish_pipe_read_write() below.
*/
#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
* is possible */
/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL -1
#define PIPE_ERROR_AGAIN -2
#define PIPE_ERROR_NOMEM -3
#define PIPE_ERROR_IO -4
/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED (1 << 0) /* emulator closed pipe */
#define PIPE_WAKE_READ (1 << 1) /* pipe can now be read from */
#define PIPE_WAKE_WRITE (1 << 2) /* pipe can now be written to */
struct access_params {
unsigned long channel;
u32 size;
unsigned long address;
u32 cmd;
u32 result;
/* reserved for future extension */
u32 flags;
};
/* The global driver data. Holds a reference to the i/o page used to
* communicate with the emulator, and a wake queue for blocked tasks
* waiting to be awoken.
*/
struct goldfish_pipe_dev {
spinlock_t lock;
unsigned char __iomem *base;
struct access_params *aps;
int irq;
};
static struct goldfish_pipe_dev pipe_dev[1];
/* This data type models a given pipe instance */
struct goldfish_pipe {
struct goldfish_pipe_dev *dev;
struct mutex lock;
unsigned long flags;
wait_queue_head_t wake_queue;
};
/* Bit flags for the 'flags' field */
enum {
BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
};
static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
unsigned long flags;
u32 status;
struct goldfish_pipe_dev *dev = pipe->dev;
spin_lock_irqsave(&dev->lock, flags);
gf_write64((u64)(unsigned long)pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(cmd, dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
spin_unlock_irqrestore(&dev->lock, flags);
return status;
}
static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
unsigned long flags;
struct goldfish_pipe_dev *dev = pipe->dev;
spin_lock_irqsave(&dev->lock, flags);
gf_write64((u64)(unsigned long)pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(cmd, dev->base + PIPE_REG_COMMAND);
spin_unlock_irqrestore(&dev->lock, flags);
}
/* This function converts an error code returned by the emulator through
* the PIPE_REG_STATUS i/o register into a valid negative errno value.
*/
static int goldfish_pipe_error_convert(int status)
{
switch (status) {
case PIPE_ERROR_AGAIN:
return -EAGAIN;
case PIPE_ERROR_NOMEM:
return -ENOMEM;
case PIPE_ERROR_IO:
return -EIO;
default:
return -EINVAL;
}
}
/*
* Notice: QEMU returns 0 for unknown register accesses, which is how we
* detect whether the batched param_access mechanism is supported
*/
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
struct access_params *aps)
{
u32 aph, apl;
u64 paddr;
aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
paddr = ((u64)aph << 32) | apl;
if (paddr != __pa(aps))
return 0;
return 1;
}
/* 0 on success */
static int setup_access_params_addr(struct platform_device *pdev,
struct goldfish_pipe_dev *dev)
{
u64 paddr;
struct access_params *aps;
aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
if (!aps)
return -1;
/* FIXME */
paddr = __pa(aps);
writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
if (valid_batchbuffer_addr(dev, aps)) {
dev->aps = aps;
return 0;
} else
return -1;
}
/* A value that will not be set by qemu emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
unsigned long address, unsigned long avail,
struct goldfish_pipe *pipe, int *status)
{
struct access_params *aps = dev->aps;
if (aps == NULL)
return -1;
aps->result = INITIAL_BATCH_RESULT;
aps->channel = (unsigned long)pipe;
aps->size = avail;
aps->address = address;
aps->cmd = cmd;
writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
/*
* If the aps->result has not changed, that means
* that the batch command failed
*/
if (aps->result == INITIAL_BATCH_RESULT)
return -1;
*status = aps->result;
return 0;
}
/* This function is used for both reading from and writing to a given
* pipe.
*/
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
size_t bufflen, int is_write)
{
unsigned long irq_flags;
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
const int cmd_offset = is_write ? 0
: (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
unsigned long address, address_end;
int ret = 0;
/* If the emulator already closed the pipe, no need to go further */
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
return -EIO;
/* Null reads or writes succeed */
if (unlikely(bufflen == 0))
return 0;
/* Check the buffer range for access */
if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
buffer, bufflen))
return -EFAULT;
/* Serialize access to the pipe */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
address = (unsigned long)(void *)buffer;
address_end = address + bufflen;
while (address < address_end) {
unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
unsigned long next = page_end < address_end ? page_end
: address_end;
unsigned long avail = next - address;
int status, wakeBit;
/* Ensure that the corresponding page is properly mapped */
/* FIXME: this isn't safe or sufficient - use get_user_pages */
if (is_write) {
char c;
/* Ensure that the page is mapped and readable */
if (__get_user(c, (char __user *)address)) {
if (!ret)
ret = -EFAULT;
break;
}
} else {
/* Ensure that the page is mapped and writable */
if (__put_user(0, (char __user *)address)) {
if (!ret)
ret = -EFAULT;
break;
}
}
/* Now, try to transfer the bytes in the current page */
spin_lock_irqsave(&dev->lock, irq_flags);
if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
address, avail, pipe, &status)) {
gf_write64((u64)(unsigned long)pipe,
dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(avail, dev->base + PIPE_REG_SIZE);
gf_write64(address, dev->base + PIPE_REG_ADDRESS,
dev->base + PIPE_REG_ADDRESS_HIGH);
writel(CMD_WRITE_BUFFER + cmd_offset,
dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
if (status > 0) { /* Correct transfer */
ret += status;
address += status;
continue;
}
if (status == 0) /* EOF */
break;
/* An error occurred. If we already transferred data, just
* return its count. We expect the next call to return
* an error code. */
if (ret > 0)
break;
/* If the error is not PIPE_ERROR_AGAIN, or if we are not in
* non-blocking mode, just return the error code.
*/
if (status != PIPE_ERROR_AGAIN ||
(filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
/* We will have to wait until more data/space is available.
* First, mark the pipe as waiting for a specific wake signal.
*/
wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
/* Unlock the pipe, then wait for the wake signal */
mutex_unlock(&pipe->lock);
while (test_bit(wakeBit, &pipe->flags)) {
if (wait_event_interruptible(
pipe->wake_queue,
!test_bit(wakeBit, &pipe->flags)))
return -ERESTARTSYS;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
return -EIO;
}
/* Try to re-acquire the lock */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
/* Try the transfer again */
continue;
}
mutex_unlock(&pipe->lock);
return ret;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
size_t bufflen, loff_t *ppos)
{
return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
const char __user *buffer, size_t bufflen,
loff_t *ppos)
{
return goldfish_pipe_read_write(filp, (char __user *)buffer,
bufflen, 1);
}
static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
unsigned int mask = 0;
int status;
mutex_lock(&pipe->lock);
poll_wait(filp, &pipe->wake_queue, wait);
status = goldfish_cmd_status(pipe, CMD_POLL);
mutex_unlock(&pipe->lock);
if (status & PIPE_POLL_IN)
mask |= POLLIN | POLLRDNORM;
if (status & PIPE_POLL_OUT)
mask |= POLLOUT | POLLWRNORM;
if (status & PIPE_POLL_HUP)
mask |= POLLHUP;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
mask |= POLLERR;
return mask;
}
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
struct goldfish_pipe_dev *dev = dev_id;
unsigned long irq_flags;
int count = 0;
/* We're going to read from the emulator a list of (channel,flags)
* pairs corresponding to the wake events that occurred on each
* blocked pipe (i.e. channel).
*/
spin_lock_irqsave(&dev->lock, irq_flags);
for (;;) {
/* First read the channel, 0 means the end of the list */
struct goldfish_pipe *pipe;
unsigned long wakes;
unsigned long channel = 0;
#ifdef CONFIG_64BIT
channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;
if (channel == 0)
break;
#endif
channel |= readl(dev->base + PIPE_REG_CHANNEL);
if (channel == 0)
break;
/* Convert channel to struct pipe pointer + read wake flags */
wakes = readl(dev->base + PIPE_REG_WAKES);
pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;
/* Did the emulator just close a pipe? */
if (wakes & PIPE_WAKE_CLOSED) {
set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
}
if (wakes & PIPE_WAKE_READ)
clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
if (wakes & PIPE_WAKE_WRITE)
clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
wake_up_interruptible(&pipe->wake_queue);
count++;
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}
/**
* goldfish_pipe_open - open a channel to the AVD
* @inode: inode of device
* @file: file struct of opener
*
* Create a new pipe link between the emulator and the user application.
* Each new request produces a new pipe.
*
* Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
* right now so this is fine. A move to 64bit will need this addressing
* scheme reworked.
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
struct goldfish_pipe *pipe;
struct goldfish_pipe_dev *dev = pipe_dev;
int32_t status;
/* Allocate new pipe kernel object */
pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
if (pipe == NULL)
return -ENOMEM;
pipe->dev = dev;
mutex_init(&pipe->lock);
init_waitqueue_head(&pipe->wake_queue);
/*
* Now, tell the emulator we're opening a new pipe. We use the
* pipe object's address as the channel identifier for simplicity.
*/
status = goldfish_cmd_status(pipe, CMD_OPEN);
if (status < 0) {
kfree(pipe);
return status;
}
/* All is done, save the pipe into the file's private data field */
file->private_data = pipe;
return 0;
}
static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
struct goldfish_pipe *pipe = filp->private_data;
/* The guest is closing the channel, so tell the emulator right now */
goldfish_cmd(pipe, CMD_CLOSE);
kfree(pipe);
filp->private_data = NULL;
return 0;
}
static const struct file_operations goldfish_pipe_fops = {
.owner = THIS_MODULE,
.read = goldfish_pipe_read,
.write = goldfish_pipe_write,
.poll = goldfish_pipe_poll,
.open = goldfish_pipe_open,
.release = goldfish_pipe_release,
};
static struct miscdevice goldfish_pipe_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
};
static int goldfish_pipe_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
struct goldfish_pipe_dev *dev = pipe_dev;
/* not thread safe, but this should not happen */
WARN_ON(dev->base != NULL);
spin_lock_init(&dev->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL || resource_size(r) < PAGE_SIZE) {
dev_err(&pdev->dev, "can't allocate i/o page\n");
return -EINVAL;
}
dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
if (dev->base == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
return -EINVAL;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (r == NULL) {
err = -EINVAL;
goto error;
}
dev->irq = r->start;
err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ\n");
goto error;
}
err = misc_register(&goldfish_pipe_device);
if (err) {
dev_err(&pdev->dev, "unable to register device\n");
goto error;
}
setup_access_params_addr(pdev, dev);
return 0;
error:
dev->base = NULL;
return err;
}
static int goldfish_pipe_remove(struct platform_device *pdev)
{
struct goldfish_pipe_dev *dev = pipe_dev;
misc_deregister(&goldfish_pipe_device);
dev->base = NULL;
return 0;
}
static struct platform_driver goldfish_pipe = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
.name = "goldfish_pipe"
}
};
module_platform_driver(goldfish_pipe);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Wren6991/linux | samples/bpf/tracex3_user.c | 814 | 2932 | /* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include "libbpf.h"
#include "bpf_load.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define SLOTS 100
static void clear_stats(int fd)
{
__u32 key;
__u64 value = 0;
for (key = 0; key < SLOTS; key++)
bpf_update_elem(fd, &key, &value, BPF_ANY);
}
const char *color[] = {
"\033[48;5;255m",
"\033[48;5;252m",
"\033[48;5;250m",
"\033[48;5;248m",
"\033[48;5;246m",
"\033[48;5;244m",
"\033[48;5;242m",
"\033[48;5;240m",
"\033[48;5;238m",
"\033[48;5;236m",
"\033[48;5;234m",
"\033[48;5;232m",
};
const int num_colors = ARRAY_SIZE(color);
const char nocolor[] = "\033[00m";
const char *sym[] = {
" ",
" ",
".",
".",
"*",
"*",
"o",
"o",
"O",
"O",
"#",
"#",
};
bool full_range = false;
bool text_only = false;
static void print_banner(void)
{
if (full_range)
printf("|1ns |10ns |100ns |1us |10us |100us"
" |1ms |10ms |100ms |1s |10s\n");
else
printf("|1us |10us |100us |1ms |10ms "
"|100ms |1s |10s\n");
}
static void print_hist(int fd)
{
__u32 key;
__u64 value;
__u64 cnt[SLOTS];
__u64 max_cnt = 0;
__u64 total_events = 0;
for (key = 0; key < SLOTS; key++) {
value = 0;
bpf_lookup_elem(fd, &key, &value);
cnt[key] = value;
total_events += value;
if (value > max_cnt)
max_cnt = value;
}
clear_stats(fd);
for (key = full_range ? 0 : 29; key < SLOTS; key++) {
int c = num_colors * cnt[key] / (max_cnt + 1);
if (text_only)
printf("%s", sym[c]);
else
printf("%s %s", color[c], nocolor);
}
printf(" # %lld\n", total_events);
}
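/*
* Worked example of the bucket-to-color mapping above: with
* num_colors == 12 and max_cnt == 100, a slot with cnt == 100 maps to
* c = 12 * 100 / 101 = 11 (the darkest color/symbol), cnt == 50 maps to
* c = 12 * 50 / 101 = 5, and cnt == 0 maps to c = 0. Dividing by
* (max_cnt + 1) instead of max_cnt keeps c strictly below num_colors,
* so color[c] and sym[c] can never index out of bounds.
*/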
int main(int ac, char **argv)
{
char filename[256];
int i;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
}
for (i = 1; i < ac; i++) {
if (strcmp(argv[i], "-a") == 0) {
full_range = true;
} else if (strcmp(argv[i], "-t") == 0) {
text_only = true;
} else if (strcmp(argv[i], "-h") == 0) {
printf("Usage:\n"
" -a display wider latency range\n"
" -t text only\n");
return 1;
}
}
printf(" heatmap of IO latency\n");
if (text_only)
printf(" %s", sym[num_colors - 1]);
else
printf(" %s %s", color[num_colors - 1], nocolor);
printf(" - many events with this latency\n");
if (text_only)
printf(" %s", sym[0]);
else
printf(" %s %s", color[0], nocolor);
printf(" - few events\n");
for (i = 0; ; i++) {
if (i % 20 == 0)
print_banner();
print_hist(map_fd[1]);
sleep(2);
}
return 0;
}
| gpl-2.0 |
AndroidGX/SimpleGX-L-5.0.2_G901F | drivers/usb/phy/phy-samsung-usb2.c | 1838 | 12631 | /* linux/drivers/usb/phy/phy-samsung-usb2.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Praveen Paneri <p.paneri@samsung.com>
*
* Samsung USB2.0 PHY transceiver; talks to S3C HS OTG controller, EHCI-S5P and
* OHCI-EXYNOS controllers.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/usb/otg.h>
#include <linux/usb/samsung_usb_phy.h>
#include <linux/platform_data/samsung-usbphy.h>
#include "phy-samsung-usb.h"
static int samsung_usbphy_set_host(struct usb_otg *otg, struct usb_bus *host)
{
if (!otg)
return -ENODEV;
if (!otg->host)
otg->host = host;
return 0;
}
static bool exynos5_phyhost_is_on(void __iomem *regs)
{
u32 reg;
reg = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
return !(reg & HOST_CTRL0_SIDDQ);
}
static void samsung_exynos5_usb2phy_enable(struct samsung_usbphy *sphy)
{
void __iomem *regs = sphy->regs;
u32 phyclk = sphy->ref_clk_freq;
u32 phyhost;
u32 phyotg;
u32 phyhsic;
u32 ehcictrl;
u32 ohcictrl;
/*
* phy_usage helps in keeping usage count for phy
* so that the first consumer enabling the phy is also
* the last consumer to disable it.
*/
atomic_inc(&sphy->phy_usage);
if (exynos5_phyhost_is_on(regs)) {
dev_info(sphy->dev, "PHY is already powered on\n");
return;
}
/* Host configuration */
phyhost = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
/* phy reference clock configuration */
phyhost &= ~HOST_CTRL0_FSEL_MASK;
phyhost |= HOST_CTRL0_FSEL(phyclk);
/* host phy reset */
phyhost &= ~(HOST_CTRL0_PHYSWRST |
HOST_CTRL0_PHYSWRSTALL |
HOST_CTRL0_SIDDQ |
/* Enable normal mode of operation */
HOST_CTRL0_FORCESUSPEND |
HOST_CTRL0_FORCESLEEP);
/* Link reset */
phyhost |= (HOST_CTRL0_LINKSWRST |
HOST_CTRL0_UTMISWRST |
/* COMMON Block configuration during suspend */
HOST_CTRL0_COMMONON_N);
writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
udelay(10);
phyhost &= ~(HOST_CTRL0_LINKSWRST |
HOST_CTRL0_UTMISWRST);
writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
/* OTG configuration */
phyotg = readl(regs + EXYNOS5_PHY_OTG_SYS);
/* phy reference clock configuration */
phyotg &= ~OTG_SYS_FSEL_MASK;
phyotg |= OTG_SYS_FSEL(phyclk);
/* Enable normal mode of operation */
phyotg &= ~(OTG_SYS_FORCESUSPEND |
OTG_SYS_SIDDQ_UOTG |
OTG_SYS_FORCESLEEP |
OTG_SYS_REFCLKSEL_MASK |
/* COMMON Block configuration during suspend */
OTG_SYS_COMMON_ON);
/* OTG phy & link reset */
phyotg |= (OTG_SYS_PHY0_SWRST |
OTG_SYS_LINKSWRST_UOTG |
OTG_SYS_PHYLINK_SWRESET |
OTG_SYS_OTGDISABLE |
/* Set phy refclk */
OTG_SYS_REFCLKSEL_CLKCORE);
writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
udelay(10);
phyotg &= ~(OTG_SYS_PHY0_SWRST |
OTG_SYS_LINKSWRST_UOTG |
OTG_SYS_PHYLINK_SWRESET);
writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
/* HSIC phy configuration */
phyhsic = (HSIC_CTRL_REFCLKDIV_12 |
HSIC_CTRL_REFCLKSEL |
HSIC_CTRL_PHYSWRST);
writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
udelay(10);
phyhsic &= ~HSIC_CTRL_PHYSWRST;
writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
udelay(80);
/* enable EHCI DMA burst */
ehcictrl = readl(regs + EXYNOS5_PHY_HOST_EHCICTRL);
ehcictrl |= (HOST_EHCICTRL_ENAINCRXALIGN |
HOST_EHCICTRL_ENAINCR4 |
HOST_EHCICTRL_ENAINCR8 |
HOST_EHCICTRL_ENAINCR16);
writel(ehcictrl, regs + EXYNOS5_PHY_HOST_EHCICTRL);
/* set ohci_suspend_on_n */
ohcictrl = readl(regs + EXYNOS5_PHY_HOST_OHCICTRL);
ohcictrl |= HOST_OHCICTRL_SUSPLGCY;
writel(ohcictrl, regs + EXYNOS5_PHY_HOST_OHCICTRL);
}
static void samsung_usb2phy_enable(struct samsung_usbphy *sphy)
{
void __iomem *regs = sphy->regs;
u32 phypwr;
u32 phyclk;
u32 rstcon;
/* set clock frequency for PLL */
phyclk = sphy->ref_clk_freq;
phypwr = readl(regs + SAMSUNG_PHYPWR);
rstcon = readl(regs + SAMSUNG_RSTCON);
switch (sphy->drv_data->cpu_type) {
case TYPE_S3C64XX:
phyclk &= ~PHYCLK_COMMON_ON_N;
phypwr &= ~PHYPWR_NORMAL_MASK;
rstcon |= RSTCON_SWRST;
break;
case TYPE_EXYNOS4210:
phypwr &= ~PHYPWR_NORMAL_MASK_PHY0;
rstcon |= RSTCON_SWRST;
default:
break;
}
writel(phyclk, regs + SAMSUNG_PHYCLK);
/* Configure PHY0 for normal operation*/
writel(phypwr, regs + SAMSUNG_PHYPWR);
/* reset all ports of PHY and Link */
writel(rstcon, regs + SAMSUNG_RSTCON);
udelay(10);
rstcon &= ~RSTCON_SWRST;
writel(rstcon, regs + SAMSUNG_RSTCON);
}
static void samsung_exynos5_usb2phy_disable(struct samsung_usbphy *sphy)
{
void __iomem *regs = sphy->regs;
u32 phyhost;
u32 phyotg;
u32 phyhsic;
if (atomic_dec_return(&sphy->phy_usage) > 0) {
dev_info(sphy->dev, "still being used\n");
return;
}
phyhsic = (HSIC_CTRL_REFCLKDIV_12 |
HSIC_CTRL_REFCLKSEL |
HSIC_CTRL_SIDDQ |
HSIC_CTRL_FORCESLEEP |
HSIC_CTRL_FORCESUSPEND);
writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
phyhost = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
phyhost |= (HOST_CTRL0_SIDDQ |
HOST_CTRL0_FORCESUSPEND |
HOST_CTRL0_FORCESLEEP |
HOST_CTRL0_PHYSWRST |
HOST_CTRL0_PHYSWRSTALL);
writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
phyotg = readl(regs + EXYNOS5_PHY_OTG_SYS);
phyotg |= (OTG_SYS_FORCESUSPEND |
OTG_SYS_SIDDQ_UOTG |
OTG_SYS_FORCESLEEP);
writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
}
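/*
* Worked example of the phy_usage pairing between the enable/disable
* paths: if two consumers (say EHCI and OHCI) bring the PHY up,
* phy_usage goes 0 -> 1 -> 2 and the second enable returns early since
* the host block is already out of SIDDQ. The first disable drops the
* count 2 -> 1 and bails out with "still being used"; only the second
* disable (1 -> 0) actually forces the host, OTG and HSIC blocks into
* suspend/sleep.
*/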
static void samsung_usb2phy_disable(struct samsung_usbphy *sphy)
{
void __iomem *regs = sphy->regs;
u32 phypwr;
phypwr = readl(regs + SAMSUNG_PHYPWR);
switch (sphy->drv_data->cpu_type) {
case TYPE_S3C64XX:
phypwr |= PHYPWR_NORMAL_MASK;
break;
case TYPE_EXYNOS4210:
phypwr |= PHYPWR_NORMAL_MASK_PHY0;
default:
break;
}
/* Disable analog and otg block power */
writel(phypwr, regs + SAMSUNG_PHYPWR);
}
/*
* The function passed to the usb driver for phy initialization
*/
static int samsung_usb2phy_init(struct usb_phy *phy)
{
struct samsung_usbphy *sphy;
struct usb_bus *host = NULL;
unsigned long flags;
int ret = 0;
sphy = phy_to_sphy(phy);
host = phy->otg->host;
/* Enable the phy clock */
ret = clk_prepare_enable(sphy->clk);
if (ret) {
dev_err(sphy->dev, "%s: clk_prepare_enable failed\n", __func__);
return ret;
}
spin_lock_irqsave(&sphy->lock, flags);
if (host) {
/* setting default phy-type for USB 2.0 */
if (!strstr(dev_name(host->controller), "ehci") ||
!strstr(dev_name(host->controller), "ohci"))
samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_HOST);
} else {
samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
}
/* Disable phy isolation */
if (sphy->plat && sphy->plat->pmu_isolation)
sphy->plat->pmu_isolation(false);
else
samsung_usbphy_set_isolation(sphy, false);
/* Selecting Host/OTG mode; After reset USB2.0PHY_CFG: HOST */
samsung_usbphy_cfg_sel(sphy);
/* Initialize usb phy registers */
if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
samsung_exynos5_usb2phy_enable(sphy);
else
samsung_usb2phy_enable(sphy);
spin_unlock_irqrestore(&sphy->lock, flags);
/* Disable the phy clock */
clk_disable_unprepare(sphy->clk);
return ret;
}
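/*
* A minimal sketch of the consumer side, assuming the generic usb_phy
* API of this kernel generation: a host or gadget controller driver
* looks the transceiver up and drives the init/shutdown hooks installed
* by probe() below. Error handling is elided; consumer_attach_phy() is
* a hypothetical name.
*/
#if 0 /* example, not part of the driver */
static int consumer_attach_phy(struct device *dev)
{
	struct usb_phy *phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);

	if (IS_ERR(phy))
		return PTR_ERR(phy);
	/* ends up in samsung_usb2phy_init() above */
	return usb_phy_init(phy);
}
#endif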
/*
* The function passed to the usb driver for phy shutdown
*/
static void samsung_usb2phy_shutdown(struct usb_phy *phy)
{
struct samsung_usbphy *sphy;
struct usb_bus *host = NULL;
unsigned long flags;
sphy = phy_to_sphy(phy);
host = phy->otg->host;
if (clk_prepare_enable(sphy->clk)) {
dev_err(sphy->dev, "%s: clk_prepare_enable failed\n", __func__);
return;
}
spin_lock_irqsave(&sphy->lock, flags);
if (host) {
/* setting default phy-type for USB 2.0 */
if (!strstr(dev_name(host->controller), "ehci") ||
!strstr(dev_name(host->controller), "ohci"))
samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_HOST);
} else {
samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
}
/* De-initialize usb phy registers */
if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
samsung_exynos5_usb2phy_disable(sphy);
else
samsung_usb2phy_disable(sphy);
/* Enable phy isolation */
if (sphy->plat && sphy->plat->pmu_isolation)
sphy->plat->pmu_isolation(true);
else
samsung_usbphy_set_isolation(sphy, true);
spin_unlock_irqrestore(&sphy->lock, flags);
clk_disable_unprepare(sphy->clk);
}
static int samsung_usb2phy_probe(struct platform_device *pdev)
{
struct samsung_usbphy *sphy;
struct usb_otg *otg;
struct samsung_usbphy_data *pdata = pdev->dev.platform_data;
const struct samsung_usbphy_drvdata *drv_data;
struct device *dev = &pdev->dev;
struct resource *phy_mem;
void __iomem *phy_base;
struct clk *clk;
int ret;
phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
phy_base = devm_ioremap_resource(dev, phy_mem);
if (IS_ERR(phy_base))
return PTR_ERR(phy_base);
sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL);
if (!sphy)
return -ENOMEM;
otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
drv_data = samsung_usbphy_get_driver_data(pdev);
if (drv_data->cpu_type == TYPE_EXYNOS5250)
clk = devm_clk_get(dev, "usbhost");
else
clk = devm_clk_get(dev, "otg");
if (IS_ERR(clk)) {
dev_err(dev, "Failed to get otg clock\n");
return PTR_ERR(clk);
}
sphy->dev = dev;
if (dev->of_node) {
ret = samsung_usbphy_parse_dt(sphy);
if (ret < 0)
return ret;
} else {
if (!pdata) {
dev_err(dev, "no platform data specified\n");
return -EINVAL;
}
}
sphy->plat = pdata;
sphy->regs = phy_base;
sphy->clk = clk;
sphy->drv_data = drv_data;
sphy->phy.dev = sphy->dev;
sphy->phy.label = "samsung-usb2phy";
sphy->phy.init = samsung_usb2phy_init;
sphy->phy.shutdown = samsung_usb2phy_shutdown;
sphy->ref_clk_freq = samsung_usbphy_get_refclk_freq(sphy);
sphy->phy.otg = otg;
sphy->phy.otg->phy = &sphy->phy;
sphy->phy.otg->set_host = samsung_usbphy_set_host;
spin_lock_init(&sphy->lock);
platform_set_drvdata(pdev, sphy);
return usb_add_phy(&sphy->phy, USB_PHY_TYPE_USB2);
}
static int samsung_usb2phy_remove(struct platform_device *pdev)
{
struct samsung_usbphy *sphy = platform_get_drvdata(pdev);
usb_remove_phy(&sphy->phy);
if (sphy->pmuregs)
iounmap(sphy->pmuregs);
if (sphy->sysreg)
iounmap(sphy->sysreg);
return 0;
}
static const struct samsung_usbphy_drvdata usb2phy_s3c64xx = {
.cpu_type = TYPE_S3C64XX,
.devphy_en_mask = S3C64XX_USBPHY_ENABLE,
};
static const struct samsung_usbphy_drvdata usb2phy_exynos4 = {
.cpu_type = TYPE_EXYNOS4210,
.devphy_en_mask = EXYNOS_USBPHY_ENABLE,
.hostphy_en_mask = EXYNOS_USBPHY_ENABLE,
};
static struct samsung_usbphy_drvdata usb2phy_exynos5 = {
.cpu_type = TYPE_EXYNOS5250,
.hostphy_en_mask = EXYNOS_USBPHY_ENABLE,
.hostphy_reg_offset = EXYNOS_USBHOST_PHY_CTRL_OFFSET,
};
#ifdef CONFIG_OF
static const struct of_device_id samsung_usbphy_dt_match[] = {
{
.compatible = "samsung,s3c64xx-usb2phy",
.data = &usb2phy_s3c64xx,
}, {
.compatible = "samsung,exynos4210-usb2phy",
.data = &usb2phy_exynos4,
}, {
.compatible = "samsung,exynos5250-usb2phy",
.data = &usb2phy_exynos5
},
{},
};
MODULE_DEVICE_TABLE(of, samsung_usbphy_dt_match);
#endif
static struct platform_device_id samsung_usbphy_driver_ids[] = {
{
.name = "s3c64xx-usb2phy",
.driver_data = (unsigned long)&usb2phy_s3c64xx,
}, {
.name = "exynos4210-usb2phy",
.driver_data = (unsigned long)&usb2phy_exynos4,
}, {
.name = "exynos5250-usb2phy",
.driver_data = (unsigned long)&usb2phy_exynos5,
},
{},
};
MODULE_DEVICE_TABLE(platform, samsung_usbphy_driver_ids);
static struct platform_driver samsung_usb2phy_driver = {
.probe = samsung_usb2phy_probe,
.remove = samsung_usb2phy_remove,
.id_table = samsung_usbphy_driver_ids,
.driver = {
.name = "samsung-usb2phy",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(samsung_usbphy_dt_match),
},
};
module_platform_driver(samsung_usb2phy_driver);
MODULE_DESCRIPTION("Samsung USB 2.0 phy controller");
MODULE_AUTHOR("Praveen Paneri <p.paneri@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:samsung-usb2phy");
| gpl-2.0 |
sakindia123/android_kernel_samsung_j7eltexx | drivers/media/platform/exynos4-is/fimc-m2m.c | 2094 | 21611 | /*
* Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
*
* Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 2 of the License,
* or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
#include "fimc-reg.h"
#include "media-dev.h"
static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
{
if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return FMT_FLAGS_M2M_IN;
else
return FMT_FLAGS_M2M_OUT;
}
void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
struct vb2_buffer *src_vb, *dst_vb;
if (!ctx || !ctx->m2m_ctx)
return;
src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_vb && dst_vb) {
v4l2_m2m_buf_done(src_vb, vb_state);
v4l2_m2m_buf_done(dst_vb, vb_state);
v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
ctx->m2m_ctx);
}
}
/* Complete the transaction which has been scheduled for execution. */
static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
if (!fimc_m2m_pending(fimc))
return 0;
fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
ret = wait_event_timeout(fimc->irq_queue,
!fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
FIMC_SHUTDOWN_TIMEOUT);
return ret == 0 ? -ETIMEDOUT : ret;
}
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
struct fimc_ctx *ctx = q->drv_priv;
int ret;
ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
return ret > 0 ? 0 : ret;
}
static int stop_streaming(struct vb2_queue *q)
{
struct fimc_ctx *ctx = q->drv_priv;
int ret;
ret = fimc_m2m_shutdown(ctx);
if (ret == -ETIMEDOUT)
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
pm_runtime_put(&ctx->fimc_dev->pdev->dev);
return 0;
}
static void fimc_device_run(void *priv)
{
struct vb2_buffer *src_vb, *dst_vb;
struct fimc_ctx *ctx = priv;
struct fimc_frame *sf, *df;
struct fimc_dev *fimc;
unsigned long flags;
int ret;
if (WARN(!ctx, "Null context\n"))
return;
fimc = ctx->fimc_dev;
spin_lock_irqsave(&fimc->slock, flags);
set_bit(ST_M2M_PEND, &fimc->state);
sf = &ctx->s_frame;
df = &ctx->d_frame;
if (ctx->state & FIMC_PARAMS) {
/* Prepare the DMA offsets for scaler */
fimc_prepare_dma_offset(ctx, sf);
fimc_prepare_dma_offset(ctx, df);
}
src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
if (ret)
goto dma_unlock;
dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
if (ret)
goto dma_unlock;
dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
/* Reconfigure hardware if the context has changed. */
if (fimc->m2m.ctx != ctx) {
ctx->state |= FIMC_PARAMS;
fimc->m2m.ctx = ctx;
}
if (ctx->state & FIMC_PARAMS) {
fimc_set_yuv_order(ctx);
fimc_hw_set_input_path(ctx);
fimc_hw_set_in_dma(ctx);
ret = fimc_set_scaler_info(ctx);
if (ret)
goto dma_unlock;
fimc_hw_set_prescaler(ctx);
fimc_hw_set_mainscaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
fimc_hw_set_effect(ctx);
fimc_hw_set_out_dma(ctx);
if (fimc->drv_data->alpha_color)
fimc_hw_set_rgb_alpha(ctx);
fimc_hw_set_output_path(ctx);
}
fimc_hw_set_input_addr(fimc, &sf->paddr);
fimc_hw_set_output_addr(fimc, &df->paddr, -1);
fimc_activate_capture(ctx);
ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
spin_unlock_irqrestore(&fimc->slock, flags);
}
static void fimc_job_abort(void *priv)
{
fimc_m2m_shutdown(priv);
}
static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
struct fimc_frame *f;
int i;
f = ctx_get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
/*
* Return the number of non-contiguous planes (plane buffers)
* depending on the configured color format.
*/
if (!f->fmt)
return -EINVAL;
*num_planes = f->fmt->memplanes;
for (i = 0; i < f->fmt->memplanes; i++) {
sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
return 0;
}
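/*
* Worked example of the plane sizing above (the depth values are
* illustrative assumptions): for a two-plane 800x600 YUV 4:2:0 format
* with depth[] = { 8, 4 } bits per pixel, queue_setup reports
* sizes[0] = 800 * 600 * 8 / 8 = 480000 bytes for the luma plane and
* sizes[1] = 800 * 600 * 4 / 8 = 240000 bytes for the chroma plane,
* each backed by the device's dma-contig allocator context.
*/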
static int fimc_buf_prepare(struct vb2_buffer *vb)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct fimc_frame *frame;
int i;
frame = ctx_get_frame(ctx, vb->vb2_queue->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
for (i = 0; i < frame->fmt->memplanes; i++)
vb2_set_plane_payload(vb, i, frame->payload[i]);
return 0;
}
static void fimc_buf_queue(struct vb2_buffer *vb)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
if (ctx->m2m_ctx)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
static void fimc_lock(struct vb2_queue *vq)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
mutex_lock(&ctx->fimc_dev->lock);
}
static void fimc_unlock(struct vb2_queue *vq)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
mutex_unlock(&ctx->fimc_dev->lock);
}
static struct vb2_ops fimc_qops = {
.queue_setup = fimc_queue_setup,
.buf_prepare = fimc_buf_prepare,
.buf_queue = fimc_buf_queue,
.wait_prepare = fimc_unlock,
.wait_finish = fimc_lock,
.stop_streaming = stop_streaming,
.start_streaming = start_streaming,
};
/*
* V4L2 ioctl handlers
*/
static int fimc_m2m_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
struct fimc_dev *fimc = video_drvdata(file);
unsigned int caps;
/*
* This is only a mem-to-mem video device. The capture and output
* device capability flags are left only for backward compatibility
* and are scheduled for removal.
*/
caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
__fimc_vidioc_querycap(&fimc->pdev->dev, cap, caps);
return 0;
}
static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
f->index);
if (!fmt)
return -EINVAL;
strncpy(f->description, fmt->name, sizeof(f->description) - 1);
f->pixelformat = fmt->fourcc;
return 0;
}
static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
__fimc_get_format(frame, f);
return 0;
}
static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
{
struct fimc_dev *fimc = ctx->fimc_dev;
const struct fimc_variant *variant = fimc->variant;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_fmt *fmt;
u32 max_w, mod_x, mod_y;
if (!IS_M2M(f->type))
return -EINVAL;
fmt = fimc_find_format(&pix->pixelformat, NULL,
get_m2m_fmt_flags(f->type), 0);
if (WARN(fmt == NULL, "Pixel format lookup failed"))
return -EINVAL;
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_NONE;
else if (pix->field != V4L2_FIELD_NONE)
return -EINVAL;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
max_w = variant->pix_limit->scaler_dis_w;
mod_x = ffs(variant->min_inp_pixsize) - 1;
} else {
max_w = variant->pix_limit->out_rot_dis_w;
mod_x = ffs(variant->min_out_pixsize) - 1;
}
if (tiled_fmt(fmt)) {
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
if (variant->min_vsize_align == 1)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
mod_y = ffs(variant->min_vsize_align) - 1;
}
v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
return 0;
}
static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return fimc_try_fmt_mplane(ctx, f);
}
static void __set_frame_format(struct fimc_frame *frame, struct fimc_fmt *fmt,
struct v4l2_pix_format_mplane *pixm)
{
int i;
for (i = 0; i < fmt->colplanes; i++) {
frame->bytesperline[i] = pixm->plane_fmt[i].bytesperline;
frame->payload[i] = pixm->plane_fmt[i].sizeimage;
}
frame->f_width = pixm->width;
frame->f_height = pixm->height;
frame->o_width = pixm->width;
frame->o_height = pixm->height;
frame->width = pixm->width;
frame->height = pixm->height;
frame->offs_h = 0;
frame->offs_v = 0;
frame->fmt = fmt;
}
static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_fmt *fmt;
struct vb2_queue *vq;
struct fimc_frame *frame;
int ret;
ret = fimc_try_fmt_mplane(ctx, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
return -EBUSY;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
frame = &ctx->s_frame;
else
frame = &ctx->d_frame;
fmt = fimc_find_format(&f->fmt.pix_mp.pixelformat, NULL,
get_m2m_fmt_flags(f->type), 0);
if (!fmt)
return -EINVAL;
__set_frame_format(frame, fmt, &f->fmt.pix_mp);
/* Update RGB Alpha control state and value range */
fimc_alpha_ctrl_update(ctx);
return 0;
}
static int fimc_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
static int fimc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
static int fimc_m2m_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
static int fimc_m2m_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
static int fimc_m2m_expbuf(struct file *file, void *fh,
struct v4l2_exportbuffer *eb)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
static int fimc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
static int fimc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
static int fimc_m2m_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_frame *frame;
frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
cr->bounds.left = 0;
cr->bounds.top = 0;
cr->bounds.width = frame->o_width;
cr->bounds.height = frame->o_height;
cr->defrect = cr->bounds;
return 0;
}
static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_frame *frame;
frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
cr->c.left = frame->offs_h;
cr->c.top = frame->offs_v;
cr->c.width = frame->width;
cr->c.height = frame->height;
return 0;
}
static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
{
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
u32 min_size, halign, depth = 0;
int i;
if (cr->c.top < 0 || cr->c.left < 0) {
v4l2_err(&fimc->m2m.vfd,
"doesn't support negative values for top & left\n");
return -EINVAL;
}
if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
f = &ctx->d_frame;
else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
f = &ctx->s_frame;
else
return -EINVAL;
min_size = (f == &ctx->s_frame) ?
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
if (fimc->variant->min_vsize_align == 1)
halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
else
halign = ffs(fimc->variant->min_vsize_align) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
ffs(min_size) - 1,
&cr->c.height, min_size, f->o_height,
halign, 64/(ALIGN(depth, 8)));
/* adjust left/top if cropping rectangle is out of bounds */
if (cr->c.left + cr->c.width > f->o_width)
cr->c.left = f->o_width - cr->c.width;
if (cr->c.top + cr->c.height > f->o_height)
cr->c.top = f->o_height - cr->c.height;
cr->c.left = round_down(cr->c.left, min_size);
cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
cr->c.left, cr->c.top, cr->c.width, cr->c.height,
f->f_width, f->f_height);
return 0;
}
static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
struct v4l2_crop cr = *crop;
struct fimc_frame *f;
int ret;
ret = fimc_m2m_try_crop(ctx, &cr);
if (ret)
return ret;
f = (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
&ctx->s_frame : &ctx->d_frame;
/* Check to see if scaling ratio is within supported range */
if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
ret = fimc_check_scaler_ratio(ctx, cr.c.width,
cr.c.height, ctx->d_frame.width,
ctx->d_frame.height, ctx->rotation);
} else {
ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
ctx->s_frame.height, cr.c.width,
cr.c.height, ctx->rotation);
}
if (ret) {
v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
return -EINVAL;
}
f->offs_h = cr.c.left;
f->offs_v = cr.c.top;
f->width = cr.c.width;
f->height = cr.c.height;
fimc_ctx_state_set(FIMC_PARAMS, ctx);
return 0;
}
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querycap = fimc_m2m_querycap,
.vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
.vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
.vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
.vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
.vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
.vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
.vidioc_reqbufs = fimc_m2m_reqbufs,
.vidioc_querybuf = fimc_m2m_querybuf,
.vidioc_qbuf = fimc_m2m_qbuf,
.vidioc_dqbuf = fimc_m2m_dqbuf,
.vidioc_expbuf = fimc_m2m_expbuf,
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
.vidioc_g_crop = fimc_m2m_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
.vidioc_cropcap = fimc_m2m_cropcap
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct fimc_ctx *ctx = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &fimc_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &fimc_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
return vb2_queue_init(dst_vq);
}
static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
{
struct v4l2_pix_format_mplane pixm = {
.pixelformat = V4L2_PIX_FMT_RGB32,
.width = 800,
.height = 600,
.plane_fmt[0] = {
.bytesperline = 800 * 4,
.sizeimage = 800 * 4 * 600,
},
};
struct fimc_fmt *fmt;
fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
if (!fmt)
return -EINVAL;
__set_frame_format(&ctx->s_frame, fmt, &pixm);
__set_frame_format(&ctx->d_frame, fmt, &pixm);
return 0;
}
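/*
* Sizing check for the default format above: V4L2_PIX_FMT_RGB32 is
* 4 bytes per pixel, so bytesperline = 800 * 4 = 3200 and
* sizeimage = 3200 * 600 = 1920000 bytes in the single plane; both the
* source and destination frames start out with this 800x600 layout.
*/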
static int fimc_m2m_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx;
int ret = -EBUSY;
pr_debug("pid: %d, state: %#lx\n", task_pid_nr(current), fimc->state);
if (mutex_lock_interruptible(&fimc->lock))
return -ERESTARTSYS;
/*
* Don't allow simultaneous open() of the mem-to-mem and the
* capture video node that belong to same FIMC IP instance.
*/
if (test_bit(ST_CAPT_BUSY, &fimc->state))
goto unlock;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
ret = -ENOMEM;
goto unlock;
}
v4l2_fh_init(&ctx->fh, &fimc->m2m.vfd);
ctx->fimc_dev = fimc;
/* Default color format */
ctx->s_frame.fmt = fimc_get_format(0);
ctx->d_frame.fmt = fimc_get_format(0);
ret = fimc_ctrls_create(ctx);
if (ret)
goto error_fh;
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrls.handler;
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
/* Setup the device context for memory-to-memory mode */
ctx->state = FIMC_CTX_M2M;
ctx->flags = 0;
ctx->in_path = FIMC_IO_DMA;
ctx->out_path = FIMC_IO_DMA;
ctx->scaler.enabled = 1;
ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
ret = PTR_ERR(ctx->m2m_ctx);
goto error_c;
}
if (fimc->m2m.refcnt++ == 0)
set_bit(ST_M2M_RUN, &fimc->state);
ret = fimc_m2m_set_default_format(ctx);
if (ret < 0)
goto error_m2m_ctx;
mutex_unlock(&fimc->lock);
return 0;
error_m2m_ctx:
v4l2_m2m_ctx_release(ctx->m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
error_fh:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
unlock:
mutex_unlock(&fimc->lock);
return ret;
}
static int fimc_m2m_release(struct file *file)
{
struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
struct fimc_dev *fimc = ctx->fimc_dev;
dbg("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
mutex_lock(&fimc->lock);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
fimc_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
if (--fimc->m2m.refcnt <= 0)
clear_bit(ST_M2M_RUN, &fimc->state);
kfree(ctx);
mutex_unlock(&fimc->lock);
return 0;
}
static unsigned int fimc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
if (mutex_lock_interruptible(&fimc->lock))
return -ERESTARTSYS;
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
mutex_unlock(&fimc->lock);
return ret;
}
static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
if (mutex_lock_interruptible(&fimc->lock))
return -ERESTARTSYS;
ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
mutex_unlock(&fimc->lock);
return ret;
}
static const struct v4l2_file_operations fimc_m2m_fops = {
.owner = THIS_MODULE,
.open = fimc_m2m_open,
.release = fimc_m2m_release,
.poll = fimc_m2m_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = fimc_m2m_mmap,
};
static struct v4l2_m2m_ops m2m_ops = {
.device_run = fimc_device_run,
.job_abort = fimc_job_abort,
};
int fimc_register_m2m_device(struct fimc_dev *fimc,
struct v4l2_device *v4l2_dev)
{
struct video_device *vfd = &fimc->m2m.vfd;
int ret;
fimc->v4l2_dev = v4l2_dev;
memset(vfd, 0, sizeof(*vfd));
vfd->fops = &fimc_m2m_fops;
vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
vfd->v4l2_dev = v4l2_dev;
vfd->minor = -1;
vfd->release = video_device_release_empty;
vfd->lock = &fimc->lock;
vfd->vfl_dir = VFL_DIR_M2M;
snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
video_set_drvdata(vfd, fimc);
fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
if (IS_ERR(fimc->m2m.m2m_dev)) {
v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
return PTR_ERR(fimc->m2m.m2m_dev);
}
ret = media_entity_init(&vfd->entity, 0, NULL, 0);
if (ret)
goto err_me;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret)
goto err_vd;
v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
vfd->name, video_device_node_name(vfd));
return 0;
err_vd:
media_entity_cleanup(&vfd->entity);
err_me:
v4l2_m2m_release(fimc->m2m.m2m_dev);
return ret;
}
void fimc_unregister_m2m_device(struct fimc_dev *fimc)
{
if (!fimc)
return;
if (fimc->m2m.m2m_dev)
v4l2_m2m_release(fimc->m2m.m2m_dev);
if (video_is_registered(&fimc->m2m.vfd)) {
video_unregister_device(&fimc->m2m.vfd);
media_entity_cleanup(&fimc->m2m.vfd.entity);
}
}
| gpl-2.0 |
muddy1/herckernels | arch/mips/lantiq/xway/gpio.c | 2606 | 5144 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <lantiq_soc.h>
#define LTQ_GPIO_OUT 0x00
#define LTQ_GPIO_IN 0x04
#define LTQ_GPIO_DIR 0x08
#define LTQ_GPIO_ALTSEL0 0x0C
#define LTQ_GPIO_ALTSEL1 0x10
#define LTQ_GPIO_OD 0x14
#define PINS_PER_PORT 16
#define MAX_PORTS 3
#define ltq_gpio_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p)))
#define ltq_gpio_setbit(m, r, p) ltq_w32_mask(0, (1 << p), m + r)
#define ltq_gpio_clearbit(m, r, p) ltq_w32_mask((1 << p), 0, m + r)
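/*
* Short illustration of the helpers above, assuming ltq_w32_mask(clr,
* set, reg) does a read-modify-write that first clears the 'clr' bits
* and then sets the 'set' bits: ltq_gpio_setbit(base, LTQ_GPIO_OUT, 3)
* ORs bit 3 into the output register, ltq_gpio_clearbit() masks it back
* out, and ltq_gpio_getbit() reduces the sampled bit to 0/1 via the
* double negation.
*/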
struct ltq_gpio {
void __iomem *membase;
struct gpio_chip chip;
};
static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
int gpio_to_irq(unsigned int gpio)
{
return -EINVAL;
}
EXPORT_SYMBOL(gpio_to_irq);
int irq_to_gpio(unsigned int gpio)
{
return -EINVAL;
}
EXPORT_SYMBOL(irq_to_gpio);
int ltq_gpio_request(unsigned int pin, unsigned int alt0,
unsigned int alt1, unsigned int dir, const char *name)
{
int id = 0;
if (pin >= (MAX_PORTS * PINS_PER_PORT))
return -EINVAL;
if (gpio_request(pin, name)) {
pr_err("failed to setup lantiq gpio: %s\n", name);
return -EBUSY;
}
if (dir)
gpio_direction_output(pin, 1);
else
gpio_direction_input(pin);
while (pin >= PINS_PER_PORT) {
pin -= PINS_PER_PORT;
id++;
}
if (alt0)
ltq_gpio_setbit(ltq_gpio_port[id].membase,
LTQ_GPIO_ALTSEL0, pin);
else
ltq_gpio_clearbit(ltq_gpio_port[id].membase,
LTQ_GPIO_ALTSEL0, pin);
if (alt1)
ltq_gpio_setbit(ltq_gpio_port[id].membase,
LTQ_GPIO_ALTSEL1, pin);
else
ltq_gpio_clearbit(ltq_gpio_port[id].membase,
LTQ_GPIO_ALTSEL1, pin);
return 0;
}
EXPORT_SYMBOL(ltq_gpio_request);
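/*
* Worked example of the pin-to-port mapping above: with PINS_PER_PORT
* == 16, global pin 37 reduces to offset 5 on port id 2, so the ALTSEL
* bits land in the third port's register window, while gpio_request()
* and gpio_direction_*() still operate on the global pin number.
*/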
static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
if (value)
ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset);
else
ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset);
}
static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
return ltq_gpio_getbit(ltq_gpio->membase, LTQ_GPIO_IN, offset);
}
static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OD, offset);
ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset);
return 0;
}
static int ltq_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OD, offset);
ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset);
ltq_gpio_set(chip, offset, value);
return 0;
}
static int ltq_gpio_req(struct gpio_chip *chip, unsigned offset)
{
struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL0, offset);
ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL1, offset);
return 0;
}
static int ltq_gpio_probe(struct platform_device *pdev)
{
struct resource *res;
if (pdev->id >= MAX_PORTS) {
dev_err(&pdev->dev, "invalid gpio port %d\n",
pdev->id);
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get memory for gpio port %d\n",
pdev->id);
return -ENOENT;
}
res = devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), dev_name(&pdev->dev));
if (!res) {
dev_err(&pdev->dev,
"failed to request memory for gpio port %d\n",
pdev->id);
return -EBUSY;
}
ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev,
res->start, resource_size(res));
if (!ltq_gpio_port[pdev->id].membase) {
dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n",
pdev->id);
return -ENOMEM;
}
ltq_gpio_port[pdev->id].chip.label = "ltq_gpio";
ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input;
ltq_gpio_port[pdev->id].chip.direction_output =
ltq_gpio_direction_output;
ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get;
ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set;
ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req;
ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id;
ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT;
platform_set_drvdata(pdev, <q_gpio_port[pdev->id]);
return gpiochip_add(<q_gpio_port[pdev->id].chip);
}
static struct platform_driver
ltq_gpio_driver = {
.probe = ltq_gpio_probe,
.driver = {
.name = "ltq_gpio",
.owner = THIS_MODULE,
},
};
int __init ltq_gpio_init(void)
{
int ret = platform_driver_register(<q_gpio_driver);
if (ret)
pr_info("ltq_gpio : Error registering platfom driver!");
return ret;
}
postcore_initcall(ltq_gpio_init);
| gpl-2.0 |
TW-LL-msm8226/kernel_samsung_msm8226 | drivers/usb/musb/cppi_dma.c | 3374 | 45192 | /*
* Copyright (C) 2005-2006 by Texas Instruments
*
* This file implements a DMA interface using TI's CPPI DMA.
* For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
* The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
/* CPPI DMA status 7-mar-2006:
*
* - See musb_{host,gadget}.c for more info
*
* - Correct RX DMA generally forces the engine into irq-per-packet mode,
* which can easily saturate the CPU under non-mass-storage loads.
*
* NOTES 24-aug-2006 (2.6.18-rc4):
*
* - peripheral RXDMA wedged in a test with packets of length 512/512/1.
* evidently after the 1 byte packet was received and acked, the queue
* of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
* and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
* 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
* of its next (512 byte) packet. IRQ issues?
*
* REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
* evidently also directly update the RX and TX CSRs ... so audit all
* host and peripheral side DMA code to avoid CSR access after DMA has
* been started.
*/
/* REVISIT now we can avoid preallocating these descriptors; or,
* more simply, switch to a global freelist instead of per-channel ones.
* Note: at full speed, 64 descriptors == 4K bulk data.
*/
#define NUM_TXCHAN_BD 64
#define NUM_RXCHAN_BD 64
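/*
* Sizing note for the figure above: a full-speed bulk endpoint has a
* 64-byte maxpacket, so 64 one-packet descriptors cover
* 64 * 64 = 4096 bytes, i.e. one 4K bulk transfer per queue refill.
*/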
static inline void cpu_drain_writebuffer(void)
{
wmb();
#ifdef CONFIG_CPU_ARM926T
/* REVISIT this "should not be needed",
* but lack of it sure seemed to hurt ...
*/
asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
struct cppi_descriptor *bd = c->freelist;
if (bd)
c->freelist = bd->next;
return bd;
}
static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
if (!bd)
return;
bd->next = c->freelist;
c->freelist = bd;
}
/*
* Start DMA controller
*
* Initialize the DMA controller as necessary.
*/
/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
musb_writel(&rx->rx_skipbytes, 0, 0);
musb_writel(&rx->rx_head, 0, 0);
musb_writel(&rx->rx_sop, 0, 0);
musb_writel(&rx->rx_current, 0, 0);
musb_writel(&rx->rx_buf_current, 0, 0);
musb_writel(&rx->rx_len_len, 0, 0);
musb_writel(&rx->rx_cnt_cnt, 0, 0);
}
/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
musb_writel(&tx->tx_head, 0, 0);
musb_writel(&tx->tx_buf, 0, 0);
musb_writel(&tx->tx_current, 0, 0);
musb_writel(&tx->tx_buf_current, 0, 0);
musb_writel(&tx->tx_info, 0, 0);
musb_writel(&tx->tx_rem_len, 0, 0);
/* musb_writel(&tx->tx_dummy, 0, 0); */
musb_writel(&tx->tx_complete, 0, ptr);
}
static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
int j;
/* initialize channel fields */
c->head = NULL;
c->tail = NULL;
c->last_processed = NULL;
c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = cppi;
c->is_rndis = 0;
c->freelist = NULL;
/* build the BD Free list for the channel */
for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
struct cppi_descriptor *bd;
dma_addr_t dma;
bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
bd->dma = dma;
cppi_bd_free(c, bd);
}
}
static int cppi_channel_abort(struct dma_channel *);
static void cppi_pool_free(struct cppi_channel *c)
{
struct cppi *cppi = c->controller;
struct cppi_descriptor *bd;
(void) cppi_channel_abort(&c->channel);
c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = NULL;
/* free all its bds */
bd = c->last_processed;
do {
if (bd)
dma_pool_free(cppi->pool, bd, bd->dma);
bd = cppi_bd_alloc(c);
} while (bd);
c->last_processed = NULL;
}
static int __init cppi_controller_start(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
int i;
controller = container_of(c, struct cppi, controller);
/* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
controller->tx[i].transmit = true;
controller->tx[i].index = i;
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
controller->rx[i].transmit = false;
controller->rx[i].index = i;
}
/* setup BD list on a per channel basis */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
cppi_pool_init(controller, controller->tx + i);
for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
cppi_pool_init(controller, controller->rx + i);
tibase = controller->tibase;
INIT_LIST_HEAD(&controller->tx_complete);
/* initialise tx/rx channel head pointers to zero */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
struct cppi_channel *tx_ch = controller->tx + i;
struct cppi_tx_stateram __iomem *tx;
INIT_LIST_HEAD(&tx_ch->tx_complete);
tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
tx_ch->state_ram = tx;
cppi_reset_tx(tx, 0);
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
struct cppi_channel *rx_ch = controller->rx + i;
struct cppi_rx_stateram __iomem *rx;
INIT_LIST_HEAD(&rx_ch->tx_complete);
rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
rx_ch->state_ram = rx;
cppi_reset_rx(rx);
}
/* enable individual cppi channels */
musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
/* enable tx/rx CPPI control */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
/* disable RNDIS mode, also host rx RNDIS autorequest */
musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
return 0;
}
/*
* Stop DMA controller
*
* De-Init the DMA controller as necessary.
*/
static int cppi_controller_stop(struct dma_controller *c)
{
struct cppi *controller;
void __iomem *tibase;
int i;
struct musb *musb;
controller = container_of(c, struct cppi, controller);
musb = controller->musb;
tibase = controller->tibase;
/* DISABLE INDIVIDUAL CHANNEL Interrupts */
musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].last_processed = NULL;
cppi_pool_free(controller->tx + i);
}
for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
cppi_pool_free(controller->rx + i);
/* In the TX case proper teardown is supported: we only disable
* TX/RX CPPI after the TX channels have been cleaned up, since
* TX CPPI cannot be disabled before its teardown completes.
*/
/*disable tx/rx cppi */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
return 0;
}
/* While dma channel is allocated, we only want the core irqs active
* for fault reports, otherwise we'd get irqs that we don't care about.
* Except for TX irqs, where dma done != fifo empty and reusable ...
*
* NOTE: docs don't say either way, but irq masking **enables** irqs.
*
* REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
*/
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}
static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
/*
* Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
* each transfer direction of a non-control endpoint, so allocating
* (and deallocating) is mostly a way to notice bad housekeeping on
* the software side. We assume the irqs are always active.
*/
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *ep, u8 transmit)
{
struct cppi *controller;
u8 index;
struct cppi_channel *cppi_ch;
void __iomem *tibase;
struct musb *musb;
controller = container_of(c, struct cppi, controller);
tibase = controller->tibase;
musb = controller->musb;
/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
index = ep->epnum - 1;
/* return the corresponding CPPI Channel Handle, and
* probably disable the non-CPPI irq until we need it.
*/
if (transmit) {
if (index >= ARRAY_SIZE(controller->tx)) {
dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
return NULL;
}
cppi_ch = controller->tx + index;
} else {
if (index >= ARRAY_SIZE(controller->rx)) {
dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
return NULL;
}
cppi_ch = controller->rx + index;
core_rxirq_disable(tibase, ep->epnum);
}
/* REVISIT make this an error later once the same driver code works
* with the other DMA engine too
*/
if (cppi_ch->hw_ep)
dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
cppi_ch->channel.max_len = 0x7fffffff;
dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
}
/* Release a CPPI Channel. */
static void cppi_channel_release(struct dma_channel *channel)
{
struct cppi_channel *c;
void __iomem *tibase;
/* REVISIT: for paranoia, check state and abort if needed... */
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
dev_dbg(c->controller->musb->controller,
"releasing idle DMA channel %p\n", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
/* for now, leave its cppi IRQ enabled (we won't trigger it) */
c->hw_ep = NULL;
channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
void __iomem *base = c->controller->mregs;
struct cppi_rx_stateram __iomem *rx = c->state_ram;
musb_ep_select(base, c->index + 1);
dev_dbg(c->controller->musb->controller,
"RX DMA%d%s: %d left, csr %04x, "
"%08x H%08x S%08x C%08x, "
"B%08x L%08x %08x .. %08x"
"\n",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
musb_readw(c->hw_ep->regs, MUSB_RXCSR),
musb_readl(&rx->rx_skipbytes, 0),
musb_readl(&rx->rx_head, 0),
musb_readl(&rx->rx_sop, 0),
musb_readl(&rx->rx_current, 0),
musb_readl(&rx->rx_buf_current, 0),
musb_readl(&rx->rx_len_len, 0),
musb_readl(&rx->rx_cnt_cnt, 0),
musb_readl(&rx->rx_complete, 0)
);
}
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
void __iomem *base = c->controller->mregs;
struct cppi_tx_stateram __iomem *tx = c->state_ram;
musb_ep_select(base, c->index + 1);
dev_dbg(c->controller->musb->controller,
"TX DMA%d%s: csr %04x, "
"H%08x S%08x C%08x %08x, "
"F%08x L%08x .. %08x"
"\n",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
musb_readl(&tx->tx_head, 0),
musb_readl(&tx->tx_buf, 0),
musb_readl(&tx->tx_current, 0),
musb_readl(&tx->tx_buf_current, 0),
musb_readl(&tx->tx_info, 0),
musb_readl(&tx->tx_rem_len, 0),
/* dummy/unused word 6 */
musb_readl(&tx->tx_complete, 0)
);
}
/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
void __iomem *tibase, int is_rndis)
{
/* we may need to change the rndis flag for this cppi channel */
if (c->is_rndis != is_rndis) {
u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
u32 temp = 1 << (c->index);
if (is_rx)
temp <<= 16;
if (is_rndis)
value |= temp;
else
value &= ~temp;
musb_writel(tibase, DAVINCI_RNDIS_REG, value);
c->is_rndis = is_rndis;
}
}
#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
pr_debug("RXBD/%s %08x: "
"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
tag, bd->dma,
bd->hw_next, bd->hw_bufp, bd->hw_off_len,
bd->hw_options);
}
#endif
static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
struct cppi_descriptor *bd;
if (!_dbg_level(level))
return;
cppi_dump_rx(level, rx, tag);
if (rx->last_processed)
cppi_dump_rxbd("last", rx->last_processed);
for (bd = rx->head; bd; bd = bd->next)
cppi_dump_rxbd("active", bd);
#endif
}
/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
* so we won't ever use it (see "CPPI RX Woes" below).
*/
static inline int cppi_autoreq_update(struct cppi_channel *rx,
void __iomem *tibase, int onepacket, unsigned n_bds)
{
u32 val;
#ifdef RNDIS_RX_IS_USABLE
u32 tmp;
/* assert(is_host_active(musb)) */
/* start from "AutoReq never" */
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
val = tmp & ~((0x3) << (rx->index * 2));
/* HCD arranged reqpkt for packet #1. we arrange int
* for all but the last one, maybe in two segments.
*/
if (!onepacket) {
#if 0
/* use two segments, autoreq "all" then the last "never" */
val |= ((0x3) << (rx->index * 2));
n_bds--;
#else
/* one segment, autoreq "all-but-last" */
val |= ((0x1) << (rx->index * 2));
#endif
}
if (val != tmp) {
int n = 100;
/* make sure that autoreq is updated before continuing */
musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
do {
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
if (tmp == val)
break;
cpu_relax();
} while (n-- > 0);
}
#endif
/* REQPKT is turned off after each segment */
if (n_bds && rx->channel.actual_len) {
void __iomem *regs = rx->hw_ep->regs;
val = musb_readw(regs, MUSB_RXCSR);
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
/* flush writebuffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
return n_bds;
}
/* Buffer enqueuing Logic:
*
* - RX builds new queues each time, to help handle routine "early
* termination" cases (faults, including errors and short reads)
* more correctly.
*
* - for now, TX reuses the same queue of BDs every time
*
* REVISIT long term, we want a normal dynamic model.
* ... the goal will be to append to the
* existing queue, processing completed "dma buffers" (segments) on the fly.
*
* Otherwise we force an IRQ latency between requests, which slows us a lot
* (especially in "transparent" dma). Unfortunately that model seems to be
* inherent in the DMA model from the Mentor code, except in the rare case
* of transfers big enough (~128+ KB) that we could append "middle" segments
* in the TX paths. (RX can't do this, see below.)
*
* That's true even in the CPPI-friendly iso case, where most urbs have
* several small segments provided in a group and where the "packet at a time"
* "transparent" DMA model is always correct, even on the RX side.
*/
/*
* CPPI TX:
* ========
* TX is a lot more reasonable than RX; it doesn't need to run in
* irq-per-packet mode very often. RNDIS mode seems to behave too
* (except how it handles the exactly-N-packets case). Building a
* txdma queue with multiple requests (urb or usb_request) looks
* like it would work ... but fault handling would need much testing.
*
* The main issue with TX mode RNDIS relates to transfer lengths that
* are an exact multiple of the packet length. It appears that there's
* a hiccup in that case (maybe the DMA completes before the ZLP gets
* written?) boiling down to not being able to rely on CPPI writing any
* terminating zero length packet before the next transfer is written.
* So that's punted to PIO; better yet, gadget drivers can avoid it.
*
* Plus, there's allegedly an undocumented constraint that rndis transfer
* length be a multiple of 64 bytes ... but the chip doesn't act that
* way, and we really don't _want_ that behavior anyway.
*
* On TX, "transparent" mode works ... although experiments have shown
* problems trying to use the SOP/EOP bits in different USB packets.
*
* REVISIT try to handle terminating zero length packets using CPPI
* instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
* links avoid that issue by forcing them to avoid zlps.)
*/
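/* Worked example (illustrative, values assumed): with maxpacket = 512,
 * a 1024-byte request is exactly two full packets, so the terminating
 * ZLP can't be relied on from CPPI and is sent by PIO after an IRQ, as
 * described above; a 1000-byte request ends in a 488-byte short packet,
 * which terminates the transfer by itself and is safe for DMA.
 */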
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
unsigned maxpacket = tx->maxpacket;
dma_addr_t addr = tx->buf_dma + tx->offset;
size_t length = tx->buf_len - tx->offset;
struct cppi_descriptor *bd;
unsigned n_bds;
unsigned i;
struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
int rndis;
/* TX can use the CPPI "rndis" mode, where we can probably fit this
* transfer in one BD and one IRQ. The only time we would NOT want
* to use it is when hardware constraints prevent it, or if we'd
* trigger the "send a ZLP?" confusion.
*/
rndis = (maxpacket & 0x3f) == 0
&& length > maxpacket
&& length < 0xffff
&& (length % maxpacket) != 0;
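/* Illustrative evaluation (values assumed): maxpacket = 512 gives
 * (512 & 0x3f) == 0; with length = 1200 we have 1200 > 512,
 * 1200 < 0xffff and 1200 % 512 != 0, so rndis mode is used and the
 * whole buffer fits in one BD. A 1024-byte request fails the last
 * test (an exact multiple) and stays in transparent mode.
 */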
if (rndis) {
maxpacket = length;
n_bds = 1;
} else {
n_bds = length / maxpacket;
if (!length || (length % maxpacket))
n_bds++;
n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
length = min(n_bds * maxpacket, length);
}
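/* Illustrative sizing (values assumed): in transparent mode with
 * maxpacket = 512 and length = 1500, n_bds = 1500/512 = 2 plus one
 * more for the 476-byte remainder, i.e. 3 BDs (subject to the
 * NUM_TXCHAN_BD cap), and length stays min(3 * 512, 1500) = 1500.
 */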
dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
n_bds,
(unsigned long long)addr, length);
cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
/* assuming here that channel_program is called during
* transfer initiation ... current code maintains state
* for one outstanding request only (no queues, not even
* the implicit ones of an iso urb).
*/
bd = tx->freelist;
tx->head = bd;
tx->last_processed = NULL;
/* FIXME use BD pool like RX side does, and just queue
* the minimum number for this request.
*/
/* Prepare queue of BDs first, then hand it to hardware.
* All BDs except maybe the last should be of full packet
* size; for RNDIS there _is_ only that last packet.
*/
for (i = 0; i < n_bds; ) {
if (++i < n_bds && bd->next)
bd->hw_next = bd->next->dma;
else
bd->hw_next = 0;
bd->hw_bufp = tx->buf_dma + tx->offset;
/* FIXME set EOP only on the last packet,
* SOP only on the first ... avoid IRQs
*/
if ((tx->offset + maxpacket) <= tx->buf_len) {
tx->offset += maxpacket;
bd->hw_off_len = maxpacket;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | maxpacket;
} else {
/* only this one may be a partial USB Packet */
u32 partial_len;
partial_len = tx->buf_len - tx->offset;
tx->offset = tx->buf_len;
bd->hw_off_len = partial_len;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | partial_len;
if (partial_len == 0)
bd->hw_options |= CPPI_ZERO_SET;
}
dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
/* update the last BD enqueued to the list */
tx->tail = bd;
bd = bd->next;
}
/* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer();
/* Write to the HeadPtr in state RAM to trigger */
musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
cppi_dump_tx(5, tx, "/S");
}
/*
* CPPI RX Woes:
* =============
* Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
* packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
* (Full speed transfers have similar scenarios.)
*
* The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
* and the next packet goes into a buffer that's queued later; while (b) fills
* the buffer with 1024 bytes. How to do that with CPPI?
*
* - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
* (b) loses **BADLY** because nothing (!) happens when that second packet
* fills the buffer, much less when a third one arrives. (Which makes this
* not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
* is optional, and it's fine if peripherals -- not hosts! -- pad messages
* out to end-of-buffer. Standard PCI host controller DMA descriptors
* implement that mode by default ... which is no accident.)
*
* - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
* converse problems: (b) is handled right, but (a) loses badly. CPPI RX
* ignores SOP/EOP markings and processes both of those BDs; so both packets
* are loaded into the buffer (with a 212 byte gap between them), and the next
* buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
* are intended as outputs for RX queues, not inputs...)
*
* - A variant of "transparent" mode -- one BD at a time -- is the only way to
* reliably make both cases work, with software handling both cases correctly
* and at the significant penalty of needing an IRQ per packet. (The lack of
* I/O overlap can be slightly ameliorated by enabling double buffering.)
*
* So how to get rid of IRQ-per-packet? The transparent multi-BD case could
* be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
* (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
* with guaranteed driver level fault recovery and scrubbing out what's left
* of that garbaged datastream.
*
* But there seems to be no way to identify the cases where CPPI RNDIS mode
* is appropriate -- which do NOT include RNDIS host drivers, but do include
* the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
* So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
* that applies best on the peripheral side (and which could fail rudely).
*
* Leaving only "transparent" mode; we avoid multi-bd modes in almost all
* cases other than mass storage class. Otherwise we're correct but slow,
* since CPPI penalizes our need for a "true RNDIS" default mode.
*/
/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
*
* IFF
* (a) peripheral mode ... since rndis peripherals could pad their
* writes to hosts, causing i/o failure; or we'd have to cope with
* a largely unknowable variety of host side protocol variants
* (b) and short reads are NOT errors ... since full reads would
* cause those same i/o failures
* (c) and read length is
* - less than 64KB (max per cppi descriptor)
* - not a multiple of 4096 (g_zero default, full reads typical)
* - N (>1) packets long, ditto (full reads not EXPECTED)
* THEN
* try rx rndis mode
*
* Cost of heuristic failing: RXDMA wedges at the end of transfers that
* fill out the whole buffer. Buggy host side usb network drivers could
* trigger that, but "in the field" such bugs seem to be all but unknown.
*
* So this module parameter lets the heuristic be disabled. When using
* gadgetfs, disabling it will probably be necessary.
*/
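/* Worked example of the heuristic (illustrative, values assumed): a
 * peripheral-side read with maxpacket = 64 and length = 640 passes all
 * the tests applied below: 640 > 64, 640 < 0x10000, 640 & 0x0fff != 0
 * (not a multiple of 4096), and 640 & 63 == 0 (a whole number of
 * packets), so RX RNDIS mode is tried with maxpacket promoted to 640.
 */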
static bool cppi_rx_rndis = true;
module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
/**
* cppi_next_rx_segment - dma read for the next chunk of a buffer
* @musb: the controller
* @rx: dma channel
* @onepacket: true unless caller treats short reads as errors, and
* performs fault recovery above usbcore.
* Context: controller irqlocked
*
* See above notes about why we can't use multi-BD RX queues except in
* rare cases (mass storage class), and can never use the hardware "rndis"
* mode (since it's not a "true" RNDIS mode) with complete safety.
*
* It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
* code to recover from corrupted datastreams after each short transfer.
*/
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
unsigned maxpacket = rx->maxpacket;
dma_addr_t addr = rx->buf_dma + rx->offset;
size_t length = rx->buf_len - rx->offset;
struct cppi_descriptor *bd, *tail;
unsigned n_bds;
unsigned i;
void __iomem *tibase = musb->ctrl_base;
int is_rndis = 0;
struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
if (onepacket) {
/* almost every USB driver, host or peripheral side */
n_bds = 1;
/* maybe apply the heuristic above */
if (cppi_rx_rndis
&& is_peripheral_active(musb)
&& length > maxpacket
&& (length & ~0xffff) == 0
&& (length & 0x0fff) != 0
&& (length & (maxpacket - 1)) == 0) {
maxpacket = length;
is_rndis = 1;
}
} else {
/* virtually nothing except mass storage class */
if (length > 0xffff) {
n_bds = 0xffff / maxpacket;
length = n_bds * maxpacket;
} else {
n_bds = length / maxpacket;
if (length % maxpacket)
n_bds++;
}
if (n_bds == 1)
onepacket = 1;
else
n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
}
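/* Illustrative sizing (values assumed): a 70000-byte multipacket read
 * with maxpacket = 512 exceeds the 64KB descriptor limit, so
 * n_bds = 0xffff / 512 = 127 and length is clipped to 127 * 512 = 65024;
 * the remainder is picked up by a later segment. n_bds is then capped
 * at NUM_RXCHAN_BD, limiting how much is queued at once.
 */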
/* In host mode, autorequest logic can generate some IN tokens; it's
* tricky since we can't leave REQPKT set in RXCSR after the transfer
* finishes. So: multipacket transfers involve two or more segments.
* And always at least two IRQs ... RNDIS mode is not an option.
*/
if (is_host_active(musb))
n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
length = min(n_bds * maxpacket, length);
dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
"dma 0x%llx len %u %u/%u\n",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
: "multipacket",
n_bds,
musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff,
(unsigned long long)addr, length,
rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets
*/
bd = cppi_bd_alloc(rx);
rx->head = bd;
/* Build BDs for all packets in this segment */
for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
u32 bd_len;
if (i) {
bd = cppi_bd_alloc(rx);
if (!bd)
break;
tail->next = bd;
tail->hw_next = bd->dma;
}
bd->hw_next = 0;
/* all but the last packet will be maxpacket size */
if (maxpacket < length)
bd_len = maxpacket;
else
bd_len = length;
bd->hw_bufp = addr;
addr += bd_len;
rx->offset += bd_len;
bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
bd->buflen = bd_len;
bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
length -= bd_len;
}
/* we always expect at least one reusable BD! */
if (!tail) {
WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
return;
} else if (i < n_bds)
WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
tail->next = NULL;
tail->hw_next = 0;
bd = rx->head;
rx->tail = tail;
/* short reads and other faults should terminate this entire
* dma segment. we want one "dma packet" per dma segment, not
* one per USB packet, terminating the whole queue at once...
* NOTE that current hardware seems to ignore SOP and EOP.
*/
bd->hw_options |= CPPI_SOP_SET;
tail->hw_options |= CPPI_EOP_SET;
#ifdef CONFIG_USB_MUSB_DEBUG
if (_dbg_level(5)) {
struct cppi_descriptor *d;
for (d = rx->head; d; d = d->next)
cppi_dump_rxbd("S", d);
}
#endif
/* in case the preceding transfer left some state... */
tail = rx->last_processed;
if (tail) {
tail->next = bd;
tail->hw_next = bd->dma;
}
core_rxirq_enable(tibase, rx->index + 1);
/* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer();
/* REVISIT specs say to write this AFTER the BUFCNT register
* below ... but that loses badly.
*/
musb_writel(&rx_ram->rx_head, 0, bd->dma);
/* bufferCount must be at least 3, and zeroes on completion
* unless it underflows below zero, or stops at two, or keeps
* growing ... grr.
*/
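/* Descriptive note (added): the code below aims to keep roughly
 * n_bds + 2 buffers accounted for, assuming writes to BUFCNT add to
 * the count. If the register reads 0 it is programmed to n_bds + 2
 * outright; if it reads i with n_bds > i - 3, the shortfall
 * n_bds - (i - 3) is added; and if it still reads less than n_bds + 2,
 * it is forced to n_bds + 2 after logging an underrun.
 */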
i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (!i)
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2);
else if (n_bds > (i - 3))
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds - (i - 3));
i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (i < (2 + n_bds)) {
dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
rx->index, i, n_bds);
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2);
}
cppi_dump_rx(4, rx, "/S");
}
/**
* cppi_channel_program - program channel for data transfer
* @ch: the channel
* @maxpacket: max packet size
* @mode: For RX, 1 unless the usb protocol driver promised to treat
* all short reads as errors and kick in high level fault recovery.
* For TX, ignored because of RNDIS mode races/glitches.
* @dma_addr: dma address of buffer
* @len: length of buffer
* Context: controller irqlocked
*/
static int cppi_channel_program(struct dma_channel *ch,
u16 maxpacket, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct cppi_channel *cppi_ch;
struct cppi *controller;
struct musb *musb;
cppi_ch = container_of(ch, struct cppi_channel, channel);
controller = cppi_ch->controller;
musb = controller->musb;
switch (ch->status) {
case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT:
/* fault irq handler should have handled cleanup */
WARNING("%cX DMA%d not cleaned up after abort!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_BUSY:
WARNING("program active channel? %cX DMA%d\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_UNKNOWN:
dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* FALLTHROUGH */
case MUSB_DMA_STATUS_FREE:
break;
}
ch->status = MUSB_DMA_STATUS_BUSY;
/* set transfer parameters, then queue up its first segment */
cppi_ch->buf_dma = dma_addr;
cppi_ch->offset = 0;
cppi_ch->maxpacket = maxpacket;
cppi_ch->buf_len = len;
cppi_ch->channel.actual_len = 0;
/* TX channel? or RX? */
if (cppi_ch->transmit)
cppi_next_tx_segment(musb, cppi_ch);
else
cppi_next_rx_segment(musb, cppi_ch, mode);
return true;
}
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
struct cppi_channel *rx = &cppi->rx[ch];
struct cppi_rx_stateram __iomem *state = rx->state_ram;
struct cppi_descriptor *bd;
struct cppi_descriptor *last = rx->last_processed;
bool completed = false;
bool acked = false;
int i;
dma_addr_t safe2ack;
void __iomem *regs = rx->hw_ep->regs;
struct musb *musb = cppi->musb;
cppi_dump_rx(6, rx, "/K");
bd = last ? last->next : rx->head;
if (!bd)
return false;
/* run through all completed BDs */
for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
i++, bd = bd->next) {
u16 len;
/* catch latest BD writes from CPPI */
rmb();
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
"off.len %08x opt.len %08x (%d)\n",
(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
/* actual packet received length */
if ((bd->hw_options & CPPI_SOP_SET) && !completed)
len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
else
len = 0;
if (bd->hw_options & CPPI_EOQ_MASK)
completed = true;
if (!completed && len < bd->buflen) {
/* NOTE: when we get a short packet, RXCSR_H_REQPKT
* must have been cleared, and no more DMA packets may
* be active in the queue ... TI docs didn't say, but
* CPPI ignores those BDs even though OWN is still set.
*/
completed = true;
dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
len, bd->buflen,
rx->channel.actual_len);
}
/* If we got here, we expect to ack at least one BD; meanwhile
* CPPI may be completing other BDs while we scan this list...
*
* RACE: we can notice OWN cleared before CPPI raises the
* matching irq by writing that BD as the completion pointer.
* In such cases, stop scanning and wait for the irq, avoiding
* lost acks and states where BD ownership is unclear.
*/
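/* Descriptive note (added): writing the completed BD's dma address
 * back to rx_complete acks it; the immediate re-read returns the
 * newest completion pointer, so if it still equals this BD there is
 * nothing further to ack and safe2ack is cleared.
 */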
if (bd->dma == safe2ack) {
musb_writel(&state->rx_complete, 0, safe2ack);
safe2ack = musb_readl(&state->rx_complete, 0);
acked = true;
if (bd->dma == safe2ack)
safe2ack = 0;
}
rx->channel.actual_len += len;
cppi_bd_free(rx, last);
last = bd;
/* stop scanning on end-of-segment */
if (bd->hw_next == 0)
completed = true;
}
rx->last_processed = last;
/* dma abort, lost ack, or ... */
if (!acked && last) {
int csr;
if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
musb_writel(&state->rx_complete, 0, safe2ack);
if (safe2ack == 0) {
cppi_bd_free(rx, last);
rx->last_processed = NULL;
/* if we land here on the host side, H_REQPKT will
* be clear and we need to restart the queue...
*/
WARN_ON(rx->head);
}
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
rx->index,
rx->head, rx->tail,
rx->last_processed
? (unsigned long long)
rx->last_processed->dma
: 0,
completed ? ", completed" : "",
csr);
cppi_dump_rxq(4, "/what?", rx);
}
}
if (!completed) {
int csr;
rx->head = bd;
/* REVISIT seems like "autoreq all but EOP" doesn't...
* setting it here "should" be racy, but it seems to work
*/
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
if (is_host_active(cppi->musb)
&& bd
&& !(csr & MUSB_RXCSR_H_REQPKT)) {
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(regs, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | csr);
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
}
} else {
rx->head = NULL;
rx->tail = NULL;
}
cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
return completed;
}
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
struct musb *musb = dev_id;
struct cppi *cppi;
void __iomem *tibase;
struct musb_hw_ep *hw_ep = NULL;
u32 rx, tx;
int i, index;
unsigned long uninitialized_var(flags);
cppi = container_of(musb->dma_controller, struct cppi, controller);
if (cppi->irq)
spin_lock_irqsave(&musb->lock, flags);
tibase = musb->ctrl_base;
tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
if (!tx && !rx) {
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_NONE;
}
dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
struct cppi_channel *tx_ch;
struct cppi_tx_stateram __iomem *tx_ram;
bool completed = false;
struct cppi_descriptor *bd;
if (!(tx & 1))
continue;
tx_ch = cppi->tx + index;
tx_ram = tx_ch->state_ram;
/* FIXME need a cppi_tx_scan() routine, which
* can also be called from abort code
*/
cppi_dump_tx(5, tx_ch, "/E");
bd = tx_ch->head;
/*
* If head is NULL, this could mean that an abort interrupt
* needs to be acknowledged.
*/
if (!bd) {
dev_dbg(musb->controller, "null BD\n");
musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
/* run through all completed BDs */
for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
i++, bd = bd->next) {
u16 len;
/* catch latest BD writes from CPPI */
rmb();
if (bd->hw_options & CPPI_OWN_SET)
break;
dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
tx_ch->channel.actual_len += len;
tx_ch->last_processed = bd;
/* write completion register to acknowledge
* processing of completed BDs, and possibly
* release the IRQ; EOQ might not be set ...
*
* REVISIT use the same ack strategy as rx
*
* REVISIT have observed bit 18 set; huh??
*/
/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
musb_writel(&tx_ram->tx_complete, 0, bd->dma);
/* stop scanning on end-of-segment */
if (bd->hw_next == 0)
completed = true;
}
/* on end of segment, maybe go to next one */
if (completed) {
/* cppi_dump_tx(4, tx_ch, "/complete"); */
/* transfer more, or report completion */
if (tx_ch->offset >= tx_ch->buf_len) {
tx_ch->head = NULL;
tx_ch->tail = NULL;
tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = tx_ch->hw_ep;
musb_dma_completion(musb, index + 1, 1);
} else {
/* Bigger transfer than we could fit in
* that first batch of descriptors...
*/
cppi_next_tx_segment(musb, tx_ch);
}
} else
tx_ch->head = bd;
}
/* Start processing the RX block */
for (index = 0; rx; rx = rx >> 1, index++) {
if (rx & 1) {
struct cppi_channel *rx_ch;
rx_ch = cppi->rx + index;
/* let incomplete dma segments finish */
if (!cppi_rx_scan(cppi, index))
continue;
/* start another dma segment if needed */
if (rx_ch->channel.actual_len != rx_ch->buf_len
&& rx_ch->channel.actual_len
== rx_ch->offset) {
cppi_next_rx_segment(musb, rx_ch, 1);
continue;
}
/* all segments completed! */
rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = rx_ch->hw_ep;
core_rxirq_disable(tibase, index + 1);
musb_dma_completion(musb, index + 1, 0);
}
}
/* write to CPPI EOI register to re-enable interrupts */
musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_HANDLED;
}
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
struct cppi *controller;
struct device *dev = musb->controller;
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq_byname(pdev, "dma");
controller = kzalloc(sizeof *controller, GFP_KERNEL);
if (!controller)
return NULL;
controller->mregs = mregs;
controller->tibase = mregs - DAVINCI_BASE_OFFSET;
controller->musb = musb;
controller->controller.start = cppi_controller_start;
controller->controller.stop = cppi_controller_stop;
controller->controller.channel_alloc = cppi_channel_allocate;
controller->controller.channel_release = cppi_channel_release;
controller->controller.channel_program = cppi_channel_program;
controller->controller.channel_abort = cppi_channel_abort;
/* NOTE: allocating from on-chip SRAM would give the least
* contention for memory access, if that ever matters here.
*/
/* setup BufferPool */
controller->pool = dma_pool_create("cppi",
controller->musb->controller,
sizeof(struct cppi_descriptor),
CPPI_DESCRIPTOR_ALIGN, 0);
if (!controller->pool) {
kfree(controller);
return NULL;
}
if (irq > 0) {
if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
return NULL;
}
controller->irq = irq;
}
return &controller->controller;
}
/*
* Destroy a previously-instantiated DMA controller.
*/
void dma_controller_destroy(struct dma_controller *c)
{
struct cppi *cppi;
cppi = container_of(c, struct cppi, controller);
if (cppi->irq)
free_irq(cppi->irq, cppi->musb);
/* assert: caller stopped the controller first */
dma_pool_destroy(cppi->pool);
kfree(cppi);
}
/*
* Context: controller irqlocked, endpoint selected
*/
static int cppi_channel_abort(struct dma_channel *channel)
{
struct cppi_channel *cppi_ch;
struct cppi *controller;
void __iomem *mbase;
void __iomem *tibase;
void __iomem *regs;
u32 value;
struct cppi_descriptor *queue;
cppi_ch = container_of(channel, struct cppi_channel, channel);
controller = cppi_ch->controller;
switch (channel->status) {
case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT:
/* from RX or TX fault irq handler */
case MUSB_DMA_STATUS_BUSY:
/* the hardware needs shutting down */
regs = cppi_ch->hw_ep->regs;
break;
case MUSB_DMA_STATUS_UNKNOWN:
case MUSB_DMA_STATUS_FREE:
return 0;
default:
return -EINVAL;
}
if (!cppi_ch->transmit && cppi_ch->head)
cppi_dump_rxq(3, "/abort", cppi_ch);
mbase = controller->mregs;
tibase = controller->tibase;
queue = cppi_ch->head;
cppi_ch->head = NULL;
cppi_ch->tail = NULL;
/* REVISIT should rely on caller having done this,
* and caller should rely on us not changing it.
* peripheral code is safe ... check host too.
*/
musb_ep_select(mbase, cppi_ch->index + 1);
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
/* teardown DMA engine then usb core */
do {
value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
} while (!(value & CPPI_TEAR_READY));
musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
tx_ram = cppi_ch->state_ram;
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
* an appropriate status code.
*/
value = musb_readw(regs, MUSB_TXCSR);
value &= ~MUSB_TXCSR_DMAENAB;
value |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
/*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
* 2. Wait for abort interrupt and then put the channel in
* compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
cppi_ch->head = NULL;
musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
* as the RX side ... this does no cleanup at all!
*/
} else /* RX */ {
u16 csr;
/* NOTE: docs don't guarantee any of this works ... we
* expect that if the usb core stops telling the cppi core
* to pull more data from it, then it'll be safe to flush
* current RX DMA state iff any pending fifo transfer is done.
*/
core_rxirq_disable(tibase, cppi_ch->index + 1);
/* for host, ensure ReqPkt is never set again */
if (is_host_active(cppi_ch->controller->musb)) {
value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
value &= ~((0x3) << (cppi_ch->index * 2));
musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
}
csr = musb_readw(regs, MUSB_RXCSR);
/* for host, clear (just) ReqPkt at end of current packet(s) */
if (is_host_active(cppi_ch->controller->musb)) {
csr |= MUSB_RXCSR_H_WZC_BITS;
csr &= ~MUSB_RXCSR_H_REQPKT;
} else
csr |= MUSB_RXCSR_P_WZC_BITS;
/* clear dma enable */
csr &= ~(MUSB_RXCSR_DMAENAB);
musb_writew(regs, MUSB_RXCSR, csr);
csr = musb_readw(regs, MUSB_RXCSR);
/* Quiesce: wait for current dma to finish (if not cleanup).
* We can't use bit zero of stateram->rx_sop, since that
* refers to an entire "DMA packet" not just emptying the
* current fifo. Most segments need multiple usb packets.
*/
if (channel->status == MUSB_DMA_STATUS_BUSY)
udelay(50);
/* scan the current list, reporting any data that was
* transferred and acking any IRQ
*/
cppi_rx_scan(controller, cppi_ch->index);
/* clobber the existing state once it's idle
*
* NOTE: arguably, we should also wait for all the other
* RX channels to quiesce (how??) and then temporarily
* disable RXCPPI_CTRL_REG ... but it seems that we can
* rely on the controller restarting from state ram, with
* only RXCPPI_BUFCNT state being bogus. BUFCNT will
* correct itself after the next DMA transfer though.
*
* REVISIT does using rndis mode change that?
*/
cppi_reset_rx(cppi_ch->state_ram);
/* next DMA request _should_ load cppi head ptr */
/* ... we don't "free" that list, only mutate it in place. */
cppi_dump_rx(5, cppi_ch, " (done abort)");
/* clean up previously pending bds */
cppi_bd_free(cppi_ch, cppi_ch->last_processed);
cppi_ch->last_processed = NULL;
while (queue) {
struct cppi_descriptor *tmp = queue->next;
cppi_bd_free(cppi_ch, queue);
queue = tmp;
}
}
channel->status = MUSB_DMA_STATUS_FREE;
cppi_ch->buf_dma = 0;
cppi_ch->offset = 0;
cppi_ch->buf_len = 0;
cppi_ch->maxpacket = 0;
return 0;
}
/* TBD Queries:
*
* Power Management ... probably turn off cppi during suspend, restart;
* check state ram? Clocking is presumably shared with usb core.
*/
| gpl-2.0 |
multirom-m8/kernel_htc_m8gpe | drivers/video/msm/lcdc_chimei_wxga.c | 3630 | 5170 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/pwm.h>
#ifdef CONFIG_PMIC8058_PWM
#include <linux/mfd/pmic8058.h>
#include <linux/pmic8058-pwm.h>
#endif
#include <mach/gpio.h>
#include "msm_fb.h"
static struct pwm_device *bl_pwm;
#define PWM_FREQ_HZ 210
#define PWM_PERIOD_USEC (USEC_PER_SEC / PWM_FREQ_HZ)
#define PWM_DUTY_LEVEL (PWM_PERIOD_USEC / PWM_LEVEL)
#define PWM_LEVEL 15
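/* Illustrative arithmetic (computed from the macros above): at 210 Hz
 * the period is 1000000 / 210 ~= 4761 usec, so each of the 15
 * brightness levels adds ~317 usec of duty; level 15 gives ~4755 usec,
 * close to fully on.
 */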
static struct msm_panel_common_pdata *cm_pdata;
static struct platform_device *cm_fbpdev;
static int led_pwm; /* pm8058 gpio 24, channel 0 */
static int led_en; /* pm8058 gpio 1 */
static int lvds_pwr_down; /* msm gpio 30 */
static int chimei_bl_level = 1;
static void lcdc_chimei_set_backlight(int level)
{
int ret;
if (bl_pwm) {
ret = pwm_config(bl_pwm, PWM_DUTY_LEVEL * level,
PWM_PERIOD_USEC);
if (ret) {
pr_err("%s: pwm_config on pwm failed %d\n",
__func__, ret);
return;
}
ret = pwm_enable(bl_pwm);
if (ret) {
pr_err("%s: pwm_enable on pwm failed %d\n",
__func__, ret);
return;
}
}
chimei_bl_level = level;
}
static int lcdc_chimei_panel_on(struct platform_device *pdev)
{
int ret;
/* panel powered on here */
ret = gpio_request(lvds_pwr_down, "lvds_pwr_down");
if (ret == 0) {
/* output, pull high to enable */
gpio_direction_output(lvds_pwr_down, 1);
} else {
pr_err("%s: lvds_pwr_down=%d, gpio_request failed\n",
__func__, lvds_pwr_down);
}
msleep(200);
/* led pwm power-on requires >= 200 ms after panel power */
if (chimei_bl_level == 0)
chimei_bl_level = 1;
lcdc_chimei_set_backlight(chimei_bl_level);
msleep(10);
ret = gpio_request(led_en, "led_en");
if (ret == 0) {
/* output, pull high */
gpio_direction_output(led_en, 1);
} else {
pr_err("%s: led_en=%d, gpio_request failed\n",
__func__, led_en);
}
return ret;
}
static int lcdc_chimei_panel_off(struct platform_device *pdev)
{
/* pull low to disable */
gpio_set_value_cansleep(led_en, 0);
gpio_free(led_en);
msleep(10);
lcdc_chimei_set_backlight(0);
msleep(200);
/* wait >= 200 ms after led pwm power-off before lvds shutdown */
/* pull low to shut down lvds */
gpio_set_value_cansleep(lvds_pwr_down, 0);
gpio_free(lvds_pwr_down);
/* panel power off here */
return 0;
}
static void lcdc_chimei_panel_backlight(struct msm_fb_data_type *mfd)
{
lcdc_chimei_set_backlight(mfd->bl_level);
}
static int __devinit chimei_probe(struct platform_device *pdev)
{
int rc = 0;
if (pdev->id == 0) {
cm_pdata = pdev->dev.platform_data;
if (cm_pdata == NULL) {
pr_err("%s: no PWM gpio specified\n", __func__);
return 0;
}
led_pwm = cm_pdata->gpio_num[0];
led_en = cm_pdata->gpio_num[1];
lvds_pwr_down = cm_pdata->gpio_num[2];
pr_info("%s: led_pwm=%d led_en=%d lvds_pwr_down=%d\n",
__func__, led_pwm, led_en, lvds_pwr_down);
return 0;
}
if (cm_pdata == NULL)
return -ENODEV;
bl_pwm = pwm_request(led_pwm, "backlight");
if (bl_pwm == NULL || IS_ERR(bl_pwm)) {
pr_err("%s pwm_request() failed\n", __func__);
bl_pwm = NULL;
}
cm_fbpdev = msm_fb_add_device(pdev);
if (!cm_fbpdev) {
dev_err(&pdev->dev, "failed to add msm_fb device\n");
rc = -ENODEV;
goto probe_exit;
}
probe_exit:
return rc;
}
static struct platform_driver this_driver = {
.probe = chimei_probe,
.driver = {
.name = "lcdc_chimei_lvds_wxga",
},
};
static struct msm_fb_panel_data chimei_panel_data = {
.on = lcdc_chimei_panel_on,
.off = lcdc_chimei_panel_off,
.set_backlight = lcdc_chimei_panel_backlight,
};
static struct platform_device this_device = {
.name = "lcdc_chimei_lvds_wxga",
.id = 1,
.dev = {
.platform_data = &chimei_panel_data,
}
};
static int __init lcdc_chimei_lvds_panel_init(void)
{
int ret;
struct msm_panel_info *pinfo;
if (msm_fb_detect_client("lcdc_chimei_wxga"))
return 0;
ret = platform_driver_register(&this_driver);
if (ret)
return ret;
pinfo = &chimei_panel_data.panel_info;
pinfo->xres = 1366;
pinfo->yres = 768;
MSM_FB_SINGLE_MODE_PANEL(pinfo);
pinfo->type = LCDC_PANEL;
pinfo->pdest = DISPLAY_1;
pinfo->wait_cycle = 0;
pinfo->bpp = 18;
pinfo->fb_num = 2;
pinfo->clk_rate = 69300000;
pinfo->bl_max = PWM_LEVEL;
pinfo->bl_min = 1;
/*
* this panel is operated by de,
* vsync and hsync are ignored
*/
pinfo->lcdc.h_back_porch = 108;
pinfo->lcdc.h_front_porch = 0;
pinfo->lcdc.h_pulse_width = 1;
pinfo->lcdc.v_back_porch = 0;
pinfo->lcdc.v_front_porch = 16;
pinfo->lcdc.v_pulse_width = 1;
pinfo->lcdc.border_clr = 0;
pinfo->lcdc.underflow_clr = 0xff;
pinfo->lcdc.hsync_skew = 0;
ret = platform_device_register(&this_device);
if (ret)
platform_driver_unregister(&this_driver);
return ret;
}
module_init(lcdc_chimei_lvds_panel_init);
| gpl-2.0 |
Valera1978/android_kernel_samsung_viennalte | drivers/net/ethernet/renesas/sh_eth.c | 3630 | 57434 | /*
* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include "sh_eth.h"
#define SH_ETH_DEF_MSG_ENABLE \
(NETIF_MSG_LINK | \
NETIF_MSG_TIMER | \
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
if (mdp->duplex) /* Full */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
break;
case 100:/* 100BASE */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
break;
default:
break;
}
}
/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES 1
#define SH_ETH_HAS_TSU 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
if (mdp->duplex) /* Full */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, 0, RTRATE);
break;
case 100:/* 100BASE */
sh_eth_write(ndev, 1, RTRATE);
break;
default:
break;
}
}
/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.rmcr_value = 0x00000001,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.hw_swap = 1,
.no_ade = 1,
.rpadir = 1,
.rpadir_value = 2 << 16,
};
#define SH_GIGA_ETH_BASE 0xfee00000
#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
int i;
unsigned long mahr[2], malr[2];
/* save MAHR and MALR */
for (i = 0; i < 2; i++) {
malr[i] = ioread32((void *)GIGA_MALR(i));
mahr[i] = ioread32((void *)GIGA_MAHR(i));
}
/* reset device */
iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
mdelay(1);
/* restore MAHR and MALR */
for (i = 0; i < 2; i++) {
iowrite32(malr[i], (void *)GIGA_MALR(i));
iowrite32(mahr[i], (void *)GIGA_MAHR(i));
}
}
static int sh_eth_is_gether(struct sh_eth_private *mdp);
static void sh_eth_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int cnt = 100;
if (sh_eth_is_gether(mdp)) {
sh_eth_write(ndev, 0x03, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
while (cnt > 0) {
if (!(sh_eth_read(ndev, EDMR) & 0x3))
break;
mdelay(1);
cnt--;
}
if (cnt == 0)
printk(KERN_ERR "Device reset failed\n");
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
sh_eth_write(ndev, 0x0, TDFAR);
sh_eth_write(ndev, 0x0, TDFXR);
sh_eth_write(ndev, 0x0, TDFFR);
sh_eth_write(ndev, 0x0, RDLAR);
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
} else {
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
EDMR);
mdelay(3);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
EDMR);
}
}
static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
if (mdp->duplex) /* Full */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate_giga(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, 0x00000000, GECMR);
break;
case 100:/* 100BASE */
sh_eth_write(ndev, 0x00000010, GECMR);
break;
case 1000: /* 1000BASE */
sh_eth_write(ndev, 0x00000020, GECMR);
break;
default:
break;
}
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
.chip_reset = sh_eth_chip_reset_giga,
.set_duplex = sh_eth_set_duplex_giga,
.set_rate = sh_eth_set_rate_giga,
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
.fdr_value = 0x0000072f,
.rmcr_value = 0x00000001,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
};
static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
if (sh_eth_is_gether(mdp))
return &sh_eth_my_cpu_data_giga;
else
return &sh_eth_my_cpu_data;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
static void sh_eth_reset_hw_crc(struct net_device *ndev);
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
/* reset device */
sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
mdelay(1);
}
static void sh_eth_reset(struct net_device *ndev)
{
int cnt = 100;
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
while (cnt > 0) {
if (!(sh_eth_read(ndev, EDMR) & 0x3))
break;
mdelay(1);
cnt--;
}
if (cnt == 0)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
sh_eth_write(ndev, 0x0, TDFAR);
sh_eth_write(ndev, 0x0, TDFXR);
sh_eth_write(ndev, 0x0, TDFFR);
sh_eth_write(ndev, 0x0, RDLAR);
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
/* Reset HW CRC register */
sh_eth_reset_hw_crc(ndev);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
if (mdp->duplex) /* Full */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, GECMR_10, GECMR);
break;
case 100:/* 100BASE */
sh_eth_write(ndev, GECMR_100, GECMR);
break;
case 1000: /* 1000BASE */
sh_eth_write(ndev, GECMR_1000, GECMR);
break;
default:
break;
}
}
/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate,
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
EESR_ECI,
.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
EESR_TFE,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
.hw_crc = 1,
#endif
};
static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
if (sh_eth_my_cpu_data.hw_crc)
sh_eth_write(ndev, 0x0, CSMR);
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT 1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.hw_swap = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT 1
#define SH_ETH_HAS_TSU 1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tsu = 1,
};
#endif
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
if (!cd->ecsr_value)
cd->ecsr_value = DEFAULT_ECSR_INIT;
if (!cd->ecsipr_value)
cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
if (!cd->fcftr_value)
cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
DEFAULT_FIFO_F_D_RFD;
if (!cd->fdr_value)
cd->fdr_value = DEFAULT_FDR_INIT;
if (!cd->rmcr_value)
cd->rmcr_value = DEFAULT_RMCR_VALUE;
if (!cd->tx_check)
cd->tx_check = DEFAULT_TX_CHECK;
if (!cd->eesr_err_check)
cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
if (!cd->tx_error_check)
cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}
#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
mdelay(3);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
}
#endif
#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
int reserve;
reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
if (reserve)
skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
switch (mdp->edmac_endian) {
case EDMAC_LITTLE_ENDIAN:
return cpu_to_le32(x);
case EDMAC_BIG_ENDIAN:
return cpu_to_be32(x);
}
return x;
}
static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
switch (mdp->edmac_endian) {
case EDMAC_LITTLE_ENDIAN:
return le32_to_cpu(x);
case EDMAC_BIG_ENDIAN:
return be32_to_cpu(x);
}
return x;
}
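/* Illustrative example (added): on a little-endian CPU with
 * mdp->edmac_endian == EDMAC_BIG_ENDIAN, cpu_to_edmac(mdp, 0x12345678)
 * returns cpu_to_be32(0x12345678), i.e. the bytes land in memory as
 * 12 34 56 78 for the big-endian EDMAC to read.
 */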
/*
* Program the hardware MAC address from dev->dev_addr.
*/
static void update_mac_address(struct net_device *ndev)
{
sh_eth_write(ndev,
(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
sh_eth_write(ndev,
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
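/* Worked example (illustrative): for dev_addr 00:11:22:33:44:55 the
 * writes above program MAHR = 0x00112233 and MALR = 0x00004455.
 */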
/*
* Get MAC address from SuperH MAC address register
*
* SuperH's Ethernet device doesn't have a ROM for the MAC address.
* This driver uses the MAC address set up by the bootloader (U-Boot or sh-ipl+g).
* To use this device, the MAC address must be set in the bootloader.
*
*/
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
memcpy(ndev->dev_addr, mac, 6);
} else {
ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
}
}
static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
if (mdp->reg_offset == sh_eth_offset_gigabit)
return 1;
else
return 0;
}
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
if (sh_eth_is_gether(mdp))
return EDTRR_TRNS_GETHER;
else
return EDTRR_TRNS_ETHER;
}
struct bb_info {
void (*set_gate)(void *addr);
struct mdiobb_ctrl ctrl;
void *addr;
u32 mmd_msk;/* MMD */
u32 mdo_msk;
u32 mdi_msk;
u32 mdc_msk;
};
/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
iowrite32(ioread32(addr) | msk, addr);
}
/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
iowrite32((ioread32(addr) & ~msk), addr);
}
/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (bitbang->set_gate)
bitbang->set_gate(bitbang->addr);
if (bit)
bb_set(bitbang->addr, bitbang->mmd_msk);
else
bb_clr(bitbang->addr, bitbang->mmd_msk);
}
/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (bitbang->set_gate)
bitbang->set_gate(bitbang->addr);
if (bit)
bb_set(bitbang->addr, bitbang->mdo_msk);
else
bb_clr(bitbang->addr, bitbang->mdo_msk);
}
/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (bitbang->set_gate)
bitbang->set_gate(bitbang->addr);
return bb_read(bitbang->addr, bitbang->mdi_msk);
}
/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (bitbang->set_gate)
bitbang->set_gate(bitbang->addr);
if (bit)
bb_set(bitbang->addr, bitbang->mdc_msk);
else
bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = sh_mdc_ctrl,
.set_mdio_dir = sh_mmd_ctrl,
.set_mdio_data = sh_set_mdio,
.get_mdio_data = sh_get_mdio,
};
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i;
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
for (i = 0; i < RX_RING_SIZE; i++) {
if (mdp->rx_skbuff[i])
dev_kfree_skb(mdp->rx_skbuff[i]);
}
}
kfree(mdp->rx_skbuff);
/* Free Tx skb ringbuffer */
if (mdp->tx_skbuff) {
for (i = 0; i < TX_RING_SIZE; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
}
}
kfree(mdp->tx_skbuff);
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct sh_eth_rxdesc *rxdesc = NULL;
struct sh_eth_txdesc *txdesc = NULL;
int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
mdp->cur_rx = mdp->cur_tx = 0;
mdp->dirty_rx = mdp->dirty_tx = 0;
memset(mdp->rx_ring, 0, rx_ringsize);
/* build Rx ring buffer */
for (i = 0; i < RX_RING_SIZE; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* The buffer size must be a multiple of 16 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
if (sh_eth_is_gether(mdp))
sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
/* Mark the last entry as wrapping the ring. */
rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
memset(mdp->tx_ring, 0, tx_ringsize);
/* build Tx ring buffer */
for (i = 0; i < TX_RING_SIZE; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
txdesc->buffer_length = 0;
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
if (sh_eth_is_gether(mdp))
sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int rx_ringsize, tx_ringsize, ret = 0;
/*
* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card.
*/
mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
(((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
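/* Worked example (illustrative): with mtu = 1500 this is
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes; an
 * mtu <= 1492 simply uses PKT_BUF_SZ.
 */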
if (mdp->cd->rpadir)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
ret = -ENOMEM;
return ret;
}
mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
ret = -ENOMEM;
goto skb_ring_free;
}
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
if (!mdp->rx_ring) {
dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
rx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
mdp->dirty_rx = 0;
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
tx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
return ret;
desc_ring_free:
/* free DMA buffer */
dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
skb_ring_free:
/* Free Rx and Tx skb ring buffer */
sh_eth_ring_free(ndev);
return ret;
}
static int sh_eth_dev_init(struct net_device *ndev)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
u_int32_t rx_int_var, tx_int_var;
u32 val;
/* Soft Reset */
sh_eth_reset(ndev);
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
/* all sh_eth int mask */
sh_eth_write(ndev, 0, EESIPR);
#if defined(__LITTLE_ENDIAN)
if (mdp->cd->hw_swap)
sh_eth_write(ndev, EDMR_EL, EDMR);
else
#endif
sh_eth_write(ndev, 0, EDMR);
/* FIFO size set */
sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
sh_eth_write(ndev, 0, TFTR);
/* Frame recv control */
sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
if (!mdp->cd->no_trimd)
sh_eth_write(ndev, 0, TRIMD);
/* Recv frame limit set register */
sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
sh_eth_write(ndev, val, ECMR);
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
/* E-MAC Status Register clear */
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
/* mask reset */
if (mdp->cd->apr)
sh_eth_write(ndev, APR_AP, APR);
if (mdp->cd->mpr)
sh_eth_write(ndev, MPR_MP, MPR);
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
/* Setting the Rx mode will start the Rx process. */
sh_eth_write(ndev, EDRRR_R, EDRRR);
netif_start_queue(ndev);
return ret;
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
int free_num = 0;
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
entry = mdp->dirty_tx % TX_RING_SIZE;
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, txdesc->addr,
txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
free_num++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= TX_RING_SIZE - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += txdesc->buffer_length;
}
return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
int entry = mdp->cur_rx % RX_RING_SIZE;
int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
if (--boguscnt < 0)
break;
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
if (desc_status & RD_RFS1)
ndev->stats.rx_crc_errors++;
if (desc_status & RD_RFS2)
ndev->stats.rx_frame_errors++;
if (desc_status & RD_RFS3)
ndev->stats.rx_length_errors++;
if (desc_status & RD_RFS4)
ndev->stats.rx_length_errors++;
if (desc_status & RD_RFS6)
ndev->stats.rx_missed_errors++;
if (desc_status & RD_RFS10)
ndev->stats.rx_over_errors++;
} else {
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(
phys_to_virt(ALIGN(rxdesc->addr, 4)),
pkt_len + 2);
skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % RX_RING_SIZE;
rxdesc = &mdp->rx_ring[entry];
}
/* Refill the Rx ring buffers. */
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
entry = mdp->dirty_rx % RX_RING_SIZE;
rxdesc = &mdp->rx_ring[entry];
/* The buffer size must be a multiple of 16 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
if (entry >= RX_RING_SIZE - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP);
}
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
sh_eth_write(ndev, EDRRR_R, EDRRR);
return 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
/* disable tx and rx */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
~(ECMR_RE | ECMR_TE), ECMR);
}
static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
/* enable tx and rx */
sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
(ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 felic_stat;
u32 link_stat;
u32 mask;
if (intr_status & EESR_ECI) {
felic_stat = sh_eth_read(ndev, ECSR);
sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
if (felic_stat & ECSR_ICD)
ndev->stats.tx_carrier_errors++;
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
if (mdp->link == PHY_DOWN)
link_stat = 0;
else
link_stat = PHY_ST_LINK;
} else {
link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
if (!(link_stat & PHY_ST_LINK))
sh_eth_rcv_snd_disable(ndev);
else {
/* Link Up */
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
~DMAC_M_ECI, EESIPR);
/* clear int */
sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
ECSR);
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
DMAC_M_ECI, EESIPR);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
}
}
if (intr_status & EESR_TWB) {
/* Write-back end; unused write-back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
ndev->stats.tx_aborted_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit Abort\n");
}
if (intr_status & EESR_RABT) {
/* Receive Abort int */
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Abort\n");
}
}
if (intr_status & EESR_TDE) {
/* Transmit Descriptor Empty int */
ndev->stats.tx_fifo_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
}
if (intr_status & EESR_TFE) {
/* FIFO under flow */
ndev->stats.tx_fifo_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
}
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
sh_eth_write(ndev, EDRRR_R, EDRRR);
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
/* Address Error */
ndev->stats.tx_fifo_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Address Error\n");
}
mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
if (mdp->cd->no_ade)
mask &= ~EESR_ADE;
if (intr_status & mask) {
/* Tx error */
u32 edtrr = sh_eth_read(ndev, EDTRR);
/* dmesg */
dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
intr_status, mdp->cur_tx);
dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
mdp->dirty_tx, (u32) ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
/* SH7712 BUG */
if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
/* tx dma start */
sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
}
/* wakeup */
netif_wake_queue(ndev);
}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
struct net_device *ndev = netdev;
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
u32 intr_status = 0;
spin_lock(&mdp->lock);
/* Get interrupt status */
intr_status = sh_eth_read(ndev, EESR);
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
cd->tx_check | cd->eesr_err_check)) {
sh_eth_write(ndev, intr_status, EESR);
ret = IRQ_HANDLED;
} else {
goto other_irq;
}
if (intr_status & (EESR_FRC | /* Frame recv */
EESR_RMAF | /* Multicast address recv */
EESR_RRF | /* Residual-bit frame recv */
EESR_RTLF | /* Long frame recv */
EESR_RTSF | /* Short frame recv */
EESR_PRE | /* PHY-LSI recv error */
EESR_CERF)) { /* Recv frame CRC error */
sh_eth_rx(ndev);
}
/* Tx Check */
if (intr_status & cd->tx_check) {
sh_eth_txfree(ndev);
netif_wake_queue(ndev);
}
if (intr_status & cd->eesr_err_check)
sh_eth_error(ndev, intr_status);
other_irq:
spin_unlock(&mdp->lock);
return ret;
}
static void sh_eth_timer(unsigned long data)
{
struct net_device *ndev = (struct net_device *)data;
struct sh_eth_private *mdp = netdev_priv(ndev);
mod_timer(&mdp->timer, jiffies + (10 * HZ));
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
if (mdp->cd->set_duplex)
mdp->cd->set_duplex(ndev);
}
if (phydev->speed != mdp->speed) {
new_state = 1;
mdp->speed = phydev->speed;
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
sh_eth_write(ndev,
(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
}
} else if (mdp->link) {
new_state = 1;
mdp->link = PHY_DOWN;
mdp->speed = 0;
mdp->duplex = -1;
}
if (new_state && netif_msg_link(mdp))
phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
char phy_id[MII_BUS_ID_SIZE + 3];
struct phy_device *phydev = NULL;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
mdp->mii_bus->id , mdp->phy_id);
mdp->link = PHY_DOWN;
mdp->speed = 0;
mdp->duplex = -1;
/* Try connect to PHY */
phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
0, mdp->phy_interface);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
return PTR_ERR(phydev);
}
dev_info(&ndev->dev, "attached phy %i to driver %s\n",
phydev->addr, phydev->drv->name);
mdp->phydev = phydev;
return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
ret = sh_eth_phy_init(ndev);
if (ret)
return ret;
/* reset phy - this also wakes it from PDOWN */
phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
phy_start(mdp->phydev);
return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_ethtool_gset(mdp->phydev, ecmd);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
}
static int sh_eth_set_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
spin_lock_irqsave(&mdp->lock, flags);
/* disable tx and rx */
sh_eth_rcv_snd_disable(ndev);
ret = phy_ethtool_sset(mdp->phydev, ecmd);
if (ret)
goto error_exit;
if (ecmd->duplex == DUPLEX_FULL)
mdp->duplex = 1;
else
mdp->duplex = 0;
if (mdp->cd->set_duplex)
mdp->cd->set_duplex(ndev);
error_exit:
mdelay(1);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
}
static int sh_eth_nway_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_start_aneg(mdp->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
return mdp->msg_enable;
}
static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_current", "tx_current",
"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return SH_ETH_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i = 0;
/* device-specific stats */
data[i++] = mdp->cur_rx;
data[i++] = mdp->cur_tx;
data[i++] = mdp->dirty_rx;
data[i++] = mdp->dirty_tx;
}
static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
.nway_reset = sh_eth_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
pm_runtime_get_sync(&mdp->pdev->dev);
ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7764) || \
defined(CONFIG_CPU_SUBTYPE_SH7757)
IRQF_SHARED,
#else
0,
#endif
ndev->name, ndev);
if (ret) {
dev_err(&ndev->dev, "Can not assign IRQ number\n");
return ret;
}
/* Descriptor set */
ret = sh_eth_ring_init(ndev);
if (ret)
goto out_free_irq;
/* device init */
ret = sh_eth_dev_init(ndev);
if (ret)
goto out_free_irq;
/* PHY control start*/
ret = sh_eth_phy_start(ndev);
if (ret)
goto out_free_irq;
/* Set the timer to check for link beat. */
setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
return ret;
out_free_irq:
free_irq(ndev->irq, ndev);
pm_runtime_put_sync(&mdp->pdev->dev);
return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
int i;
netif_stop_queue(ndev);
if (netif_msg_timer(mdp))
dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
" resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
/* tx_errors count up */
ndev->stats.tx_errors++;
/* timer off */
del_timer_sync(&mdp->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
rxdesc = &mdp->rx_ring[i];
rxdesc->status = 0;
rxdesc->addr = 0xBADF00D0;
if (mdp->rx_skbuff[i])
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
mdp->tx_skbuff[i] = NULL;
}
/* device init */
sh_eth_dev_init(ndev);
/* timer on */
mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
add_timer(&mdp->timer);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
u32 entry;
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
if (!sh_eth_txfree(ndev)) {
if (netif_msg_tx_queued(mdp))
dev_warn(&ndev->dev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
return NETDEV_TX_BUSY;
}
}
spin_unlock_irqrestore(&mdp->lock, flags);
entry = mdp->cur_tx % TX_RING_SIZE;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (skb->len < ETHERSMALL)
txdesc->buffer_length = ETHERSMALL;
else
txdesc->buffer_length = skb->len;
if (entry >= TX_RING_SIZE - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
mdp->cur_tx++;
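/* Kick the Tx DMA engine if it is not already running. */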
if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
return NETDEV_TX_OK;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ringsize;
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
sh_eth_write(ndev, 0x0000, EESIPR);
/* Stop the chip's Tx and Rx processes. */
sh_eth_write(ndev, 0, EDTRR);
sh_eth_write(ndev, 0, EDRRR);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
phy_disconnect(mdp->phydev);
}
free_irq(ndev->irq, ndev);
del_timer_sync(&mdp->timer);
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
/* free DMA buffer */
ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
/* free DMA buffer */
ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
pm_runtime_put_sync(&mdp->pdev->dev);
return 0;
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
pm_runtime_get_sync(&mdp->pdev->dev);
ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
sh_eth_write(ndev, 0, TROCR); /* (write clear) */
ndev->stats.collisions += sh_eth_read(ndev, CDCR);
sh_eth_write(ndev, 0, CDCR); /* (write clear) */
ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
sh_eth_write(ndev, 0, LCCR); /* (write clear) */
if (sh_eth_is_gether(mdp)) {
ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
sh_eth_write(ndev, 0, CERCR); /* (write clear) */
ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
sh_eth_write(ndev, 0, CEECR); /* (write clear) */
} else {
ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
}
pm_runtime_put_sync(&mdp->pdev->dev);
return &ndev->stats;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
int cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
if (!netif_running(ndev))
return -EINVAL;
if (!phydev)
return -ENODEV;
return phy_mii_ioctl(phydev, rq, cmd);
}
#if defined(SH_ETH_HAS_TSU)
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields. */
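/*
* Layout sketch, inferred from the accessor math below rather than
* the manual: each 32-bit TSU_POSTn register packs eight CAM
* entries, one nibble per entry, entry 0 in bits 31:28. E.g. entry 5
* lives in bits 11:8, so sh_eth_tsu_get_post_mask(5) == 0x0f << 8,
* and the per-port enable bit inside that nibble is 0x08 >> (port * 2).
*/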
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
int entry)
{
return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}
static u32 sh_eth_tsu_get_post_mask(int entry)
{
return 0x0f << (28 - ((entry % 8) * 4));
}
static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 tmp;
void *reg_offset;
reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
tmp = ioread32(reg_offset);
iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}
static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 post_mask, ref_mask, tmp;
void *reg_offset;
reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
post_mask = sh_eth_tsu_get_post_mask(entry);
ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
tmp = ioread32(reg_offset);
iowrite32(tmp & ~post_mask, reg_offset);
/* Return "true" if the other port still has this entry enabled */
return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
struct sh_eth_private *mdp = netdev_priv(ndev);
while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
udelay(10);
timeout--;
if (timeout <= 0) {
dev_err(&ndev->dev, "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
const u8 *addr)
{
u32 val;
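/* A CAM entry spans two registers: MAC bytes 0-3 go to the high
* register, bytes 4-5 to the low one at reg + 4, and the TSU must be
* idle between the two writes. */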
val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
iowrite32(val, reg);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
val = addr[4] << 8 | addr[5];
iowrite32(val, reg + 4);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
return 0;
}
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
u32 val;
val = ioread32(reg);
addr[0] = (val >> 24) & 0xff;
addr[1] = (val >> 16) & 0xff;
addr[2] = (val >> 8) & 0xff;
addr[3] = val & 0xff;
val = ioread32(reg + 4);
addr[4] = (val >> 8) & 0xff;
addr[5] = val & 0xff;
}
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
u8 c_addr[ETH_ALEN];
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
sh_eth_tsu_read_entry(reg_offset, c_addr);
if (memcmp(addr, c_addr, ETH_ALEN) == 0)
return i;
}
return -ENOENT;
}
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
u8 blank[ETH_ALEN];
int entry;
memset(blank, 0, sizeof(blank));
entry = sh_eth_tsu_find_entry(ndev, blank);
return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int ret;
u8 blank[ETH_ALEN];
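/* TSU_TEN maps CAM entry N to bit (31 - N); clear it to disable the
* entry before blanking its address. */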
sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
~(1 << (31 - entry)), TSU_TEN);
memset(blank, 0, sizeof(blank));
ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
if (ret < 0)
return ret;
return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i, ret;
if (!mdp->cd->tsu)
return 0;
i = sh_eth_tsu_find_entry(ndev, addr);
if (i < 0) {
/* No entry found, create one */
i = sh_eth_tsu_find_empty(ndev);
if (i < 0)
return -ENOMEM;
ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
if (ret < 0)
return ret;
/* Enable the entry */
sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
(1 << (31 - i)), TSU_TEN);
}
/* Entry found or created, enable POST */
sh_eth_tsu_enable_cam_entry_post(ndev, i);
return 0;
}
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i, ret;
if (!mdp->cd->tsu)
return 0;
i = sh_eth_tsu_find_entry(ndev, addr);
if (i >= 0) {
/* Entry found */
if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
goto done;
/* Disable the entry if both ports were disabled */
ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
if (ret < 0)
return ret;
}
done:
return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i, ret;
if (unlikely(!mdp->cd->tsu))
return 0;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
continue;
/* Disable the entry if both ports were disabled */
ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
if (ret < 0)
return ret;
}
return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u8 addr[ETH_ALEN];
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
if (unlikely(!mdp->cd->tsu))
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
sh_eth_tsu_read_entry(reg_offset, addr);
if (is_multicast_ether_addr(addr))
sh_eth_tsu_del_entry(ndev, addr);
}
}
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 ecmr_bits;
int mcast_all = 0;
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
/*
* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
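* (IFF_ALLMULTI clears MCT; IFF_PROMISC clears MCT and sets PRM.)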
*/
ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
if (!(ndev->flags & IFF_MULTICAST)) {
sh_eth_tsu_purge_mcast(ndev);
mcast_all = 1;
}
if (ndev->flags & IFF_ALLMULTI) {
sh_eth_tsu_purge_mcast(ndev);
ecmr_bits &= ~ECMR_MCT;
mcast_all = 1;
}
if (ndev->flags & IFF_PROMISC) {
sh_eth_tsu_purge_all(ndev);
ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
} else if (mdp->cd->tsu) {
struct netdev_hw_addr *ha;
netdev_for_each_mc_addr(ha, ndev) {
if (mcast_all && is_multicast_ether_addr(ha->addr))
continue;
if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
if (!mcast_all) {
sh_eth_tsu_purge_mcast(ndev);
ecmr_bits &= ~ECMR_MCT;
mcast_all = 1;
}
}
}
} else {
/* Normal, unicast/broadcast-only mode. */
ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
}
/* update the ethernet mode */
sh_eth_write(ndev, ecmr_bits, ECMR);
spin_unlock_irqrestore(&mdp->lock, flags);
}
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
if (!mdp->port)
return TSU_VTAG0;
else
return TSU_VTAG1;
}
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int vtag_reg_index = sh_eth_get_vtag_index(mdp);
if (unlikely(!mdp->cd->tsu))
return -EPERM;
/* No filtering if vid = 0 */
if (!vid)
return 0;
mdp->vlan_num_ids++;
/*
 * The controller has one VLAN tag HW filter. So, if the filter is
 * already enabled, the driver disables it and all VLAN IDs pass
 * unfiltered (the filter register is cleared below).
 */
if (mdp->vlan_num_ids > 1) {
/* disable VLAN filter */
sh_eth_tsu_write(mdp, 0, vtag_reg_index);
return 0;
}
sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
vtag_reg_index);
return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int vtag_reg_index = sh_eth_get_vtag_index(mdp);
if (unlikely(!mdp->cd->tsu))
return -EPERM;
/* No filtering if vid = 0 */
if (!vid)
return 0;
mdp->vlan_num_ids--;
sh_eth_tsu_write(mdp, 0, vtag_reg_index);
return 0;
}
#endif /* SH_ETH_HAS_TSU */
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
if (sh_eth_is_gether(mdp)) {
sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
} else {
sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
}
sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
/* unregister mdio bus */
mdiobus_unregister(bus);
/* remove mdio bus info from net_device */
dev_set_drvdata(&ndev->dev, NULL);
/* free interrupts memory */
kfree(bus->irq);
/* free bitbang info */
free_mdio_bitbang(bus);
return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
struct sh_eth_plat_data *pd)
{
int ret, i;
struct bb_info *bitbang;
struct sh_eth_private *mdp = netdev_priv(ndev);
/* create bit control struct for PHY */
bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
if (!bitbang) {
ret = -ENOMEM;
goto out;
}
/* bitbang init */
bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
bitbang->set_gate = pd->set_mdio_gate;
bitbang->mdi_msk = 0x08;
bitbang->mdo_msk = 0x04;
bitbang->mmd_msk = 0x02;/* MMD */
bitbang->mdc_msk = 0x01;
bitbang->ctrl.ops = &bb_ops;
/* MII controller setting */
mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!mdp->mii_bus) {
ret = -ENOMEM;
goto out_free_bitbang;
}
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
}
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
if (ret)
goto out_free_irq;
dev_set_drvdata(&ndev->dev, mdp->mii_bus);
return 0;
out_free_irq:
kfree(mdp->mii_bus->irq);
out_free_bus:
free_mdio_bitbang(mdp->mii_bus);
out_free_bitbang:
kfree(bitbang);
out:
return ret;
}
static const u16 *sh_eth_get_register_offset(int register_type)
{
const u16 *reg_offset = NULL;
switch (register_type) {
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
case SH_ETH_REG_FAST_SH4:
reg_offset = sh_eth_offset_fast_sh4;
break;
case SH_ETH_REG_FAST_SH3_SH2:
reg_offset = sh_eth_offset_fast_sh3_sh2;
break;
default:
printk(KERN_ERR "Unknown register type (%d)\n", register_type);
break;
}
return reg_offset;
}
static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_open = sh_eth_open,
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
.ndo_set_rx_mode = sh_eth_set_multicast_list,
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
#endif
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
};
static int sh_eth_drv_probe(struct platform_device *pdev)
{
int ret, devno = 0;
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp = NULL;
struct sh_eth_plat_data *pd;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
ret = -EINVAL;
goto out;
}
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev) {
ret = -ENOMEM;
goto out;
}
/* The sh Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
devno = pdev->id;
if (devno < 0)
devno = 0;
ndev->dma = -1;
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
ret = -ENODEV;
goto out_release;
}
ndev->irq = ret;
SET_NETDEV_DEV(ndev, &pdev->dev);
/* Fill in the fields of the device structure with ethernet values. */
ether_setup(ndev);
mdp = netdev_priv(ndev);
mdp->addr = ioremap(res->start, resource_size(res));
if (mdp->addr == NULL) {
ret = -ENOMEM;
dev_err(&pdev->dev, "ioremap failed.\n");
goto out_release;
}
spin_lock_init(&mdp->lock);
mdp->pdev = pdev;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
/* get PHY ID */
mdp->phy_id = pd->phy;
mdp->phy_interface = pd->phy_interface;
/* EDMAC endian */
mdp->edmac_endian = pd->edmac_endian;
mdp->no_ether_link = pd->no_ether_link;
mdp->ether_link_active_low = pd->ether_link_active_low;
mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
mdp->cd = sh_eth_get_cpu_data(mdp);
#else
mdp->cd = &sh_eth_my_cpu_data;
#endif
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
ndev->netdev_ops = &sh_eth_netdev_ops;
SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
ndev->watchdog_timeo = TX_TIMEOUT;
/* debug message level */
mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
mdp->post_rx = POST_RX >> (devno << 1);
mdp->post_fw = POST_FW >> (devno << 1);
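/* POST_RX/POST_FW appear to hold a two-bit field per port, so the
* shift by (devno << 1) selects this device's field (an inference
* from the math here, not from a datasheet). */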
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
/* ioremap the TSU registers */
if (mdp->cd->tsu) {
struct resource *rtsu;
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!rtsu) {
dev_err(&pdev->dev, "TSU resource not found\n");
ret = -ENODEV;
goto out_release;
}
mdp->tsu_addr = ioremap(rtsu->start, resource_size(rtsu));
if (mdp->tsu_addr == NULL) {
ret = -ENOMEM;
dev_err(&pdev->dev, "TSU ioremap failed.\n");
goto out_release;
}
mdp->port = devno % 2;
ndev->features = NETIF_F_HW_VLAN_FILTER;
}
/* initialize first or needed device */
if (!devno || pd->needs_init) {
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);
if (mdp->cd->tsu) {
/* TSU init (Init only)*/
sh_eth_tsu_init(mdp);
}
}
/* network device register */
ret = register_netdev(ndev);
if (ret)
goto out_release;
/* mdio bus init */
ret = sh_mdio_init(ndev, pdev->id, pd);
if (ret)
goto out_unregister;
/* print device information */
pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
return ret;
out_unregister:
unregister_netdev(ndev);
out_release:
/* net_dev free */
if (mdp && mdp->addr)
iounmap(mdp->addr);
if (mdp && mdp->tsu_addr)
iounmap(mdp->tsu_addr);
if (ndev)
free_netdev(ndev);
out:
return ret;
}
static int sh_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_eth_private *mdp = netdev_priv(ndev);
if (mdp->cd->tsu)
iounmap(mdp->tsu_addr);
sh_mdio_release(ndev);
unregister_netdev(ndev);
pm_runtime_disable(&pdev->dev);
iounmap(mdp->addr);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
return 0;
}
static int sh_eth_runtime_nop(struct device *dev)
{
/*
* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
* pm_runtime_get_sync() anyway so there is no need
* to save and restore registers here.
*/
return 0;
}
static struct dev_pm_ops sh_eth_dev_pm_ops = {
.runtime_suspend = sh_eth_runtime_nop,
.runtime_resume = sh_eth_runtime_nop,
};
static struct platform_driver sh_eth_driver = {
.probe = sh_eth_drv_probe,
.remove = sh_eth_drv_remove,
.driver = {
.name = CARDNAME,
.pm = &sh_eth_dev_pm_ops,
},
};
module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
ReflexBow/ghost | drivers/pcmcia/yenta_socket.c | 4398 | 40340 | /*
* Regular cardbus driver ("yenta_socket")
*
* (C) Copyright 1999, 2000 Linus Torvalds
*
* Changelog:
* Aug 2002: Manfred Spraul <manfred@colorfullife.com>
* Dynamically adjust the size of the bridge resource
*
* May 2003: Dominik Brodowski <linux@brodo.de>
* Merge pci_socket.c and yenta.c into one file
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <pcmcia/ss.h>
#include "yenta_socket.h"
#include "i82365.h"
static bool disable_clkrun;
module_param(disable_clkrun, bool, 0444);
MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
static bool isa_probe = 1;
module_param(isa_probe, bool, 0444);
MODULE_PARM_DESC(isa_probe, "If set ISA interrupts are probed (default). Set to N to disable probing");
static bool pwr_irqs_off;
module_param(pwr_irqs_off, bool, 0644);
MODULE_PARM_DESC(pwr_irqs_off, "Force IRQs off during power-on of slot. Use only when seeing IRQ storms!");
static char o2_speedup[] = "default";
module_param_string(o2_speedup, o2_speedup, sizeof(o2_speedup), 0444);
MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' "
"or 'default' (uses recommended behaviour for the detected bridge)");
/*
* Only probe "regular" interrupts, don't
* touch dangerous spots like the mouse irq,
* because there are mice that apparently
* get really confused if they get fondled
* too intimately.
*
* Default to 11, 10, 9, 7, 6, 5, 4, 3.
*/
static u32 isa_interrupts = 0x0ef8;
#define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args)
/* Don't ask.. */
#define to_cycles(ns) ((ns)/120)
#define to_ns(cycles) ((cycles)*120)
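/* e.g. to_cycles(250) == 2 and to_ns(2) == 240, given the 120 ns
* cycle assumed above. */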
/*
* yenta PCI irq probing.
* currently only used in the TI/EnE initialization code
*/
#ifdef CONFIG_YENTA_TI
static int yenta_probe_cb_irq(struct yenta_socket *socket);
static unsigned int yenta_probe_irq(struct yenta_socket *socket,
u32 isa_irq_mask);
#endif
static unsigned int override_bios;
module_param(override_bios, uint, 0000);
MODULE_PARM_DESC(override_bios, "yenta ignore bios resource allocation");
/*
* Generate easy-to-use ways of reading a cardbus sockets
* regular memory space ("cb_xxx"), configuration space
* ("config_xxx") and compatibility space ("exca_xxxx")
*/
static inline u32 cb_readl(struct yenta_socket *socket, unsigned reg)
{
u32 val = readl(socket->base + reg);
debug("%04x %08x\n", socket, reg, val);
return val;
}
static inline void cb_writel(struct yenta_socket *socket, unsigned reg, u32 val)
{
debug("%04x %08x\n", socket, reg, val);
writel(val, socket->base + reg);
readl(socket->base + reg); /* avoid problems with PCI write posting */
}
static inline u8 config_readb(struct yenta_socket *socket, unsigned offset)
{
u8 val;
pci_read_config_byte(socket->dev, offset, &val);
debug("%04x %02x\n", socket, offset, val);
return val;
}
static inline u16 config_readw(struct yenta_socket *socket, unsigned offset)
{
u16 val;
pci_read_config_word(socket->dev, offset, &val);
debug("%04x %04x\n", socket, offset, val);
return val;
}
static inline u32 config_readl(struct yenta_socket *socket, unsigned offset)
{
u32 val;
pci_read_config_dword(socket->dev, offset, &val);
debug("%04x %08x\n", socket, offset, val);
return val;
}
static inline void config_writeb(struct yenta_socket *socket, unsigned offset, u8 val)
{
debug("%04x %02x\n", socket, offset, val);
pci_write_config_byte(socket->dev, offset, val);
}
static inline void config_writew(struct yenta_socket *socket, unsigned offset, u16 val)
{
debug("%04x %04x\n", socket, offset, val);
pci_write_config_word(socket->dev, offset, val);
}
static inline void config_writel(struct yenta_socket *socket, unsigned offset, u32 val)
{
debug("%04x %08x\n", socket, offset, val);
pci_write_config_dword(socket->dev, offset, val);
}
static inline u8 exca_readb(struct yenta_socket *socket, unsigned reg)
{
u8 val = readb(socket->base + 0x800 + reg);
debug("%04x %02x\n", socket, reg, val);
return val;
}
static inline u8 exca_readw(struct yenta_socket *socket, unsigned reg)
{
u16 val;
val = readb(socket->base + 0x800 + reg);
val |= readb(socket->base + 0x800 + reg + 1) << 8;
debug("%04x %04x\n", socket, reg, val);
return val;
}
static inline void exca_writeb(struct yenta_socket *socket, unsigned reg, u8 val)
{
debug("%04x %02x\n", socket, reg, val);
writeb(val, socket->base + 0x800 + reg);
readb(socket->base + 0x800 + reg); /* PCI write posting... */
}
static void exca_writew(struct yenta_socket *socket, unsigned reg, u16 val)
{
debug("%04x %04x\n", socket, reg, val);
writeb(val, socket->base + 0x800 + reg);
writeb(val >> 8, socket->base + 0x800 + reg + 1);
/* PCI write posting... */
readb(socket->base + 0x800 + reg);
readb(socket->base + 0x800 + reg + 1);
}
static ssize_t show_yenta_registers(struct device *yentadev, struct device_attribute *attr, char *buf)
{
struct pci_dev *dev = to_pci_dev(yentadev);
struct yenta_socket *socket = pci_get_drvdata(dev);
int offset = 0, i;
offset = snprintf(buf, PAGE_SIZE, "CB registers:");
for (i = 0; i < 0x24; i += 4) {
unsigned val;
if (!(i & 15))
offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
val = cb_readl(socket, i);
offset += snprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
}
offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
for (i = 0; i < 0x45; i++) {
unsigned char val;
if (!(i & 7)) {
if (i & 8) {
memcpy(buf + offset, " -", 2);
offset += 2;
} else
offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
}
val = exca_readb(socket, i);
offset += snprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
}
buf[offset++] = '\n';
return offset;
}
static DEVICE_ATTR(yenta_registers, S_IRUSR, show_yenta_registers, NULL);
/*
* Ugh, mixed-mode cardbus and 16-bit pccard state: things depend
* on what kind of card is inserted..
*/
static int yenta_get_status(struct pcmcia_socket *sock, unsigned int *value)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
unsigned int val;
u32 state = cb_readl(socket, CB_SOCKET_STATE);
val = (state & CB_3VCARD) ? SS_3VCARD : 0;
val |= (state & CB_XVCARD) ? SS_XVCARD : 0;
val |= (state & (CB_5VCARD | CB_3VCARD | CB_XVCARD | CB_YVCARD)) ? 0 : SS_PENDING;
val |= (state & (CB_CDETECT1 | CB_CDETECT2)) ? SS_PENDING : 0;
if (state & CB_CBCARD) {
val |= SS_CARDBUS;
val |= (state & CB_CARDSTS) ? SS_STSCHG : 0;
val |= (state & (CB_CDETECT1 | CB_CDETECT2)) ? 0 : SS_DETECT;
val |= (state & CB_PWRCYCLE) ? SS_POWERON | SS_READY : 0;
} else if (state & CB_16BITCARD) {
u8 status = exca_readb(socket, I365_STATUS);
val |= ((status & I365_CS_DETECT) == I365_CS_DETECT) ? SS_DETECT : 0;
if (exca_readb(socket, I365_INTCTL) & I365_PC_IOCARD) {
val |= (status & I365_CS_STSCHG) ? 0 : SS_STSCHG;
} else {
val |= (status & I365_CS_BVD1) ? 0 : SS_BATDEAD;
val |= (status & I365_CS_BVD2) ? 0 : SS_BATWARN;
}
val |= (status & I365_CS_WRPROT) ? SS_WRPROT : 0;
val |= (status & I365_CS_READY) ? SS_READY : 0;
val |= (status & I365_CS_POWERON) ? SS_POWERON : 0;
}
*value = val;
return 0;
}
static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
{
/* some bridges require using the ExCA registers to power 16-bit cards */
if (!(cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) &&
(socket->flags & YENTA_16BIT_POWER_EXCA)) {
u8 reg, old;
reg = old = exca_readb(socket, I365_POWER);
reg &= ~(I365_VCC_MASK | I365_VPP1_MASK | I365_VPP2_MASK);
/* i82365SL-DF style */
if (socket->flags & YENTA_16BIT_POWER_DF) {
switch (state->Vcc) {
case 33:
reg |= I365_VCC_3V;
break;
case 50:
reg |= I365_VCC_5V;
break;
default:
reg = 0;
break;
}
switch (state->Vpp) {
case 33:
case 50:
reg |= I365_VPP1_5V;
break;
case 120:
reg |= I365_VPP1_12V;
break;
}
} else {
/* i82365SL-B style */
switch (state->Vcc) {
case 50:
reg |= I365_VCC_5V;
break;
default:
reg = 0;
break;
}
switch (state->Vpp) {
case 50:
reg |= I365_VPP1_5V | I365_VPP2_5V;
break;
case 120:
reg |= I365_VPP1_12V | I365_VPP2_12V;
break;
}
}
if (reg != old)
exca_writeb(socket, I365_POWER, reg);
} else {
u32 reg = 0; /* CB_SC_STPCLK? */
switch (state->Vcc) {
case 33:
reg = CB_SC_VCC_3V;
break;
case 50:
reg = CB_SC_VCC_5V;
break;
default:
reg = 0;
break;
}
switch (state->Vpp) {
case 33:
reg |= CB_SC_VPP_3V;
break;
case 50:
reg |= CB_SC_VPP_5V;
break;
case 120:
reg |= CB_SC_VPP_12V;
break;
}
if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
cb_writel(socket, CB_SOCKET_CONTROL, reg);
}
}
static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
u16 bridge;
/* if powering down: do it immediately */
if (state->Vcc == 0)
yenta_set_power(socket, state);
socket->io_irq = state->io_irq;
bridge = config_readw(socket, CB_BRIDGE_CONTROL) & ~(CB_BRIDGE_CRST | CB_BRIDGE_INTR);
if (cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) {
u8 intr;
bridge |= (state->flags & SS_RESET) ? CB_BRIDGE_CRST : 0;
/* ISA interrupt control? */
intr = exca_readb(socket, I365_INTCTL);
intr = (intr & ~0xf);
if (!socket->dev->irq) {
intr |= socket->cb_irq ? socket->cb_irq : state->io_irq;
bridge |= CB_BRIDGE_INTR;
}
exca_writeb(socket, I365_INTCTL, intr);
} else {
u8 reg;
reg = exca_readb(socket, I365_INTCTL) & (I365_RING_ENA | I365_INTR_ENA);
reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET;
reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0;
if (state->io_irq != socket->dev->irq) {
reg |= state->io_irq;
bridge |= CB_BRIDGE_INTR;
}
exca_writeb(socket, I365_INTCTL, reg);
reg = exca_readb(socket, I365_POWER) & (I365_VCC_MASK|I365_VPP1_MASK);
reg |= I365_PWR_NORESET;
if (state->flags & SS_PWR_AUTO)
reg |= I365_PWR_AUTO;
if (state->flags & SS_OUTPUT_ENA)
reg |= I365_PWR_OUT;
if (exca_readb(socket, I365_POWER) != reg)
exca_writeb(socket, I365_POWER, reg);
/* CSC interrupt: no ISA irq for CSC */
reg = exca_readb(socket, I365_CSCINT);
reg &= I365_CSC_IRQ_MASK;
reg |= I365_CSC_DETECT;
if (state->flags & SS_IOCARD) {
if (state->csc_mask & SS_STSCHG)
reg |= I365_CSC_STSCHG;
} else {
if (state->csc_mask & SS_BATDEAD)
reg |= I365_CSC_BVD1;
if (state->csc_mask & SS_BATWARN)
reg |= I365_CSC_BVD2;
if (state->csc_mask & SS_READY)
reg |= I365_CSC_READY;
}
exca_writeb(socket, I365_CSCINT, reg);
exca_readb(socket, I365_CSC);
if (sock->zoom_video)
sock->zoom_video(sock, state->flags & SS_ZVCARD);
}
config_writew(socket, CB_BRIDGE_CONTROL, bridge);
/* Socket event mask: get card insert/remove events.. */
cb_writel(socket, CB_SOCKET_EVENT, -1);
cb_writel(socket, CB_SOCKET_MASK, CB_CDMASK);
/* if powering up: do it as the last step when the socket is configured */
if (state->Vcc != 0)
yenta_set_power(socket, state);
return 0;
}
static int yenta_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
int map;
unsigned char ioctl, addr, enable;
map = io->map;
if (map > 1)
return -EINVAL;
enable = I365_ENA_IO(map);
addr = exca_readb(socket, I365_ADDRWIN);
/* Disable the window before changing it.. */
if (addr & enable) {
addr &= ~enable;
exca_writeb(socket, I365_ADDRWIN, addr);
}
exca_writew(socket, I365_IO(map)+I365_W_START, io->start);
exca_writew(socket, I365_IO(map)+I365_W_STOP, io->stop);
ioctl = exca_readb(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map);
if (io->flags & MAP_0WS)
ioctl |= I365_IOCTL_0WS(map);
if (io->flags & MAP_16BIT)
ioctl |= I365_IOCTL_16BIT(map);
if (io->flags & MAP_AUTOSZ)
ioctl |= I365_IOCTL_IOCS16(map);
exca_writeb(socket, I365_IOCTL, ioctl);
if (io->flags & MAP_ACTIVE)
exca_writeb(socket, I365_ADDRWIN, addr | enable);
return 0;
}
static int yenta_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
struct pci_bus_region region;
int map;
unsigned char addr, enable;
unsigned int start, stop, card_start;
unsigned short word;
pcibios_resource_to_bus(socket->dev, &region, mem->res);
map = mem->map;
start = region.start;
stop = region.end;
card_start = mem->card_start;
if (map > 4 || start > stop || ((start ^ stop) >> 24) ||
(card_start >> 26) || mem->speed > 1000)
return -EINVAL;
enable = I365_ENA_MEM(map);
addr = exca_readb(socket, I365_ADDRWIN);
if (addr & enable) {
addr &= ~enable;
exca_writeb(socket, I365_ADDRWIN, addr);
}
exca_writeb(socket, CB_MEM_PAGE(map), start >> 24);
word = (start >> 12) & 0x0fff;
if (mem->flags & MAP_16BIT)
word |= I365_MEM_16BIT;
if (mem->flags & MAP_0WS)
word |= I365_MEM_0WS;
exca_writew(socket, I365_MEM(map) + I365_W_START, word);
word = (stop >> 12) & 0x0fff;
switch (to_cycles(mem->speed)) {
case 0:
break;
case 1:
word |= I365_MEM_WS0;
break;
case 2:
word |= I365_MEM_WS1;
break;
default:
word |= I365_MEM_WS1 | I365_MEM_WS0;
break;
}
exca_writew(socket, I365_MEM(map) + I365_W_STOP, word);
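/* The offset register holds the 14-bit distance from the host window
* to the card address, in 4 kB units. */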
word = ((card_start - start) >> 12) & 0x3fff;
if (mem->flags & MAP_WRPROT)
word |= I365_MEM_WRPROT;
if (mem->flags & MAP_ATTRIB)
word |= I365_MEM_REG;
exca_writew(socket, I365_MEM(map) + I365_W_OFF, word);
if (mem->flags & MAP_ACTIVE)
exca_writeb(socket, I365_ADDRWIN, addr | enable);
return 0;
}
static irqreturn_t yenta_interrupt(int irq, void *dev_id)
{
unsigned int events;
struct yenta_socket *socket = (struct yenta_socket *) dev_id;
u8 csc;
u32 cb_event;
/* Clear interrupt status for the event */
cb_event = cb_readl(socket, CB_SOCKET_EVENT);
cb_writel(socket, CB_SOCKET_EVENT, cb_event);
csc = exca_readb(socket, I365_CSC);
if (!(cb_event || csc))
return IRQ_NONE;
events = (cb_event & (CB_CD1EVENT | CB_CD2EVENT)) ? SS_DETECT : 0 ;
events |= (csc & I365_CSC_DETECT) ? SS_DETECT : 0;
if (exca_readb(socket, I365_INTCTL) & I365_PC_IOCARD) {
events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0;
} else {
events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0;
events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0;
events |= (csc & I365_CSC_READY) ? SS_READY : 0;
}
if (events)
pcmcia_parse_events(&socket->socket, events);
return IRQ_HANDLED;
}
static void yenta_interrupt_wrapper(unsigned long data)
{
struct yenta_socket *socket = (struct yenta_socket *) data;
yenta_interrupt(0, (void *)socket);
socket->poll_timer.expires = jiffies + HZ;
add_timer(&socket->poll_timer);
}
static void yenta_clear_maps(struct yenta_socket *socket)
{
int i;
struct resource res = { .start = 0, .end = 0x0fff };
pccard_io_map io = { 0, 0, 0, 0, 1 };
pccard_mem_map mem = { .res = &res, };
yenta_set_socket(&socket->socket, &dead_socket);
for (i = 0; i < 2; i++) {
io.map = i;
yenta_set_io_map(&socket->socket, &io);
}
for (i = 0; i < 5; i++) {
mem.map = i;
yenta_set_mem_map(&socket->socket, &mem);
}
}
/* redoes voltage interrogation if required */
static void yenta_interrogate(struct yenta_socket *socket)
{
u32 state;
state = cb_readl(socket, CB_SOCKET_STATE);
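/* Force a CVSTEST if no voltage bits were latched, card detect is
* still pending or looks bad, or the 16-bit and CardBus type bits are
* both set at once. */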
if (!(state & (CB_5VCARD | CB_3VCARD | CB_XVCARD | CB_YVCARD)) ||
(state & (CB_CDETECT1 | CB_CDETECT2 | CB_NOTACARD | CB_BADVCCREQ)) ||
((state & (CB_16BITCARD | CB_CBCARD)) == (CB_16BITCARD | CB_CBCARD)))
cb_writel(socket, CB_SOCKET_FORCE, CB_CVSTEST);
}
/* Called at resume and initialization events */
static int yenta_sock_init(struct pcmcia_socket *sock)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
exca_writeb(socket, I365_GBLCTL, 0x00);
exca_writeb(socket, I365_GENCTL, 0x00);
/* Redo card voltage interrogation */
yenta_interrogate(socket);
yenta_clear_maps(socket);
if (socket->type && socket->type->sock_init)
socket->type->sock_init(socket);
/* Re-enable CSC interrupts */
cb_writel(socket, CB_SOCKET_MASK, CB_CDMASK);
return 0;
}
static int yenta_sock_suspend(struct pcmcia_socket *sock)
{
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
/* Disable CSC interrupts */
cb_writel(socket, CB_SOCKET_MASK, 0x0);
return 0;
}
/*
* Use an adaptive allocation for the memory resource,
* sometimes the memory behind pci bridges is limited:
* 1/8 of the size of the io window of the parent.
* max 4 MB, min 16 kB. We try very hard to not get below
* the "ACC" values, though.
*/
#define BRIDGE_MEM_MAX (4*1024*1024)
#define BRIDGE_MEM_ACC (128*1024)
#define BRIDGE_MEM_MIN (16*1024)
#define BRIDGE_IO_MAX 512
#define BRIDGE_IO_ACC 256
#define BRIDGE_IO_MIN 32
#ifndef PCIBIOS_MIN_CARDBUS_IO
#define PCIBIOS_MIN_CARDBUS_IO PCIBIOS_MIN_IO
#endif
static int yenta_search_one_res(struct resource *root, struct resource *res,
u32 min)
{
u32 align, size, start, end;
if (res->flags & IORESOURCE_IO) {
align = 1024;
size = BRIDGE_IO_MAX;
start = PCIBIOS_MIN_CARDBUS_IO;
end = ~0U;
} else {
unsigned long avail = root->end - root->start;
int i;
size = BRIDGE_MEM_MAX;
if (size > avail/8) {
size = (avail+1)/8;
/* round size down to next power of 2 */
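/* e.g. avail + 1 == 0x300000 gives size 0x60000, rounded down
* to 0x40000 */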
i = 0;
while ((size /= 2) != 0)
i++;
size = 1 << i;
}
if (size < min)
size = min;
align = size;
start = PCIBIOS_MIN_MEM;
end = ~0U;
}
do {
if (allocate_resource(root, res, size, start, end, align,
NULL, NULL) == 0) {
return 1;
}
size = size/2;
align = size;
} while (size >= min);
return 0;
}
static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
u32 min)
{
struct resource *root;
int i;
pci_bus_for_each_resource(socket->dev->bus, root, i) {
if (!root)
continue;
if ((res->flags ^ root->flags) &
(IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH))
continue; /* Wrong type */
if (yenta_search_one_res(root, res, min))
return 1;
}
return 0;
}
static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end)
{
struct pci_dev *dev = socket->dev;
struct resource *res;
struct pci_bus_region region;
unsigned mask;
res = dev->resource + PCI_BRIDGE_RESOURCES + nr;
/* Already allocated? */
if (res->parent)
return 0;
/* The granularity of the memory limit is 4kB, on IO it's 4 bytes */
mask = ~0xfff;
if (type & IORESOURCE_IO)
mask = ~3;
res->name = dev->subordinate->name;
res->flags = type;
region.start = config_readl(socket, addr_start) & mask;
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
pcibios_bus_to_resource(dev, res, &region);
if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
return 0;
dev_printk(KERN_INFO, &dev->dev,
"Preassigned resource %d busy or not available, "
"reconfiguring...\n",
nr);
}
if (type & IORESOURCE_IO) {
if ((yenta_search_res(socket, res, BRIDGE_IO_MAX)) ||
(yenta_search_res(socket, res, BRIDGE_IO_ACC)) ||
(yenta_search_res(socket, res, BRIDGE_IO_MIN)))
return 1;
} else {
if (type & IORESOURCE_PREFETCH) {
if ((yenta_search_res(socket, res, BRIDGE_MEM_MAX)) ||
(yenta_search_res(socket, res, BRIDGE_MEM_ACC)) ||
(yenta_search_res(socket, res, BRIDGE_MEM_MIN)))
return 1;
/* Approximating prefetchable by non-prefetchable */
res->flags = IORESOURCE_MEM;
}
if ((yenta_search_res(socket, res, BRIDGE_MEM_MAX)) ||
(yenta_search_res(socket, res, BRIDGE_MEM_ACC)) ||
(yenta_search_res(socket, res, BRIDGE_MEM_MIN)))
return 1;
}
dev_printk(KERN_INFO, &dev->dev,
"no resource of type %x available, trying to continue...\n",
type);
res->start = res->end = res->flags = 0;
return 0;
}
/*
* Allocate the bridge mappings for the device..
*/
static void yenta_allocate_resources(struct yenta_socket *socket)
{
int program = 0;
program += yenta_allocate_res(socket, 0, IORESOURCE_IO,
PCI_CB_IO_BASE_0, PCI_CB_IO_LIMIT_0);
program += yenta_allocate_res(socket, 1, IORESOURCE_IO,
PCI_CB_IO_BASE_1, PCI_CB_IO_LIMIT_1);
program += yenta_allocate_res(socket, 2, IORESOURCE_MEM|IORESOURCE_PREFETCH,
PCI_CB_MEMORY_BASE_0, PCI_CB_MEMORY_LIMIT_0);
program += yenta_allocate_res(socket, 3, IORESOURCE_MEM,
PCI_CB_MEMORY_BASE_1, PCI_CB_MEMORY_LIMIT_1);
if (program)
pci_setup_cardbus(socket->dev->subordinate);
}
/*
* Free the bridge mappings for the device..
*/
static void yenta_free_resources(struct yenta_socket *socket)
{
int i;
for (i = 0; i < 4; i++) {
struct resource *res;
res = socket->dev->resource + PCI_BRIDGE_RESOURCES + i;
if (res->start != 0 && res->end != 0)
release_resource(res);
res->start = res->end = res->flags = 0;
}
}
/*
* Close it down - release our resources and go home..
*/
static void __devexit yenta_close(struct pci_dev *dev)
{
struct yenta_socket *sock = pci_get_drvdata(dev);
/* Remove the register attributes */
device_remove_file(&dev->dev, &dev_attr_yenta_registers);
/* we don't want a dying socket registered */
pcmcia_unregister_socket(&sock->socket);
/* Disable all events so we don't die in an IRQ storm */
cb_writel(sock, CB_SOCKET_MASK, 0x0);
exca_writeb(sock, I365_CSCINT, 0);
if (sock->cb_irq)
free_irq(sock->cb_irq, sock);
else
del_timer_sync(&sock->poll_timer);
if (sock->base)
iounmap(sock->base);
yenta_free_resources(sock);
pci_release_regions(dev);
pci_disable_device(dev);
pci_set_drvdata(dev, NULL);
}
static struct pccard_operations yenta_socket_operations = {
.init = yenta_sock_init,
.suspend = yenta_sock_suspend,
.get_status = yenta_get_status,
.set_socket = yenta_set_socket,
.set_io_map = yenta_set_io_map,
.set_mem_map = yenta_set_mem_map,
};
#ifdef CONFIG_YENTA_TI
#include "ti113x.h"
#endif
#ifdef CONFIG_YENTA_RICOH
#include "ricoh.h"
#endif
#ifdef CONFIG_YENTA_TOSHIBA
#include "topic.h"
#endif
#ifdef CONFIG_YENTA_O2
#include "o2micro.h"
#endif
enum {
CARDBUS_TYPE_DEFAULT = -1,
CARDBUS_TYPE_TI,
CARDBUS_TYPE_TI113X,
CARDBUS_TYPE_TI12XX,
CARDBUS_TYPE_TI1250,
CARDBUS_TYPE_RICOH,
CARDBUS_TYPE_TOPIC95,
CARDBUS_TYPE_TOPIC97,
CARDBUS_TYPE_O2MICRO,
CARDBUS_TYPE_ENE,
};
/*
* Different cardbus controllers have slightly different
* initialization sequences etc details. List them here..
*/
static struct cardbus_type cardbus_type[] = {
#ifdef CONFIG_YENTA_TI
[CARDBUS_TYPE_TI] = {
.override = ti_override,
.save_state = ti_save_state,
.restore_state = ti_restore_state,
.sock_init = ti_init,
},
[CARDBUS_TYPE_TI113X] = {
.override = ti113x_override,
.save_state = ti_save_state,
.restore_state = ti_restore_state,
.sock_init = ti_init,
},
[CARDBUS_TYPE_TI12XX] = {
.override = ti12xx_override,
.save_state = ti_save_state,
.restore_state = ti_restore_state,
.sock_init = ti_init,
},
[CARDBUS_TYPE_TI1250] = {
.override = ti1250_override,
.save_state = ti_save_state,
.restore_state = ti_restore_state,
.sock_init = ti_init,
},
[CARDBUS_TYPE_ENE] = {
.override = ene_override,
.save_state = ti_save_state,
.restore_state = ti_restore_state,
.sock_init = ti_init,
},
#endif
#ifdef CONFIG_YENTA_RICOH
[CARDBUS_TYPE_RICOH] = {
.override = ricoh_override,
.save_state = ricoh_save_state,
.restore_state = ricoh_restore_state,
},
#endif
#ifdef CONFIG_YENTA_TOSHIBA
[CARDBUS_TYPE_TOPIC95] = {
.override = topic95_override,
},
[CARDBUS_TYPE_TOPIC97] = {
.override = topic97_override,
},
#endif
#ifdef CONFIG_YENTA_O2
[CARDBUS_TYPE_O2MICRO] = {
.override = o2micro_override,
.restore_state = o2micro_restore_state,
},
#endif
};
static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mask)
{
int i;
unsigned long val;
u32 mask;
u8 reg;
/*
* Probe for usable interrupts using the force
* register to generate bogus card status events.
*/
cb_writel(socket, CB_SOCKET_EVENT, -1);
cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
reg = exca_readb(socket, I365_CSCINT);
exca_writeb(socket, I365_CSCINT, 0);
val = probe_irq_on() & isa_irq_mask;
for (i = 1; i < 16; i++) {
if (!((val >> i) & 1))
continue;
exca_writeb(socket, I365_CSCINT, I365_CSC_STSCHG | (i << 4));
cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
udelay(100);
cb_writel(socket, CB_SOCKET_EVENT, -1);
}
cb_writel(socket, CB_SOCKET_MASK, 0);
exca_writeb(socket, I365_CSCINT, reg);
mask = probe_irq_mask(val) & 0xffff;
return mask;
}
/*
* yenta PCI irq probing.
* currently only used in the TI/EnE initialization code
*/
#ifdef CONFIG_YENTA_TI
/* interrupt handler, only used during probing */
static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
{
struct yenta_socket *socket = (struct yenta_socket *) dev_id;
u8 csc;
u32 cb_event;
/* Clear interrupt status for the event */
cb_event = cb_readl(socket, CB_SOCKET_EVENT);
cb_writel(socket, CB_SOCKET_EVENT, -1);
csc = exca_readb(socket, I365_CSC);
if (cb_event || csc) {
socket->probe_status = 1;
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/* probes the PCI interrupt, use only on override functions */
static int yenta_probe_cb_irq(struct yenta_socket *socket)
{
u8 reg = 0;
if (!socket->cb_irq)
return -1;
socket->probe_status = 0;
if (request_irq(socket->cb_irq, yenta_probe_handler, IRQF_SHARED, "yenta", socket)) {
dev_printk(KERN_WARNING, &socket->dev->dev,
"request_irq() in yenta_probe_cb_irq() failed!\n");
return -1;
}
/* generate interrupt, wait */
if (!socket->dev->irq)
reg = exca_readb(socket, I365_CSCINT);
exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
cb_writel(socket, CB_SOCKET_EVENT, -1);
cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
msleep(100);
/* disable interrupts */
cb_writel(socket, CB_SOCKET_MASK, 0);
exca_writeb(socket, I365_CSCINT, reg);
cb_writel(socket, CB_SOCKET_EVENT, -1);
exca_readb(socket, I365_CSC);
free_irq(socket->cb_irq, socket);
return (int) socket->probe_status;
}
#endif /* CONFIG_YENTA_TI */
/*
* Set static data that doesn't need re-initializing..
*/
static void yenta_get_socket_capabilities(struct yenta_socket *socket, u32 isa_irq_mask)
{
socket->socket.pci_irq = socket->cb_irq;
if (isa_probe)
socket->socket.irq_mask = yenta_probe_irq(socket, isa_irq_mask);
else
socket->socket.irq_mask = 0;
dev_printk(KERN_INFO, &socket->dev->dev,
"ISA IRQ mask 0x%04x, PCI irq %d\n",
socket->socket.irq_mask, socket->cb_irq);
}
/*
* Initialize the standard cardbus registers
*/
static void yenta_config_init(struct yenta_socket *socket)
{
u16 bridge;
struct pci_dev *dev = socket->dev;
struct pci_bus_region region;
pcibios_resource_to_bus(socket->dev, &region, &dev->resource[0]);
config_writel(socket, CB_LEGACY_MODE_BASE, 0);
config_writel(socket, PCI_BASE_ADDRESS_0, region.start);
config_writew(socket, PCI_COMMAND,
PCI_COMMAND_IO |
PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER |
PCI_COMMAND_WAIT);
/* MAGIC NUMBERS! Fixme */
config_writeb(socket, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES / 4);
config_writeb(socket, PCI_LATENCY_TIMER, 168);
config_writel(socket, PCI_PRIMARY_BUS,
(176 << 24) | /* sec. latency timer */
(dev->subordinate->subordinate << 16) | /* subordinate bus */
(dev->subordinate->secondary << 8) | /* secondary bus */
dev->subordinate->primary); /* primary bus */
/*
* Set up the bridging state:
* - enable write posting.
* - memory window 0 prefetchable, window 1 non-prefetchable
* - PCI interrupts enabled if a PCI interrupt exists..
*/
bridge = config_readw(socket, CB_BRIDGE_CONTROL);
bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN);
bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN;
config_writew(socket, CB_BRIDGE_CONTROL, bridge);
}
/**
* yenta_fixup_parent_bridge - Fix subordinate bus# of the parent bridge
* @cardbus_bridge: The PCI bus which the CardBus bridge bridges to
*
* Checks if devices on the bus which the CardBus bridge bridges to would be
* invisible during PCI scans because of a misconfigured subordinate number
* of the parent bridge - some BIOSes seem to be too lazy to set it right.
* Does the fixup carefully by checking how far it can go without conflicts.
* See http://bugzilla.kernel.org/show_bug.cgi?id=2944 for more information.
*/
static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
{
struct list_head *tmp;
unsigned char upper_limit;
/*
* We only check and fix the parent bridge: all systems reviewed that
* need this fixup are laptops, and the only bridge which needed
* fixing was the parent bridge of the CardBus bridge:
*/
struct pci_bus *bridge_to_fix = cardbus_bridge->parent;
/* Check bus numbers are already set up correctly: */
if (bridge_to_fix->subordinate >= cardbus_bridge->subordinate)
return; /* The subordinate number is ok, nothing to do */
if (!bridge_to_fix->parent)
return; /* Root bridges are ok */
/* stay within the limits of the bus range of the parent: */
upper_limit = bridge_to_fix->parent->subordinate;
/* check the bus ranges of all sibling bridges to prevent overlap */
list_for_each(tmp, &bridge_to_fix->parent->children) {
struct pci_bus *sibling = pci_bus_b(tmp);
/*
* If the sibling has a higher secondary bus number
* and its secondary is equal to or smaller than our
* current upper limit, set the new upper limit to
* the bus number below the sibling's range:
*/
if (sibling->secondary > bridge_to_fix->subordinate
&& sibling->secondary <= upper_limit)
upper_limit = sibling->secondary - 1;
}
/* Warn if the wanted subordinate number is not attainable: */
if (cardbus_bridge->subordinate > upper_limit)
dev_printk(KERN_WARNING, &cardbus_bridge->dev,
"Upper limit for fixing this "
"bridge's parent bridge: #%02x\n", upper_limit);
/* If we have room to increase the bridge's subordinate number, */
if (bridge_to_fix->subordinate < upper_limit) {
/* use the highest number of the hidden bus, within limits */
unsigned char subordinate_to_assign =
min(cardbus_bridge->subordinate, upper_limit);
dev_printk(KERN_INFO, &bridge_to_fix->dev,
"Raising subordinate bus# of parent "
"bus (#%02x) from #%02x to #%02x\n",
bridge_to_fix->number,
bridge_to_fix->subordinate, subordinate_to_assign);
/* Save the new subordinate in the bus struct of the bridge */
bridge_to_fix->subordinate = subordinate_to_assign;
/* and update the PCI config space with the new subordinate */
pci_write_config_byte(bridge_to_fix->self,
PCI_SUBORDINATE_BUS, bridge_to_fix->subordinate);
}
}
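/*
 * Worked example with hypothetical bus numbers: the parent bridge has
 * subordinate #0x05, its own parent allows buses up to #0x10, a sibling
 * bridge's secondary bus is #0x08, and the CardBus bridge wants
 * subordinate #0x0a. The sibling loop lowers upper_limit from 0x10 to
 * 0x07, and the fixup raises the parent's subordinate to
 * min(0x0a, 0x07) == 0x07.
 */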
/*
* Initialize a cardbus controller. Make sure we have a usable
* interrupt, and that we can map the cardbus area. Fill in the
* socket information structure..
*/
static int __devinit yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct yenta_socket *socket;
int ret;
/*
* If we failed to assign proper bus numbers for this cardbus
* controller during PCI probe, its subordinate pci_bus is NULL.
* Bail out if so.
*/
if (!dev->subordinate) {
dev_printk(KERN_ERR, &dev->dev, "no bus associated! "
"(try 'pci=assign-busses')\n");
return -ENODEV;
}
socket = kzalloc(sizeof(struct yenta_socket), GFP_KERNEL);
if (!socket)
return -ENOMEM;
/* prepare pcmcia_socket */
socket->socket.ops = &yenta_socket_operations;
socket->socket.resource_ops = &pccard_nonstatic_ops;
socket->socket.dev.parent = &dev->dev;
socket->socket.driver_data = socket;
socket->socket.owner = THIS_MODULE;
socket->socket.features = SS_CAP_PAGE_REGS | SS_CAP_PCCARD;
socket->socket.map_size = 0x1000;
socket->socket.cb_dev = dev;
/* prepare struct yenta_socket */
socket->dev = dev;
pci_set_drvdata(dev, socket);
/*
* Do some basic sanity checking..
*/
if (pci_enable_device(dev)) {
ret = -EBUSY;
goto free;
}
ret = pci_request_regions(dev, "yenta_socket");
if (ret)
goto disable;
if (!pci_resource_start(dev, 0)) {
dev_printk(KERN_ERR, &dev->dev, "No cardbus resource!\n");
ret = -ENODEV;
goto release;
}
/*
* Ok, start setup.. Map the cardbus registers,
* and request the IRQ.
*/
socket->base = ioremap(pci_resource_start(dev, 0), 0x1000);
if (!socket->base) {
ret = -ENOMEM;
goto release;
}
/*
* report the subsystem vendor and device for help debugging
* the irq stuff...
*/
dev_printk(KERN_INFO, &dev->dev, "CardBus bridge found [%04x:%04x]\n",
dev->subsystem_vendor, dev->subsystem_device);
yenta_config_init(socket);
/* Disable all events */
cb_writel(socket, CB_SOCKET_MASK, 0x0);
/* Set up the bridge regions.. */
yenta_allocate_resources(socket);
socket->cb_irq = dev->irq;
/* Do we have special options for the device? */
if (id->driver_data != CARDBUS_TYPE_DEFAULT &&
id->driver_data < ARRAY_SIZE(cardbus_type)) {
socket->type = &cardbus_type[id->driver_data];
ret = socket->type->override(socket);
if (ret < 0)
goto unmap;
}
/* We must finish initialization here */
if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) {
/* No IRQ or request_irq failed. Poll */
socket->cb_irq = 0; /* But zero is a valid IRQ number. */
init_timer(&socket->poll_timer);
socket->poll_timer.function = yenta_interrupt_wrapper;
socket->poll_timer.data = (unsigned long)socket;
socket->poll_timer.expires = jiffies + HZ;
add_timer(&socket->poll_timer);
dev_printk(KERN_INFO, &dev->dev,
"no PCI IRQ, CardBus support disabled for this "
"socket.\n");
dev_printk(KERN_INFO, &dev->dev,
"check your BIOS CardBus, BIOS IRQ or ACPI "
"settings.\n");
} else {
socket->socket.features |= SS_CAP_CARDBUS;
}
/* Figure out what the dang thing can do for the PCMCIA layer... */
yenta_interrogate(socket);
yenta_get_socket_capabilities(socket, isa_interrupts);
dev_printk(KERN_INFO, &dev->dev,
"Socket status: %08x\n", cb_readl(socket, CB_SOCKET_STATE));
yenta_fixup_parent_bridge(dev->subordinate);
/* Register it with the pcmcia layer.. */
ret = pcmcia_register_socket(&socket->socket);
if (ret == 0) {
/* Add the yenta register attributes */
ret = device_create_file(&dev->dev, &dev_attr_yenta_registers);
if (ret == 0)
goto out;
/* error path... */
pcmcia_unregister_socket(&socket->socket);
}
unmap:
iounmap(socket->base);
release:
pci_release_regions(dev);
disable:
pci_disable_device(dev);
free:
kfree(socket);
out:
return ret;
}
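/*
 * Unwind note for the probe path above: each error label releases only
 * what was acquired before the failing step, in reverse acquisition
 * order (iounmap -> pci_release_regions -> pci_disable_device -> kfree),
 * so a failure at any stage leaves nothing leaked.
 */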
#ifdef CONFIG_PM
static int yenta_dev_suspend_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct yenta_socket *socket = pci_get_drvdata(pdev);
if (!socket)
return 0;
if (socket->type && socket->type->save_state)
socket->type->save_state(socket);
pci_save_state(pdev);
pci_read_config_dword(pdev, 16*4, &socket->saved_state[0]);
pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
pci_disable_device(pdev);
return 0;
}
static int yenta_dev_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct yenta_socket *socket = pci_get_drvdata(pdev);
int ret;
if (!socket)
return 0;
pci_write_config_dword(pdev, 16*4, socket->saved_state[0]);
pci_write_config_dword(pdev, 17*4, socket->saved_state[1]);
ret = pci_enable_device(pdev);
if (ret)
return ret;
pci_set_master(pdev);
if (socket->type && socket->type->restore_state)
socket->type->restore_state(socket);
return 0;
}
static const struct dev_pm_ops yenta_pm_ops = {
.suspend_noirq = yenta_dev_suspend_noirq,
.resume_noirq = yenta_dev_resume_noirq,
.freeze_noirq = yenta_dev_suspend_noirq,
.thaw_noirq = yenta_dev_resume_noirq,
.poweroff_noirq = yenta_dev_suspend_noirq,
.restore_noirq = yenta_dev_resume_noirq,
};
#define YENTA_PM_OPS (&yenta_pm_ops)
#else
#define YENTA_PM_OPS NULL
#endif
#define CB_ID(vend, dev, type) \
{ \
.vendor = vend, \
.device = dev, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = PCI_CLASS_BRIDGE_CARDBUS << 8, \
.class_mask = ~0, \
.driver_data = CARDBUS_TYPE_##type, \
}
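/*
 * Illustrative expansion: CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI)
 * produces an entry equivalent to
 *
 * {
 * .vendor = PCI_VENDOR_ID_TI,
 * .device = PCI_DEVICE_ID_TI_1031,
 * .subvendor = PCI_ANY_ID,
 * .subdevice = PCI_ANY_ID,
 * .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
 * .class_mask = ~0,
 * .driver_data = CARDBUS_TYPE_TI,
 * }
 */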
static DEFINE_PCI_DEVICE_TABLE(yenta_table) = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
/*
* TBD: Check if these TI variants can use more
* advanced overrides instead. (I can't get the
* data sheets for these devices. --rmk)
*/
#ifdef CONFIG_YENTA_TI
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1210, TI),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1130, TI113X),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1131, TI113X),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1211, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1220, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1221, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1225, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1251A, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1251B, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1420, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1450, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1451A, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1510, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1620, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_4410, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_4450, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_4451, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_4510, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_4520, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X515, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X420, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X620, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7410, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, ENE),
#endif /* CONFIG_YENTA_TI */
#ifdef CONFIG_YENTA_RICOH
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH),
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH),
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C475, RICOH),
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, RICOH),
CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C478, RICOH),
#endif
#ifdef CONFIG_YENTA_TOSHIBA
CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC95, TOPIC95),
CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC97, TOPIC97),
CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC100, TOPIC97),
#endif
#ifdef CONFIG_YENTA_O2
CB_ID(PCI_VENDOR_ID_O2, PCI_ANY_ID, O2MICRO),
#endif
/* match any cardbus bridge */
CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT),
{ /* all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, yenta_table);
static struct pci_driver yenta_cardbus_driver = {
.name = "yenta_cardbus",
.id_table = yenta_table,
.probe = yenta_probe,
.remove = __devexit_p(yenta_close),
.driver.pm = YENTA_PM_OPS,
};
static int __init yenta_socket_init(void)
{
return pci_register_driver(&yenta_cardbus_driver);
}
static void __exit yenta_socket_exit(void)
{
pci_unregister_driver(&yenta_cardbus_driver);
}
module_init(yenta_socket_init);
module_exit(yenta_socket_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
jetonbacaj/SomeKernel_G920P_PB6 | arch/arm/mach-netx/nxdkn.c | 4654 | 2504 | /*
* arch/arm/mach-netx/nxdkn.c
*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mtd/plat-ram.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/netx-regs.h>
#include <linux/platform_data/eth-netx.h>
#include "generic.h"
static struct netxeth_platform_data eth0_platform_data = {
.xcno = 0,
};
static struct platform_device nxdkn_eth0_device = {
.name = "netx-eth",
.id = 0,
.num_resources = 0,
.resource = NULL,
.dev = {
.platform_data = &eth0_platform_data,
}
};
static struct netxeth_platform_data eth1_platform_data = {
.xcno = 1,
};
static struct platform_device nxdkn_eth1_device = {
.name = "netx-eth",
.id = 1,
.num_resources = 0,
.resource = NULL,
.dev = {
.platform_data = &eth1_platform_data,
}
};
static struct resource netx_uart0_resources[] = {
[0] = {
.start = 0x00100A00,
.end = 0x00100A3F,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART0),
.end = (NETX_IRQ_UART0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart0_device = {
.name = "netx-uart",
.id = 0,
.num_resources = ARRAY_SIZE(netx_uart0_resources),
.resource = netx_uart0_resources,
};
static struct platform_device *devices[] __initdata = {
&nxdkn_eth0_device,
&nxdkn_eth1_device,
&netx_uart0_device,
};
static void __init nxdkn_init(void)
{
platform_add_devices(devices, ARRAY_SIZE(devices));
}
MACHINE_START(NXDKN, "Hilscher nxdkn")
.atag_offset = 0x100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
.init_time = netx_timer_init,
.init_machine = nxdkn_init,
.restart = netx_restart,
MACHINE_END
| gpl-2.0 |
VentureROM-Legacy/android_kernel_lge_d85x | drivers/leds/leds-wm831x-status.c | 5166 | 7757 | /*
* LED driver for WM831x status LEDs
*
* Copyright(C) 2009 Wolfson Microelectronics PLC.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/status.h>
#include <linux/module.h>
struct wm831x_status {
struct led_classdev cdev;
struct wm831x *wm831x;
struct work_struct work;
struct mutex mutex;
spinlock_t value_lock;
int reg; /* Control register */
int reg_val; /* Control register value */
int blink;
int blink_time;
int blink_cyc;
int src;
enum led_brightness brightness;
};
#define to_wm831x_status(led_cdev) \
container_of(led_cdev, struct wm831x_status, cdev)
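/*
 * container_of() walks back from an embedded member to its container:
 * given some struct wm831x_status *led, to_wm831x_status(&led->cdev)
 * yields led again. The callbacks below use this to recover the driver
 * state from the bare led_classdev the LED core hands them.
 */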
static void wm831x_status_work(struct work_struct *work)
{
struct wm831x_status *led = container_of(work, struct wm831x_status,
work);
unsigned long flags;
mutex_lock(&led->mutex);
led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK |
WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK);
spin_lock_irqsave(&led->value_lock, flags);
led->reg_val |= led->src << WM831X_LED_SRC_SHIFT;
if (led->blink) {
led->reg_val |= 2 << WM831X_LED_MODE_SHIFT;
led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT;
led->reg_val |= led->blink_cyc;
} else {
if (led->brightness != LED_OFF)
led->reg_val |= 1 << WM831X_LED_MODE_SHIFT;
}
spin_unlock_irqrestore(&led->value_lock, flags);
wm831x_reg_write(led->wm831x, led->reg, led->reg_val);
mutex_unlock(&led->mutex);
}
static void wm831x_status_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct wm831x_status *led = to_wm831x_status(led_cdev);
unsigned long flags;
spin_lock_irqsave(&led->value_lock, flags);
led->brightness = value;
if (value == LED_OFF)
led->blink = 0;
schedule_work(&led->work);
spin_unlock_irqrestore(&led->value_lock, flags);
}
static int wm831x_status_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct wm831x_status *led = to_wm831x_status(led_cdev);
unsigned long flags;
int ret = 0;
/* Pick some defaults if we've not been given times */
if (*delay_on == 0 && *delay_off == 0) {
*delay_on = 250;
*delay_off = 250;
}
spin_lock_irqsave(&led->value_lock, flags);
/* We only have a limited selection of settings, see if we can
* support the configuration we're being given */
switch (*delay_on) {
case 1000:
led->blink_time = 0;
break;
case 250:
led->blink_time = 1;
break;
case 125:
led->blink_time = 2;
break;
case 62:
case 63:
/* Actually 62.5ms */
led->blink_time = 3;
break;
default:
ret = -EINVAL;
break;
}
if (ret == 0) {
switch (*delay_off / *delay_on) {
case 1:
led->blink_cyc = 0;
break;
case 3:
led->blink_cyc = 1;
break;
case 4:
led->blink_cyc = 2;
break;
case 8:
led->blink_cyc = 3;
break;
default:
ret = -EINVAL;
break;
}
}
if (ret == 0)
led->blink = 1;
else
led->blink = 0;
/* Always update; if we fail, turn off blinking since we expect
* a software fallback. */
schedule_work(&led->work);
spin_unlock_irqrestore(&led->value_lock, flags);
return ret;
}
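/*
 * Worked example (hypothetical request): *delay_on == 250 and
 * *delay_off == 1000 selects blink_time == 1 above, the ratio
 * 1000 / 250 == 4 selects blink_cyc == 2, and ret stays 0, so the
 * request is representable in hardware.
 */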
static const char *led_src_texts[] = {
"otp",
"power",
"charger",
"soft",
};
static ssize_t wm831x_status_src_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct wm831x_status *led = to_wm831x_status(led_cdev);
int i;
ssize_t ret = 0;
mutex_lock(&led->mutex);
for (i = 0; i < ARRAY_SIZE(led_src_texts); i++)
if (i == led->src)
ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]);
else
ret += sprintf(&buf[ret], "%s ", led_src_texts[i]);
mutex_unlock(&led->mutex);
ret += sprintf(&buf[ret], "\n");
return ret;
}
static ssize_t wm831x_status_src_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct wm831x_status *led = to_wm831x_status(led_cdev);
char name[20];
int i;
size_t len;
name[sizeof(name) - 1] = '\0';
strncpy(name, buf, sizeof(name) - 1);
len = strlen(name);
if (len && name[len - 1] == '\n')
name[len - 1] = '\0';
for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) {
if (!strcmp(name, led_src_texts[i])) {
mutex_lock(&led->mutex);
led->src = i;
schedule_work(&led->work);
mutex_unlock(&led->mutex);
}
}
return size;
}
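/*
 * Sysfs usage sketch (the LED name below is hypothetical; the real path
 * depends on the name supplied via platform data):
 *
 * # cat /sys/class/leds/wm831x-status.0/src
 * otp power charger [soft]
 * # echo charger > /sys/class/leds/wm831x-status.0/src
 */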
static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
static int wm831x_status_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *chip_pdata;
struct wm831x_status_pdata pdata;
struct wm831x_status *drvdata;
struct resource *res;
int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
int ret;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No I/O resource\n");
ret = -EINVAL;
goto err;
}
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status),
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, drvdata);
drvdata->wm831x = wm831x;
drvdata->reg = res->start;
if (wm831x->dev->platform_data)
chip_pdata = wm831x->dev->platform_data;
else
chip_pdata = NULL;
memset(&pdata, 0, sizeof(pdata));
if (chip_pdata && chip_pdata->status[id])
memcpy(&pdata, chip_pdata->status[id], sizeof(pdata));
else
pdata.name = dev_name(&pdev->dev);
mutex_init(&drvdata->mutex);
INIT_WORK(&drvdata->work, wm831x_status_work);
spin_lock_init(&drvdata->value_lock);
/* We cache the configuration register and read startup values
* from it. */
drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg);
if (drvdata->reg_val & WM831X_LED_MODE_MASK)
drvdata->brightness = LED_FULL;
else
drvdata->brightness = LED_OFF;
/* Set a default source if configured, otherwise leave the
* current hardware setting.
*/
if (pdata.default_src == WM831X_STATUS_PRESERVE) {
drvdata->src = drvdata->reg_val;
drvdata->src &= WM831X_LED_SRC_MASK;
drvdata->src >>= WM831X_LED_SRC_SHIFT;
} else {
drvdata->src = pdata.default_src - 1;
}
drvdata->cdev.name = pdata.name;
drvdata->cdev.default_trigger = pdata.default_trigger;
drvdata->cdev.brightness_set = wm831x_status_set;
drvdata->cdev.blink_set = wm831x_status_blink_set;
ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
goto err_led;
}
ret = device_create_file(drvdata->cdev.dev, &dev_attr_src);
if (ret != 0)
dev_err(&pdev->dev,
"No source control for LED: %d\n", ret);
return 0;
err_led:
led_classdev_unregister(&drvdata->cdev);
err:
return ret;
}
static int wm831x_status_remove(struct platform_device *pdev)
{
struct wm831x_status *drvdata = platform_get_drvdata(pdev);
device_remove_file(drvdata->cdev.dev, &dev_attr_src);
led_classdev_unregister(&drvdata->cdev);
return 0;
}
static struct platform_driver wm831x_status_driver = {
.driver = {
.name = "wm831x-status",
.owner = THIS_MODULE,
},
.probe = wm831x_status_probe,
.remove = wm831x_status_remove,
};
module_platform_driver(wm831x_status_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x status LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-status");
| gpl-2.0 |
abgoyal/zen_u105_kernel | drivers/net/wireless/rtl818x/rtl8187/rtl8225.c | 8494 | 30314 | /*
* Radio tuning for RTL8225 on RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
* Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
*
* Based on the r8187 driver, which is:
* Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
*
* Magic delays, register offsets, and phy value tables below are
* taken from the original r8187 driver sources. Thanks to Realtek
* for their support!
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/usb.h>
#include <net/mac80211.h>
#include "rtl8187.h"
#include "rtl8225.h"
static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
{
struct rtl8187_priv *priv = dev->priv;
u16 reg80, reg84, reg82;
u32 bangdata;
int i;
bangdata = (data << 4) | (addr & 0xf);
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3;
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7);
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7);
udelay(10);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
udelay(10);
for (i = 15; i >= 0; i--) {
u16 reg = reg80 | (bangdata & (1 << i)) >> i;
if (i & 1)
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
if (!(i & 1))
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
}
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
udelay(10);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
}
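/*
 * Frame layout sketch: the 16-bit word clocked out above (MSB first) is
 *
 * bit 15 ............ bit 4 | bit 3 ...... bit 0
 * 12-bit register data | 4-bit register address
 *
 * e.g. rtl8225_write_bitbang(dev, 0x2, 0x44d) shifts out 0x44d2.
 */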
static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data)
{
struct rtl8187_priv *priv = dev->priv;
u16 reg80, reg82, reg84;
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
reg80 &= ~(0x3 << 2);
reg84 &= ~0xF;
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x0007);
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x0007);
udelay(10);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
udelay(10);
mutex_lock(&priv->io_mutex);
priv->io_dmabuf->bits16 = data;
usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
addr, 0x8225, &priv->io_dmabuf->bits16, sizeof(data),
HZ / 2);
mutex_unlock(&priv->io_mutex);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
udelay(10);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
}
static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
{
struct rtl8187_priv *priv = dev->priv;
if (priv->asic_rev)
rtl8225_write_8051(dev, addr, cpu_to_le16(data));
else
rtl8225_write_bitbang(dev, addr, data);
}
static u16 rtl8225_read(struct ieee80211_hw *dev, u8 addr)
{
struct rtl8187_priv *priv = dev->priv;
u16 reg80, reg82, reg84, out;
int i;
reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
reg80 &= ~0xF;
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F);
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
udelay(4);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
udelay(5);
for (i = 4; i >= 0; i--) {
u16 reg = reg80 | ((addr >> i) & 1);
if (!(i & 1)) {
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
udelay(1);
}
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg | (1 << 1));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg | (1 << 1));
udelay(2);
if (i & 1) {
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
udelay(1);
}
}
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3) | (1 << 1));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3));
udelay(2);
out = 0;
for (i = 11; i >= 0; i--) {
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3));
udelay(1);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3) | (1 << 1));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3) | (1 << 1));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3) | (1 << 1));
udelay(2);
if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1))
out |= 1 << i;
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3));
udelay(2);
}
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
reg80 | (1 << 3) | (1 << 2));
udelay(2);
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82);
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0);
return out;
}
static const u16 rtl8225bcd_rxgain[] = {
0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3,
0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb
};
static const u8 rtl8225_agc[] = {
0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e,
0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96,
0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e,
0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86,
0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e,
0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36,
0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e,
0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26,
0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e,
0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16,
0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e,
0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06,
0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01
};
static const u8 rtl8225_gain[] = {
0x23, 0x88, 0x7c, 0xa5, /* -82dBm */
0x23, 0x88, 0x7c, 0xb5, /* -82dBm */
0x23, 0x88, 0x7c, 0xc5, /* -82dBm */
0x33, 0x80, 0x79, 0xc5, /* -78dBm */
0x43, 0x78, 0x76, 0xc5, /* -74dBm */
0x53, 0x60, 0x73, 0xc5, /* -70dBm */
0x63, 0x58, 0x70, 0xc5, /* -66dBm */
};
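/*
 * Indexing note: rtl8225_gain holds four bytes per sensitivity step, so
 * rtl8225_gain[2 * 4] (as used in rtl8225_rf_init below) is the first
 * byte of the third row, i.e. the 0xc5 variant of the -82dBm setting.
 */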
static const u8 rtl8225_threshold[] = {
0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd
};
static const u8 rtl8225_tx_gain_cck_ofdm[] = {
0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
};
static const u8 rtl8225_tx_power_cck[] = {
0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
};
static const u8 rtl8225_tx_power_cck_ch14[] = {
0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
};
static const u8 rtl8225_tx_power_ofdm[] = {
0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
};
static const u32 rtl8225_chan[] = {
0x085c, 0x08dc, 0x095c, 0x09dc, 0x0a5c, 0x0adc, 0x0b5c,
0x0bdc, 0x0c5c, 0x0cdc, 0x0d5c, 0x0ddc, 0x0e5c, 0x0f72
};
static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
{
struct rtl8187_priv *priv = dev->priv;
u8 cck_power, ofdm_power;
const u8 *tmp;
u32 reg;
int i;
cck_power = priv->channels[channel - 1].hw_value & 0xF;
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
cck_power = min(cck_power, (u8)11);
if (ofdm_power > (u8)15)
ofdm_power = 25;
else
ofdm_power += 10;
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
if (channel == 14)
tmp = &rtl8225_tx_power_cck_ch14[(cck_power % 6) * 8];
else
tmp = &rtl8225_tx_power_cck[(cck_power % 6) * 8];
for (i = 0; i < 8; i++)
rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
msleep(1); // FIXME: optional?
/* anaparam2 on */
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
RTL8187_RTL8225_ANAPARAM2_ON);
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
rtl8225_write_phy_ofdm(dev, 2, 0x42);
rtl8225_write_phy_ofdm(dev, 6, 0x00);
rtl8225_write_phy_ofdm(dev, 8, 0x00);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
rtl8225_tx_gain_cck_ofdm[ofdm_power / 6] >> 1);
tmp = &rtl8225_tx_power_ofdm[ofdm_power % 6];
rtl8225_write_phy_ofdm(dev, 5, *tmp);
rtl8225_write_phy_ofdm(dev, 7, *tmp);
msleep(1);
}
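/*
 * Example decode for the function above, with a hypothetical hw_value
 * of 0x7d: cck_power == 0xd (low nibble) is clamped to 11, and
 * ofdm_power == 0x7 (high nibble) is <= 15, so it becomes 7 + 10 == 17.
 */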
static void rtl8225_rf_init(struct ieee80211_hw *dev)
{
struct rtl8187_priv *priv = dev->priv;
int i;
rtl8225_write(dev, 0x0, 0x067);
rtl8225_write(dev, 0x1, 0xFE0);
rtl8225_write(dev, 0x2, 0x44D);
rtl8225_write(dev, 0x3, 0x441);
rtl8225_write(dev, 0x4, 0x486);
rtl8225_write(dev, 0x5, 0xBC0);
rtl8225_write(dev, 0x6, 0xAE6);
rtl8225_write(dev, 0x7, 0x82A);
rtl8225_write(dev, 0x8, 0x01F);
rtl8225_write(dev, 0x9, 0x334);
rtl8225_write(dev, 0xA, 0xFD4);
rtl8225_write(dev, 0xB, 0x391);
rtl8225_write(dev, 0xC, 0x050);
rtl8225_write(dev, 0xD, 0x6DB);
rtl8225_write(dev, 0xE, 0x029);
rtl8225_write(dev, 0xF, 0x914); msleep(100);
rtl8225_write(dev, 0x2, 0xC4D); msleep(200);
rtl8225_write(dev, 0x2, 0x44D); msleep(200);
if (!(rtl8225_read(dev, 6) & (1 << 7))) {
rtl8225_write(dev, 0x02, 0x0c4d);
msleep(200);
rtl8225_write(dev, 0x02, 0x044d);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7)))
wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
rtl8225_read(dev, 6));
}
rtl8225_write(dev, 0x0, 0x127);
for (i = 0; i < ARRAY_SIZE(rtl8225bcd_rxgain); i++) {
rtl8225_write(dev, 0x1, i + 1);
rtl8225_write(dev, 0x2, rtl8225bcd_rxgain[i]);
}
rtl8225_write(dev, 0x0, 0x027);
rtl8225_write(dev, 0x0, 0x22F);
for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
}
msleep(1);
rtl8225_write_phy_ofdm(dev, 0x00, 0x01);
rtl8225_write_phy_ofdm(dev, 0x01, 0x02);
rtl8225_write_phy_ofdm(dev, 0x02, 0x42);
rtl8225_write_phy_ofdm(dev, 0x03, 0x00);
rtl8225_write_phy_ofdm(dev, 0x04, 0x00);
rtl8225_write_phy_ofdm(dev, 0x05, 0x00);
rtl8225_write_phy_ofdm(dev, 0x06, 0x40);
rtl8225_write_phy_ofdm(dev, 0x07, 0x00);
rtl8225_write_phy_ofdm(dev, 0x08, 0x40);
rtl8225_write_phy_ofdm(dev, 0x09, 0xfe);
rtl8225_write_phy_ofdm(dev, 0x0a, 0x09);
rtl8225_write_phy_ofdm(dev, 0x0b, 0x80);
rtl8225_write_phy_ofdm(dev, 0x0c, 0x01);
rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3);
rtl8225_write_phy_ofdm(dev, 0x0f, 0x38);
rtl8225_write_phy_ofdm(dev, 0x10, 0x84);
rtl8225_write_phy_ofdm(dev, 0x11, 0x06);
rtl8225_write_phy_ofdm(dev, 0x12, 0x20);
rtl8225_write_phy_ofdm(dev, 0x13, 0x20);
rtl8225_write_phy_ofdm(dev, 0x14, 0x00);
rtl8225_write_phy_ofdm(dev, 0x15, 0x40);
rtl8225_write_phy_ofdm(dev, 0x16, 0x00);
rtl8225_write_phy_ofdm(dev, 0x17, 0x40);
rtl8225_write_phy_ofdm(dev, 0x18, 0xef);
rtl8225_write_phy_ofdm(dev, 0x19, 0x19);
rtl8225_write_phy_ofdm(dev, 0x1a, 0x20);
rtl8225_write_phy_ofdm(dev, 0x1b, 0x76);
rtl8225_write_phy_ofdm(dev, 0x1c, 0x04);
rtl8225_write_phy_ofdm(dev, 0x1e, 0x95);
rtl8225_write_phy_ofdm(dev, 0x1f, 0x75);
rtl8225_write_phy_ofdm(dev, 0x20, 0x1f);
rtl8225_write_phy_ofdm(dev, 0x21, 0x27);
rtl8225_write_phy_ofdm(dev, 0x22, 0x16);
rtl8225_write_phy_ofdm(dev, 0x24, 0x46);
rtl8225_write_phy_ofdm(dev, 0x25, 0x20);
rtl8225_write_phy_ofdm(dev, 0x26, 0x90);
rtl8225_write_phy_ofdm(dev, 0x27, 0x88);
rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
rtl8225_write_phy_cck(dev, 0x00, 0x98);
rtl8225_write_phy_cck(dev, 0x03, 0x20);
rtl8225_write_phy_cck(dev, 0x04, 0x7e);
rtl8225_write_phy_cck(dev, 0x05, 0x12);
rtl8225_write_phy_cck(dev, 0x06, 0xfc);
rtl8225_write_phy_cck(dev, 0x07, 0x78);
rtl8225_write_phy_cck(dev, 0x08, 0x2e);
rtl8225_write_phy_cck(dev, 0x10, 0x9b);
rtl8225_write_phy_cck(dev, 0x11, 0x88);
rtl8225_write_phy_cck(dev, 0x12, 0x47);
rtl8225_write_phy_cck(dev, 0x13, 0xd0);
rtl8225_write_phy_cck(dev, 0x19, 0x00);
rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
rtl8225_write_phy_cck(dev, 0x1b, 0x08);
rtl8225_write_phy_cck(dev, 0x40, 0x86);
rtl8225_write_phy_cck(dev, 0x41, 0x8d);
rtl8225_write_phy_cck(dev, 0x42, 0x15);
rtl8225_write_phy_cck(dev, 0x43, 0x18);
rtl8225_write_phy_cck(dev, 0x44, 0x1f);
rtl8225_write_phy_cck(dev, 0x45, 0x1e);
rtl8225_write_phy_cck(dev, 0x46, 0x1a);
rtl8225_write_phy_cck(dev, 0x47, 0x15);
rtl8225_write_phy_cck(dev, 0x48, 0x10);
rtl8225_write_phy_cck(dev, 0x49, 0x0a);
rtl8225_write_phy_cck(dev, 0x4a, 0x05);
rtl8225_write_phy_cck(dev, 0x4b, 0x02);
rtl8225_write_phy_cck(dev, 0x4c, 0x05);
rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D);
rtl8225_rf_set_tx_power(dev, 1);
/* RX antenna default to A */
rtl8225_write_phy_cck(dev, 0x10, 0x9b); /* B: 0xDB */
rtl8225_write_phy_ofdm(dev, 0x26, 0x90); /* B: 0x10 */
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
msleep(1);
rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002);
/* set sensitivity */
rtl8225_write(dev, 0x0c, 0x50);
rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
rtl8225_write_phy_cck(dev, 0x41, rtl8225_threshold[2]);
}
static const u8 rtl8225z2_agc[] = {
0x5e, 0x5e, 0x5e, 0x5e, 0x5d, 0x5b, 0x59, 0x57, 0x55, 0x53, 0x51, 0x4f,
0x4d, 0x4b, 0x49, 0x47, 0x45, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x39, 0x37,
0x35, 0x33, 0x31, 0x2f, 0x2d, 0x2b, 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f,
0x1d, 0x1b, 0x19, 0x17, 0x15, 0x13, 0x11, 0x0f, 0x0d, 0x0b, 0x09, 0x07,
0x05, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19,
0x19, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28,
0x28, 0x29, 0x2a, 0x2a, 0x2a, 0x2b, 0x2b, 0x2b, 0x2c, 0x2c, 0x2c, 0x2d,
0x2d, 0x2d, 0x2d, 0x2e, 0x2e, 0x2e, 0x2e, 0x2f, 0x2f, 0x2f, 0x30, 0x30,
0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31
};
static const u8 rtl8225z2_ofdm[] = {
0x10, 0x0d, 0x01, 0x00, 0x14, 0xfb, 0xfb, 0x60,
0x00, 0x60, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00,
0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xa8, 0x26,
0x32, 0x33, 0x07, 0xa5, 0x6f, 0x55, 0xc8, 0xb3,
0x0a, 0xe1, 0x2C, 0x8a, 0x86, 0x83, 0x34, 0x0f,
0x4f, 0x24, 0x6f, 0xc2, 0x6b, 0x40, 0x80, 0x00,
0xc0, 0xc1, 0x58, 0xf1, 0x00, 0xe4, 0x90, 0x3e,
0x6d, 0x3c, 0xfb, 0x07
};
static const u8 rtl8225z2_tx_power_cck_ch14[] = {
0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00,
0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00,
0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00
};
static const u8 rtl8225z2_tx_power_cck[] = {
0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04,
0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03,
0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03,
0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03
};
static const u8 rtl8225z2_tx_power_ofdm[] = {
0x42, 0x00, 0x40, 0x00, 0x40
};
static const u8 rtl8225z2_tx_gain_cck_ofdm[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
};
static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
{
struct rtl8187_priv *priv = dev->priv;
u8 cck_power, ofdm_power;
const u8 *tmp;
u32 reg;
int i;
cck_power = priv->channels[channel - 1].hw_value & 0xF;
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
cck_power = min(cck_power, (u8)15);
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
if (ofdm_power > (u8)15)
ofdm_power = 25;
else
ofdm_power += 10;
ofdm_power += priv->txpwr_base >> 4;
ofdm_power = min(ofdm_power, (u8)35);
if (channel == 14)
tmp = rtl8225z2_tx_power_cck_ch14;
else
tmp = rtl8225z2_tx_power_cck;
for (i = 0; i < 8; i++)
rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
rtl8225z2_tx_gain_cck_ofdm[cck_power]);
msleep(1);
/* anaparam2 on */
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
RTL8187_RTL8225_ANAPARAM2_ON);
rtl818x_iowrite8(priv, &priv->map->CONFIG3,
reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
rtl8225_write_phy_ofdm(dev, 2, 0x42);
rtl8225_write_phy_ofdm(dev, 5, 0x00);
rtl8225_write_phy_ofdm(dev, 6, 0x40);
rtl8225_write_phy_ofdm(dev, 7, 0x00);
rtl8225_write_phy_ofdm(dev, 8, 0x40);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
rtl8225z2_tx_gain_cck_ofdm[ofdm_power]);
msleep(1);
}
static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
{
struct rtl8187_priv *priv = dev->priv;
u8 cck_power, ofdm_power;
const u8 *tmp;
int i;
cck_power = priv->channels[channel - 1].hw_value & 0xF;
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
if (cck_power > 15)
cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22;
else
cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
if (ofdm_power > 15)
ofdm_power = (priv->hw_rev == RTL8187BvB) ? 17 : 25;
else
ofdm_power += (priv->hw_rev == RTL8187BvB) ? 2 : 10;
ofdm_power += (priv->txpwr_base >> 4) & 0xF;
ofdm_power = min(ofdm_power, (u8)35);
if (channel == 14)
tmp = rtl8225z2_tx_power_cck_ch14;
else
tmp = rtl8225z2_tx_power_cck;
if (priv->hw_rev == RTL8187BvB) {
if (cck_power <= 6)
; /* do nothing */
else if (cck_power <= 11)
tmp += 8;
else
tmp += 16;
} else {
if (cck_power <= 5)
; /* do nothing */
else if (cck_power <= 11)
tmp += 8;
else if (cck_power <= 17)
tmp += 16;
else
tmp += 24;
}
for (i = 0; i < 8; i++)
rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
rtl8225z2_tx_gain_cck_ofdm[cck_power] << 1);
msleep(1);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
rtl8225z2_tx_gain_cck_ofdm[ofdm_power] << 1);
if (priv->hw_rev == RTL8187BvB) {
if (ofdm_power <= 11) {
rtl8225_write_phy_ofdm(dev, 0x87, 0x60);
rtl8225_write_phy_ofdm(dev, 0x89, 0x60);
} else {
rtl8225_write_phy_ofdm(dev, 0x87, 0x5c);
rtl8225_write_phy_ofdm(dev, 0x89, 0x5c);
}
} else {
if (ofdm_power <= 11) {
rtl8225_write_phy_ofdm(dev, 0x87, 0x5c);
rtl8225_write_phy_ofdm(dev, 0x89, 0x5c);
} else if (ofdm_power <= 17) {
rtl8225_write_phy_ofdm(dev, 0x87, 0x54);
rtl8225_write_phy_ofdm(dev, 0x89, 0x54);
} else {
rtl8225_write_phy_ofdm(dev, 0x87, 0x50);
rtl8225_write_phy_ofdm(dev, 0x89, 0x50);
}
}
msleep(1);
}
static const u16 rtl8225z2_rxgain[] = {
0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb
};
static const u8 rtl8225z2_gain_bg[] = {
0x23, 0x15, 0xa5, /* -82-1dBm */
0x23, 0x15, 0xb5, /* -82-2dBm */
0x23, 0x15, 0xc5, /* -82-3dBm */
0x33, 0x15, 0xc5, /* -78dBm */
0x43, 0x15, 0xc5, /* -74dBm */
0x53, 0x15, 0xc5, /* -70dBm */
0x63, 0x15, 0xc5 /* -66dBm */
};
static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
{
struct rtl8187_priv *priv = dev->priv;
int i;
rtl8225_write(dev, 0x0, 0x2BF);
rtl8225_write(dev, 0x1, 0xEE0);
rtl8225_write(dev, 0x2, 0x44D);
rtl8225_write(dev, 0x3, 0x441);
rtl8225_write(dev, 0x4, 0x8C3);
rtl8225_write(dev, 0x5, 0xC72);
rtl8225_write(dev, 0x6, 0x0E6);
rtl8225_write(dev, 0x7, 0x82A);
rtl8225_write(dev, 0x8, 0x03F);
rtl8225_write(dev, 0x9, 0x335);
rtl8225_write(dev, 0xa, 0x9D4);
rtl8225_write(dev, 0xb, 0x7BB);
rtl8225_write(dev, 0xc, 0x850);
rtl8225_write(dev, 0xd, 0xCDF);
rtl8225_write(dev, 0xe, 0x02B);
rtl8225_write(dev, 0xf, 0x114);
msleep(100);
rtl8225_write(dev, 0x0, 0x1B7);
for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
rtl8225_write(dev, 0x1, i + 1);
rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]);
}
rtl8225_write(dev, 0x3, 0x080);
rtl8225_write(dev, 0x5, 0x004);
rtl8225_write(dev, 0x0, 0x0B7);
rtl8225_write(dev, 0x2, 0xc4D);
msleep(200);
rtl8225_write(dev, 0x2, 0x44D);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7))) {
rtl8225_write(dev, 0x02, 0x0C4D);
msleep(200);
rtl8225_write(dev, 0x02, 0x044D);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7)))
wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
rtl8225_read(dev, 6));
}
msleep(200);
rtl8225_write(dev, 0x0, 0x2BF);
for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
}
msleep(1);
rtl8225_write_phy_ofdm(dev, 0x00, 0x01);
rtl8225_write_phy_ofdm(dev, 0x01, 0x02);
rtl8225_write_phy_ofdm(dev, 0x02, 0x42);
rtl8225_write_phy_ofdm(dev, 0x03, 0x00);
rtl8225_write_phy_ofdm(dev, 0x04, 0x00);
rtl8225_write_phy_ofdm(dev, 0x05, 0x00);
rtl8225_write_phy_ofdm(dev, 0x06, 0x40);
rtl8225_write_phy_ofdm(dev, 0x07, 0x00);
rtl8225_write_phy_ofdm(dev, 0x08, 0x40);
rtl8225_write_phy_ofdm(dev, 0x09, 0xfe);
rtl8225_write_phy_ofdm(dev, 0x0a, 0x08);
rtl8225_write_phy_ofdm(dev, 0x0b, 0x80);
rtl8225_write_phy_ofdm(dev, 0x0c, 0x01);
rtl8225_write_phy_ofdm(dev, 0x0d, 0x43);
rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3);
rtl8225_write_phy_ofdm(dev, 0x0f, 0x38);
rtl8225_write_phy_ofdm(dev, 0x10, 0x84);
rtl8225_write_phy_ofdm(dev, 0x11, 0x07);
rtl8225_write_phy_ofdm(dev, 0x12, 0x20);
rtl8225_write_phy_ofdm(dev, 0x13, 0x20);
rtl8225_write_phy_ofdm(dev, 0x14, 0x00);
rtl8225_write_phy_ofdm(dev, 0x15, 0x40);
rtl8225_write_phy_ofdm(dev, 0x16, 0x00);
rtl8225_write_phy_ofdm(dev, 0x17, 0x40);
rtl8225_write_phy_ofdm(dev, 0x18, 0xef);
rtl8225_write_phy_ofdm(dev, 0x19, 0x19);
rtl8225_write_phy_ofdm(dev, 0x1a, 0x20);
rtl8225_write_phy_ofdm(dev, 0x1b, 0x15);
rtl8225_write_phy_ofdm(dev, 0x1c, 0x04);
rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5);
rtl8225_write_phy_ofdm(dev, 0x1e, 0x95);
rtl8225_write_phy_ofdm(dev, 0x1f, 0x75);
rtl8225_write_phy_ofdm(dev, 0x20, 0x1f);
rtl8225_write_phy_ofdm(dev, 0x21, 0x17);
rtl8225_write_phy_ofdm(dev, 0x22, 0x16);
rtl8225_write_phy_ofdm(dev, 0x23, 0x80);
rtl8225_write_phy_ofdm(dev, 0x24, 0x46);
rtl8225_write_phy_ofdm(dev, 0x25, 0x00);
rtl8225_write_phy_ofdm(dev, 0x26, 0x90);
rtl8225_write_phy_ofdm(dev, 0x27, 0x88);
rtl8225_write_phy_ofdm(dev, 0x0b, rtl8225z2_gain_bg[4 * 3]);
rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225z2_gain_bg[4 * 3 + 1]);
rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225z2_gain_bg[4 * 3 + 2]);
rtl8225_write_phy_ofdm(dev, 0x21, 0x37);
rtl8225_write_phy_cck(dev, 0x00, 0x98);
rtl8225_write_phy_cck(dev, 0x03, 0x20);
rtl8225_write_phy_cck(dev, 0x04, 0x7e);
rtl8225_write_phy_cck(dev, 0x05, 0x12);
rtl8225_write_phy_cck(dev, 0x06, 0xfc);
rtl8225_write_phy_cck(dev, 0x07, 0x78);
rtl8225_write_phy_cck(dev, 0x08, 0x2e);
rtl8225_write_phy_cck(dev, 0x10, 0x9b);
rtl8225_write_phy_cck(dev, 0x11, 0x88);
rtl8225_write_phy_cck(dev, 0x12, 0x47);
rtl8225_write_phy_cck(dev, 0x13, 0xd0);
rtl8225_write_phy_cck(dev, 0x19, 0x00);
rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
rtl8225_write_phy_cck(dev, 0x1b, 0x08);
rtl8225_write_phy_cck(dev, 0x40, 0x86);
rtl8225_write_phy_cck(dev, 0x41, 0x8d);
rtl8225_write_phy_cck(dev, 0x42, 0x15);
rtl8225_write_phy_cck(dev, 0x43, 0x18);
rtl8225_write_phy_cck(dev, 0x44, 0x36);
rtl8225_write_phy_cck(dev, 0x45, 0x35);
rtl8225_write_phy_cck(dev, 0x46, 0x2e);
rtl8225_write_phy_cck(dev, 0x47, 0x25);
rtl8225_write_phy_cck(dev, 0x48, 0x1c);
rtl8225_write_phy_cck(dev, 0x49, 0x12);
rtl8225_write_phy_cck(dev, 0x4a, 0x09);
rtl8225_write_phy_cck(dev, 0x4b, 0x04);
rtl8225_write_phy_cck(dev, 0x4c, 0x05);
rtl818x_iowrite8(priv, (u8 *)0xFF5B, 0x0D); msleep(1);
rtl8225z2_rf_set_tx_power(dev, 1);
/* RX antenna default to A */
rtl8225_write_phy_cck(dev, 0x10, 0x9b); /* B: 0xDB */
rtl8225_write_phy_ofdm(dev, 0x26, 0x90); /* B: 0x10 */
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
msleep(1);
rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002);
}
static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
{
struct rtl8187_priv *priv = dev->priv;
int i;
rtl8225_write(dev, 0x0, 0x0B7);
rtl8225_write(dev, 0x1, 0xEE0);
rtl8225_write(dev, 0x2, 0x44D);
rtl8225_write(dev, 0x3, 0x441);
rtl8225_write(dev, 0x4, 0x8C3);
rtl8225_write(dev, 0x5, 0xC72);
rtl8225_write(dev, 0x6, 0x0E6);
rtl8225_write(dev, 0x7, 0x82A);
rtl8225_write(dev, 0x8, 0x03F);
rtl8225_write(dev, 0x9, 0x335);
rtl8225_write(dev, 0xa, 0x9D4);
rtl8225_write(dev, 0xb, 0x7BB);
rtl8225_write(dev, 0xc, 0x850);
rtl8225_write(dev, 0xd, 0xCDF);
rtl8225_write(dev, 0xe, 0x02B);
rtl8225_write(dev, 0xf, 0x114);
rtl8225_write(dev, 0x0, 0x1B7);
for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
rtl8225_write(dev, 0x1, i + 1);
rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]);
}
rtl8225_write(dev, 0x3, 0x080);
rtl8225_write(dev, 0x5, 0x004);
rtl8225_write(dev, 0x0, 0x0B7);
rtl8225_write(dev, 0x2, 0xC4D);
rtl8225_write(dev, 0x2, 0x44D);
rtl8225_write(dev, 0x0, 0x2BF);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x03);
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x07);
rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
rtl8225_write_phy_ofdm(dev, 0x80, 0x12);
for (i = 0; i < ARRAY_SIZE(rtl8225z2_agc); i++) {
rtl8225_write_phy_ofdm(dev, 0xF, rtl8225z2_agc[i]);
rtl8225_write_phy_ofdm(dev, 0xE, 0x80 + i);
rtl8225_write_phy_ofdm(dev, 0xE, 0);
}
rtl8225_write_phy_ofdm(dev, 0x80, 0x10);
for (i = 0; i < ARRAY_SIZE(rtl8225z2_ofdm); i++)
rtl8225_write_phy_ofdm(dev, i, rtl8225z2_ofdm[i]);
rtl8225_write_phy_ofdm(dev, 0x97, 0x46);
rtl8225_write_phy_ofdm(dev, 0xa4, 0xb6);
rtl8225_write_phy_ofdm(dev, 0x85, 0xfc);
rtl8225_write_phy_cck(dev, 0xc1, 0x88);
}
static void rtl8225_rf_stop(struct ieee80211_hw *dev)
{
rtl8225_write(dev, 0x4, 0x1f);
}
static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
struct ieee80211_conf *conf)
{
struct rtl8187_priv *priv = dev->priv;
int chan = ieee80211_frequency_to_channel(conf->channel->center_freq);
if (priv->rf->init == rtl8225_rf_init)
rtl8225_rf_set_tx_power(dev, chan);
else if (priv->rf->init == rtl8225z2_rf_init)
rtl8225z2_rf_set_tx_power(dev, chan);
else
rtl8225z2_b_rf_set_tx_power(dev, chan);
rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]);
msleep(10);
}
static const struct rtl818x_rf_ops rtl8225_ops = {
.name = "rtl8225",
.init = rtl8225_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel
};
static const struct rtl818x_rf_ops rtl8225z2_ops = {
.name = "rtl8225z2",
.init = rtl8225z2_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel
};
static const struct rtl818x_rf_ops rtl8225z2_b_ops = {
.name = "rtl8225z2",
.init = rtl8225z2_b_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel
};
const struct rtl818x_rf_ops * rtl8187_detect_rf(struct ieee80211_hw *dev)
{
u16 reg8, reg9;
struct rtl8187_priv *priv = dev->priv;
if (!priv->is_rtl8187b) {
rtl8225_write(dev, 0, 0x1B7);
reg8 = rtl8225_read(dev, 8);
reg9 = rtl8225_read(dev, 9);
rtl8225_write(dev, 0, 0x0B7);
if (reg8 != 0x588 || reg9 != 0x700)
return &rtl8225_ops;
return &rtl8225z2_ops;
} else
return &rtl8225z2_b_ops;
}
| gpl-2.0 |
zlatinski/p-android-omap-3.4-new-ion-topic-sync-dma-buf-fence2 | drivers/gpu/drm/drm_dp_i2c_helper.c | 9262 | 5420 | /*
* Copyright © 2009 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include "drm_dp_helper.h"
#include "drmP.h"
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
}
/*
* I2C over AUX CH
*/
/*
* Send the address. If the I2C link is running, this 'restarts'
* the connection with the new address; this is used for doing
* a write followed by a read (as needed for DDC).
*/
static int
i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_START;
int ret;
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
algo_data->address = address;
algo_data->running = true;
ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
return ret;
}
/*
* Stop the I2C transaction. This closes out the link, sending
* a bare address packet with the MOT bit turned off
*/
static void
i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_STOP;
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
if (algo_data->running) {
(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
algo_data->running = false;
}
}
/*
* Write a single byte to the current I2C address; the
* I2C link must be running or this returns -EIO.
*/
static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
if (!algo_data->running)
return -EIO;
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
return ret;
}
/*
* Read a single byte from the current I2C address; the
* I2C link must be running or this returns -EIO.
*/
static int
i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
if (!algo_data->running)
return -EIO;
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
return ret;
}
static int
i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
{
int ret = 0;
bool reading = false;
int m;
int b;
for (m = 0; m < num; m++) {
u16 len = msgs[m].len;
u8 *buf = msgs[m].buf;
reading = (msgs[m].flags & I2C_M_RD) != 0;
ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
if (ret < 0)
break;
if (reading) {
for (b = 0; b < len; b++) {
ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
if (ret < 0)
break;
}
} else {
for (b = 0; b < len; b++) {
ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
if (ret < 0)
break;
}
}
if (ret < 0)
break;
}
if (ret >= 0)
ret = num;
i2c_algo_dp_aux_stop(adapter, reading);
DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return ret;
}
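/*
 * Usage sketch (hypothetical caller, not part of this helper): a
 * DDC-style write-then-read maps onto two i2c_msg entries; the address
 * phase between them becomes an I2C "restart" on the AUX channel.
 * 0x50 is the conventional EDID slave address.
 */
static int __maybe_unused example_ddc_read(struct i2c_adapter *adapter,
u8 offset, u8 *buf, u16 len)
{
struct i2c_msg msgs[] = {
{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
};
return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
}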
static u32
i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
static const struct i2c_algorithm i2c_dp_aux_algo = {
.master_xfer = i2c_algo_dp_aux_xfer,
.functionality = i2c_algo_dp_aux_functionality,
};
static void
i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
}
static int
i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
{
adapter->algo = &i2c_dp_aux_algo;
adapter->retries = 3;
i2c_dp_aux_reset_bus(adapter);
return 0;
}
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
error = i2c_add_adapter(adapter);
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
| gpl-2.0 |
PrasannaC/Galaxy_Fit_s5670-msm7x27_kernel | arch/arm/mach-u300/regulator.c | 11310 | 2326 | /*
* arch/arm/mach-u300/regulator.c
*
* Copyright (C) 2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* Handle board-bound regulators and board power not related
* to any devices.
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/device.h>
#include <linux/signal.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
/* Those are just for writing in syscon */
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/syscon.h>
/*
* Regulators that power the board and chip and which are
* not coupled to specific drivers are hogged in these
* instances.
*/
static struct regulator *main_power_15;
/*
* This function is used from pm.h to shut down the system by
* resetting all regulators in turn and then disabling regulator
* LDO D (main power).
*/
void u300_pm_poweroff(void)
{
sigset_t old, all;
sigfillset(&all);
if (!sigprocmask(SIG_BLOCK, &all, &old)) {
/* Disable LDO D to shut down the system */
if (main_power_15)
regulator_disable(main_power_15);
else
pr_err("regulator not available to shut down system\n");
(void) sigprocmask(SIG_SETMASK, &old, NULL);
}
return;
}
/*
* Hog the regulators needed to power up the board.
*/
static int __init u300_init_boardpower(void)
{
int err;
u32 val;
pr_info("U300: setting up board power\n");
main_power_15 = regulator_get(NULL, "vana15");
if (IS_ERR(main_power_15)) {
pr_err("could not get vana15");
return PTR_ERR(main_power_15);
}
err = regulator_enable(main_power_15);
if (err) {
pr_err("could not enable vana15\n");
return err;
}
/*
* On U300 a special system controller register pulls up the DC
* until the vana15 (LDO D) regulator comes up. At this point, all
* regulators are set and we do not need power control via
* DC ON anymore. This function will likely be moved whenever
* the rest of the U300 power management is implemented.
*/
pr_info("U300: disable system controller pull-up\n");
val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMCR);
val &= ~U300_SYSCON_PMCR_DCON_ENABLE;
writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMCR);
/* Register globally exported PM poweroff hook */
pm_power_off = u300_pm_poweroff;
return 0;
}
/*
* So at module init time we hog the regulator!
*/
module_init(u300_init_boardpower);
| gpl-2.0 |
e-mailky/kernel-comment | drivers/net/fddi/skfp/cfm.c | 12846 | 16476 | /******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT CFM
Configuration Management
DAS with single MAC
*/
/*
* Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
*
* The following external HW dependent functions are referenced :
* config_mux()
*
* The following HW dependent events are required :
* NONE
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#define KERNEL
#include "h/smtstate.h"
#ifndef lint
static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ;
#endif
/*
* FSM Macros
*/
#define AFLAG 0x10
#define GO_STATE(x) (smc->mib.fddiSMTCF_State = (x)|AFLAG)
#define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
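/*
 * Illustration of the AFLAG convention (a summary that follows from the
 * macros above, not part of the original source): the low bits hold the
 * state number and AFLAG marks "entry actions pending". A transition
 * therefore runs the ACTIONS(x) case first, which ends with
 * ACTIONS_DONE(), and only then the steady-state case for x:
 *
 *	GO_STATE(SC0_ISOLATED) ;	state = SC0_ISOLATED | AFLAG
 *	cfm_fsm() matches case ACTIONS(SC0_ISOLATED)
 *	ACTIONS_DONE() ;		state = SC0_ISOLATED
 *	cfm_fsm() now matches case SC0_ISOLATED
 */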
#ifdef DEBUG
/*
* symbolic state names
*/
static const char * const cfm_states[] = {
"SC0_ISOLATED","CF1","CF2","CF3","CF4",
"SC1_WRAP_A","SC2_WRAP_B","SC5_TRHU_B","SC7_WRAP_S",
"SC9_C_WRAP_A","SC10_C_WRAP_B","SC11_C_WRAP_S","SC4_THRU_A"
} ;
/*
* symbolic event names
*/
static const char * const cfm_events[] = {
"NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B"
} ;
#endif
/*
* map from state to downstream port type
*/
static const unsigned char cf_to_ptype[] = {
TNONE,TNONE,TNONE,TNONE,TNONE,
TNONE,TB,TB,TS,
TA,TB,TS,TB
} ;
/*
* CEM port states
*/
#define CEM_PST_DOWN 0
#define CEM_PST_UP 1
#define CEM_PST_HOLD 2
/* define portstate array only for A and B port */
/* Do this within the smc structure (for use with multiple cards) */
/*
* all Globals are defined in smc.h
* struct s_cfm
*/
/*
* function declarations
*/
static void cfm_fsm(struct s_smc *smc, int cmd);
/*
init CFM state machine
clear all CFM vars and flags
*/
void cfm_init(struct s_smc *smc)
{
smc->mib.fddiSMTCF_State = ACTIONS(SC0_ISOLATED) ;
smc->r.rm_join = 0 ;
smc->r.rm_loop = 0 ;
smc->y[PA].scrub = 0 ;
smc->y[PB].scrub = 0 ;
smc->y[PA].cem_pst = CEM_PST_DOWN ;
smc->y[PB].cem_pst = CEM_PST_DOWN ;
}
/* Some terms conditions used by the selection criteria */
#define THRU_ENABLED(smc) (smc->y[PA].pc_mode != PM_TREE && \
smc->y[PB].pc_mode != PM_TREE)
/* Selection criteria for the ports */
static void selection_criteria (struct s_smc *smc, struct s_phy *phy)
{
switch (phy->mib->fddiPORTMy_Type) {
case TA:
if ( !THRU_ENABLED(smc) && smc->y[PB].cf_join ) {
phy->wc_flag = TRUE ;
} else {
phy->wc_flag = FALSE ;
}
break;
case TB:
/* take precedence over PA */
phy->wc_flag = FALSE ;
break;
case TS:
phy->wc_flag = FALSE ;
break;
case TM:
phy->wc_flag = FALSE ;
break;
}
}
void all_selection_criteria(struct s_smc *smc)
{
struct s_phy *phy ;
int p ;
for ( p = 0,phy = smc->y ; p < NUMPHYS; p++, phy++ ) {
/* Do the selection criteria */
selection_criteria (smc,phy);
}
}
static void cem_priv_state(struct s_smc *smc, int event)
/* State machine for private PORT states: used to optimize dual homing */
{
int np; /* Number of the port */
int i;
/* Do this only in a DAS */
if (smc->s.sas != SMT_DAS )
return ;
np = event - CF_JOIN;
if (np != PA && np != PB) {
return ;
}
/* Change the port state according to the event (portnumber) */
if (smc->y[np].cf_join) {
smc->y[np].cem_pst = CEM_PST_UP ;
} else if (!smc->y[np].wc_flag) {
/* set the port to done only if it is not withheld */
smc->y[np].cem_pst = CEM_PST_DOWN ;
}
/* Don't set a port that is on hold to down */
/* Check all ports for restart conditions */
for (i = 0 ; i < 2 ; i ++ ) {
/* Check whether the port is on hold and no withhold is pending */
if ( smc->y[i].cem_pst == CEM_PST_HOLD && !smc->y[i].wc_flag ) {
smc->y[i].cem_pst = CEM_PST_DOWN;
queue_event(smc,(int)(EVENT_PCM+i),PC_START) ;
}
if ( smc->y[i].cem_pst == CEM_PST_UP && smc->y[i].wc_flag ) {
smc->y[i].cem_pst = CEM_PST_HOLD;
queue_event(smc,(int)(EVENT_PCM+i),PC_START) ;
}
if ( smc->y[i].cem_pst == CEM_PST_DOWN && smc->y[i].wc_flag ) {
/*
* The port must be restarted when the wc_flag
* will be reset. So set the port on hold.
*/
smc->y[i].cem_pst = CEM_PST_HOLD;
}
}
return ;
}
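/*
 * Summary of the private port state flow implemented above
 * (an illustrative restatement derived from the code):
 *
 *	DOWN --cf_join---------------> UP
 *	UP   --wc_flag set-----------> HOLD (port restarted)
 *	HOLD --wc_flag cleared-------> DOWN (port restarted)
 *	DOWN --wc_flag set-----------> HOLD (restart deferred)
 */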
/*
CFM state machine
called by dispatcher
do
display state change
process event
until SM is stable
*/
void cfm(struct s_smc *smc, int event)
{
int state ; /* remember last state */
int cond ;
int oldstate ;
/* We will do the following: */
/* - compute the variable WC_Flag for every port (This is where */
/* we can extend the requested path checking !!) */
/* - do the old (SMT 6.2 like) state machine */
/* - do the resulting station states */
all_selection_criteria (smc);
/* We will check now whether a state transition is allowed or not */
/* - change the portstates */
cem_priv_state (smc, event);
oldstate = smc->mib.fddiSMTCF_State ;
do {
DB_CFM("CFM : state %s%s",
(smc->mib.fddiSMTCF_State & AFLAG) ? "ACTIONS " : "",
cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG]) ;
DB_CFM(" event %s\n",cfm_events[event],0) ;
state = smc->mib.fddiSMTCF_State ;
cfm_fsm(smc,event) ;
event = 0 ;
} while (state != smc->mib.fddiSMTCF_State) ;
#ifndef SLIM_SMT
/*
* check peer wrap condition
*/
cond = FALSE ;
if ( (smc->mib.fddiSMTCF_State == SC9_C_WRAP_A &&
smc->y[PA].pc_mode == PM_PEER) ||
(smc->mib.fddiSMTCF_State == SC10_C_WRAP_B &&
smc->y[PB].pc_mode == PM_PEER) ||
(smc->mib.fddiSMTCF_State == SC11_C_WRAP_S &&
smc->y[PS].pc_mode == PM_PEER &&
smc->y[PS].mib->fddiPORTNeighborType != TS ) ) {
cond = TRUE ;
}
if (cond != smc->mib.fddiSMTPeerWrapFlag)
smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
#if 0
/*
* Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired
* to the primary path.
*/
/*
* path change
*/
if (smc->mib.fddiSMTCF_State != oldstate) {
smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
}
#endif
#endif /* no SLIM_SMT */
/*
* set MAC port type
*/
smc->mib.m[MAC0].fddiMACDownstreamPORTType =
cf_to_ptype[smc->mib.fddiSMTCF_State] ;
cfm_state_change(smc,(int)smc->mib.fddiSMTCF_State) ;
}
/*
process CFM event
*/
/*ARGSUSED1*/
static void cfm_fsm(struct s_smc *smc, int cmd)
{
switch(smc->mib.fddiSMTCF_State) {
case ACTIONS(SC0_ISOLATED) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ;
config_mux(smc,MUX_ISOLATE) ; /* configure PHY Mux */
smc->r.rm_loop = FALSE ;
smc->r.rm_join = FALSE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
/* Don't do the WC-Flag changing here */
ACTIONS_DONE() ;
DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
break;
case SC0_ISOLATED :
/*SC07*/
/*SAS port can be PA or PB ! */
if (smc->s.sas && (smc->y[PA].cf_join || smc->y[PA].cf_loop ||
smc->y[PB].cf_join || smc->y[PB].cf_loop)) {
GO_STATE(SC11_C_WRAP_S) ;
break ;
}
/*SC01*/
if ((smc->y[PA].cem_pst == CEM_PST_UP && smc->y[PA].cf_join &&
!smc->y[PA].wc_flag) || smc->y[PA].cf_loop) {
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC02*/
if ((smc->y[PB].cem_pst == CEM_PST_UP && smc->y[PB].cf_join &&
!smc->y[PB].wc_flag) || smc->y[PB].cf_loop) {
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
break ;
case ACTIONS(SC9_C_WRAP_A) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
config_mux(smc,MUX_WRAPA) ; /* configure PHY mux */
if (smc->y[PA].cf_loop) {
smc->r.rm_join = FALSE ;
smc->r.rm_loop = TRUE ;
queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
}
if (smc->y[PA].cf_join) {
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
break ;
case SC9_C_WRAP_A :
/*SC10*/
if ( (smc->y[PA].wc_flag || !smc->y[PA].cf_join) &&
!smc->y[PA].cf_loop ) {
GO_STATE(SC0_ISOLATED) ;
break ;
}
/*SC12*/
else if ( (smc->y[PB].cf_loop && smc->y[PA].cf_join &&
smc->y[PA].cem_pst == CEM_PST_UP) ||
((smc->y[PB].cf_loop ||
(smc->y[PB].cf_join &&
smc->y[PB].cem_pst == CEM_PST_UP)) &&
(smc->y[PA].pc_mode == PM_TREE ||
smc->y[PB].pc_mode == PM_TREE))) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
/*SC14*/
else if (!smc->s.attach_s &&
smc->y[PA].cf_join &&
smc->y[PA].cem_pst == CEM_PST_UP &&
smc->y[PA].pc_mode == PM_PEER && smc->y[PB].cf_join &&
smc->y[PB].cem_pst == CEM_PST_UP &&
smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC4_THRU_A) ;
break ;
}
/*SC15*/
else if ( smc->s.attach_s &&
smc->y[PA].cf_join &&
smc->y[PA].cem_pst == CEM_PST_UP &&
smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join &&
smc->y[PB].cem_pst == CEM_PST_UP &&
smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC5_THRU_B) ;
break ;
}
break ;
case ACTIONS(SC10_C_WRAP_B) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
config_mux(smc,MUX_WRAPB) ; /* configure PHY mux */
if (smc->y[PB].cf_loop) {
smc->r.rm_join = FALSE ;
smc->r.rm_loop = TRUE ;
queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
}
if (smc->y[PB].cf_join) {
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
break ;
case SC10_C_WRAP_B :
/*SC20*/
if ( !smc->y[PB].cf_join && !smc->y[PB].cf_loop ) {
GO_STATE(SC0_ISOLATED) ;
break ;
}
/*SC21*/
else if ( smc->y[PA].cf_loop && smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC24*/
else if (!smc->s.attach_s &&
smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC4_THRU_A) ;
break ;
}
/*SC25*/
else if ( smc->s.attach_s &&
smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC5_THRU_B) ;
break ;
}
break ;
case ACTIONS(SC4_THRU_A) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
config_mux(smc,MUX_THRUA) ; /* configure PHY mux */
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
ACTIONS_DONE() ;
DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
break ;
case SC4_THRU_A :
/*SC41*/
if (smc->y[PB].wc_flag || !smc->y[PB].cf_join) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC42*/
else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
/*SC45*/
else if (smc->s.attach_s) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC5_THRU_B) ;
break ;
}
break ;
case ACTIONS(SC5_THRU_B) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
config_mux(smc,MUX_THRUB) ; /* configure PHY mux */
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
ACTIONS_DONE() ;
DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
break ;
case SC5_THRU_B :
/*SC51*/
if (!smc->y[PB].cf_join || smc->y[PB].wc_flag) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC52*/
else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
/*SC54*/
else if (!smc->s.attach_s) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC4_THRU_A) ;
break ;
}
break ;
case ACTIONS(SC11_C_WRAP_S) :
smc->mib.p[PS].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
smc->mib.p[PS].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
config_mux(smc,MUX_WRAPS) ; /* configure PHY mux */
if (smc->y[PA].cf_loop || smc->y[PB].cf_loop) {
smc->r.rm_join = FALSE ;
smc->r.rm_loop = TRUE ;
queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
}
if (smc->y[PA].cf_join || smc->y[PB].cf_join) {
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
break ;
case SC11_C_WRAP_S :
/*SC70*/
if ( !smc->y[PA].cf_join && !smc->y[PA].cf_loop &&
!smc->y[PB].cf_join && !smc->y[PB].cf_loop) {
GO_STATE(SC0_ISOLATED) ;
break ;
}
break ;
default:
SMT_PANIC(smc,SMT_E0106, SMT_E0106_MSG) ;
break;
}
}
/*
* get MAC's input Port
* return :
* PA or PB
*/
int cfm_get_mac_input(struct s_smc *smc)
{
return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA;
}
/*
* get MAC's output Port
* return :
* PA or PB
*/
int cfm_get_mac_output(struct s_smc *smc)
{
return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA;
}
static char path_iso[] = {
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_ISO,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_ISO,
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_ISO
} ;
static char path_wrap_a[] = {
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_ISO
} ;
static char path_wrap_b[] = {
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_ISO
} ;
static char path_thru[] = {
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_PRIM
} ;
static char path_wrap_s[] = {
0,0, 0,RES_PORT, 0,PS + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
} ;
static char path_iso_s[] = {
0,0, 0,RES_PORT, 0,PS + INDEX_PORT, 0,PATH_ISO,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_ISO,
} ;
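/*
 * Note on the descriptors above (a hedged reading of the initializers):
 * each resource appears to be encoded as four 16-bit big-endian words --
 * a zero pad, the resource type (RES_PORT or RES_MAC), the resource
 * index, and the requested path. path_wrap_a, for example, places port
 * A and the MAC on the primary path and isolates port B.
 */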
int cem_build_path(struct s_smc *smc, char *to, int path_index)
{
char *path ;
int len ;
switch (smc->mib.fddiSMTCF_State) {
default :
case SC0_ISOLATED :
path = smc->s.sas ? path_iso_s : path_iso ;
len = smc->s.sas ? sizeof(path_iso_s) : sizeof(path_iso) ;
break ;
case SC9_C_WRAP_A :
path = path_wrap_a ;
len = sizeof(path_wrap_a) ;
break ;
case SC10_C_WRAP_B :
path = path_wrap_b ;
len = sizeof(path_wrap_b) ;
break ;
case SC4_THRU_A :
path = path_thru ;
len = sizeof(path_thru) ;
break ;
case SC11_C_WRAP_S :
path = path_wrap_s ;
len = sizeof(path_wrap_s) ;
break ;
}
memcpy(to,path,len) ;
LINT_USE(path_index);
return len;
}
| gpl-2.0 |
dhkim1027/linux | sound/core/oss/linear.c | 12846 | 5791 | /*
* Linear conversion Plug-In
* Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>,
* Abramo Bagnara <abramo@alsa-project.org>
*
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "pcm_plugin.h"
/*
* Basic linear conversion plugin
*/
struct linear_priv {
int cvt_endian; /* need endian conversion? */
unsigned int src_ofs; /* byte offset in source format */
unsigned int dst_ofs; /* byte offset in destination format */
unsigned int copy_ofs; /* byte offset in temporary u32 data */
unsigned int dst_bytes; /* byte size of destination format */
unsigned int copy_bytes; /* bytes to copy per conversion */
unsigned int flip; /* MSB flip for signedness, done after endian conv */
};
static inline void do_convert(struct linear_priv *data,
unsigned char *dst, unsigned char *src)
{
unsigned int tmp = 0;
unsigned char *p = (unsigned char *)&tmp;
memcpy(p + data->copy_ofs, src + data->src_ofs, data->copy_bytes);
if (data->cvt_endian)
tmp = swab32(tmp);
tmp ^= data->flip;
memcpy(dst, p + data->dst_ofs, data->dst_bytes);
}
static void convert(struct snd_pcm_plugin *plugin,
const struct snd_pcm_plugin_channel *src_channels,
struct snd_pcm_plugin_channel *dst_channels,
snd_pcm_uframes_t frames)
{
struct linear_priv *data = (struct linear_priv *)plugin->extra_data;
int channel;
int nchannels = plugin->src_format.channels;
for (channel = 0; channel < nchannels; ++channel) {
char *src;
char *dst;
int src_step, dst_step;
snd_pcm_uframes_t frames1;
if (!src_channels[channel].enabled) {
if (dst_channels[channel].wanted)
snd_pcm_area_silence(&dst_channels[channel].area, 0, frames, plugin->dst_format.format);
dst_channels[channel].enabled = 0;
continue;
}
dst_channels[channel].enabled = 1;
src = src_channels[channel].area.addr + src_channels[channel].area.first / 8;
dst = dst_channels[channel].area.addr + dst_channels[channel].area.first / 8;
src_step = src_channels[channel].area.step / 8;
dst_step = dst_channels[channel].area.step / 8;
frames1 = frames;
while (frames1-- > 0) {
do_convert(data, dst, src);
src += src_step;
dst += dst_step;
}
}
}
static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
const struct snd_pcm_plugin_channel *src_channels,
struct snd_pcm_plugin_channel *dst_channels,
snd_pcm_uframes_t frames)
{
if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
return -ENXIO;
if (frames == 0)
return 0;
#ifdef CONFIG_SND_DEBUG
{
unsigned int channel;
for (channel = 0; channel < plugin->src_format.channels; channel++) {
if (snd_BUG_ON(src_channels[channel].area.first % 8 ||
src_channels[channel].area.step % 8))
return -ENXIO;
if (snd_BUG_ON(dst_channels[channel].area.first % 8 ||
dst_channels[channel].area.step % 8))
return -ENXIO;
}
}
#endif
convert(plugin, src_channels, dst_channels, frames);
return frames;
}
static void init_data(struct linear_priv *data,
snd_pcm_format_t src_format, snd_pcm_format_t dst_format)
{
int src_le, dst_le, src_bytes, dst_bytes;
src_bytes = snd_pcm_format_width(src_format) / 8;
dst_bytes = snd_pcm_format_width(dst_format) / 8;
src_le = snd_pcm_format_little_endian(src_format) > 0;
dst_le = snd_pcm_format_little_endian(dst_format) > 0;
data->dst_bytes = dst_bytes;
data->cvt_endian = src_le != dst_le;
data->copy_bytes = src_bytes < dst_bytes ? src_bytes : dst_bytes;
if (src_le) {
data->copy_ofs = 4 - data->copy_bytes;
data->src_ofs = src_bytes - data->copy_bytes;
} else
data->src_ofs = snd_pcm_format_physical_width(src_format) / 8 -
src_bytes;
if (dst_le)
data->dst_ofs = 4 - data->dst_bytes;
else
data->dst_ofs = snd_pcm_format_physical_width(dst_format) / 8 -
dst_bytes;
if (snd_pcm_format_signed(src_format) !=
snd_pcm_format_signed(dst_format)) {
if (dst_le)
data->flip = (__force u32)cpu_to_le32(0x80000000);
else
data->flip = (__force u32)cpu_to_be32(0x80000000);
}
}
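/*
 * Worked example (illustrative, not part of the original source):
 * converting S16_LE to S32_LE. src_bytes = 2, dst_bytes = 4, both
 * little-endian and signed, so cvt_endian = 0, flip = 0,
 * copy_bytes = 2, copy_ofs = 4 - 2 = 2, src_ofs = 0, dst_ofs = 0.
 * do_convert() then splices the 16-bit sample into the top two bytes
 * of the scratch word, so a sample of 0x1234 leaves as 0x12340000:
 * the value lands in the most significant bits of the wider format.
 */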
int snd_pcm_plugin_build_linear(struct snd_pcm_substream *plug,
struct snd_pcm_plugin_format *src_format,
struct snd_pcm_plugin_format *dst_format,
struct snd_pcm_plugin **r_plugin)
{
int err;
struct linear_priv *data;
struct snd_pcm_plugin *plugin;
if (snd_BUG_ON(!r_plugin))
return -ENXIO;
*r_plugin = NULL;
if (snd_BUG_ON(src_format->rate != dst_format->rate))
return -ENXIO;
if (snd_BUG_ON(src_format->channels != dst_format->channels))
return -ENXIO;
if (snd_BUG_ON(!snd_pcm_format_linear(src_format->format) ||
!snd_pcm_format_linear(dst_format->format)))
return -ENXIO;
err = snd_pcm_plugin_build(plug, "linear format conversion",
src_format, dst_format,
sizeof(struct linear_priv), &plugin);
if (err < 0)
return err;
data = (struct linear_priv *)plugin->extra_data;
init_data(data, src_format->format, dst_format->format);
plugin->transfer = linear_transfer;
*r_plugin = plugin;
return 0;
}
| gpl-2.0 |
vDorst/linux | drivers/media/video/s2255drv.c | 47 | 77495 | /*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
* Copyright (C) 2007-2010 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
*
* Sensoray 2255 device supports 4 simultaneous channels.
* The channels are not "crossbar" inputs, they are physically
* attached to separate video decoders.
*
* Because of USB 2.0 bandwidth limitations, there is only a
* certain amount of data which may be transferred at one time.
*
* Example maximum bandwidth utilization:
*
* -full size, color mode YUYV or YUV422P: 2 channels at once
* -full or half size Grey scale: all 4 channels at once
* -half size, color mode YUYV or YUV422P: all 4 channels at once
* -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels
* at once.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
#define S2255_VERSION "1.22.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
#define S2255_DEF_JPEG_QUAL 50
/* vendor request in */
#define S2255_VR_IN 0
/* vendor request out */
#define S2255_VR_OUT 1
/* firmware query */
#define S2255_VR_FW 0x30
/* USB endpoint number for configuring the device */
#define S2255_CONFIG_EP 2
/* maximum time for DSP to start responding after last FW word loaded(ms) */
#define S2255_DSP_BOOTTIME 800
/* maximum time to wait for firmware to load (ms) */
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
#define S2255_DEF_BUFS 16
#define S2255_SETMODE_TIMEOUT 500
#define S2255_VIDSTATUS_TIMEOUT 350
#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
#define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL)
#define S2255_RESPONSE_SETMODE cpu_to_le32(0x01)
#define S2255_RESPONSE_FW cpu_to_le32(0x10)
#define S2255_RESPONSE_STATUS cpu_to_le32(0x20)
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
#define SYS_FRAMES 4
/* maximum size is PAL full size plus room for the marker header(s) */
#define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096)
#define DEF_USB_BLOCK S2255_USB_XFER_SIZE
#define LINE_SZ_4CIFS_NTSC 640
#define LINE_SZ_2CIFS_NTSC 640
#define LINE_SZ_1CIFS_NTSC 320
#define LINE_SZ_4CIFS_PAL 704
#define LINE_SZ_2CIFS_PAL 704
#define LINE_SZ_1CIFS_PAL 352
#define NUM_LINES_4CIFS_NTSC 240
#define NUM_LINES_2CIFS_NTSC 240
#define NUM_LINES_1CIFS_NTSC 240
#define NUM_LINES_4CIFS_PAL 288
#define NUM_LINES_2CIFS_PAL 288
#define NUM_LINES_1CIFS_PAL 288
#define LINE_SZ_DEF 640
#define NUM_LINES_DEF 240
/* predefined settings */
#define FORMAT_NTSC 1
#define FORMAT_PAL 2
#define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */
#define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */
#define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */
/* SCALE_4CIFSI is the 2 fields interpolated into one */
#define SCALE_4CIFSI 4 /* 640x480(NTSC) or 704x576(PAL) high quality */
#define COLOR_YUVPL 1 /* YUV planar */
#define COLOR_YUVPK 2 /* YUV packed */
#define COLOR_Y8 4 /* monochrome */
#define COLOR_JPG 5 /* JPEG */
#define MASK_COLOR 0x000000ff
#define MASK_JPG_QUALITY 0x0000ff00
#define MASK_INPUT_TYPE 0x000f0000
/* frame decimation. */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
#define FDEC_3 3 /* capture every 3rd frame */
#define FDEC_5 5 /* capture every 5th frame */
/*-------------------------------------------------------
* Default mode parameters.
*-------------------------------------------------------*/
#define DEF_SCALE SCALE_4CIFS
#define DEF_COLOR COLOR_YUVPL
#define DEF_FDEC FDEC_1
#define DEF_BRIGHT 0
#define DEF_CONTRAST 0x5c
#define DEF_SATURATION 0x80
#define DEF_HUE 0
/* usb config commands */
#define IN_DATA_TOKEN cpu_to_le32(0x2255c0de)
#define CMD_2255 0xc2255000
#define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10))
#define CMD_START cpu_to_le32((CMD_2255 | 0x20))
#define CMD_STOP cpu_to_le32((CMD_2255 | 0x30))
#define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40))
struct s2255_mode {
u32 format; /* input video format (NTSC, PAL) */
u32 scale; /* output video scale */
u32 color; /* output video color format */
u32 fdec; /* frame decimation */
u32 bright; /* brightness */
u32 contrast; /* contrast */
u32 saturation; /* saturation */
u32 hue; /* hue (NTSC only)*/
u32 single; /* capture 1 frame at a time (!=0), continuously (==0)*/
u32 usb_block; /* block size; should be 4096 (DEF_USB_BLOCK) */
u32 restart; /* if DSP requires restart */
};
#define S2255_READ_IDLE 0
#define S2255_READ_FRAME 1
/* frame structure */
struct s2255_framei {
unsigned long size;
unsigned long ulState; /* ulState:S2255_READ_IDLE, S2255_READ_FRAME*/
void *lpvbits; /* image data */
unsigned long cur_size; /* current data copied to it */
};
/* image buffer structure */
struct s2255_bufferi {
unsigned long dwFrames; /* number of frames in buffer */
struct s2255_framei frame[SYS_FRAMES]; /* array of FRAME structures */
};
#define DEF_MODEI_NTSC_CONT {FORMAT_NTSC, DEF_SCALE, DEF_COLOR, \
DEF_FDEC, DEF_BRIGHT, DEF_CONTRAST, DEF_SATURATION, \
DEF_HUE, 0, DEF_USB_BLOCK, 0}
struct s2255_dmaqueue {
struct list_head active;
struct s2255_dev *dev;
};
/* for firmware loading, fw_state */
#define S2255_FW_NOTLOADED 0
#define S2255_FW_LOADED_DSPWAIT 1
#define S2255_FW_SUCCESS 2
#define S2255_FW_FAILED 3
#define S2255_FW_DISCONNECTING 4
#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
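/*
 * Firmware load state flow (a summary inferred from the handlers below;
 * S2255_FW_SUCCESS is set elsewhere once the DSP acknowledges):
 *
 *	NOTLOADED --chunks uploaded--> LOADED_DSPWAIT --DSP ack--> SUCCESS
 *	any state --URB error or unplug--> FAILED / DISCONNECTING
 */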
struct s2255_fw {
int fw_loaded;
int fw_size;
struct urb *fw_urb;
atomic_t fw_state;
void *pfw_data;
wait_queue_head_t wait_fw;
const struct firmware *fw;
};
struct s2255_pipeinfo {
u32 max_transfer_size;
u32 cur_transfer_size;
u8 *transfer_buffer;
u32 state;
void *stream_urb;
void *dev; /* back pointer to s2255_dev struct*/
u32 err_count;
u32 idx;
};
struct s2255_fmt; /*forward declaration */
struct s2255_dev;
struct s2255_channel {
struct video_device vdev;
int resources;
struct s2255_dmaqueue vidq;
struct s2255_bufferi buffer;
struct s2255_mode mode;
/* jpeg compression */
struct v4l2_jpegcompression jc;
/* capture parameters (for high quality mode full size) */
struct v4l2_captureparm cap_parm;
int cur_frame;
int last_frame;
int b_acquire;
/* allocated image size */
unsigned long req_image_size;
/* received packet size */
unsigned long pkt_size;
int bad_payload;
unsigned long frame_count;
/* if JPEG image */
int jpg_size;
/* if channel configured to default state */
int configured;
wait_queue_head_t wait_setmode;
int setmode_ready;
/* video status items */
int vidstatus;
wait_queue_head_t wait_vidstatus;
int vidstatus_ready;
unsigned int width;
unsigned int height;
const struct s2255_fmt *fmt;
int idx; /* channel number on device, 0-3 */
};
struct s2255_dev {
struct s2255_channel channel[MAX_CHANNELS];
struct v4l2_device v4l2_dev;
atomic_t num_channels;
int frames;
struct mutex lock; /* channels[].vdev.lock */
struct mutex open_lock;
struct usb_device *udev;
struct usb_interface *interface;
u8 read_endpoint;
struct timer_list timer;
struct s2255_fw *fw_data;
struct s2255_pipeinfo pipe;
u32 cc; /* current channel */
int frame_ready;
int chn_ready;
spinlock_t slock;
/* dsp firmware version (f2255usb.bin) */
int dsp_fw_ver;
u16 pid; /* product id */
};
static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
{
return container_of(v4l2_dev, struct s2255_dev, v4l2_dev);
}
struct s2255_fmt {
char *name;
u32 fourcc;
int depth;
};
/* buffer for one video frame */
struct s2255_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
const struct s2255_fmt *fmt;
};
struct s2255_fh {
struct s2255_dev *dev;
struct videobuf_queue vb_vidq;
enum v4l2_buf_type type;
struct s2255_channel *channel;
int resources;
};
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
/* current DSP FW version */
#define S2255_CUR_DSP_FWVER 10104
/* Need DSP version 5+ for video status feature */
#define S2255_MIN_DSP_STATUS 5
#define S2255_MIN_DSP_COLORFILTER 8
#define S2255_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC)
/* private V4L2 controls */
/*
* The following chart displays how COLORFILTER should be set
* =========================================================
* = fourcc = COLORFILTER =
* = ===============================
* = = 0 = 1 =
* =========================================================
* = V4L2_PIX_FMT_GREY(Y8) = monochrome from = monochrome=
* = = s-video or = composite =
* = = B/W camera = input =
* =========================================================
* = other = color, svideo = color, =
* = = = composite =
* =========================================================
*
* Notes:
* channels 0-3 on 2255 are composite
* channels 0-1 on 2257 are composite, 2-3 are s-video
* If COLORFILTER is 0 with a composite color camera connected,
* the output will appear monochrome but hatching
* will occur.
* COLORFILTER is different from "color killer" and "color effects"
* for reasons above.
*/
#define S2255_V4L2_YC_ON 1
#define S2255_V4L2_YC_OFF 0
#define V4L2_CID_PRIVATE_COLORFILTER (V4L2_CID_PRIVATE_BASE + 0)
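/*
 * Illustrative userspace sketch (an assumption for clarity, not part
 * of this driver): enabling the color filter through the private
 * control defined above, where value 1 selects "On" per the menu in
 * vidioc_querymenu():
 *
 *	struct v4l2_control ctrl;
 *	ctrl.id = V4L2_CID_PRIVATE_BASE + 0;	(COLORFILTER)
 *	ctrl.value = 1;				(filter on)
 *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 */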
/* frame prefix size (sent once every frame) */
#define PREFIX_SIZE 512
/* Channels on box are in reverse order */
static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0};
static int debug;
static int *s2255_debug = &debug;
static int s2255_start_readpipe(struct s2255_dev *dev);
static void s2255_stop_readpipe(struct s2255_dev *dev);
static int s2255_start_acquire(struct s2255_channel *channel);
static int s2255_stop_acquire(struct s2255_channel *channel);
static void s2255_fillbuff(struct s2255_channel *chn, struct s2255_buffer *buf,
int jpgsize);
static int s2255_set_mode(struct s2255_channel *chan, struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev, int reset);
static void s2255_destroy(struct s2255_dev *dev);
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
u16 index, u16 value, void *buf,
s32 buf_len, int bOut);
/* dev_err macro with driver name */
#define S2255_DRIVER_NAME "s2255"
#define s2255_dev_err(dev, fmt, arg...) \
dev_err(dev, S2255_DRIVER_NAME " - " fmt, ##arg)
#define dprintk(level, fmt, arg...) \
do { \
if (*s2255_debug >= (level)) { \
printk(KERN_DEBUG S2255_DRIVER_NAME \
": " fmt, ##arg); \
} \
} while (0)
static struct usb_driver s2255_driver;
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
/* start video number */
static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
/* Enable jpeg capture. */
static int jpeg_enable = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level(0-100) default 0");
module_param(vid_limit, int, 0644);
MODULE_PARM_DESC(vid_limit, "video memory limit(Mb)");
module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
module_param(jpeg_enable, int, 0644);
MODULE_PARM_DESC(jpeg_enable, "Jpeg enable(1-on 0-off) default 1");
/* USB device table */
#define USB_SENSORAY_VID 0x1943
static struct usb_device_id s2255_table[] = {
{USB_DEVICE(USB_SENSORAY_VID, 0x2255)},
{USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2255_table);
#define BUFFER_TIMEOUT msecs_to_jiffies(400)
/* image formats. */
/* JPEG formats must be defined last to support jpeg_enable parameter */
static const struct s2255_fmt formats[] = {
{
.name = "4:2:2, planar, YUV422P",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16
}, {
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16
}, {
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16
}, {
.name = "8bpp GREY",
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8
}, {
.name = "JPG",
.fourcc = V4L2_PIX_FMT_JPEG,
.depth = 24
}, {
.name = "MJPG",
.fourcc = V4L2_PIX_FMT_MJPEG,
.depth = 24
}
};
static int norm_maxw(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
LINE_SZ_4CIFS_NTSC : LINE_SZ_4CIFS_PAL;
}
static int norm_maxh(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
(NUM_LINES_1CIFS_NTSC * 2) : (NUM_LINES_1CIFS_PAL * 2);
}
static int norm_minw(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
LINE_SZ_1CIFS_NTSC : LINE_SZ_1CIFS_PAL;
}
static int norm_minh(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
(NUM_LINES_1CIFS_NTSC) : (NUM_LINES_1CIFS_PAL);
}
/*
* TODO: fixme: move YUV reordering to hardware
* converts 2255 planar format to yuyv or uyvy
*/
static void planar422p_to_yuv_packed(const unsigned char *in,
unsigned char *out,
int width, int height,
int fmt)
{
unsigned char *pY;
unsigned char *pCb;
unsigned char *pCr;
unsigned long size = height * width;
unsigned int i;
pY = (unsigned char *)in;
pCr = (unsigned char *)in + height * width;
pCb = (unsigned char *)in + height * width + (height * width / 2);
for (i = 0; i < size * 2; i += 4) {
out[i] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCr++;
out[i + 1] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCr++ : *pY++;
out[i + 2] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCb++;
out[i + 3] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCb++ : *pY++;
}
return;
}
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
msleep(10);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
msleep(600);
s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
return;
}
/* kickstarts the firmware loading; called from probe
*/
static void s2255_timer(unsigned long user_data)
{
struct s2255_fw *data = (struct s2255_fw *)user_data;
dprintk(100, "%s\n", __func__);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
printk(KERN_ERR "s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
}
/* this loads the firmware asynchronously.
Originally this was done synchronously in probe.
But it is better to load it asynchronously here than block
inside the probe function. Blocking inside probe affects boot time.
FW loading is triggered by the timer in the probe function
*/
static void s2255_fwchunk_complete(struct urb *urb)
{
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
dprintk(100, "%s: udev %p urb %p", __func__, udev, urb);
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
if (data->fw_urb == NULL) {
s2255_dev_err(&udev->dev, "disconnected\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
#define CHUNK_SIZE 512
/* all USB transfers must be done with contiguous kernel memory;
we can't allocate more than 128k in the current Linux kernel, so
upload the firmware in chunks
*/
if (data->fw_loaded < data->fw_size) {
len = (data->fw_loaded + CHUNK_SIZE) > data->fw_size ?
data->fw_size % CHUNK_SIZE : CHUNK_SIZE;
if (len < CHUNK_SIZE)
memset(data->pfw_data, 0, CHUNK_SIZE);
dprintk(100, "completed len %d, loaded %d \n", len,
data->fw_loaded);
memcpy(data->pfw_data,
(char *) data->fw->data + data->fw_loaded, len);
usb_fill_bulk_urb(data->fw_urb, udev, usb_sndbulkpipe(udev, 2),
data->pfw_data, CHUNK_SIZE,
s2255_fwchunk_complete, data);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
dev_err(&udev->dev, "failed submit URB\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
data->fw_loaded += len;
} else {
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
dprintk(100, "%s: firmware upload complete\n", __func__);
}
return;
}
static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
{
struct s2255_dmaqueue *dma_q = &channel->vidq;
struct s2255_buffer *buf;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
unsigned long flags = 0;
int rc = 0;
spin_lock_irqsave(&dev->slock, flags);
if (list_empty(&dma_q->active)) {
dprintk(1, "No active queue to serve\n");
rc = -1;
goto unlock;
}
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
list_del(&buf->vb.queue);
do_gettimeofday(&buf->vb.ts);
s2255_fillbuff(channel, buf, jpgsize);
wake_up(&buf->vb.done);
dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
return rc;
}
static const struct s2255_fmt *format_by_fourcc(int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
if (!jpeg_enable && ((formats[i].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[i].fourcc == V4L2_PIX_FMT_MJPEG)))
continue;
if (formats[i].fourcc == fourcc)
return formats + i;
}
return NULL;
}
/* video buffer vmalloc implementation based partly on VIVI driver which is
* Copyright (c) 2006 by
* Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
* Ted Walther <ted--a.t--enumera.com>
* John Sokol <sokol--a.t--videotechnology.com>
* http://v4l.videotechnology.com/
*
*/
static void s2255_fillbuff(struct s2255_channel *channel,
struct s2255_buffer *buf, int jpgsize)
{
int pos = 0;
struct timeval ts;
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
if (!vbuf)
return;
last_frame = channel->last_frame;
if (last_frame != -1) {
tmpbuf =
(const char *)channel->buffer.frame[last_frame].lpvbits;
switch (buf->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
planar422p_to_yuv_packed((const unsigned char *)tmpbuf,
vbuf, buf->vb.width,
buf->vb.height,
buf->fmt->fourcc);
break;
case V4L2_PIX_FMT_GREY:
memcpy(vbuf, tmpbuf, buf->vb.width * buf->vb.height);
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
buf->vb.size = jpgsize;
memcpy(vbuf, tmpbuf, buf->vb.size);
break;
case V4L2_PIX_FMT_YUV422P:
memcpy(vbuf, tmpbuf,
buf->vb.width * buf->vb.height * 2);
break;
default:
printk(KERN_DEBUG "s2255: unknown format?\n");
}
channel->last_frame = -1;
} else {
printk(KERN_ERR "s2255: =======no frame\n");
return;
}
dprintk(2, "s2255fill at : Buffer 0x%08lx size= %d\n",
(unsigned long)vbuf, pos);
/* tell v4l buffer was filled */
buf->vb.field_count = channel->frame_count * 2;
do_gettimeofday(&ts);
buf->vb.ts = ts;
buf->vb.state = VIDEOBUF_DONE;
}
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct s2255_fh *fh = vq->priv_data;
struct s2255_channel *channel = fh->channel;
*size = channel->width * channel->height * (channel->fmt->depth >> 3);
if (0 == *count)
*count = S2255_DEF_BUFS;
if (*size * *count > vid_limit * 1024 * 1024)
*count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
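/*
 * Worked example (illustrative): a 640x480 YUYV capture (16 bpp) gives
 * *size = 640 * 480 * 2 = 614400 bytes per buffer; with the default
 * vid_limit of 16 MB an oversized request is clamped to
 * 16 * 1024 * 1024 / 614400 = 27 buffers.
 */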
static void free_buffer(struct videobuf_queue *vq, struct s2255_buffer *buf)
{
dprintk(4, "%s\n", __func__);
videobuf_vmalloc_free(&buf->vb);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct s2255_fh *fh = vq->priv_data;
struct s2255_channel *channel = fh->channel;
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
int rc;
int w = channel->width;
int h = channel->height;
dprintk(4, "%s, field=%d\n", __func__, field);
if (channel->fmt == NULL)
return -EINVAL;
if ((w < norm_minw(&channel->vdev)) ||
(w > norm_maxw(&channel->vdev)) ||
(h < norm_minh(&channel->vdev)) ||
(h > norm_maxh(&channel->vdev))) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
buf->vb.size = w * h * (channel->fmt->depth >> 3);
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
buf->fmt = channel->fmt;
buf->vb.width = w;
buf->vb.height = h;
buf->vb.field = field;
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
goto fail;
}
buf->vb.state = VIDEOBUF_PREPARED;
return 0;
fail:
free_buffer(vq, buf);
return rc;
}
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
struct s2255_fh *fh = vq->priv_data;
struct s2255_channel *channel = fh->channel;
struct s2255_dmaqueue *vidq = &channel->vidq;
dprintk(1, "%s\n", __func__);
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &vidq->active);
}
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
struct s2255_fh *fh = vq->priv_data;
dprintk(4, "%s %d\n", __func__, fh->channel->idx);
free_buffer(vq, buf);
}
static struct videobuf_queue_ops s2255_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
static int res_get(struct s2255_fh *fh)
{
struct s2255_channel *channel = fh->channel;
/* is it free? */
if (channel->resources)
return 0; /* no, someone else uses it */
/* it's free, grab it */
channel->resources = 1;
fh->resources = 1;
dprintk(1, "s2255: res: get\n");
return 1;
}
static int res_locked(struct s2255_fh *fh)
{
return fh->channel->resources;
}
static int res_check(struct s2255_fh *fh)
{
return fh->resources;
}
static void res_free(struct s2255_fh *fh)
{
struct s2255_channel *channel = fh->channel;
channel->resources = 0;
fh->resources = 0;
dprintk(1, "res: put\n");
}
static int vidioc_querymenu(struct file *file, void *priv,
struct v4l2_querymenu *qmenu)
{
static const char *colorfilter[] = {
"Off",
"On",
NULL
};
if (qmenu->id == V4L2_CID_PRIVATE_COLORFILTER) {
int i;
const char **menu_items = colorfilter;
for (i = 0; i < qmenu->index && menu_items[i]; i++)
; /* do nothing (from v4l2-common.c) */
if (menu_items[i] == NULL || menu_items[i][0] == '\0')
return -EINVAL;
strlcpy(qmenu->name, menu_items[qmenu->index],
sizeof(qmenu->name));
return 0;
}
return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
strlcpy(cap->driver, "s2255", sizeof(cap->driver));
strlcpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
int index = f->index;
if (index >= ARRAY_SIZE(formats))
return -EINVAL;
if (!jpeg_enable && ((formats[index].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[index].fourcc == V4L2_PIX_FMT_MJPEG)))
return -EINVAL;
dprintk(4, "name %s\n", formats[index].name);
strlcpy(f->description, formats[index].name, sizeof(f->description));
f->pixelformat = formats[index].fourcc;
return 0;
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
f->fmt.pix.width = channel->width;
f->fmt.pix.height = channel->height;
f->fmt.pix.field = fh->vb_vidq.field;
f->fmt.pix.pixelformat = channel->fmt->fourcc;
f->fmt.pix.bytesperline = f->fmt.pix.width * (channel->fmt->depth >> 3);
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct s2255_fmt *fmt;
enum v4l2_field field;
int b_any_field = 0;
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
int is_ntsc;
is_ntsc =
(channel->vdev.current_norm & V4L2_STD_NTSC) ? 1 : 0;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (fmt == NULL)
return -EINVAL;
field = f->fmt.pix.field;
if (field == V4L2_FIELD_ANY)
b_any_field = 1;
dprintk(50, "%s NTSC: %d suggested width: %d, height: %d\n",
__func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
/* NTSC */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) {
f->fmt.pix.height = NUM_LINES_1CIFS_NTSC * 2;
if (b_any_field) {
field = V4L2_FIELD_SEQ_TB;
} else if (!((field == V4L2_FIELD_INTERLACED) ||
(field == V4L2_FIELD_SEQ_TB) ||
(field == V4L2_FIELD_INTERLACED_TB))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
} else {
f->fmt.pix.height = NUM_LINES_1CIFS_NTSC;
if (b_any_field) {
field = V4L2_FIELD_TOP;
} else if (!((field == V4L2_FIELD_TOP) ||
(field == V4L2_FIELD_BOTTOM))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_4CIFS_NTSC;
else if (f->fmt.pix.width >= LINE_SZ_2CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_2CIFS_NTSC;
else if (f->fmt.pix.width >= LINE_SZ_1CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_1CIFS_NTSC;
else
f->fmt.pix.width = LINE_SZ_1CIFS_NTSC;
} else {
/* PAL */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_PAL * 2) {
f->fmt.pix.height = NUM_LINES_1CIFS_PAL * 2;
if (b_any_field) {
field = V4L2_FIELD_SEQ_TB;
} else if (!((field == V4L2_FIELD_INTERLACED) ||
(field == V4L2_FIELD_SEQ_TB) ||
(field == V4L2_FIELD_INTERLACED_TB))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
} else {
f->fmt.pix.height = NUM_LINES_1CIFS_PAL;
if (b_any_field) {
field = V4L2_FIELD_TOP;
} else if (!((field == V4L2_FIELD_TOP) ||
(field == V4L2_FIELD_BOTTOM))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL) {
f->fmt.pix.width = LINE_SZ_4CIFS_PAL;
field = V4L2_FIELD_SEQ_TB;
} else if (f->fmt.pix.width >= LINE_SZ_2CIFS_PAL) {
f->fmt.pix.width = LINE_SZ_2CIFS_PAL;
field = V4L2_FIELD_TOP;
} else if (f->fmt.pix.width >= LINE_SZ_1CIFS_PAL) {
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
} else {
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
}
}
f->fmt.pix.field = field;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
dprintk(50, "%s: set width %d height %d field %d\n", __func__,
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
const struct s2255_fmt *fmt;
struct videobuf_queue *q = &fh->vb_vidq;
struct s2255_mode mode;
int ret;
ret = vidioc_try_fmt_vid_cap(file, fh, f);
if (ret < 0)
return ret;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (fmt == NULL)
return -EINVAL;
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_fmt;
}
if (res_locked(fh)) {
dprintk(1, "%s: channel busy\n", __func__);
ret = -EBUSY;
goto out_s_fmt;
}
mode = channel->mode;
channel->fmt = fmt;
channel->width = f->fmt.pix.width;
channel->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
if (channel->width > norm_minw(&channel->vdev)) {
if (channel->height > norm_minh(&channel->vdev)) {
if (channel->cap_parm.capturemode &
V4L2_MODE_HIGHQUALITY)
mode.scale = SCALE_4CIFSI;
else
mode.scale = SCALE_4CIFS;
} else
mode.scale = SCALE_2CIFS;
} else {
mode.scale = SCALE_1CIFS;
}
/* color mode */
switch (channel->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_Y8;
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_JPG;
mode.color |= (channel->jc.quality << 8);
break;
case V4L2_PIX_FMT_YUV422P:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_YUVPL;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_YUVPK;
break;
}
if ((mode.color & MASK_COLOR) != (channel->mode.color & MASK_COLOR))
mode.restart = 1;
else if (mode.scale != channel->mode.scale)
mode.restart = 1;
else if (mode.format != channel->mode.format)
mode.restart = 1;
channel->mode = mode;
(void) s2255_set_mode(channel, &mode);
ret = 0;
out_s_fmt:
mutex_unlock(&q->vb_lock);
return ret;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_reqbufs(&fh->vb_vidq, p);
return rc;
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_querybuf(&fh->vb_vidq, p);
return rc;
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_qbuf(&fh->vb_vidq, p);
return rc;
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK);
return rc;
}
/* write to the configuration pipe, synchronously */
static int s2255_write_config(struct usb_device *udev, unsigned char *pbuf,
int size)
{
int pipe;
int done;
long retval = -1;
if (udev) {
pipe = usb_sndbulkpipe(udev, S2255_CONFIG_EP);
retval = usb_bulk_msg(udev, pipe, pbuf, size, &done, 500);
}
return retval;
}
static u32 get_transfer_size(struct s2255_mode *mode)
{
int linesPerFrame = NUM_LINES_DEF;
int pixelsPerLine = LINE_SZ_DEF;
u32 outImageSize;
u32 usbInSize;
unsigned int mask_mult;
if (mode == NULL)
return 0;
if (mode->format == FORMAT_NTSC) {
switch (mode->scale) {
case SCALE_4CIFS:
case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_NTSC * 2;
pixelsPerLine = LINE_SZ_4CIFS_NTSC;
break;
case SCALE_2CIFS:
linesPerFrame = NUM_LINES_2CIFS_NTSC;
pixelsPerLine = LINE_SZ_2CIFS_NTSC;
break;
case SCALE_1CIFS:
linesPerFrame = NUM_LINES_1CIFS_NTSC;
pixelsPerLine = LINE_SZ_1CIFS_NTSC;
break;
default:
break;
}
} else if (mode->format == FORMAT_PAL) {
switch (mode->scale) {
case SCALE_4CIFS:
case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_PAL * 2;
pixelsPerLine = LINE_SZ_4CIFS_PAL;
break;
case SCALE_2CIFS:
linesPerFrame = NUM_LINES_2CIFS_PAL;
pixelsPerLine = LINE_SZ_2CIFS_PAL;
break;
case SCALE_1CIFS:
linesPerFrame = NUM_LINES_1CIFS_PAL;
pixelsPerLine = LINE_SZ_1CIFS_PAL;
break;
default:
break;
}
}
outImageSize = linesPerFrame * pixelsPerLine;
if ((mode->color & MASK_COLOR) != COLOR_Y8) {
/* 2 bytes/pixel if not monochrome */
outImageSize *= 2;
}
/* total bytes to send including prefix and padding;
must be a multiple of the USB block size (DEF_USB_BLOCK) */
usbInSize = outImageSize + PREFIX_SIZE; /* always send prefix */
mask_mult = 0xffffffffUL - DEF_USB_BLOCK + 1;
/* if size not a multiple of DEF_USB_BLOCK */
if (usbInSize & ~mask_mult)
usbInSize = (usbInSize & mask_mult) + (DEF_USB_BLOCK);
return usbInSize;
}
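/*
 * Worked example (illustrative): full-size NTSC in a 2 byte/pixel
 * color mode is 480 * 640 * 2 + 512 (prefix) = 614912 bytes, which is
 * not a multiple of the 16 KiB USB block, so it is rounded up to
 * 38 * 16384 = 622592 bytes per frame.
 */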
static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
{
struct device *dev = &sdev->udev->dev;
dev_info(dev, "------------------------------------------------\n");
dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale);
dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color);
dev_info(dev, "bright: 0x%x\n", mode->bright);
dev_info(dev, "------------------------------------------------\n");
}
/*
* set mode is the function which controls the DSP.
* the restart parameter in struct s2255_mode should be set whenever
* the image size could change via color format, video system or
* scale.
* When the restart parameter is set, we sleep for ONE frame to allow the
* DSP time to get the new frame
*/
static int s2255_set_mode(struct s2255_channel *channel,
struct s2255_mode *mode)
{
int res;
__le32 *buffer;
unsigned long chn_rev;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
dprintk(3, "%s channel: %d\n", __func__, channel->idx);
/* if JPEG, set the quality */
if ((mode->color & MASK_COLOR) == COLOR_JPG) {
mode->color &= ~MASK_COLOR;
mode->color |= COLOR_JPG;
mode->color &= ~MASK_JPG_QUALITY;
mode->color |= (channel->jc.quality << 8);
}
/* save the mode */
channel->mode = *mode;
channel->req_image_size = get_transfer_size(mode);
dprintk(1, "%s: reqsize %ld\n", __func__, channel->req_image_size);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
memcpy(&buffer[3], &channel->mode, sizeof(struct s2255_mode));
channel->setmode_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
s2255_print_cfg(dev, mode);
kfree(buffer);
/* wait at least 3 frames before continuing */
if (mode->restart) {
wait_event_timeout(channel->wait_setmode,
(channel->setmode_ready != 0),
msecs_to_jiffies(S2255_SETMODE_TIMEOUT));
if (channel->setmode_ready != 1) {
printk(KERN_DEBUG "s2255: no set mode response\n");
res = -EFAULT;
}
}
/* clear the restart flag */
channel->mode.restart = 0;
dprintk(1, "%s chn %d, result: %d\n", __func__, channel->idx, res);
return res;
}
static int s2255_cmd_status(struct s2255_channel *channel, u32 *pstatus)
{
int res;
__le32 *buffer;
u32 chn_rev;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
dprintk(4, "%s chan %d\n", __func__, channel->idx);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
/* form the get vid status command */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_STATUS;
*pstatus = 0;
channel->vidstatus_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
kfree(buffer);
wait_event_timeout(channel->wait_vidstatus,
(channel->vidstatus_ready != 0),
msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
if (channel->vidstatus_ready != 1) {
printk(KERN_DEBUG "s2255: no vidstatus response\n");
res = -EFAULT;
}
*pstatus = channel->vidstatus;
dprintk(4, "%s, vid status %d\n", __func__, *pstatus);
return res;
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
int res;
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_channel *channel = fh->channel;
int j;
dprintk(4, "%s\n", __func__);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dev_err(&dev->udev->dev, "invalid fh type0\n");
return -EINVAL;
}
if (i != fh->type) {
dev_err(&dev->udev->dev, "invalid fh type1\n");
return -EINVAL;
}
if (!res_get(fh)) {
s2255_dev_err(&dev->udev->dev, "stream busy\n");
return -EBUSY;
}
channel->last_frame = -1;
channel->bad_payload = 0;
channel->cur_frame = 0;
channel->frame_count = 0;
for (j = 0; j < SYS_FRAMES; j++) {
channel->buffer.frame[j].ulState = S2255_READ_IDLE;
channel->buffer.frame[j].cur_size = 0;
}
res = videobuf_streamon(&fh->vb_vidq);
if (res == 0) {
s2255_start_acquire(channel);
channel->b_acquire = 1;
} else
res_free(fh);
return res;
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct s2255_fh *fh = priv;
dprintk(4, "%s\n, channel: %d", __func__, fh->channel->idx);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
printk(KERN_ERR "invalid fh type0\n");
return -EINVAL;
}
if (i != fh->type) {
printk(KERN_ERR "invalid type i\n");
return -EINVAL;
}
s2255_stop_acquire(fh->channel);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
return 0;
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
{
struct s2255_fh *fh = priv;
struct s2255_mode mode;
struct videobuf_queue *q = &fh->vb_vidq;
int ret = 0;
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(q)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_std;
}
if (res_locked(fh)) {
dprintk(1, "can't change standard after started\n");
ret = -EBUSY;
goto out_s_std;
}
mode = fh->channel->mode;
if (*i & V4L2_STD_NTSC) {
dprintk(4, "%s NTSC\n", __func__);
/* if changing format, reset frame decimation/intervals */
if (mode.format != FORMAT_NTSC) {
mode.restart = 1;
mode.format = FORMAT_NTSC;
mode.fdec = FDEC_1;
}
} else if (*i & V4L2_STD_PAL) {
dprintk(4, "%s PAL\n", __func__);
if (mode.format != FORMAT_PAL) {
mode.restart = 1;
mode.format = FORMAT_PAL;
mode.fdec = FDEC_1;
}
} else {
ret = -EINVAL;
}
if (mode.restart)
s2255_set_mode(fh->channel, &mode);
out_s_std:
mutex_unlock(&q->vb_lock);
return ret;
}
/* Sensoray 2255 is a multiple channel capture device.
It does not have a "crossbar" of inputs.
We use one V4L device per channel. The user must
be aware that certain combinations are not allowed.
For instance, you cannot do full FPS on more than 2 channels (2 video
devices) at once in color (you can do full FPS on 4 channels in
greyscale).
*/
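/*
 * Illustrative user-space sketch (not part of the driver; the device
 * node names are assumptions and depend on video_nr and on what else
 * is registered in the system):
 *
 *	int fd[MAX_CHANNELS];
 *	char name[16];
 *	int ch;
 *	for (ch = 0; ch < MAX_CHANNELS; ch++) {
 *		snprintf(name, sizeof(name), "/dev/video%d", ch);
 *		fd[ch] = open(name, O_RDWR); // one V4L device per channel
 *	}
 */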
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_channel *channel = fh->channel;
u32 status = 0;
if (inp->index != 0)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = S2255_NORMS;
inp->status = 0;
if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
int rc;
rc = s2255_cmd_status(fh->channel, &status);
dprintk(4, "s2255_cmd_status rc: %d status %x\n", rc, status);
if (rc == 0)
inp->status = (status & 0x01) ? 0
: V4L2_IN_ST_NO_SIGNAL;
}
switch (dev->pid) {
case 0x2255:
default:
strlcpy(inp->name, "Composite", sizeof(inp->name));
break;
case 0x2257:
strlcpy(inp->name, (channel->idx < 2) ? "Composite" : "S-Video",
sizeof(inp->name));
break;
}
return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
if (i > 0)
return -EINVAL;
return 0;
}
/* --- controls ---------------------------------------------- */
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
struct s2255_dev *dev = fh->dev;
switch (qc->id) {
case V4L2_CID_BRIGHTNESS:
v4l2_ctrl_query_fill(qc, -127, 127, 1, DEF_BRIGHT);
break;
case V4L2_CID_CONTRAST:
v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_CONTRAST);
break;
case V4L2_CID_SATURATION:
v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_SATURATION);
break;
case V4L2_CID_HUE:
v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_HUE);
break;
case V4L2_CID_PRIVATE_COLORFILTER:
if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
return -EINVAL;
if ((dev->pid == 0x2257) && (channel->idx > 1))
return -EINVAL;
strlcpy(qc->name, "Color Filter", sizeof(qc->name));
qc->type = V4L2_CTRL_TYPE_MENU;
qc->minimum = 0;
qc->maximum = 1;
qc->step = 1;
qc->default_value = 1;
qc->flags = 0;
break;
default:
return -EINVAL;
}
dprintk(4, "%s, id %d\n", __func__, qc->id);
return 0;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_channel *channel = fh->channel;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ctrl->value = channel->mode.bright;
break;
case V4L2_CID_CONTRAST:
ctrl->value = channel->mode.contrast;
break;
case V4L2_CID_SATURATION:
ctrl->value = channel->mode.saturation;
break;
case V4L2_CID_HUE:
ctrl->value = channel->mode.hue;
break;
case V4L2_CID_PRIVATE_COLORFILTER:
if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
return -EINVAL;
if ((dev->pid == 0x2257) && (channel->idx > 1))
return -EINVAL;
ctrl->value = !((channel->mode.color & MASK_INPUT_TYPE) >> 16);
break;
default:
return -EINVAL;
}
dprintk(4, "%s, id %d val %d\n", __func__, ctrl->id, ctrl->value);
return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
struct s2255_mode mode;
mode = channel->mode;
dprintk(4, "%s\n", __func__);
/* update the mode to the corresponding value */
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
mode.bright = ctrl->value;
break;
case V4L2_CID_CONTRAST:
mode.contrast = ctrl->value;
break;
case V4L2_CID_HUE:
mode.hue = ctrl->value;
break;
case V4L2_CID_SATURATION:
mode.saturation = ctrl->value;
break;
case V4L2_CID_PRIVATE_COLORFILTER:
if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
return -EINVAL;
if ((dev->pid == 0x2257) && (channel->idx > 1))
return -EINVAL;
mode.color &= ~MASK_INPUT_TYPE;
mode.color |= ((ctrl->value ? 0 : 1) << 16);
break;
default:
return -EINVAL;
}
mode.restart = 0;
/* set mode here. Note: the stream does not need to be restarted;
some V4L programs restart the stream unnecessarily
after an s_ctrl.
*/
s2255_set_mode(fh->channel, &mode);
return 0;
}
static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jc)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
*jc = channel->jc;
dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_s_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jc)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
channel->jc.quality = jc->quality;
dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct s2255_fh *fh = priv;
__u32 def_num, def_dem;
struct s2255_channel *channel = fh->channel;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
memset(sp, 0, sizeof(struct v4l2_streamparm));
sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
sp->parm.capture.capturemode = channel->cap_parm.capturemode;
def_num = (channel->mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (channel->mode.format == FORMAT_NTSC) ? 30000 : 25000;
sp->parm.capture.timeperframe.denominator = def_dem;
switch (channel->mode.fdec) {
default:
case FDEC_1:
sp->parm.capture.timeperframe.numerator = def_num;
break;
case FDEC_2:
sp->parm.capture.timeperframe.numerator = def_num * 2;
break;
case FDEC_3:
sp->parm.capture.timeperframe.numerator = def_num * 3;
break;
case FDEC_5:
sp->parm.capture.timeperframe.numerator = def_num * 5;
break;
}
dprintk(4, "%s capture mode, %d timeperframe %d/%d\n", __func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator);
return 0;
}
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
struct s2255_mode mode;
int fdec = FDEC_1;
__u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
mode = channel->mode;
/* high quality capture mode requires a stream restart */
if (channel->cap_parm.capturemode
!= sp->parm.capture.capturemode && res_locked(fh))
return -EBUSY;
def_num = (mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (mode.format == FORMAT_NTSC) ? 30000 : 25000;
if (def_dem != sp->parm.capture.timeperframe.denominator)
sp->parm.capture.timeperframe.numerator = def_num;
else if (sp->parm.capture.timeperframe.numerator <= def_num)
sp->parm.capture.timeperframe.numerator = def_num;
else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) {
sp->parm.capture.timeperframe.numerator = def_num * 2;
fdec = FDEC_2;
} else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) {
sp->parm.capture.timeperframe.numerator = def_num * 3;
fdec = FDEC_3;
} else {
sp->parm.capture.timeperframe.numerator = def_num * 5;
fdec = FDEC_5;
}
mode.fdec = fdec;
sp->parm.capture.timeperframe.denominator = def_dem;
s2255_set_mode(channel, &mode);
dprintk(4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
__func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator, fdec);
return 0;
}
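/*
 * Worked example of the rounding above: the NTSC base interval is
 * 1001/30000 s (~29.97 fps). A requested numerator of 2500 (with
 * denominator 30000) falls between def_num * 2 (2002) and
 * def_num * 3 (3003), so it is rounded up to 3003/30000 (~9.99 fps)
 * and fdec becomes FDEC_3.
 */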
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fe)
{
int is_ntsc = 0;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
if (fe->index >= NUM_FRAME_ENUMS) /* index is unsigned */
return -EINVAL;
switch (fe->width) {
case 640:
if (fe->height != 240 && fe->height != 480)
return -EINVAL;
is_ntsc = 1;
break;
case 320:
if (fe->height != 240)
return -EINVAL;
is_ntsc = 1;
break;
case 704:
if (fe->height != 288 && fe->height != 576)
return -EINVAL;
break;
case 352:
if (fe->height != 288)
return -EINVAL;
break;
default:
return -EINVAL;
}
fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fe->discrete.denominator = is_ntsc ? 30000 : 25000;
fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
dprintk(4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator,
fe->discrete.denominator);
return 0;
}
static int s2255_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct s2255_channel *channel = video_drvdata(file);
struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
struct s2255_fh *fh;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
int state;
dprintk(1, "s2255: open called (dev=%s)\n",
video_device_node_name(vdev));
/*
* open lock necessary to prevent multiple instances
* of v4l-conf (or other programs) from simultaneously
* reloading firmware.
*/
mutex_lock(&dev->open_lock);
state = atomic_read(&dev->fw_data->fw_state);
switch (state) {
case S2255_FW_DISCONNECTING:
mutex_unlock(&dev->open_lock);
return -ENODEV;
case S2255_FW_FAILED:
s2255_dev_err(&dev->udev->dev,
"firmware load failed. retrying.\n");
s2255_fwload_start(dev, 1);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
/* state may have changed, re-read */
state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_NOTLOADED:
case S2255_FW_LOADED_DSPWAIT:
/* give S2255_LOAD_TIMEOUT time for firmware to load in case
driver loaded and then device immediately opened */
printk(KERN_INFO "%s waiting for firmware load\n", __func__);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
/* state may have changed, re-read */
state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_SUCCESS:
default:
break;
}
/* state may have changed in above switch statement */
switch (state) {
case S2255_FW_SUCCESS:
break;
case S2255_FW_FAILED:
printk(KERN_INFO "2255 firmware load failed.\n");
mutex_unlock(&dev->open_lock);
return -ENODEV;
case S2255_FW_DISCONNECTING:
printk(KERN_INFO "%s: disconnecting\n", __func__);
mutex_unlock(&dev->open_lock);
return -ENODEV;
case S2255_FW_LOADED_DSPWAIT:
case S2255_FW_NOTLOADED:
printk(KERN_INFO "%s: firmware not loaded yet"
"please try again later\n",
__func__);
/*
* Timeout on firmware load means device unusable.
* Set firmware failure state.
* On next s2255_open the firmware will be reloaded.
*/
atomic_set(&dev->fw_data->fw_state,
S2255_FW_FAILED);
mutex_unlock(&dev->open_lock);
return -EAGAIN;
default:
printk(KERN_INFO "%s: unknown state\n", __func__);
mutex_unlock(&dev->open_lock);
return -EFAULT;
}
mutex_unlock(&dev->open_lock);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (NULL == fh)
return -ENOMEM;
file->private_data = fh;
fh->dev = dev;
fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fh->channel = channel;
if (!channel->configured) {
/* configure channel to default state */
channel->fmt = &formats[0];
s2255_set_mode(channel, &channel->mode);
channel->configured = 1;
}
dprintk(1, "%s: dev=%s type=%s\n", __func__,
video_device_node_name(vdev), v4l2_type_names[type]);
dprintk(2, "%s: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", __func__,
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&channel->vidq);
dprintk(4, "%s: list_empty active=%d\n", __func__,
list_empty(&channel->vidq.active));
videobuf_queue_vmalloc_init(&fh->vb_vidq, &s2255_video_qops,
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
sizeof(struct s2255_buffer),
fh, vdev->lock);
return 0;
}
static unsigned int s2255_poll(struct file *file,
struct poll_table_struct *wait)
{
struct s2255_fh *fh = file->private_data;
int rc;
dprintk(100, "%s\n", __func__);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
rc = videobuf_poll_stream(file, &fh->vb_vidq, wait);
return rc;
}
static void s2255_destroy(struct s2255_dev *dev)
{
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
/* make sure firmware still not trying to load */
del_timer(&dev->timer); /* only started in .probe and .open */
if (dev->fw_data->fw_urb) {
usb_kill_urb(dev->fw_data->fw_urb);
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
}
release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
/* reset the DSP so firmware can be reloaded next time */
s2255_reset_dsppower(dev);
mutex_destroy(&dev->open_lock);
mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
v4l2_device_unregister(&dev->v4l2_dev);
dprintk(1, "%s", __func__);
kfree(dev);
}
static int s2255_release(struct file *file)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
struct video_device *vdev = video_devdata(file);
struct s2255_channel *channel = fh->channel;
if (!dev)
return -ENODEV;
/* turn off stream */
if (res_check(fh)) {
if (channel->b_acquire)
s2255_stop_acquire(fh->channel);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
}
videobuf_mmap_free(&fh->vb_vidq);
dprintk(1, "%s (dev=%s)\n", __func__, video_device_node_name(vdev));
kfree(fh);
return 0;
}
static int s2255_mmap_v4l(struct file *file, struct vm_area_struct *vma)
{
struct s2255_fh *fh = file->private_data;
int ret;
if (!fh)
return -ENODEV;
dprintk(4, "%s, vma=0x%08lx\n", __func__, (unsigned long)vma);
ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
dprintk(4, "%s vma start=0x%08lx, size=%ld, ret=%d\n", __func__,
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
return ret;
}
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
.release = s2255_release,
.poll = s2255_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = s2255_mmap_v4l,
};
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_s_jpegcomp = vidioc_s_jpegcomp,
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_g_parm = vidioc_g_parm,
.vidioc_enum_frameintervals = vidioc_enum_frameintervals,
};
static void s2255_video_device_release(struct video_device *vdev)
{
struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
dprintk(4, "%s, chnls: %d \n", __func__,
atomic_read(&dev->num_channels));
if (atomic_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
return;
}
static struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
.release = s2255_video_device_release,
.tvnorms = S2255_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
static int s2255_probe_v4l(struct s2255_dev *dev)
{
int ret;
int i;
int cur_nr = video_nr;
struct s2255_channel *channel;
ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
if (ret)
return ret;
/* initialize all video 4 linux */
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
channel = &dev->channel[i];
INIT_LIST_HEAD(&channel->vidq.active);
channel->vidq.dev = dev;
/* register 4 video devices */
channel->vdev = template;
channel->vdev.lock = &dev->lock;
/* Locking in file operations other than ioctl should be done
by the driver, not the V4L2 core.
This driver needs auditing so that this flag can be removed. */
set_bit(V4L2_FL_LOCK_ALL_FOPS, &channel->vdev.flags);
channel->vdev.v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(&channel->vdev, channel);
if (video_nr == -1)
ret = video_register_device(&channel->vdev,
VFL_TYPE_GRABBER,
video_nr);
else
ret = video_register_device(&channel->vdev,
VFL_TYPE_GRABBER,
cur_nr + i);
if (ret) {
dev_err(&dev->udev->dev,
"failed to register video device!\n");
break;
}
atomic_inc(&dev->num_channels);
v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
video_device_node_name(&channel->vdev));
}
printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %s\n",
S2255_VERSION);
/* if no channels registered, return error and probe will fail*/
if (atomic_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
printk(KERN_WARNING "s2255: Not all channels available.\n");
return 0;
}
/* this function moves the usb stream read pipe data
* into the system buffers.
* returns 0 on success, EAGAIN if there is more data to process (call
* this function again).
*
* Received frame structure:
* bytes 0-3: marker : 0x2255DA4AL (S2255_MARKER_FRAME)
* bytes 4-7: channel: 0-3
* bytes 8-11: payload size: size of the frame
* bytes 12-payloadsize+12: frame data
*/
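/*
 * Equivalently, as indexed by save_frame() below (an illustrative
 * struct, not one defined by this driver; the field names are
 * assumptions):
 *
 *	struct s2255_frame_hdr {
 *		__le32 marker;       // pdword[0]: S2255_MARKER_FRAME
 *		__le32 channel;      // pdword[1]: 0-3, remapped via G_chnmap
 *		__le32 reserved;     // pdword[2]
 *		__le32 payload_size; // pdword[3]
 *		__le32 jpeg_size;    // pdword[4], JPEG modes only
 *	};
 */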
static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
{
char *pdest;
u32 offset = 0;
int bframe = 0;
char *psrc;
unsigned long copy_size;
unsigned long size;
s32 idx = -1;
struct s2255_framei *frm;
unsigned char *pdata;
struct s2255_channel *channel;
dprintk(100, "buffer to user\n");
channel = &dev->channel[dev->cc];
idx = channel->cur_frame;
frm = &channel->buffer.frame[idx];
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
__le32 *pdword; /*data from dsp is little endian */
int payload;
/* search for marker codes */
pdata = (unsigned char *)pipe_info->transfer_buffer;
pdword = (__le32 *)pdata;
for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
switch (*pdword) {
case S2255_MARKER_FRAME:
dprintk(4, "found frame marker at offset:"
" %d [%x %x]\n", jj, pdata[0],
pdata[1]);
offset = jj + PREFIX_SIZE;
bframe = 1;
cc = le32_to_cpu(pdword[1]);
if (cc >= MAX_CHANNELS) {
printk(KERN_ERR
"bad channel\n");
return -EINVAL;
}
/* reverse it */
dev->cc = G_chnmap[cc];
channel = &dev->channel[dev->cc];
payload = le32_to_cpu(pdword[3]);
if (payload > channel->req_image_size) {
channel->bad_payload++;
/* discard the bad frame */
return -EINVAL;
}
channel->pkt_size = payload;
channel->jpg_size = le32_to_cpu(pdword[4]);
break;
case S2255_MARKER_RESPONSE:
pdata += DEF_USB_BLOCK;
jj += DEF_USB_BLOCK;
if (le32_to_cpu(pdword[1]) >= MAX_CHANNELS)
break;
cc = G_chnmap[le32_to_cpu(pdword[1])];
if (cc >= MAX_CHANNELS)
break;
channel = &dev->channel[cc];
switch (pdword[2]) {
case S2255_RESPONSE_SETMODE:
/* check if channel valid */
/* set mode ready */
channel->setmode_ready = 1;
wake_up(&channel->wait_setmode);
dprintk(5, "setmode ready %d\n", cc);
break;
case S2255_RESPONSE_FW:
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
/* all channels ready */
printk(KERN_INFO "s2255: fw loaded\n");
atomic_set(&dev->fw_data->fw_state,
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
case S2255_RESPONSE_STATUS:
channel->vidstatus = le32_to_cpu(pdword[3]);
channel->vidstatus_ready = 1;
wake_up(&channel->wait_vidstatus);
dprintk(5, "got vidstatus %x chan %d\n",
le32_to_cpu(pdword[3]), cc);
break;
default:
printk(KERN_INFO "s2255 unknown resp\n");
}
default:
pdata++;
break;
}
if (bframe)
break;
} /* for */
if (!bframe)
return -EINVAL;
}
channel = &dev->channel[dev->cc];
idx = channel->cur_frame;
frm = &channel->buffer.frame[idx];
/* search done. now find out if should be acquiring on this channel */
if (!channel->b_acquire) {
/* we found a frame, but this channel is turned off */
frm->ulState = S2255_READ_IDLE;
return -EINVAL;
}
if (frm->ulState == S2255_READ_IDLE) {
frm->ulState = S2255_READ_FRAME;
frm->cur_size = 0;
}
/* skip the marker 512 bytes (and offset if out of sync) */
psrc = (u8 *)pipe_info->transfer_buffer + offset;
if (frm->lpvbits == NULL) {
dprintk(1, "s2255 frame buffer == NULL.%p %p %d %d",
frm, dev, dev->cc, idx);
return -ENOMEM;
}
pdest = frm->lpvbits + frm->cur_size;
copy_size = (pipe_info->cur_transfer_size - offset);
size = channel->pkt_size - PREFIX_SIZE;
/* sanity check on pdest */
if ((copy_size + frm->cur_size) < channel->req_image_size)
memcpy(pdest, psrc, copy_size);
frm->cur_size += copy_size;
dprintk(4, "cur_size size %lu size %lu \n", frm->cur_size, size);
if (frm->cur_size >= size) {
dprintk(2, "****************[%d]Buffer[%d]full*************\n",
dev->cc, idx);
channel->last_frame = channel->cur_frame;
channel->cur_frame++;
/* end of system frame ring buffer, start at zero */
if ((channel->cur_frame == SYS_FRAMES) ||
(channel->cur_frame == channel->buffer.dwFrames))
channel->cur_frame = 0;
/* frame ready */
if (channel->b_acquire)
s2255_got_frame(channel, channel->jpg_size);
channel->frame_count++;
frm->ulState = S2255_READ_IDLE;
frm->cur_size = 0;
}
/* done successfully */
return 0;
}
static void s2255_read_video_callback(struct s2255_dev *dev,
struct s2255_pipeinfo *pipe_info)
{
int res;
dprintk(50, "callback read video \n");
if (dev->cc >= MAX_CHANNELS) {
dev->cc = 0;
dev_err(&dev->udev->dev, "invalid channel\n");
return;
}
/* otherwise copy to the system buffers */
res = save_frame(dev, pipe_info);
if (res != 0)
dprintk(4, "s2255: read callback failed\n");
dprintk(50, "callback read video done\n");
return;
}
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
u16 Index, u16 Value, void *TransferBuffer,
s32 TransferBufferLength, int bOut)
{
int r;
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
Value, Index, TransferBuffer,
TransferBufferLength, HZ * 5);
} else {
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
Value, Index, TransferBuffer,
TransferBufferLength, HZ * 5);
}
return r;
}
/*
* retrieve FX2 firmware version. future use.
* @param dev pointer to device extension
* @return -1 for fail, else returns firmware version as an int (16 bits)
*/
static int s2255_get_fx2fw(struct s2255_dev *dev)
{
int fw;
int ret;
unsigned char transBuffer[64];
ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer, 2,
S2255_VR_IN);
if (ret < 0)
dprintk(2, "get fw error: %x\n", ret);
fw = transBuffer[0] + (transBuffer[1] << 8);
dprintk(2, "Get FW %x %x\n", transBuffer[0], transBuffer[1]);
return fw;
}
/*
* Create the system ring buffer to copy frames into from the
* usb read pipe.
*/
static int s2255_create_sys_buffers(struct s2255_channel *channel)
{
unsigned long i;
unsigned long reqsize;
dprintk(1, "create sys buffers\n");
channel->buffer.dwFrames = SYS_FRAMES;
/* always allocate maximum size (PAL) for system buffers */
reqsize = SYS_FRAMES_MAXSIZE;
for (i = 0; i < SYS_FRAMES; i++) {
/* allocate the frames */
channel->buffer.frame[i].lpvbits = vmalloc(reqsize);
dprintk(1, "valloc %p chan %d, idx %lu, pdata %p\n",
&channel->buffer.frame[i], channel->idx, i,
channel->buffer.frame[i].lpvbits);
channel->buffer.frame[i].size = reqsize;
if (channel->buffer.frame[i].lpvbits == NULL) {
printk(KERN_INFO "out of memory. using less frames\n");
channel->buffer.dwFrames = i;
break;
}
}
/* make sure internal states are set */
for (i = 0; i < SYS_FRAMES; i++) {
channel->buffer.frame[i].ulState = 0;
channel->buffer.frame[i].cur_size = 0;
}
channel->cur_frame = 0;
channel->last_frame = -1;
return 0;
}
static int s2255_release_sys_buffers(struct s2255_channel *channel)
{
unsigned long i;
dprintk(1, "release sys buffers\n");
for (i = 0; i < SYS_FRAMES; i++) {
if (channel->buffer.frame[i].lpvbits) {
dprintk(1, "vfree %p\n",
channel->buffer.frame[i].lpvbits);
vfree(channel->buffer.frame[i].lpvbits);
}
channel->buffer.frame[i].lpvbits = NULL;
}
return 0;
}
static int s2255_board_init(struct s2255_dev *dev)
{
struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT;
int fw_ver;
int j;
struct s2255_pipeinfo *pipe = &dev->pipe;
dprintk(4, "board init: %p", dev);
memset(pipe, 0, sizeof(*pipe));
pipe->dev = dev;
pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
pipe->max_transfer_size = S2255_USB_XFER_SIZE;
pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
GFP_KERNEL);
if (pipe->transfer_buffer == NULL) {
dprintk(1, "out of memory!\n");
return -ENOMEM;
}
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
printk(KERN_INFO "s2255: usb firmware version %d.%d\n",
(fw_ver >> 8) & 0xff,
fw_ver & 0xff);
if (fw_ver < S2255_CUR_USB_FWVER)
printk(KERN_INFO "s2255: newer USB firmware available\n");
for (j = 0; j < MAX_CHANNELS; j++) {
struct s2255_channel *channel = &dev->channel[j];
channel->b_acquire = 0;
channel->mode = mode_def;
if (dev->pid == 0x2257 && j > 1)
channel->mode.color |= (1 << 16);
channel->jc.quality = S2255_DEF_JPEG_QUAL;
channel->width = LINE_SZ_4CIFS_NTSC;
channel->height = NUM_LINES_4CIFS_NTSC * 2;
channel->fmt = &formats[0];
channel->mode.restart = 1;
channel->req_image_size = get_transfer_size(&mode_def);
channel->frame_count = 0;
/* create the system buffers */
s2255_create_sys_buffers(channel);
}
/* start read pipe */
s2255_start_readpipe(dev);
dprintk(1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
dprintk(1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
if (dev->channel[i].b_acquire)
s2255_stop_acquire(&dev->channel[i]);
}
s2255_stop_readpipe(dev);
for (i = 0; i < MAX_CHANNELS; i++)
s2255_release_sys_buffers(&dev->channel[i]);
/* release transfer buffer */
kfree(dev->pipe.transfer_buffer);
return 0;
}
static void read_pipe_completion(struct urb *purb)
{
struct s2255_pipeinfo *pipe_info;
struct s2255_dev *dev;
int status;
int pipe;
pipe_info = purb->context;
dprintk(100, "%s: urb:%p, status %d\n", __func__, purb,
purb->status);
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
dev = pipe_info->dev;
if (dev == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
dprintk(2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
dprintk(2, "%s: exiting USB pipe", __func__);
return;
}
if (status == 0)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
dprintk(1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
/* reuse urb */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
pipe_info->transfer_buffer,
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
if (pipe_info->state != 0) {
if (usb_submit_urb(pipe_info->stream_urb, GFP_ATOMIC)) {
dev_err(&dev->udev->dev, "error submitting urb\n");
}
} else {
dprintk(2, "%s :complete state 0\n", __func__);
}
return;
}
static int s2255_start_readpipe(struct s2255_dev *dev)
{
int pipe;
int retval;
struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
dprintk(2, "%s: IN %d\n", __func__, dev->read_endpoint);
pipe_info->state = 1;
pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pipe_info->stream_urb) {
dev_err(&dev->udev->dev,
"ReadStream: Unable to alloc URB\n");
return -ENOMEM;
}
/* transfer buffer allocated in board_init */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
pipe_info->transfer_buffer,
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
printk(KERN_ERR "s2255: start read pipe failed\n");
return retval;
}
return 0;
}
/* starts acquisition process */
static int s2255_start_acquire(struct s2255_channel *channel)
{
unsigned char *buffer;
int res;
unsigned long chn_rev;
int j;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
channel->last_frame = -1;
channel->bad_payload = 0;
channel->cur_frame = 0;
for (j = 0; j < SYS_FRAMES; j++) {
channel->buffer.frame[j].ulState = 0;
channel->buffer.frame[j].cur_size = 0;
}
/* send the start command */
*(__le32 *) buffer = IN_DATA_TOKEN;
*((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
*((__le32 *) buffer + 2) = CMD_START;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
dprintk(2, "start acquire exit[%d] %d \n", channel->idx, res);
kfree(buffer);
return 0;
}
static int s2255_stop_acquire(struct s2255_channel *channel)
{
unsigned char *buffer;
int res;
unsigned long chn_rev;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
/* send the stop command */
*(__le32 *) buffer = IN_DATA_TOKEN;
*((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
*((__le32 *) buffer + 2) = CMD_STOP;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
kfree(buffer);
channel->b_acquire = 0;
dprintk(4, "%s: chn %d, res %d\n", __func__, channel->idx, res);
return res;
}
static void s2255_stop_readpipe(struct s2255_dev *dev)
{
struct s2255_pipeinfo *pipe = &dev->pipe;
pipe->state = 0;
if (pipe->stream_urb) {
/* cancel urb */
usb_kill_urb(pipe->stream_urb);
usb_free_urb(pipe->stream_urb);
pipe->stream_urb = NULL;
}
dprintk(4, "%s", __func__);
return;
}
static void s2255_fwload_start(struct s2255_dev *dev, int reset)
{
if (reset)
s2255_reset_dsppower(dev);
dev->fw_data->fw_size = dev->fw_data->fw->size;
atomic_set(&dev->fw_data->fw_state, S2255_FW_NOTLOADED);
memcpy(dev->fw_data->pfw_data,
dev->fw_data->fw->data, CHUNK_SIZE);
dev->fw_data->fw_loaded = CHUNK_SIZE;
usb_fill_bulk_urb(dev->fw_data->fw_urb, dev->udev,
usb_sndbulkpipe(dev->udev, 2),
dev->fw_data->pfw_data,
CHUNK_SIZE, s2255_fwchunk_complete,
dev->fw_data);
mod_timer(&dev->timer, jiffies + HZ);
}
/* standard usb probe function */
static int s2255_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct s2255_dev *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int i;
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
dprintk(2, "%s\n", __func__);
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
return -ENOMEM;
}
atomic_set(&dev->num_channels, 0);
dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
goto errorFWDATA1;
mutex_init(&dev->lock);
mutex_init(&dev->open_lock);
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
dev_err(&interface->dev, "null usb device\n");
retval = -ENODEV;
goto errorUDEV;
}
dprintk(1, "dev: %p, udev %p interface %p\n", dev,
dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
dprintk(1, "num endpoints %d\n", iface_desc->desc.bNumEndpoints);
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!dev->read_endpoint && usb_endpoint_is_bulk_in(endpoint)) {
/* we found the bulk in endpoint */
dev->read_endpoint = endpoint->bEndpointAddress;
}
}
if (!dev->read_endpoint) {
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
init_timer(&dev->timer);
dev->timer.function = s2255_timer;
dev->timer.data = (unsigned long)dev->fw_data;
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_channel *channel = &dev->channel[i];
dev->channel[i].idx = i;
init_waitqueue_head(&channel->wait_setmode);
init_waitqueue_head(&channel->wait_vidstatus);
}
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->fw_data->fw_urb) {
dev_err(&interface->dev, "out of memory!\n");
goto errorFWURB;
}
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
dev_err(&interface->dev, "out of memory!\n");
goto errorFWDATA2;
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
goto errorREQFW;
}
/* check the firmware is valid */
fw_size = dev->fw_data->fw->size;
pdata = (__le32 *) &dev->fw_data->fw->data[fw_size - 8];
if (*pdata != S2255_FW_MARKER) {
printk(KERN_INFO "Firmware invalid.\n");
retval = -ENODEV;
goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
printk(KERN_INFO "s2255 dsp fw version %x\n", *pRel);
dev->dsp_fw_ver = le32_to_cpu(*pRel);
if (dev->dsp_fw_ver < S2255_CUR_DSP_FWVER)
printk(KERN_INFO "s2255: f2255usb.bin out of date.\n");
if (dev->pid == 0x2257 &&
dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
printk(KERN_WARNING "s2255: 2257 requires firmware %d"
" or above.\n", S2255_MIN_DSP_COLORFILTER);
}
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
goto errorBOARDINIT;
spin_lock_init(&dev->slock);
s2255_fwload_start(dev, 0);
/* loads v4l specific */
retval = s2255_probe_v4l(dev);
if (retval)
goto errorBOARDINIT;
dev_info(&interface->dev, "Sensoray 2255 detected\n");
return 0;
errorBOARDINIT:
s2255_board_shutdown(dev);
errorFWMARKER:
release_firmware(dev->fw_data->fw);
errorREQFW:
kfree(dev->fw_data->pfw_data);
errorFWDATA2:
usb_free_urb(dev->fw_data->fw_urb);
errorFWURB:
del_timer(&dev->timer);
errorEP:
usb_put_dev(dev->udev);
errorUDEV:
kfree(dev->fw_data);
mutex_destroy(&dev->open_lock);
mutex_destroy(&dev->lock);
errorFWDATA1:
kfree(dev);
printk(KERN_WARNING "Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
/* disconnect routine. when board is removed physically or with rmmod */
static void s2255_disconnect(struct usb_interface *interface)
{
struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
int channels = atomic_read(&dev->num_channels);
mutex_lock(&dev->lock);
v4l2_device_disconnect(&dev->v4l2_dev);
mutex_unlock(&dev->lock);
/*see comments in the uvc_driver.c usb disconnect function */
atomic_inc(&dev->num_channels);
/* unregister each video device. */
for (i = 0; i < channels; i++)
video_unregister_device(&dev->channel[i].vdev);
/* wake up any of our timers */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
dev->channel[i].setmode_ready = 1;
wake_up(&dev->channel[i].wait_setmode);
dev->channel[i].vidstatus_ready = 1;
wake_up(&dev->channel[i].wait_vidstatus);
}
if (atomic_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
dev_info(&interface->dev, "%s\n", __func__);
}
static struct usb_driver s2255_driver = {
.name = S2255_DRIVER_NAME,
.probe = s2255_probe,
.disconnect = s2255_disconnect,
.id_table = s2255_table,
};
module_usb_driver(s2255_driver);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
MODULE_LICENSE("GPL");
MODULE_VERSION(S2255_VERSION);
| gpl-2.0 |
o11s/open80211s | net/openvswitch/flow.c | 47 | 40122 | /*
* Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
static struct kmem_cache *flow_cache;
static int check_header(struct sk_buff *skb, int len)
{
if (unlikely(skb->len < len))
return -EINVAL;
if (unlikely(!pskb_may_pull(skb, len)))
return -ENOMEM;
return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_network_offset(skb) +
sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int ip_len;
int err;
err = check_header(skb, nh_ofs + sizeof(struct iphdr));
if (unlikely(err))
return err;
ip_len = ip_hdrlen(skb);
if (unlikely(ip_len < sizeof(struct iphdr) ||
skb->len < nh_ofs + ip_len))
return -EINVAL;
skb_set_transport_header(skb, nh_ofs + ip_len);
return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
int th_ofs = skb_transport_offset(skb);
int tcp_len;
if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
return false;
tcp_len = tcp_hdrlen(skb);
if (unlikely(tcp_len < sizeof(struct tcphdr) ||
skb->len < th_ofs + tcp_len))
return false;
return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct udphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct icmphdr));
}
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
struct timespec cur_ts;
u64 cur_ms, idle_ms;
ktime_get_ts(&cur_ts);
idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
cur_ts.tv_nsec / NSEC_PER_MSEC;
return cur_ms - idle_ms;
}
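/*
 * Example: if the flow was last used 250 jiffies ago and HZ == 1000,
 * idle_ms is 250 and the reported time is the current wall clock in
 * milliseconds minus 250.
 */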
#define SW_FLOW_KEY_OFFSET(field) \
(offsetof(struct sw_flow_key, field) + \
FIELD_SIZEOF(struct sw_flow_key, field))
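/*
 * SW_FLOW_KEY_OFFSET(field) is the number of bytes of struct
 * sw_flow_key that are meaningful once 'field' has been filled in:
 * the field's offset plus its size. For example,
 * SW_FLOW_KEY_OFFSET(ipv6.label) covers everything up to and
 * including ipv6.label; bytes past key_len are ignored when hashing
 * and comparing keys.
 */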
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
int *key_lenp)
{
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int nh_len;
int payload_ofs;
struct ipv6hdr *nh;
uint8_t nexthdr;
__be16 frag_off;
int err;
*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
err = check_header(skb, nh_ofs + sizeof(*nh));
if (unlikely(err))
return err;
nh = ipv6_hdr(skb);
nexthdr = nh->nexthdr;
payload_ofs = (u8 *)(nh + 1) - skb->data;
key->ip.proto = NEXTHDR_NONE;
key->ip.tos = ipv6_get_dsfield(nh);
key->ip.ttl = nh->hop_limit;
key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
key->ipv6.addr.src = nh->saddr;
key->ipv6.addr.dst = nh->daddr;
payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
if (unlikely(payload_ofs < 0))
return -EINVAL;
if (frag_off) {
if (frag_off & htons(~0x7))
key->ip.frag = OVS_FRAG_TYPE_LATER;
else
key->ip.frag = OVS_FRAG_TYPE_FIRST;
}
nh_len = payload_ofs - nh_ofs;
skb_set_transport_header(skb, nh_ofs + nh_len);
key->ip.proto = nexthdr;
return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
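/*
 * Byte 13 of the TCP header is the flags byte; masking with 0x3f
 * keeps the six classic flags (FIN, SYN, RST, PSH, ACK, URG) and
 * drops the ECE/CWR bits.
 */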
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
u8 tcp_flags = 0;
if ((flow->key.eth.type == htons(ETH_P_IP) ||
flow->key.eth.type == htons(ETH_P_IPV6)) &&
flow->key.ip.proto == IPPROTO_TCP &&
likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
u8 *tcp = (u8 *)tcp_hdr(skb);
tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
}
spin_lock(&flow->lock);
flow->used = jiffies;
flow->packet_count++;
flow->byte_count += skb->len;
flow->tcp_flags |= tcp_flags;
spin_unlock(&flow->lock);
}
struct sw_flow_actions *ovs_flow_actions_alloc(int size)
{
struct sw_flow_actions *sfa;
if (size > MAX_ACTIONS_BUFSIZE)
return ERR_PTR(-EINVAL);
sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
if (!sfa)
return ERR_PTR(-ENOMEM);
sfa->actions_len = 0;
return sfa;
}
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
spin_lock_init(&flow->lock);
flow->sf_acts = NULL;
return flow;
}
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
hash = jhash_1word(hash, table->hash_seed);
return flex_array_get(table->buckets,
(hash & (table->n_buckets - 1)));
}
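/*
 * n_buckets is always a power of two, so "hash & (n_buckets - 1)" is
 * a cheap modulo: with 1024 buckets the low 10 bits of the seeded
 * jhash select the bucket.
 */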
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
struct flex_array *buckets;
int i, err;
buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
if (err) {
flex_array_free(buckets);
return NULL;
}
for (i = 0; i < n_buckets; i++)
INIT_HLIST_HEAD((struct hlist_head *)
flex_array_get(buckets, i));
return buckets;
}
static void free_buckets(struct flex_array *buckets)
{
flex_array_free(buckets);
}
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
table->buckets = alloc_buckets(new_size);
if (!table->buckets) {
kfree(table);
return NULL;
}
table->n_buckets = new_size;
table->count = 0;
table->node_ver = 0;
table->keep_flows = false;
get_random_bytes(&table->hash_seed, sizeof(u32));
return table;
}
void ovs_flow_tbl_destroy(struct flow_table *table)
{
int i;
if (!table)
return;
if (table->keep_flows)
goto skip_flows;
for (i = 0; i < table->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head = flex_array_get(table->buckets, i);
struct hlist_node *n;
int ver = table->node_ver;
hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
hlist_del_rcu(&flow->hash_node[ver]);
ovs_flow_free(flow);
}
}
skip_flows:
free_buckets(table->buckets);
kfree(table);
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
ovs_flow_tbl_destroy(table);
}
void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
if (!table)
return;
call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
int ver;
int i;
ver = table->node_ver;
while (*bucket < table->n_buckets) {
i = 0;
head = flex_array_get(table->buckets, *bucket);
hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
if (i < *last) {
i++;
continue;
}
*last = i + 1;
return flow;
}
(*bucket)++;
*last = 0;
}
return NULL;
}
static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
head = find_bucket(table, flow->hash);
hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
table->count++;
}
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
int old_ver;
int i;
old_ver = old->node_ver;
new->node_ver = !old_ver;
/* Insert in new table. */
for (i = 0; i < old->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head;
head = flex_array_get(old->buckets, i);
hlist_for_each_entry(flow, head, hash_node[old_ver])
__flow_tbl_insert(new, flow);
}
old->keep_flows = true;
}
static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
struct flow_table *new_table;
new_table = ovs_flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
flow_table_copy_flows(table, new_table);
return new_table;
}
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
return __flow_tbl_rehash(table, table->n_buckets);
}
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
return __flow_tbl_rehash(table, table->n_buckets * 2);
}
void ovs_flow_free(struct sw_flow *flow)
{
if (unlikely(!flow))
return;
kfree((struct sw_flow_actions __force *)flow->sf_acts);
kmem_cache_free(flow_cache, flow);
}
/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
ovs_flow_free(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
kfree_rcu(sf_acts, rcu);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
struct qtag_prefix {
__be16 eth_type; /* ETH_P_8021Q */
__be16 tci;
};
struct qtag_prefix *qp;
if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
return 0;
if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
sizeof(__be16))))
return -ENOMEM;
qp = (struct qtag_prefix *) skb->data;
key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
__skb_pull(skb, sizeof(struct qtag_prefix));
return 0;
}
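/*
 * Illustrative tag (assumed example bytes): an 802.1Q prefix of
 * 81 00 20 05 carries TCI 0x2005 -- priority 1, CFI 0, VLAN ID 5.
 * VLAN_TAG_PRESENT is OR'ed in so a present tag with TCI 0 can be
 * told apart from "no tag at all".
 */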
static __be16 parse_ethertype(struct sk_buff *skb)
{
struct llc_snap_hdr {
u8 dsap; /* Always 0xAA */
u8 ssap; /* Always 0xAA */
u8 ctrl;
u8 oui[3];
__be16 ethertype;
};
struct llc_snap_hdr *llc;
__be16 proto;
proto = *(__be16 *) skb->data;
__skb_pull(skb, sizeof(__be16));
if (ntohs(proto) >= ETH_P_802_3_MIN)
return proto;
if (skb->len < sizeof(struct llc_snap_hdr))
return htons(ETH_P_802_2);
if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
return htons(0);
llc = (struct llc_snap_hdr *) skb->data;
if (llc->dsap != LLC_SAP_SNAP ||
llc->ssap != LLC_SAP_SNAP ||
(llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
return htons(ETH_P_802_2);
__skb_pull(skb, sizeof(struct llc_snap_hdr));
if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
return llc->ethertype;
return htons(ETH_P_802_2);
}
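/*
 * Example: first payload bytes 08 00 (0x0800 >= ETH_P_802_3_MIN) mean
 * Ethernet II carrying IPv4. A smaller value is an 802.3 length
 * field; the real ethertype is then recovered only if an
 * AA AA 03 00 00 00 LLC/SNAP header follows, otherwise the frame is
 * reported as raw 802.2.
 */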
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
int *key_lenp, int nh_len)
{
struct icmp6hdr *icmp = icmp6_hdr(skb);
int error = 0;
int key_len;
/* The ICMPv6 type and code fields use the 16-bit transport port
* fields, so we need to store them in 16-bit network byte order.
*/
key->ipv6.tp.src = htons(icmp->icmp6_type);
key->ipv6.tp.dst = htons(icmp->icmp6_code);
key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
int icmp_len = skb->len - skb_transport_offset(skb);
struct nd_msg *nd;
int offset;
key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
/* In order to process neighbor discovery options, we need the
* entire packet.
*/
if (unlikely(icmp_len < sizeof(*nd)))
goto out;
if (unlikely(skb_linearize(skb))) {
error = -ENOMEM;
goto out;
}
nd = (struct nd_msg *)skb_transport_header(skb);
key->ipv6.nd.target = nd->target;
key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
icmp_len -= sizeof(*nd);
offset = 0;
while (icmp_len >= 8) {
struct nd_opt_hdr *nd_opt =
(struct nd_opt_hdr *)(nd->opt + offset);
int opt_len = nd_opt->nd_opt_len * 8;
if (unlikely(!opt_len || opt_len > icmp_len))
goto invalid;
/* Store the link layer address if the appropriate
* option is provided. It is considered an error if
* the same link layer option is specified twice.
*/
if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
&& opt_len == 8) {
if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
goto invalid;
memcpy(key->ipv6.nd.sll,
&nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
&& opt_len == 8) {
if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
goto invalid;
memcpy(key->ipv6.nd.tll,
&nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
}
icmp_len -= opt_len;
offset += opt_len;
}
}
goto out;
invalid:
memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
out:
*key_lenp = key_len;
return error;
}
/**
* ovs_flow_extract - extracts a flow key from an Ethernet frame.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* Ethernet header
* @in_port: port number on which @skb was received.
* @key: output flow key
* @key_lenp: length of output flow key
*
* The caller must ensure that skb->len >= ETH_HLEN.
*
* Returns 0 if successful, otherwise a negative errno value.
*
* Initializes @skb header pointers as follows:
*
* - skb->mac_header: the Ethernet header.
*
* - skb->network_header: just past the Ethernet header, or just past the
* VLAN header, to the first byte of the Ethernet payload.
*
* - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
* on output, then just past the IP header, if one is present and
* of a correct length, otherwise the same as skb->network_header.
* For other key->eth.type values it is left untouched.
*/
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
int *key_lenp)
{
int error = 0;
int key_len = SW_FLOW_KEY_OFFSET(eth);
struct ethhdr *eth;
memset(key, 0, sizeof(*key));
key->phy.priority = skb->priority;
if (OVS_CB(skb)->tun_key)
memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
key->phy.in_port = in_port;
key->phy.skb_mark = skb->mark;
skb_reset_mac_header(skb);
/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
* header in the linear data area.
*/
eth = eth_hdr(skb);
memcpy(key->eth.src, eth->h_source, ETH_ALEN);
memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
__skb_pull(skb, 2 * ETH_ALEN);
/* We are going to push all headers that we pull, so no need to
* update skb->csum here.
*/
if (vlan_tx_tag_present(skb))
key->eth.tci = htons(skb->vlan_tci);
else if (eth->h_proto == htons(ETH_P_8021Q))
if (unlikely(parse_vlan(skb, key)))
return -ENOMEM;
key->eth.type = parse_ethertype(skb);
if (unlikely(key->eth.type == htons(0)))
return -ENOMEM;
skb_reset_network_header(skb);
__skb_push(skb, skb->data - skb_mac_header(skb));
/* Network layer. */
if (key->eth.type == htons(ETH_P_IP)) {
struct iphdr *nh;
__be16 offset;
key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
error = check_iphdr(skb);
if (unlikely(error)) {
if (error == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
}
goto out;
}
nh = ip_hdr(skb);
key->ipv4.addr.src = nh->saddr;
key->ipv4.addr.dst = nh->daddr;
key->ip.proto = nh->protocol;
key->ip.tos = nh->tos;
key->ip.ttl = nh->ttl;
offset = nh->frag_off & htons(IP_OFFSET);
if (offset) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
goto out;
}
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv4.tp.src = tcp->source;
key->ipv4.tp.dst = tcp->dest;
}
} else if (key->ip.proto == IPPROTO_UDP) {
key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->ipv4.tp.src = udp->source;
key->ipv4.tp.dst = udp->dest;
}
} else if (key->ip.proto == IPPROTO_ICMP) {
key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (icmphdr_ok(skb)) {
struct icmphdr *icmp = icmp_hdr(skb);
/* The ICMP type and code fields use the 16-bit
* transport port fields, so we need to store
* them in 16-bit network byte order. */
key->ipv4.tp.src = htons(icmp->type);
key->ipv4.tp.dst = htons(icmp->code);
}
}
} else if ((key->eth.type == htons(ETH_P_ARP) ||
key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
struct arp_eth_header *arp;
arp = (struct arp_eth_header *)skb_network_header(skb);
if (arp->ar_hrd == htons(ARPHRD_ETHER)
&& arp->ar_pro == htons(ETH_P_IP)
&& arp->ar_hln == ETH_ALEN
&& arp->ar_pln == 4) {
/* We only match on the lower 8 bits of the opcode. */
if (ntohs(arp->ar_op) <= 0xff)
key->ip.proto = ntohs(arp->ar_op);
memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
nh_len = parse_ipv6hdr(skb, key, &key_len);
if (unlikely(nh_len < 0)) {
if (nh_len == -EINVAL)
skb->transport_header = skb->network_header;
else
error = nh_len;
goto out;
}
if (key->ip.frag == OVS_FRAG_TYPE_LATER)
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv6.tp.src = tcp->source;
key->ipv6.tp.dst = tcp->dest;
}
} else if (key->ip.proto == NEXTHDR_UDP) {
key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->ipv6.tp.src = udp->source;
key->ipv6.tp.dst = udp->dest;
}
} else if (key->ip.proto == NEXTHDR_ICMP) {
key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (icmp6hdr_ok(skb)) {
error = parse_icmpv6(skb, key, &key_len, nh_len);
if (error < 0)
goto out;
}
}
}
out:
*key_lenp = key_len;
return error;
}
static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
{
return jhash2((u32 *)((u8 *)key + key_start),
DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
}
static int flow_key_start(struct sw_flow_key *key)
{
if (key->tun_key.ipv4_dst)
return 0;
else
return offsetof(struct sw_flow_key, phy);
}
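/*
 * Example: for a packet received over a tunnel (tun_key.ipv4_dst is
 * set) hashing and comparison start at offset 0, so the tunnel key is
 * part of the match; otherwise they start at 'phy', skipping the
 * unused tun_key bytes at the front of struct sw_flow_key.
 */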
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
struct sw_flow_key *key, int key_len)
{
struct sw_flow *flow;
struct hlist_head *head;
u8 *_key;
int key_start;
u32 hash;
key_start = flow_key_start(key);
hash = ovs_flow_hash(key, key_start, key_len);
_key = (u8 *) key + key_start;
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
if (flow->hash == hash &&
!memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {
return flow;
}
}
return NULL;
}
void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
struct sw_flow_key *key, int key_len)
{
flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
memcpy(&flow->key, key, sizeof(flow->key));
__flow_tbl_insert(table, flow);
}
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
BUG_ON(table->count == 0);
hlist_del_rcu(&flow->hash_node[table->node_ver]);
table->count--;
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_ENCAP] = -1,
[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
[OVS_KEY_ATTR_TUNNEL] = -1,
};
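/* A length of -1 above marks nested, variable-length attributes
 * (OVS_KEY_ATTR_ENCAP and OVS_KEY_ATTR_TUNNEL); parse_flow_nlattrs()
 * below skips the exact-length check for exactly those entries.
 */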
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
const struct nlattr *a[], u32 *attrs)
{
const struct ovs_key_icmp *icmp_key;
const struct ovs_key_tcp *tcp_key;
const struct ovs_key_udp *udp_key;
switch (swkey->ip.proto) {
case IPPROTO_TCP:
if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_TCP);
*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
swkey->ipv4.tp.src = tcp_key->tcp_src;
swkey->ipv4.tp.dst = tcp_key->tcp_dst;
break;
case IPPROTO_UDP:
if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_UDP);
*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
swkey->ipv4.tp.src = udp_key->udp_src;
swkey->ipv4.tp.dst = udp_key->udp_dst;
break;
case IPPROTO_ICMP:
if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
break;
}
return 0;
}
static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
const struct nlattr *a[], u32 *attrs)
{
const struct ovs_key_icmpv6 *icmpv6_key;
const struct ovs_key_tcp *tcp_key;
const struct ovs_key_udp *udp_key;
switch (swkey->ip.proto) {
case IPPROTO_TCP:
if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_TCP);
*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
swkey->ipv6.tp.src = tcp_key->tcp_src;
swkey->ipv6.tp.dst = tcp_key->tcp_dst;
break;
case IPPROTO_UDP:
if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_UDP);
*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
swkey->ipv6.tp.src = udp_key->udp_src;
swkey->ipv6.tp.dst = udp_key->udp_dst;
break;
case IPPROTO_ICMPV6:
if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
const struct ovs_key_nd *nd_key;
if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_ND);
*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
sizeof(swkey->ipv6.nd.target));
memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
}
break;
}
return 0;
}
static int parse_flow_nlattrs(const struct nlattr *attr,
const struct nlattr *a[], u32 *attrsp)
{
const struct nlattr *nla;
u32 attrs;
int rem;
attrs = 0;
nla_for_each_nested(nla, attr, rem) {
u16 type = nla_type(nla);
int expected_len;
if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
return -EINVAL;
expected_len = ovs_key_lens[type];
if (nla_len(nla) != expected_len && expected_len != -1)
return -EINVAL;
attrs |= 1 << type;
a[type] = nla;
}
if (rem)
return -EINVAL;
*attrsp = attrs;
return 0;
}
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
struct ovs_key_ipv4_tunnel *tun_key)
{
struct nlattr *a;
int rem;
bool ttl = false;
memset(tun_key, 0, sizeof(*tun_key));
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
};
if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
ovs_tunnel_key_lens[type] != nla_len(a))
return -EINVAL;
switch (type) {
case OVS_TUNNEL_KEY_ATTR_ID:
tun_key->tun_id = nla_get_be64(a);
tun_key->tun_flags |= TUNNEL_KEY;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
tun_key->ipv4_src = nla_get_be32(a);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
tun_key->ipv4_dst = nla_get_be32(a);
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
tun_key->ipv4_tos = nla_get_u8(a);
break;
case OVS_TUNNEL_KEY_ATTR_TTL:
tun_key->ipv4_ttl = nla_get_u8(a);
ttl = true;
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT;
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
tun_key->tun_flags |= TUNNEL_CSUM;
break;
default:
return -EINVAL;
}
}
if (rem > 0)
return -EINVAL;
if (!tun_key->ipv4_dst)
return -EINVAL;
if (!ttl)
return -EINVAL;
return 0;
}
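/* Sketch of a minimal OVS_KEY_ATTR_TUNNEL payload accepted by the parser
 * above (attribute order is not significant). IPV4_DST and TTL are
 * mandatory; the rest is optional:
 *
 *	OVS_TUNNEL_KEY_ATTR_IPV4_DST   __be32   (required)
 *	OVS_TUNNEL_KEY_ATTR_TTL        u8       (required)
 *	OVS_TUNNEL_KEY_ATTR_ID         __be64   (sets TUNNEL_KEY)
 *	OVS_TUNNEL_KEY_ATTR_CSUM       flag     (sets TUNNEL_CSUM)
 */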
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
const struct ovs_key_ipv4_tunnel *tun_key)
{
struct nlattr *nla;
nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
if (!nla)
return -EMSGSIZE;
if (tun_key->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id))
return -EMSGSIZE;
if (tun_key->ipv4_src &&
nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src))
return -EMSGSIZE;
if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst))
return -EMSGSIZE;
if (tun_key->ipv4_tos &&
nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos))
return -EMSGSIZE;
if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl))
return -EMSGSIZE;
if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
if ((tun_key->tun_flags & TUNNEL_CSUM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
nla_nest_end(skb, nla);
return 0;
}
/**
* ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
* @swkey: receives the extracted flow key.
* @key_lenp: number of bytes used in @swkey.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*/
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *attr)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
const struct ovs_key_ethernet *eth_key;
int key_len;
u32 attrs;
int err;
memset(swkey, 0, sizeof(struct sw_flow_key));
key_len = SW_FLOW_KEY_OFFSET(eth);
err = parse_flow_nlattrs(attr, a, &attrs);
if (err)
return err;
/* Metadata attributes. */
if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
}
if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
if (in_port >= DP_MAX_PORTS)
return -EINVAL;
swkey->phy.in_port = in_port;
attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
} else {
swkey->phy.in_port = DP_MAX_PORTS;
}
if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
}
if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key);
if (err)
return err;
attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
/* Data attributes. */
if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
return -EINVAL;
attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
const struct nlattr *encap;
__be16 tci;
if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
(1 << OVS_KEY_ATTR_ETHERTYPE) |
(1 << OVS_KEY_ATTR_ENCAP)))
return -EINVAL;
encap = a[OVS_KEY_ATTR_ENCAP];
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
if (tci & htons(VLAN_TAG_PRESENT)) {
swkey->eth.tci = tci;
err = parse_flow_nlattrs(encap, a, &attrs);
if (err)
return err;
} else if (!tci) {
/* Corner case for truncated 802.1Q header. */
if (nla_len(encap))
return -EINVAL;
swkey->eth.type = htons(ETH_P_8021Q);
*key_lenp = key_len;
return 0;
} else {
return -EINVAL;
}
}
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN)
return -EINVAL;
attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
} else {
swkey->eth.type = htons(ETH_P_802_2);
}
if (swkey->eth.type == htons(ETH_P_IP)) {
const struct ovs_key_ipv4 *ipv4_key;
if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
return -EINVAL;
attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
return -EINVAL;
swkey->ip.proto = ipv4_key->ipv4_proto;
swkey->ip.tos = ipv4_key->ipv4_tos;
swkey->ip.ttl = ipv4_key->ipv4_ttl;
swkey->ip.frag = ipv4_key->ipv4_frag;
swkey->ipv4.addr.src = ipv4_key->ipv4_src;
swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
if (err)
return err;
}
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
const struct ovs_key_ipv6 *ipv6_key;
if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
return -EINVAL;
attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
return -EINVAL;
swkey->ipv6.label = ipv6_key->ipv6_label;
swkey->ip.proto = ipv6_key->ipv6_proto;
swkey->ip.tos = ipv6_key->ipv6_tclass;
swkey->ip.ttl = ipv6_key->ipv6_hlimit;
swkey->ip.frag = ipv6_key->ipv6_frag;
memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
sizeof(swkey->ipv6.addr.src));
memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
sizeof(swkey->ipv6.addr.dst));
if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
if (err)
return err;
}
} else if (swkey->eth.type == htons(ETH_P_ARP) ||
swkey->eth.type == htons(ETH_P_RARP)) {
const struct ovs_key_arp *arp_key;
if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
return -EINVAL;
attrs &= ~(1 << OVS_KEY_ATTR_ARP);
key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
swkey->ipv4.addr.src = arp_key->arp_sip;
swkey->ipv4.addr.dst = arp_key->arp_tip;
if (arp_key->arp_op & htons(0xff00))
return -EINVAL;
swkey->ip.proto = ntohs(arp_key->arp_op);
memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
}
if (attrs)
return -EINVAL;
*key_lenp = key_len;
return 0;
}
/**
* ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
* @flow: Receives extracted in_port, priority, tun_key and skb_mark.
* @key_len: Length of key in @flow. Used for calculating flow hash.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*
* This parses a series of Netlink attributes that form a flow key, which must
* take the same form accepted by ovs_flow_from_nlattrs(), but only enough of it to
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
const struct nlattr *attr)
{
struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
const struct nlattr *nla;
int rem;
flow->key.phy.in_port = DP_MAX_PORTS;
flow->key.phy.priority = 0;
flow->key.phy.skb_mark = 0;
memset(tun_key, 0, sizeof(flow->key.tun_key));
nla_for_each_nested(nla, attr, rem) {
int type = nla_type(nla);
if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
int err;
if (nla_len(nla) != ovs_key_lens[type])
return -EINVAL;
switch (type) {
case OVS_KEY_ATTR_PRIORITY:
flow->key.phy.priority = nla_get_u32(nla);
break;
case OVS_KEY_ATTR_TUNNEL:
err = ovs_ipv4_tun_from_nlattr(nla, tun_key);
if (err)
return err;
break;
case OVS_KEY_ATTR_IN_PORT:
if (nla_get_u32(nla) >= DP_MAX_PORTS)
return -EINVAL;
flow->key.phy.in_port = nla_get_u32(nla);
break;
case OVS_KEY_ATTR_SKB_MARK:
flow->key.phy.skb_mark = nla_get_u32(nla);
break;
}
}
}
if (rem)
return -EINVAL;
flow->hash = ovs_flow_hash(&flow->key,
flow_key_start(&flow->key), key_len);
return 0;
}
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
if (swkey->phy.priority &&
nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
goto nla_put_failure;
if (swkey->tun_key.ipv4_dst &&
ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key))
goto nla_put_failure;
if (swkey->phy.in_port != DP_MAX_PORTS &&
nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
goto nla_put_failure;
if (swkey->phy.skb_mark &&
nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
goto nla_put_failure;
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
goto nla_put_failure;
eth_key = nla_data(nla);
memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.tci)
goto unencap;
} else {
encap = NULL;
}
if (swkey->eth.type == htons(ETH_P_802_2))
goto unencap;
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
goto nla_put_failure;
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ipv4 *ipv4_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
if (!nla)
goto nla_put_failure;
ipv4_key = nla_data(nla);
ipv4_key->ipv4_src = swkey->ipv4.addr.src;
ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
ipv4_key->ipv4_proto = swkey->ip.proto;
ipv4_key->ipv4_tos = swkey->ip.tos;
ipv4_key->ipv4_ttl = swkey->ip.ttl;
ipv4_key->ipv4_frag = swkey->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
if (!nla)
goto nla_put_failure;
ipv6_key = nla_data(nla);
memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
sizeof(ipv6_key->ipv6_src));
memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
sizeof(ipv6_key->ipv6_dst));
ipv6_key->ipv6_label = swkey->ipv6.label;
ipv6_key->ipv6_proto = swkey->ip.proto;
ipv6_key->ipv6_tclass = swkey->ip.tos;
ipv6_key->ipv6_hlimit = swkey->ip.ttl;
ipv6_key->ipv6_frag = swkey->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_ARP) ||
swkey->eth.type == htons(ETH_P_RARP)) {
struct ovs_key_arp *arp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
if (!nla)
goto nla_put_failure;
arp_key = nla_data(nla);
memset(arp_key, 0, sizeof(struct ovs_key_arp));
arp_key->arp_sip = swkey->ipv4.addr.src;
arp_key->arp_tip = swkey->ipv4.addr.dst;
arp_key->arp_op = htons(swkey->ip.proto);
memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
swkey->eth.type == htons(ETH_P_IPV6)) &&
swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
if (swkey->ip.proto == IPPROTO_TCP) {
struct ovs_key_tcp *tcp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
if (!nla)
goto nla_put_failure;
tcp_key = nla_data(nla);
if (swkey->eth.type == htons(ETH_P_IP)) {
tcp_key->tcp_src = swkey->ipv4.tp.src;
tcp_key->tcp_dst = swkey->ipv4.tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
tcp_key->tcp_src = swkey->ipv6.tp.src;
tcp_key->tcp_dst = swkey->ipv6.tp.dst;
}
} else if (swkey->ip.proto == IPPROTO_UDP) {
struct ovs_key_udp *udp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
if (!nla)
goto nla_put_failure;
udp_key = nla_data(nla);
if (swkey->eth.type == htons(ETH_P_IP)) {
udp_key->udp_src = swkey->ipv4.tp.src;
udp_key->udp_dst = swkey->ipv4.tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
udp_key->udp_src = swkey->ipv6.tp.src;
udp_key->udp_dst = swkey->ipv6.tp.dst;
}
} else if (swkey->eth.type == htons(ETH_P_IP) &&
swkey->ip.proto == IPPROTO_ICMP) {
struct ovs_key_icmp *icmp_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
if (!nla)
goto nla_put_failure;
icmp_key = nla_data(nla);
icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
swkey->ip.proto == IPPROTO_ICMPV6) {
struct ovs_key_icmpv6 *icmpv6_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
sizeof(*icmpv6_key));
if (!nla)
goto nla_put_failure;
icmpv6_key = nla_data(nla);
icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
struct ovs_key_nd *nd_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
if (!nla)
goto nla_put_failure;
nd_key = nla_data(nla);
memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
sizeof(nd_key->nd_target));
memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
}
}
}
unencap:
if (encap)
nla_nest_end(skb, encap);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
/* Initializes the flow module.
* Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_cache);
}
| gpl-2.0 |
AndyLavr/Aspire-SW5-012_Kernel_4.8 | drivers/net/wireless/ath/wil6210/cfg80211.c | 47 | 39527 | /*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
#include "wil6210.h"
#include "wmi.h"
#define WIL_MAX_ROC_DURATION_MS 5000
#define CHAN60G(_channel, _flags) { \
.band = NL80211_BAND_60GHZ, \
.center_freq = 56160 + (2160 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 40, \
}
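/* Worked example: CHAN60G(1, 0) expands to channel 1 at
 * 56160 + 2160 * 1 = 58320 MHz, matching the 802.11ad channelization.
 */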
static struct ieee80211_channel wil_60ghz_channels[] = {
CHAN60G(1, 0),
CHAN60G(2, 0),
CHAN60G(3, 0),
/* channel 4 not supported yet */
};
static struct ieee80211_supported_band wil_band_60ghz = {
.channels = wil_60ghz_channels,
.n_channels = ARRAY_SIZE(wil_60ghz_channels),
.ht_cap = {
.ht_supported = true,
.cap = 0, /* TODO */
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
.mcs = {
/* MCS 1..12 - SC PHY */
.rx_mask = {0xfe, 0x1f}, /* 1..12 */
.tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
},
},
};
static const struct ieee80211_txrx_stypes
wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_AP] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_CLIENT] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_GO] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_DEVICE] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
};
static const u32 wil_cipher_suites[] = {
WLAN_CIPHER_SUITE_GCMP,
};
static const char * const key_usage_str[] = {
[WMI_KEY_USE_PAIRWISE] = "PTK",
[WMI_KEY_USE_RX_GROUP] = "RX_GTK",
[WMI_KEY_USE_TX_GROUP] = "TX_GTK",
};
int wil_iftype_nl2wmi(enum nl80211_iftype type)
{
static const struct {
enum nl80211_iftype nl;
enum wmi_network_type wmi;
} __nl2wmi[] = {
{NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
{NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
{NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
{NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
{NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
{NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
};
uint i;
for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
if (__nl2wmi[i].nl == type)
return __nl2wmi[i].wmi;
}
return -EOPNOTSUPP;
}
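/* Caller sketch: a negative return means "no mapping", so callers can
 * propagate it directly:
 *
 *	int nettype = wil_iftype_nl2wmi(wdev->iftype);
 *	if (nettype < 0)
 *		return nettype;   (-EOPNOTSUPP for unsupported iftypes)
 */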
int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
struct station_info *sinfo)
{
struct wmi_notify_req_cmd cmd = {
.cid = cid,
.interval_usec = 0,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
int rc;
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
if (rc)
return rc;
wil_dbg_wmi(wil, "Link status for CID %d: {\n"
" MCS %d TSF 0x%016llx\n"
" BF status 0x%08x SNR 0x%08x SQI %d%%\n"
" Tx Tpt %d goodput %d Rx goodput %d\n"
" Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
cid, le16_to_cpu(reply.evt.bf_mcs),
le64_to_cpu(reply.evt.tsf), reply.evt.status,
le32_to_cpu(reply.evt.snr_val),
reply.evt.sqi,
le32_to_cpu(reply.evt.tx_tpt),
le32_to_cpu(reply.evt.tx_goodput),
le32_to_cpu(reply.evt.rx_goodput),
le16_to_cpu(reply.evt.my_rx_sector),
le16_to_cpu(reply.evt.my_tx_sector),
le16_to_cpu(reply.evt.other_rx_sector),
le16_to_cpu(reply.evt.other_tx_sector));
sinfo->generation = wil->sinfo_gen;
sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
BIT(NL80211_STA_INFO_TX_BYTES) |
BIT(NL80211_STA_INFO_RX_PACKETS) |
BIT(NL80211_STA_INFO_TX_PACKETS) |
BIT(NL80211_STA_INFO_RX_BITRATE) |
BIT(NL80211_STA_INFO_TX_BITRATE) |
BIT(NL80211_STA_INFO_RX_DROP_MISC) |
BIT(NL80211_STA_INFO_TX_FAILED);
sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->rx_bytes = stats->rx_bytes;
sinfo->rx_packets = stats->rx_packets;
sinfo->rx_dropped_misc = stats->rx_dropped;
sinfo->tx_bytes = stats->tx_bytes;
sinfo->tx_packets = stats->tx_packets;
sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_status_fwconnected, wil->status)) {
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
sinfo->signal = reply.evt.sqi;
}
return rc;
}
static int wil_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
int cid = wil_find_cid(wil, mac);
wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
if (cid < 0)
return cid;
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
return rc;
}
/*
* Find @idx-th active STA for station dump.
*/
static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx)
{
int i;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (idx == 0)
return i;
idx--;
}
return -ENOENT;
}
static int wil_cfg80211_dump_station(struct wiphy *wiphy,
struct net_device *dev, int idx,
u8 *mac, struct station_info *sinfo)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
int cid = wil_find_cid_by_idx(wil, idx);
if (cid < 0)
return -ENOENT;
ether_addr_copy(mac, wil->sta[cid].addr);
wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
return rc;
}
static struct wireless_dev *
wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags, struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *p2p_wdev;
wil_dbg_misc(wil, "%s()\n", __func__);
if (type != NL80211_IFTYPE_P2P_DEVICE) {
wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
return ERR_PTR(-EINVAL);
}
if (wil->p2p_wdev) {
wil_err(wil, "%s: P2P_DEVICE interface already created\n",
__func__);
return ERR_PTR(-EINVAL);
}
p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
if (!p2p_wdev)
return ERR_PTR(-ENOMEM);
p2p_wdev->iftype = type;
p2p_wdev->wiphy = wiphy;
/* use our primary ethernet address */
ether_addr_copy(p2p_wdev->address, ndev->perm_addr);
wil->p2p_wdev = p2p_wdev;
return p2p_wdev;
}
static int wil_cfg80211_del_iface(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s()\n", __func__);
if (wdev != wil->p2p_wdev) {
wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
__func__, wdev);
return -EINVAL;
}
wil_p2p_wdev_free(wil);
return 0;
}
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = wil_to_wdev(wil);
int rc;
wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
wil_dbg_misc(wil, "interface is up. resetting...\n");
mutex_lock(&wil->mutex);
__wil_down(wil);
rc = __wil_up(wil);
mutex_unlock(&wil->mutex);
if (rc)
return rc;
}
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_MONITOR:
if (flags)
wil->monitor_flags = *flags;
else
wil->monitor_flags = 0;
break;
default:
return -EOPNOTSUPP;
}
wdev->iftype = type;
return 0;
}
static int wil_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = request->wdev;
struct {
struct wmi_start_scan_cmd cmd;
u16 chnl[4];
} __packed cmd;
uint i, n;
int rc;
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
return -EAGAIN;
}
/* check we are client side */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_DEVICE:
break;
default:
return -EOPNOTSUPP;
}
/* FW doesn't support scanning after a connection attempt */
if (test_bit(wil_status_dontscan, wil->status)) {
wil_err(wil, "Can't scan now\n");
return -EBUSY;
}
/* social scan on P2P_DEVICE is handled as P2P search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
wil_p2p_is_social_scan(request)) {
if (!wil->p2p.p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
return -EIO;
}
wil->scan_request = request;
wil->radio_wdev = wdev;
rc = wil_p2p_search(wil, request);
if (rc) {
wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
return rc;
}
(void)wil_p2p_stop_discovery(wil);
wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
for (i = 0; i < request->n_ssids; i++) {
wil_dbg_misc(wil, "SSID[%d]", i);
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
request->ssids[i].ssid,
request->ssids[i].ssid_len);
}
if (request->n_ssids)
rc = wmi_set_ssid(wil, request->ssids[0].ssid_len,
request->ssids[0].ssid);
else
rc = wmi_set_ssid(wil, 0, NULL);
if (rc) {
wil_err(wil, "set SSID for scan request failed: %d\n", rc);
return rc;
}
wil->scan_request = request;
mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
memset(&cmd, 0, sizeof(cmd));
cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
cmd.cmd.num_channels = 0;
n = min(request->n_channels, 4U);
for (i = 0; i < n; i++) {
int ch = request->channels[i]->hw_value;
if (ch == 0) {
wil_err(wil,
"Scan requested for unknown frequency %dMhz\n",
request->channels[i]->center_freq);
continue;
}
/* 0-based channel indexes */
cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
request->channels[i]->center_freq);
}
if (request->ie_len)
print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET,
request->ie, request->ie_len);
else
wil_dbg_misc(wil, "Scan has no IE's\n");
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
if (rc)
goto out;
if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
cmd.cmd.discovery_mode = 1;
wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
}
wil->radio_wdev = wdev;
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
out:
if (rc) {
del_timer_sync(&wil->scan_timer);
wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
return rc;
}
static void wil_print_crypto(struct wil6210_priv *wil,
struct cfg80211_crypto_settings *c)
{
int i, n;
wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
c->wpa_versions, c->cipher_group);
wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise);
n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise));
for (i = 0; i < n; i++)
wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
c->ciphers_pairwise[i]);
wil_dbg_misc(wil, "}\n");
wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites);
n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites));
for (i = 0; i < n; i++)
wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
c->akm_suites[i]);
wil_dbg_misc(wil, "}\n");
wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
c->control_port, be16_to_cpu(c->control_port_ethertype),
c->control_port_no_encrypt);
}
static void wil_print_connect_params(struct wil6210_priv *wil,
struct cfg80211_connect_params *sme)
{
wil_info(wil, "Connecting to:\n");
if (sme->channel) {
wil_info(wil, " Channel: %d freq %d\n",
sme->channel->hw_value, sme->channel->center_freq);
}
if (sme->bssid)
wil_info(wil, " BSSID: %pM\n", sme->bssid);
if (sme->ssid)
print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
16, 1, sme->ssid, sme->ssid_len, true);
wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
wil_info(wil, " PBSS: %d\n", sme->pbss);
wil_print_crypto(wil, &sme->crypto);
}
static int wil_cfg80211_connect(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct cfg80211_bss *bss;
struct wmi_connect_cmd conn;
const u8 *ssid_eid;
const u8 *rsn_eid;
int ch;
int rc = 0;
enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status))
return -EALREADY;
if (sme->ie_len > WMI_MAX_IE_LEN) {
wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
return -ERANGE;
}
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
if (sme->pbss)
bss_type = IEEE80211_BSS_TYPE_PBSS;
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
bss_type, IEEE80211_PRIVACY_ANY);
if (!bss) {
wil_err(wil, "Unable to find BSS\n");
return -ENOENT;
}
ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (!ssid_eid) {
wil_err(wil, "No SSID\n");
rc = -ENOENT;
goto out;
}
wil->privacy = sme->privacy;
if (wil->privacy) {
/* For secure assoc, remove old keys */
rc = wmi_del_cipher_key(wil, 0, bss->bssid,
WMI_KEY_USE_PAIRWISE);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
goto out;
}
rc = wmi_del_cipher_key(wil, 0, bss->bssid,
WMI_KEY_USE_RX_GROUP);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto out;
}
}
/* WMI_SET_APPIE_CMD. The IE may contain RSN info as well as other info
 * elements. Send it even when it is empty, to erase any previously set
 * IEs in the FW.
 */
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
if (rc)
goto out;
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
case WLAN_CAPABILITY_DMG_TYPE_AP:
conn.network_type = WMI_NETTYPE_INFRA;
break;
case WLAN_CAPABILITY_DMG_TYPE_PBSS:
conn.network_type = WMI_NETTYPE_P2P;
break;
default:
wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
bss->capability);
goto out;
}
if (wil->privacy) {
if (rsn_eid) { /* regular secure connection */
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
conn.pairwise_crypto_len = 16;
conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
conn.group_crypto_len = 16;
} else { /* WSC */
conn.dot11_auth_mode = WMI_AUTH11_WSC;
conn.auth_mode = WMI_AUTH_NONE;
}
} else { /* insecure connection */
conn.dot11_auth_mode = WMI_AUTH11_OPEN;
conn.auth_mode = WMI_AUTH_NONE;
}
conn.ssid_len = min_t(u8, ssid_eid[1], 32);
memcpy(conn.ssid, ssid_eid + 2, conn.ssid_len);
ch = bss->channel->hw_value;
if (ch == 0) {
wil_err(wil, "BSS at unknown frequency %dMhz\n",
bss->channel->center_freq);
rc = -EOPNOTSUPP;
goto out;
}
conn.channel = ch - 1;
ether_addr_copy(conn.bssid, bss->bssid);
ether_addr_copy(conn.dst_mac, bss->bssid);
set_bit(wil_status_fwconnecting, wil->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
if (rc == 0) {
netif_carrier_on(ndev);
/* Connect can take lots of time */
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
} else {
clear_bit(wil_status_fwconnecting, wil->status);
}
out:
cfg80211_put_bss(wiphy, bss);
return rc;
}
static int wil_cfg80211_disconnect(struct wiphy *wiphy,
struct net_device *ndev,
u16 reason_code)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
if (!(test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status))) {
wil_err(wil, "%s: Disconnect was called while disconnected\n",
__func__);
return 0;
}
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
if (rc)
wil_err(wil, "%s: disconnect error %d\n", __func__, rc);
return rc;
}
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie)
{
const u8 *buf = params->buf;
size_t len = params->len;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
bool tx_status = false;
struct ieee80211_mgmt *mgmt_frame = (void *)buf;
struct wmi_sw_tx_req_cmd *cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_sw_tx_complete_event evt;
} __packed evt;
/* Note: the "wait" parameter is currently not supported. User-space
 * must call remain_on_channel before mgmt_tx, or listen on a channel
 * some other way (AP/PCP or connected station).
 * In addition, we should check whether the specified "chan" argument
 * differs from the currently "listened" channel and fail if it does.
 */
wil_dbg_misc(wil, "%s()\n", __func__);
print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
}
memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (rc == 0)
tx_status = !evt.evt.status;
kfree(cmd);
out:
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
return rc;
}
static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = wil_to_wdev(wil);
wdev->preset_chandef = *chandef;
return 0;
}
static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
bool pairwise)
{
struct wireless_dev *wdev = wil_to_wdev(wil);
enum wmi_key_usage rc;
if (pairwise) {
rc = WMI_KEY_USE_PAIRWISE;
} else {
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
rc = WMI_KEY_USE_RX_GROUP;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
rc = WMI_KEY_USE_TX_GROUP;
break;
default:
/* TODO: Rx GTK or Tx GTK? */
wil_err(wil, "Can't determine GTK type\n");
rc = WMI_KEY_USE_RX_GROUP;
break;
}
}
wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
return rc;
}
static struct wil_tid_crypto_rx_single *
wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
int tid = 0;
struct wil_sta_info *s;
struct wil_tid_crypto_rx *c;
if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
/* supplicant provides Rx group key in STA mode with NULL MAC address */
if (mac_addr)
cid = wil_find_cid(wil, mac_addr);
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, 0);
if (cid < 0) {
wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
return ERR_PTR(cid);
}
s = &wil->sta[cid];
if (key_usage == WMI_KEY_USE_PAIRWISE)
c = &s->tid_crypto_rx[tid];
else
c = &s->group_crypto_rx;
return &c->key_id[key_index];
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
key_index,
key_usage,
mac_addr);
wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
if (IS_ERR(cc)) {
wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
__func__, mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
if (cc)
cc->key_set = false;
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
"Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
params->seq_len, __func__, mac_addr,
key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
if ((rc == 0) && cc) {
if (params->seq)
memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
}
return rc;
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
key_index,
key_usage,
mac_addr);
wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
key_usage_str[key_usage], key_index);
if (IS_ERR(cc))
wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
mac_addr, key_usage_str[key_usage], key_index);
if (!IS_ERR_OR_NULL(cc))
cc->key_set = false;
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
/* Need to be present or wiphy_new() will WARN */
static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool unicast,
bool multicast)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s: entered\n", __func__);
return 0;
}
static int wil_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct ieee80211_channel *chan,
unsigned int duration,
u64 *cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
__func__, chan->center_freq, duration, wdev->iftype);
rc = wil_p2p_listen(wil, duration, chan, cookie);
if (rc)
return rc;
wil->radio_wdev = wdev;
cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
GFP_KERNEL);
return 0;
}
static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s()\n", __func__);
return wil_p2p_cancel_listen(wil, cookie);
}
/**
 * Find a specific IE in a list of IEs.
 * Returns a pointer to the beginning of the IE in the list,
 * or NULL if not found.
 */
static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie,
u16 ie_len)
{
struct ieee80211_vendor_ie *vie;
u32 oui;
/* IE tag at offset 0, length at offset 1 */
if (ie_len < 2 || 2 + ie[1] > ie_len)
return NULL;
if (ie[0] != WLAN_EID_VENDOR_SPECIFIC)
return cfg80211_find_ie(ie[0], ies, ies_len);
/* make sure there is room for 3 bytes OUI + 1 byte OUI type */
if (ie[1] < 4)
return NULL;
vie = (struct ieee80211_vendor_ie *)ie;
oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2];
return cfg80211_find_vendor_ie(oui, vie->oui_type, ies,
ies_len);
}
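/* Worked example: for a WPS vendor IE dd 05 00 50 f2 04 ..., the bytes
 * 00:50:f2 pack into oui = 0x0050f2 and oui_type = 0x04, which is what
 * cfg80211_find_vendor_ie() is called with above.
 */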
/**
 * Merge the IEs in two lists into a single list.
 * IEs from the second list that already exist in the first list are not
 * included. Only vendor-specific IEs are taken from the second list, so
 * the merged list stays sorted (the vendor-specific IE has the highest
 * tag number).
 * The caller must free the allocated memory for the merged IEs.
 */
static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
const u8 *ies2, u16 ies2_len,
u8 **merged_ies, u16 *merged_len)
{
u8 *buf, *dpos;
const u8 *spos;
if (ies1_len == 0 && ies2_len == 0) {
*merged_ies = NULL;
*merged_len = 0;
return 0;
}
buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, ies1, ies1_len);
dpos = buf + ies1_len;
spos = ies2;
while (spos + 1 < ies2 + ies2_len) {
/* IE tag at offset 0, length at offset 1 */
u16 ielen = 2 + spos[1];
if (spos + ielen > ies2 + ies2_len)
break;
if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
!_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
memcpy(dpos, spos, ielen);
dpos += ielen;
}
spos += ielen;
}
*merged_ies = buf;
*merged_len = dpos - buf;
return 0;
}
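/* Usage sketch, as in _wil_cfg80211_set_ies() below: the probe-response
 * IEs from the beacon data are merged with the extra proberesp_ies, and
 * the caller frees the result:
 *
 *	u8 *ies; u16 len;
 *	rc = _wil_cfg80211_merge_extra_ies(proberesp, proberesp_len,
 *					   bcon->proberesp_ies,
 *					   bcon->proberesp_ies_len,
 *					   &ies, &len);
 *	...
 *	kfree(ies);
 */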
static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
{
print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET,
b->head, b->head_len);
print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET,
b->tail, b->tail_len);
print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET,
b->beacon_ies, b->beacon_ies_len);
print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET,
b->probe_resp, b->probe_resp_len);
print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
b->proberesp_ies, b->proberesp_ies_len);
print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
b->assocresp_ies, b->assocresp_ies_len);
}
/* internal functions for device reset and starting AP */
static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
u16 len = 0, proberesp_len = 0;
u8 *ies = NULL, *proberesp = NULL;
if (bcon->probe_resp) {
struct ieee80211_mgmt *f =
(struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
proberesp = f->u.probe_resp.variable;
proberesp_len = bcon->probe_resp_len - hlen;
}
rc = _wil_cfg80211_merge_extra_ies(proberesp,
proberesp_len,
bcon->proberesp_ies,
bcon->proberesp_ies_len,
&ies, &len);
if (rc)
goto out;
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, len, ies);
if (rc)
goto out;
if (bcon->assocresp_ies)
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
bcon->assocresp_ies_len, bcon->assocresp_ies);
else
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, len, ies);
#if 0 /* to use beacon IEs, remove this #if 0 */
if (rc)
goto out;
rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
#endif
out:
kfree(ies);
return rc;
}
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan,
struct cfg80211_beacon_data *bcon,
u8 hidden_ssid, u32 pbss)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
if (is_go && !pbss) {
wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
return -ENOTSUPP;
}
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
__wil_down(wil);
rc = __wil_up(wil);
if (rc)
goto out;
rc = wmi_set_ssid(wil, ssid_len, ssid);
if (rc)
goto out;
rc = _wil_cfg80211_set_ies(wiphy, bcon);
if (rc)
goto out;
wil->privacy = privacy;
wil->channel = chan;
wil->hidden_ssid = hidden_ssid;
wil->pbss = pbss;
netif_carrier_on(ndev);
rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
if (rc)
goto err_pcp_start;
rc = wil_bcast_init(wil);
if (rc)
goto err_bcast;
goto out; /* success */
err_bcast:
wmi_pcp_stop(wil);
err_pcp_start:
netif_carrier_off(ndev);
out:
mutex_unlock(&wil->mutex);
return rc;
}
static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
u32 privacy = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
if (bcon->tail &&
cfg80211_find_ie(WLAN_EID_RSN, bcon->tail,
bcon->tail_len))
privacy = 1;
/* in case privacy has changed, need to restart the AP */
if (wil->privacy != privacy) {
struct wireless_dev *wdev = ndev->ieee80211_ptr;
wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
wil->privacy, privacy);
rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
wdev->ssid_len, privacy,
wdev->beacon_interval,
wil->channel, bcon,
wil->hidden_ssid,
wil->pbss);
} else {
rc = _wil_cfg80211_set_ies(wiphy, bcon);
}
return rc;
}
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 hidden_ssid;
wil_dbg_misc(wil, "%s()\n", __func__);
if (!channel) {
wil_err(wil, "AP: No channel???\n");
return -EINVAL;
}
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
break;
case NL80211_HIDDEN_SSID_ZERO_LEN:
hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
break;
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
break;
default:
wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
return -EOPNOTSUPP;
}
wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
channel->center_freq, info->privacy ? "secure" : "open");
wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
info->privacy, info->auth_type);
wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
info->hidden_ssid);
wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
info->dtim_period);
wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
info->ssid, info->ssid_len);
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
bcon, hidden_ssid, info->pbss);
return rc;
}
static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
struct net_device *ndev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s()\n", __func__);
netif_carrier_off(ndev);
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
wmi_pcp_stop(wil);
__wil_down(wil);
mutex_unlock(&wil->mutex);
return 0;
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *dev,
struct station_del_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
params->reason_code);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);
return 0;
}
/* probe_client handling */
static void wil_probe_client_handle(struct wil6210_priv *wil,
struct wil_probe_client_req *req)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wil_sta_info *sta = &wil->sta[req->cid];
/* Assume the STA is alive if it is still connected;
 * otherwise the FW will disconnect it.
 */
bool alive = (sta->status == wil_sta_connected);
cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL);
}
static struct list_head *next_probe_client(struct wil6210_priv *wil)
{
struct list_head *ret = NULL;
mutex_lock(&wil->probe_client_mutex);
if (!list_empty(&wil->probe_client_pending)) {
ret = wil->probe_client_pending.next;
list_del(ret);
}
mutex_unlock(&wil->probe_client_mutex);
return ret;
}
void wil_probe_client_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
probe_client_worker);
struct wil_probe_client_req *req;
struct list_head *lh;
while ((lh = next_probe_client(wil)) != NULL) {
req = list_entry(lh, struct wil_probe_client_req, list);
wil_probe_client_handle(wil, req);
kfree(req);
}
}
void wil_probe_client_flush(struct wil6210_priv *wil)
{
struct wil_probe_client_req *req, *t;
wil_dbg_misc(wil, "%s()\n", __func__);
mutex_lock(&wil->probe_client_mutex);
list_for_each_entry_safe(req, t, &wil->probe_client_pending, list) {
list_del(&req->list);
kfree(req);
}
mutex_unlock(&wil->probe_client_mutex);
}
static int wil_cfg80211_probe_client(struct wiphy *wiphy,
struct net_device *dev,
const u8 *peer, u64 *cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil_probe_client_req *req;
int cid = wil_find_cid(wil, peer);
wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
if (cid < 0)
return -ENOLINK;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->cid = cid;
req->cookie = cid;
mutex_lock(&wil->probe_client_mutex);
list_add_tail(&req->list, &wil->probe_client_pending);
mutex_unlock(&wil->probe_client_mutex);
*cookie = req->cookie;
queue_work(wil->wq_service, &wil->probe_client_worker);
return 0;
}
static int wil_cfg80211_change_bss(struct wiphy *wiphy,
struct net_device *dev,
struct bss_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (params->ap_isolate >= 0) {
wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
wil->ap_isolate, params->ap_isolate);
wil->ap_isolate = params->ap_isolate;
}
return 0;
}
static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s: entered\n", __func__);
wil->p2p.p2p_dev_started = 1;
return 0;
}
static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
u8 started;
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
if (started && wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
};
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
wil->radio_wdev = wil->wdev;
}
mutex_unlock(&wil->mutex);
wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
.change_virtual_intf = wil_cfg80211_change_iface,
.get_station = wil_cfg80211_get_station,
.dump_station = wil_cfg80211_dump_station,
.remain_on_channel = wil_remain_on_channel,
.cancel_remain_on_channel = wil_cancel_remain_on_channel,
.mgmt_tx = wil_cfg80211_mgmt_tx,
.set_monitor_channel = wil_cfg80211_set_channel,
.add_key = wil_cfg80211_add_key,
.del_key = wil_cfg80211_del_key,
.set_default_key = wil_cfg80211_set_default_key,
/* AP mode */
.change_beacon = wil_cfg80211_change_beacon,
.start_ap = wil_cfg80211_start_ap,
.stop_ap = wil_cfg80211_stop_ap,
.del_station = wil_cfg80211_del_station,
.probe_client = wil_cfg80211_probe_client,
.change_bss = wil_cfg80211_change_bss,
/* P2P device */
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
};
static void wil_wiphy_init(struct wiphy *wiphy)
{
wiphy->max_scan_ssids = 1;
wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
wiphy->max_num_pmkids = 0; /* TODO */
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_MONITOR);
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
/* TODO: figure this out */
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
wiphy->cipher_suites = wil_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
{
int rc = 0;
struct wireless_dev *wdev;
dev_dbg(dev, "%s()\n", __func__);
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return ERR_PTR(-ENOMEM);
wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
sizeof(struct wil6210_priv));
if (!wdev->wiphy) {
rc = -ENOMEM;
goto out;
}
set_wiphy_dev(wdev->wiphy, dev);
wil_wiphy_init(wdev->wiphy);
rc = wiphy_register(wdev->wiphy);
if (rc < 0)
goto out_failed_reg;
return wdev;
out_failed_reg:
wiphy_free(wdev->wiphy);
out:
kfree(wdev);
return ERR_PTR(rc);
}
void wil_wdev_free(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil_to_wdev(wil);
dev_dbg(wil_to_dev(wil), "%s()\n", __func__);
if (!wdev)
return;
wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}
void wil_p2p_wdev_free(struct wil6210_priv *wil)
{
struct wireless_dev *p2p_wdev;
mutex_lock(&wil->p2p_wdev_mutex);
p2p_wdev = wil->p2p_wdev;
if (p2p_wdev) {
wil->p2p_wdev = NULL;
wil->radio_wdev = wil_to_wdev(wil);
cfg80211_unregister_wdev(p2p_wdev);
kfree(p2p_wdev);
}
mutex_unlock(&wil->p2p_wdev_mutex);
}
| gpl-2.0 |
AOSP-CAF/platform_external_iproute2 | ip/ipntable.c | 47 | 16511 | /*
* Copyright (C)2006 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* based on ipneigh.c
*/
/*
* Authors:
* Masahide NAKAMURA @USAGI
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include "utils.h"
#include "ip_common.h"
static struct
{
int family;
int index;
#define NONE_DEV (-1)
char name[1024];
} filter;
static void usage(void) __attribute__((noreturn));
static void usage(void)
{
fprintf(stderr,
"Usage: ip ntable change name NAME [ dev DEV ]\n"
" [ thresh1 VAL ] [ thresh2 VAL ] [ thresh3 VAL ] [ gc_int MSEC ]\n"
" [ PARMS ]\n"
"Usage: ip ntable show [ dev DEV ] [ name NAME ]\n"
"PARMS := [ base_reachable MSEC ] [ retrans MSEC ] [ gc_stale MSEC ]\n"
" [ delay_probe MSEC ] [ queue LEN ]\n"
" [ app_probs VAL ] [ ucast_probes VAL ] [ mcast_probes VAL ]\n"
" [ anycast_delay MSEC ] [ proxy_delay MSEC ] [ proxy_queue LEN ]\n"
" [ locktime MSEC ]\n"
);
exit(-1);
}
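/*
 * Example invocations (illustrative, matching the usage text above):
 *
 *	ip ntable show dev eth0
 *	ip ntable change name arp_cache dev eth0 thresh1 512
 */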
static int ipntable_modify(int cmd, int flags, int argc, char **argv)
{
struct {
struct nlmsghdr n;
struct ndtmsg ndtm;
char buf[1024];
} req;
char *namep = NULL;
char *threshsp = NULL;
char *gc_intp = NULL;
char parms_buf[1024];
struct rtattr *parms_rta = (struct rtattr *)parms_buf;
int parms_change = 0;
memset(&req, 0, sizeof(req));
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndtmsg));
req.n.nlmsg_flags = NLM_F_REQUEST|flags;
req.n.nlmsg_type = cmd;
req.ndtm.ndtm_family = preferred_family;
req.ndtm.ndtm_pad1 = 0;
req.ndtm.ndtm_pad2 = 0;
memset(&parms_buf, 0, sizeof(parms_buf));
parms_rta->rta_type = NDTA_PARMS;
parms_rta->rta_len = RTA_LENGTH(0);
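/*
 * Note: the NDTPA_* attributes below are first accumulated into
 * parms_buf and, if any were added, appended to the request as a
 * single nested NDTA_PARMS attribute after the argument loop.
 */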
while (argc > 0) {
if (strcmp(*argv, "name") == 0) {
int len;
NEXT_ARG();
if (namep)
duparg("NAME", *argv);
namep = *argv;
len = strlen(namep) + 1;
addattr_l(&req.n, sizeof(req), NDTA_NAME, namep, len);
} else if (strcmp(*argv, "thresh1") == 0) {
__u32 thresh1;
NEXT_ARG();
threshsp = *argv;
if (get_u32(&thresh1, *argv, 0))
invarg("\"thresh1\" value is invalid", *argv);
addattr32(&req.n, sizeof(req), NDTA_THRESH1, thresh1);
} else if (strcmp(*argv, "thresh2") == 0) {
__u32 thresh2;
NEXT_ARG();
threshsp = *argv;
if (get_u32(&thresh2, *argv, 0))
invarg("\"thresh2\" value is invalid", *argv);
addattr32(&req.n, sizeof(req), NDTA_THRESH2, thresh2);
} else if (strcmp(*argv, "thresh3") == 0) {
__u32 thresh3;
NEXT_ARG();
threshsp = *argv;
if (get_u32(&thresh3, *argv, 0))
invarg("\"thresh3\" value is invalid", *argv);
addattr32(&req.n, sizeof(req), NDTA_THRESH3, thresh3);
} else if (strcmp(*argv, "gc_int") == 0) {
__u64 gc_int;
NEXT_ARG();
gc_intp = *argv;
if (get_u64(&gc_int, *argv, 0))
invarg("\"gc_int\" value is invalid", *argv);
addattr_l(&req.n, sizeof(req), NDTA_GC_INTERVAL,
&gc_int, sizeof(gc_int));
} else if (strcmp(*argv, "dev") == 0) {
__u32 ifindex;
NEXT_ARG();
ifindex = ll_name_to_index(*argv);
if (ifindex == 0) {
fprintf(stderr, "Cannot find device \"%s\"\n", *argv);
return -1;
}
rta_addattr32(parms_rta, sizeof(parms_buf),
NDTPA_IFINDEX, ifindex);
} else if (strcmp(*argv, "base_reachable") == 0) {
__u64 breachable;
NEXT_ARG();
if (get_u64(&breachable, *argv, 0))
invarg("\"base_reachable\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_BASE_REACHABLE_TIME,
&breachable, sizeof(breachable));
parms_change = 1;
} else if (strcmp(*argv, "retrans") == 0) {
__u64 retrans;
NEXT_ARG();
if (get_u64(&retrans, *argv, 0))
invarg("\"retrans\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_RETRANS_TIME,
&retrans, sizeof(retrans));
parms_change = 1;
} else if (strcmp(*argv, "gc_stale") == 0) {
__u64 gc_stale;
NEXT_ARG();
if (get_u64(&gc_stale, *argv, 0))
invarg("\"gc_stale\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_GC_STALETIME,
&gc_stale, sizeof(gc_stale));
parms_change = 1;
} else if (strcmp(*argv, "delay_probe") == 0) {
__u64 delay_probe;
NEXT_ARG();
if (get_u64(&delay_probe, *argv, 0))
invarg("\"delay_probe\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_DELAY_PROBE_TIME,
&delay_probe, sizeof(delay_probe));
parms_change = 1;
} else if (strcmp(*argv, "queue") == 0) {
__u32 queue;
NEXT_ARG();
if (get_u32(&queue, *argv, 0))
invarg("\"queue\" value is invalid", *argv);
if (!parms_rta)
parms_rta = (struct rtattr *)&parms_buf;
rta_addattr32(parms_rta, sizeof(parms_buf),
NDTPA_QUEUE_LEN, queue);
parms_change = 1;
} else if (strcmp(*argv, "app_probes") == 0) {
__u32 aprobe;
NEXT_ARG();
if (get_u32(&aprobe, *argv, 0))
invarg("\"app_probes\" value is invalid", *argv);
rta_addattr32(parms_rta, sizeof(parms_buf),
NDTPA_APP_PROBES, aprobe);
parms_change = 1;
} else if (strcmp(*argv, "ucast_probes") == 0) {
__u32 uprobe;
NEXT_ARG();
if (get_u32(&uprobe, *argv, 0))
invarg("\"ucast_probes\" value is invalid", *argv);
rta_addattr32(parms_rta, sizeof(parms_buf),
NDTPA_UCAST_PROBES, uprobe);
parms_change = 1;
} else if (strcmp(*argv, "mcast_probes") == 0) {
__u32 mprobe;
NEXT_ARG();
if (get_u32(&mprobe, *argv, 0))
invarg("\"mcast_probes\" value is invalid", *argv);
rta_addattr32(parms_rta, sizeof(parms_buf),
NDTPA_MCAST_PROBES, mprobe);
parms_change = 1;
} else if (strcmp(*argv, "anycast_delay") == 0) {
__u64 anycast_delay;
NEXT_ARG();
if (get_u64(&anycast_delay, *argv, 0))
invarg("\"anycast_delay\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_ANYCAST_DELAY,
&anycast_delay, sizeof(anycast_delay));
parms_change = 1;
} else if (strcmp(*argv, "proxy_delay") == 0) {
__u64 proxy_delay;
NEXT_ARG();
if (get_u64(&proxy_delay, *argv, 0))
invarg("\"proxy_delay\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_PROXY_DELAY,
&proxy_delay, sizeof(proxy_delay));
parms_change = 1;
} else if (strcmp(*argv, "proxy_queue") == 0) {
__u32 pqueue;
NEXT_ARG();
if (get_u32(&pqueue, *argv, 0))
invarg("\"proxy_queue\" value is invalid", *argv);
rta_addattr32(parms_rta, sizeof(parms_buf),
NDTPA_PROXY_QLEN, pqueue);
parms_change = 1;
} else if (strcmp(*argv, "locktime") == 0) {
__u64 locktime;
NEXT_ARG();
if (get_u64(&locktime, *argv, 0))
invarg("\"locktime\" value is invalid", *argv);
rta_addattr_l(parms_rta, sizeof(parms_buf),
NDTPA_LOCKTIME,
&locktime, sizeof(locktime));
parms_change = 1;
} else {
invarg("unknown", *argv);
}
argc--; argv++;
}
if (!namep)
missarg("NAME");
if (!threshsp && !gc_intp && !parms_change) {
fprintf(stderr, "Not enough information: changable attributes required.\n");
exit(-1);
}
if (parms_rta->rta_len > RTA_LENGTH(0)) {
addattr_l(&req.n, sizeof(req), NDTA_PARMS, RTA_DATA(parms_rta),
RTA_PAYLOAD(parms_rta));
}
if (rtnl_talk(&rth, &req.n, 0, 0, NULL) < 0)
exit(2);
return 0;
}
static const char *ntable_strtime_delta(__u32 msec)
{
static char str[32];
struct timeval now;
time_t t;
struct tm *tp;
if (msec == 0)
goto error;
memset(&now, 0, sizeof(now));
if (gettimeofday(&now, NULL) < 0) {
perror("gettimeofday");
goto error;
}
t = now.tv_sec - (msec / 1000);
tp = localtime(&t);
if (!tp)
goto error;
strftime(str, sizeof(str), "%Y-%m-%d %T", tp);
return str;
error:
strcpy(str, "(error)");
return str;
}
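/*
 * Worked example (illustrative): with msec == 5000 and the current
 * wall clock at 12:00:05, t becomes 12:00:00 and the returned string
 * is that time formatted with "%Y-%m-%d %T", i.e. when the event
 * 'msec' milliseconds ago occurred.
 */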
int print_ntable(const struct sockaddr_nl *who, struct nlmsghdr *n, void *arg)
{
FILE *fp = (FILE*)arg;
struct ndtmsg *ndtm = NLMSG_DATA(n);
int len = n->nlmsg_len;
struct rtattr *tb[NDTA_MAX+1];
struct rtattr *tpb[NDTPA_MAX+1];
int ret;
if (n->nlmsg_type != RTM_NEWNEIGHTBL) {
fprintf(stderr, "Not NEIGHTBL: %08x %08x %08x\n",
n->nlmsg_len, n->nlmsg_type, n->nlmsg_flags);
return 0;
}
len -= NLMSG_LENGTH(sizeof(*ndtm));
if (len < 0) {
fprintf(stderr, "BUG: wrong nlmsg len %d\n", len);
return -1;
}
if (preferred_family && preferred_family != ndtm->ndtm_family)
return 0;
parse_rtattr(tb, NDTA_MAX, NDTA_RTA(ndtm),
n->nlmsg_len - NLMSG_LENGTH(sizeof(*ndtm)));
if (tb[NDTA_NAME]) {
const char *name = rta_getattr_str(tb[NDTA_NAME]);
if (strlen(filter.name) > 0 && strcmp(filter.name, name))
return 0;
}
if (tb[NDTA_PARMS]) {
parse_rtattr(tpb, NDTPA_MAX, RTA_DATA(tb[NDTA_PARMS]),
RTA_PAYLOAD(tb[NDTA_PARMS]));
if (tpb[NDTPA_IFINDEX]) {
__u32 ifindex = rta_getattr_u32(tpb[NDTPA_IFINDEX]);
if (filter.index && filter.index != ifindex)
return 0;
} else {
if (filter.index && filter.index != NONE_DEV)
return 0;
}
}
if (ndtm->ndtm_family == AF_INET)
fprintf(fp, "inet ");
else if (ndtm->ndtm_family == AF_INET6)
fprintf(fp, "inet6 ");
else if (ndtm->ndtm_family == AF_DECnet)
fprintf(fp, "dnet ");
else
fprintf(fp, "(%d) ", ndtm->ndtm_family);
if (tb[NDTA_NAME]) {
const char *name = rta_getattr_str(tb[NDTA_NAME]);
fprintf(fp, "%s ", name);
}
fprintf(fp, "%s", _SL_);
ret = (tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || tb[NDTA_THRESH3] ||
tb[NDTA_GC_INTERVAL]);
if (ret)
fprintf(fp, " ");
if (tb[NDTA_THRESH1]) {
__u32 thresh1 = rta_getattr_u32(tb[NDTA_THRESH1]);
fprintf(fp, "thresh1 %u ", thresh1);
}
if (tb[NDTA_THRESH2]) {
__u32 thresh2 = rta_getattr_u32(tb[NDTA_THRESH2]);
fprintf(fp, "thresh2 %u ", thresh2);
}
if (tb[NDTA_THRESH3]) {
__u32 thresh3 = rta_getattr_u32(tb[NDTA_THRESH3]);
fprintf(fp, "thresh3 %u ", thresh3);
}
if (tb[NDTA_GC_INTERVAL]) {
__u64 gc_int = rta_getattr_u64(tb[NDTA_GC_INTERVAL]);
fprintf(fp, "gc_int %llu ", gc_int);
}
if (ret)
fprintf(fp, "%s", _SL_);
if (tb[NDTA_CONFIG] && show_stats) {
struct ndt_config *ndtc = RTA_DATA(tb[NDTA_CONFIG]);
fprintf(fp, " ");
fprintf(fp, "config ");
fprintf(fp, "key_len %u ", ndtc->ndtc_key_len);
fprintf(fp, "entry_size %u ", ndtc->ndtc_entry_size);
fprintf(fp, "entries %u ", ndtc->ndtc_entries);
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
fprintf(fp, "last_flush %s ",
ntable_strtime_delta(ndtc->ndtc_last_flush));
fprintf(fp, "last_rand %s ",
ntable_strtime_delta(ndtc->ndtc_last_rand));
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
fprintf(fp, "hash_rnd %u ", ndtc->ndtc_hash_rnd);
fprintf(fp, "hash_mask %08x ", ndtc->ndtc_hash_mask);
fprintf(fp, "hash_chain_gc %u ", ndtc->ndtc_hash_chain_gc);
fprintf(fp, "proxy_qlen %u ", ndtc->ndtc_proxy_qlen);
fprintf(fp, "%s", _SL_);
}
if (tb[NDTA_PARMS]) {
if (tpb[NDTPA_IFINDEX]) {
__u32 ifindex = rta_getattr_u32(tpb[NDTPA_IFINDEX]);
fprintf(fp, " ");
fprintf(fp, "dev %s ", ll_index_to_name(ifindex));
fprintf(fp, "%s", _SL_);
}
fprintf(fp, " ");
if (tpb[NDTPA_REFCNT]) {
__u32 refcnt = rta_getattr_u32(tpb[NDTPA_REFCNT]);
fprintf(fp, "refcnt %u ", refcnt);
}
if (tpb[NDTPA_REACHABLE_TIME]) {
__u64 reachable = rta_getattr_u64(tpb[NDTPA_REACHABLE_TIME]);
fprintf(fp, "reachable %llu ", reachable);
}
if (tpb[NDTPA_BASE_REACHABLE_TIME]) {
__u64 breachable = rta_getattr_u64(tpb[NDTPA_BASE_REACHABLE_TIME]);
fprintf(fp, "base_reachable %llu ", breachable);
}
if (tpb[NDTPA_RETRANS_TIME]) {
__u64 retrans = rta_getattr_u64(tpb[NDTPA_RETRANS_TIME]);
fprintf(fp, "retrans %llu ", retrans);
}
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
if (tpb[NDTPA_GC_STALETIME]) {
__u64 gc_stale = rta_getattr_u64(tpb[NDTPA_GC_STALETIME]);
fprintf(fp, "gc_stale %llu ", gc_stale);
}
if (tpb[NDTPA_DELAY_PROBE_TIME]) {
__u64 delay_probe = rta_getattr_u64(tpb[NDTPA_DELAY_PROBE_TIME]);
fprintf(fp, "delay_probe %llu ", delay_probe);
}
if (tpb[NDTPA_QUEUE_LEN]) {
__u32 queue = rta_getattr_u32(tpb[NDTPA_QUEUE_LEN]);
fprintf(fp, "queue %u ", queue);
}
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
if (tpb[NDTPA_APP_PROBES]) {
__u32 aprobe = rta_getattr_u32(tpb[NDTPA_APP_PROBES]);
fprintf(fp, "app_probes %u ", aprobe);
}
if (tpb[NDTPA_UCAST_PROBES]) {
__u32 uprobe = rta_getattr_u32(tpb[NDTPA_UCAST_PROBES]);
fprintf(fp, "ucast_probes %u ", uprobe);
}
if (tpb[NDTPA_MCAST_PROBES]) {
__u32 mprobe = rta_getattr_u32(tpb[NDTPA_MCAST_PROBES]);
fprintf(fp, "mcast_probes %u ", mprobe);
}
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
if (tpb[NDTPA_ANYCAST_DELAY]) {
__u64 anycast_delay = rta_getattr_u64(tpb[NDTPA_ANYCAST_DELAY]);
fprintf(fp, "anycast_delay %llu ", anycast_delay);
}
if (tpb[NDTPA_PROXY_DELAY]) {
__u64 proxy_delay = rta_getattr_u64(tpb[NDTPA_PROXY_DELAY]);
fprintf(fp, "proxy_delay %llu ", proxy_delay);
}
if (tpb[NDTPA_PROXY_QLEN]) {
__u32 pqueue = rta_getattr_u32(tpb[NDTPA_PROXY_QLEN]);
fprintf(fp, "proxy_queue %u ", pqueue);
}
if (tpb[NDTPA_LOCKTIME]) {
__u64 locktime = rta_getattr_u64(tpb[NDTPA_LOCKTIME]);
fprintf(fp, "locktime %llu ", locktime);
}
fprintf(fp, "%s", _SL_);
}
if (tb[NDTA_STATS] && show_stats) {
struct ndt_stats *ndts = RTA_DATA(tb[NDTA_STATS]);
fprintf(fp, " ");
fprintf(fp, "stats ");
fprintf(fp, "allocs %llu ", ndts->ndts_allocs);
fprintf(fp, "destroys %llu ", ndts->ndts_destroys);
fprintf(fp, "hash_grows %llu ", ndts->ndts_hash_grows);
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
fprintf(fp, "res_failed %llu ", ndts->ndts_res_failed);
fprintf(fp, "lookups %llu ", ndts->ndts_lookups);
fprintf(fp, "hits %llu ", ndts->ndts_hits);
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
fprintf(fp, "rcv_probes_mcast %llu ", ndts->ndts_rcv_probes_mcast);
fprintf(fp, "rcv_probes_ucast %llu ", ndts->ndts_rcv_probes_ucast);
fprintf(fp, "%s", _SL_);
fprintf(fp, " ");
fprintf(fp, "periodic_gc_runs %llu ", ndts->ndts_periodic_gc_runs);
fprintf(fp, "forced_gc_runs %llu ", ndts->ndts_forced_gc_runs);
fprintf(fp, "%s", _SL_);
}
fprintf(fp, "\n");
fflush(fp);
return 0;
}
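/*
 * Sample output (illustrative, values hypothetical):
 *
 *	inet arp_cache
 *	    thresh1 128 thresh2 512 thresh3 1024 gc_int 30000
 */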
void ipntable_reset_filter(void)
{
memset(&filter, 0, sizeof(filter));
}
static int ipntable_show(int argc, char **argv)
{
ipntable_reset_filter();
filter.family = preferred_family;
while (argc > 0) {
if (strcmp(*argv, "dev") == 0) {
NEXT_ARG();
if (strcmp("none", *argv) == 0)
filter.index = NONE_DEV;
else if ((filter.index = ll_name_to_index(*argv)) == 0)
invarg("\"DEV\" is invalid", *argv);
} else if (strcmp(*argv, "name") == 0) {
NEXT_ARG();
strncpy(filter.name, *argv, sizeof(filter.name) - 1);
} else
invarg("unknown", *argv);
argc--; argv++;
}
if (rtnl_wilddump_request(&rth, preferred_family, RTM_GETNEIGHTBL) < 0) {
perror("Cannot send dump request");
exit(1);
}
if (rtnl_dump_filter(&rth, print_ntable, stdout) < 0) {
fprintf(stderr, "Dump terminated\n");
exit(1);
}
return 0;
}
int do_ipntable(int argc, char **argv)
{
ll_init_map(&rth);
if (argc > 0) {
if (matches(*argv, "change") == 0 ||
matches(*argv, "chg") == 0)
return ipntable_modify(RTM_SETNEIGHTBL,
NLM_F_REPLACE,
argc-1, argv+1);
if (matches(*argv, "show") == 0 ||
matches(*argv, "lst") == 0 ||
matches(*argv, "list") == 0)
return ipntable_show(argc-1, argv+1);
if (matches(*argv, "help") == 0)
usage();
} else
return ipntable_show(0, NULL);
fprintf(stderr, "Command \"%s\" is unknown, try \"ip ntable help\".\n", *argv);
exit(-1);
}
| gpl-2.0 |
C-Aniruddh/kernel_vortex | drivers/platform/msm/mhi/mhi_init.c | 559 | 23496 | /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mhi_sys.h"
#include "mhi.h"
#include "mhi_hwio.h"
#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>
static enum MHI_STATUS mhi_create_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int i;
if (NULL == mhi_dev_ctxt)
return MHI_STATUS_ALLOC_ERROR;
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
mhi_dev_ctxt->nr_of_cc = MHI_MAX_CHANNELS;
mhi_dev_ctxt->nr_of_ec = EVENT_RINGS_ALLOCATED;
mhi_dev_ctxt->nr_of_cmdc = NR_OF_CMD_RINGS;
mhi_dev_ctxt->alloced_ev_rings[PRIMARY_EVENT_RING] = 0;
mhi_dev_ctxt->alloced_ev_rings[SOFTWARE_EV_RING] =
SOFTWARE_EV_RING;
mhi_dev_ctxt->alloced_ev_rings[IPA_OUT_EV_RING] =
MHI_CLIENT_IP_HW_0_OUT;
mhi_dev_ctxt->alloced_ev_rings[IPA_IN_EV_RING] =
MHI_CLIENT_IP_HW_0_IN;
MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
mhi_dev_ctxt->ev_ring_props[PRIMARY_EVENT_RING],
MHI_EVENT_POLLING_ENABLED);
MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
mhi_dev_ctxt->ev_ring_props[SOFTWARE_EV_RING],
MHI_EVENT_POLLING_ENABLED);
MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING],
MHI_EVENT_POLLING_ENABLED);
MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING],
MHI_EVENT_POLLING_DISABLED);
for (i = 0; i < MAX_NR_MSI; ++i) {
MHI_SET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
mhi_dev_ctxt->ev_ring_props[i],
i);
}
return MHI_STATUS_SUCCESS;
}
enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_INIT_ERROR_STAGE cleanup_stage)
{
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
switch (cleanup_stage) {
case MHI_INIT_ERROR_STAGE_UNWIND_ALL:
case MHI_INIT_ERROR_TIMERS:
case MHI_INIT_ERROR_STAGE_DEVICE_CTRL:
mhi_freememregion(mhi_dev_ctxt->mhi_ctrl_seg_info);
case MHI_INIT_ERROR_STAGE_THREAD_QUEUES:
case MHI_INIT_ERROR_STAGE_THREADS:
kfree(mhi_dev_ctxt->event_handle);
kfree(mhi_dev_ctxt->state_change_event_handle);
kfree(mhi_dev_ctxt->M0_event);
case MHI_INIT_ERROR_STAGE_EVENTS:
kfree(mhi_dev_ctxt->mhi_ctrl_seg_info);
case MHI_INIT_ERROR_STAGE_MEM_ZONES:
kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
kfree(mhi_dev_ctxt->mhi_chan_mutex);
kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
case MHI_INIT_ERROR_STAGE_SYNC:
kfree(mhi_dev_ctxt);
break;
default:
ret_val = MHI_STATUS_ERROR;
break;
}
return ret_val;
}
static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 i = 0;
mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
MHI_MAX_CHANNELS,
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
goto ev_mutex_free;
mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
MHI_MAX_CHANNELS, GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
goto chan_mutex_free;
mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
NR_OF_CMD_RINGS, GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
goto cmd_mutex_free;
mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
MHI_MAX_CHANNELS, GFP_KERNEL);
if (NULL == mhi_dev_ctxt->db_write_lock)
goto db_write_lock_free;
for (i = 0; i < mhi_dev_ctxt->nr_of_cc; ++i)
mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
for (i = 0; i < mhi_dev_ctxt->nr_of_cmdc; ++i)
mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
rwlock_init(&mhi_dev_ctxt->xfer_lock);
mutex_init(&mhi_dev_ctxt->mhi_link_state);
mutex_init(&mhi_dev_ctxt->pm_lock);
return MHI_STATUS_SUCCESS;
db_write_lock_free:
kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
cmd_mutex_free:
kfree(mhi_dev_ctxt->mhi_chan_mutex);
chan_mutex_free:
kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
ev_mutex_free:
return MHI_STATUS_ALLOC_ERROR;
}
static enum MHI_STATUS mhi_init_ctrl_zone(struct mhi_pcie_dev_info *dev_info,
struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->mhi_ctrl_seg_info = kmalloc(sizeof(struct mhi_meminfo),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ctrl_seg_info)
return MHI_STATUS_ALLOC_ERROR;
mhi_dev_ctxt->mhi_ctrl_seg_info->dev = &dev_info->pcie_device->dev;
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->event_handle = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->event_handle) {
mhi_log(MHI_MSG_ERROR, "Failed to init event");
return MHI_STATUS_ERROR;
}
mhi_dev_ctxt->state_change_event_handle =
kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (NULL == mhi_dev_ctxt->state_change_event_handle) {
mhi_log(MHI_MSG_ERROR, "Failed to init event");
goto error_event_handle_alloc;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->M0_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (NULL == mhi_dev_ctxt->M0_event) {
mhi_log(MHI_MSG_ERROR, "Failed to init event");
goto error_state_change_event_handle;
}
/* Initialize the event which signals M3 */
mhi_dev_ctxt->M3_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (NULL == mhi_dev_ctxt->M3_event) {
mhi_log(MHI_MSG_ERROR, "Failed to init event");
goto error_M0_event;
}
/* Initialize the event which signals BHI completion */
mhi_dev_ctxt->bhi_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->bhi_event) {
mhi_log(MHI_MSG_ERROR, "Failed to init event");
goto error_bhi_event;
}
mhi_dev_ctxt->chan_start_complete =
kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (NULL == mhi_dev_ctxt->chan_start_complete) {
mhi_log(MHI_MSG_ERROR, "Failed to init event");
goto error_chan_complete;
}
/* Initialize the event which starts the event parsing thread */
init_waitqueue_head(mhi_dev_ctxt->event_handle);
/* Initialize the event which starts the state change thread */
init_waitqueue_head(mhi_dev_ctxt->state_change_event_handle);
/* Initialize the event which triggers clients waiting to send */
init_waitqueue_head(mhi_dev_ctxt->M0_event);
/* Initialize the event which triggers D3hot */
init_waitqueue_head(mhi_dev_ctxt->M3_event);
init_waitqueue_head(mhi_dev_ctxt->bhi_event);
init_waitqueue_head(mhi_dev_ctxt->chan_start_complete);
return MHI_STATUS_SUCCESS;
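/*
 * Error unwind: each label below frees everything allocated before the
 * failing step, in reverse order of allocation.
 */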
error_chan_complete:
kfree(mhi_dev_ctxt->bhi_event);
error_bhi_event:
kfree(mhi_dev_ctxt->M3_event);
error_M0_event:
kfree(mhi_dev_ctxt->M0_event);
error_state_change_event_handle:
kfree(mhi_dev_ctxt->state_change_event_handle);
error_event_handle_alloc:
kfree(mhi_dev_ctxt->event_handle);
return MHI_STATUS_ERROR;
}
static enum MHI_STATUS mhi_init_state_change_thread_work_queue(
struct mhi_state_work_queue *q)
{
bool lock_acquired = 0;
unsigned long flags;
if (NULL == q->q_lock) {
q->q_lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
if (NULL == q->q_lock)
return MHI_STATUS_ALLOC_ERROR;
spin_lock_init(q->q_lock);
} else {
spin_lock_irqsave(q->q_lock, flags);
lock_acquired = 1;
}
q->queue_full_cntr = 0;
q->q_info.base = q->buf;
q->q_info.rp = q->buf;
q->q_info.wp = q->buf;
q->q_info.len = MHI_WORK_Q_MAX_SIZE * sizeof(enum STATE_TRANSITION);
q->q_info.el_size = sizeof(enum STATE_TRANSITION);
q->q_info.overwrite_en = 0;
if (lock_acquired)
spin_unlock_irqrestore(q->q_lock, flags);
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
*mhi_dev_ctxt)
{
size_t ctrl_seg_size = 0;
size_t ctrl_seg_offset = 0;
u32 i = 0;
u32 align_len = sizeof(u64)*2;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
if (NULL == mhi_dev_ctxt || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info ||
NULL == mhi_dev_ctxt->mhi_ctrl_seg_info->dev)
return MHI_STATUS_ERROR;
mhi_dev_ctxt->enable_lpm = 1;
mhi_log(MHI_MSG_INFO, "Allocating control segment.\n");
ctrl_seg_size += sizeof(struct mhi_control_seg);
/* Calculate the size of the control segment needed */
ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
if (IS_HARDWARE_CHANNEL(i))
ctrl_seg_size += sizeof(union mhi_xfer_pkt) *
(MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP);
else if (IS_SOFTWARE_CHANNEL(i))
ctrl_seg_size += sizeof(union mhi_xfer_pkt) *
(MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP);
}
ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i)
ctrl_seg_size += sizeof(union mhi_event_pkt)*
(EV_EL_PER_RING + ELEMENT_GAP);
ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
ret_val = mhi_mallocmemregion(mhi_dev_ctxt->mhi_ctrl_seg_info,
ctrl_seg_size);
if (MHI_STATUS_SUCCESS != ret_val)
return MHI_STATUS_ERROR;
mhi_dev_ctxt->mhi_ctrl_seg =
mhi_get_virt_addr(mhi_dev_ctxt->mhi_ctrl_seg_info);
if (0 == mhi_dev_ctxt->mhi_ctrl_seg)
return MHI_STATUS_ALLOC_ERROR;
/* Set the channel contexts, event contexts and cmd context */
ctrl_seg_offset = (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg +
sizeof(struct mhi_control_seg);
ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
/* Set the TRB lists */
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
if (IS_HARDWARE_CHANNEL(i)) {
mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] =
(union mhi_xfer_pkt *)ctrl_seg_offset;
ctrl_seg_offset += sizeof(union mhi_xfer_pkt) *
(MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP);
} else if (IS_SOFTWARE_CHANNEL(i)) {
mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] =
(union mhi_xfer_pkt *)ctrl_seg_offset;
ctrl_seg_offset += sizeof(union mhi_xfer_pkt) *
(MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP);
}
}
ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
mhi_dev_ctxt->mhi_ctrl_seg->ev_trb_list[i] =
(union mhi_event_pkt *)ctrl_seg_offset;
ctrl_seg_offset += sizeof(union mhi_event_pkt) *
(EV_EL_PER_RING + ELEMENT_GAP);
}
return MHI_STATUS_SUCCESS;
}
/**
* mhi_event_ring_init - Initialize an event ring
*
* @ev_list: Event ring context to initialize
* @trb_list_phy_addr: Pointer to phy mem to the tre list for event ring
* @trb_list_virt_addr: Pointer to virt mem to the tre list for event ring
* @el_per_ring: Number of event ring elements in this ring
* @ring: Pointer to the shadow context of this event ring
*
* @Return MHI_STATUS
*/
static enum MHI_STATUS mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
uintptr_t trb_list_phy_addr, uintptr_t trb_list_virt_addr,
size_t el_per_ring, struct mhi_ring *ring,
u32 intmodt_val, u32 msi_vec)
{
ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID;
ev_list->mhi_msi_vector = msi_vec;
ev_list->mhi_event_ring_base_addr = trb_list_phy_addr;
ev_list->mhi_event_ring_len = el_per_ring*sizeof(union mhi_event_pkt);
ev_list->mhi_event_read_ptr = trb_list_phy_addr;
ev_list->mhi_event_write_ptr = trb_list_phy_addr;
MHI_SET_EV_CTXT(EVENT_CTXT_INTMODT, ev_list, intmodt_val);
ring->wp = (void *)(uintptr_t)trb_list_virt_addr;
ring->rp = (void *)(uintptr_t)trb_list_virt_addr;
ring->base = (void *)(uintptr_t)(trb_list_virt_addr);
ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
ring->el_size = sizeof(union mhi_event_pkt);
ring->overwrite_en = 0;
return MHI_STATUS_SUCCESS;
}
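/*
 * Illustrative example (numbers hypothetical): for el_per_ring == 128,
 * the ring length becomes 128 * sizeof(union mhi_event_pkt), and
 * rp == wp == base, i.e. the ring starts out empty.
 */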
/**
* mhi_cmd_ring_init- Initialization of the command ring
*
* @cmd_ctxt: command ring context to initialize
* @trb_list_phy_addr: Pointer to the physical address of the tre ring
* @trb_list_virt_addr: Pointer to the virtual address of the tre ring
* @el_per_ring: Number of elements in this command ring
* @ring: Pointer to the shadow command context
*
* @Return MHI_STATUS
*/
static enum MHI_STATUS mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
uintptr_t trb_list_phy_addr,
uintptr_t trb_list_virt_addr,
size_t el_per_ring, struct mhi_ring *ring)
{
cmd_ctxt->mhi_cmd_ring_base_addr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_read_ptr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_write_ptr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_len =
(size_t)el_per_ring*sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].wp = (void *)trb_list_virt_addr;
ring[PRIMARY_CMD_RING].rp = (void *)trb_list_virt_addr;
ring[PRIMARY_CMD_RING].base = (void *)trb_list_virt_addr;
ring[PRIMARY_CMD_RING].len =
(size_t)el_per_ring*sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].overwrite_en = 0;
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_timers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
hrtimer_init(&mhi_dev_ctxt->m1_timer,
CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
mhi_dev_ctxt->m1_timeout =
ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L);
mhi_dev_ctxt->m1_timer.function = mhi_initiate_m1;
mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
"Starting M1 timer\n");
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt)
{
wakeup_source_init(&mhi_dev_ctxt->w_lock, "mhi_wakeup_source");
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 i = 0;
struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
struct mhi_event_ctxt *event_ctxt = NULL;
u32 event_ring_index = 0;
union mhi_xfer_pkt *trb_list = NULL;
struct mhi_chan_ctxt *chan_ctxt = NULL;
struct mhi_ring *local_event_ctxt = NULL;
u32 msi_vec = 0;
u32 intmod_t = 0;
uintptr_t ev_ring_addr;
for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
mhi_dev_ctxt->ev_ring_props[i],
msi_vec);
switch (i) {
case IPA_OUT_EV_RING:
intmod_t = 10;
break;
case IPA_IN_EV_RING:
intmod_t = 6;
break;
}
event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index];
local_event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
ev_ring_addr = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
(uintptr_t)mhi_ctrl->ev_trb_list[i]);
mhi_log(MHI_MSG_VERBOSE,
"Setting msi_vec 0x%x, for ev ring ctxt 0x%x\n",
msi_vec, event_ring_index);
mhi_event_ring_init(event_ctxt, ev_ring_addr,
(uintptr_t)mhi_ctrl->ev_trb_list[i],
EV_EL_PER_RING, local_event_ctxt,
intmod_t, msi_vec);
}
/* Init Command Ring */
mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING],
mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING]),
(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING],
CMD_EL_PER_RING,
&mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]);
mhi_log(MHI_MSG_INFO, "Initializing contexts\n");
/* Initialize Channel Contexts */
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
trb_list = mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i];
chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];
if (IS_SOFTWARE_CHANNEL(i)) {
mhi_init_chan_ctxt(chan_ctxt,
mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
(uintptr_t)trb_list),
(uintptr_t)trb_list,
MAX_NR_TRBS_PER_SOFT_CHAN,
(i % 2) ? MHI_IN : MHI_OUT,
0,
&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
} else if (IS_HARDWARE_CHANNEL(i)) {
mhi_init_chan_ctxt(chan_ctxt,
mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
(uintptr_t)trb_list),
(uintptr_t)trb_list,
MAX_NR_TRBS_PER_HARD_CHAN,
(i % 2) ? MHI_IN : MHI_OUT,
i,
&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
}
}
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_work_queues(
struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->work_queue = create_singlethread_workqueue("mhi");
if (NULL == mhi_dev_ctxt->work_queue) {
mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
"Failed to create MHI work queue.\n");
return MHI_STATUS_ERROR;
}
INIT_DELAYED_WORK(&mhi_dev_ctxt->m3_work, delayed_m3);
INIT_WORK(&mhi_dev_ctxt->m0_work, m0_work);
return MHI_STATUS_SUCCESS;
}
/**
* @brief Spawn all the MHI threads
*
* @param mhi_dev_ctxt MHI device context
*
* @return MHI_STATUS
*
*/
static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->event_thread_handle = kthread_run(parse_event_thread,
mhi_dev_ctxt,
"mhi_ev_thrd");
if (IS_ERR(mhi_dev_ctxt->event_thread_handle))
return MHI_STATUS_ERROR;
mhi_dev_ctxt->st_thread_handle = kthread_run(mhi_state_change_thread,
mhi_dev_ctxt,
"mhi_st_thrd");
if (IS_ERR(mhi_dev_ctxt->st_thread_handle))
return MHI_STATUS_ERROR;
return MHI_STATUS_SUCCESS;
}
/**
* @brief Main initialization function for a mhi struct device context
* All threads, events mutexes, mhi specific data structures
* are initialized here
*
* @param dev_info [IN] pcie device information structure to
* which this mhi context belongs
* @param mhi_dev_ctxt [IN/OUT] reference to a mhi context to be populated
*
* @return MHI_STATUS
*/
enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
struct mhi_device_ctxt *mhi_dev_ctxt)
{
if (NULL == dev_info || NULL == mhi_dev_ctxt)
return MHI_STATUS_ERROR;
mhi_log(MHI_MSG_VERBOSE, "mhi_init_device_ctxt>Init MHI dev ctxt\n");
if (MHI_STATUS_SUCCESS != mhi_create_ctxt(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi dev ctxt\n");
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_sync(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi sync\n");
mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_SYNC);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_ctrl_zone(dev_info, mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize memory zones\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_MEM_ZONES);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_events(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi events\n");
mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_EVENTS);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_reset_all_thread_queues(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize work queues\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_THREAD_QUEUES);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_device_ctrl(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize ctrl seg\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_THREAD_QUEUES);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_contexts(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed initializing contexts\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_spawn_threads(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to spawn threads\n");
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_timers(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed initializing timers\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_wakelock(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize wakelock\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_work_queues(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR,
"Failed initializing work queues\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
mhi_dev_ctxt->dev_info = dev_info;
mhi_dev_ctxt->dev_props = &dev_info->core;
return MHI_STATUS_SUCCESS;
}
enum MHI_STATUS mhi_init_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 nr_ev_el, u32 event_ring_index)
{
union mhi_event_pkt *ev_pkt = NULL;
u32 i = 0;
unsigned long flags = 0;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
spinlock_t *lock;
struct mhi_ring *event_ctxt;
if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
return MHI_STATUS_ERROR;
}
lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[event_ring_index];
event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
spin_lock_irqsave(lock, flags);
mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
mhi_dev_ctxt->mmio_addr, mhi_dev_ctxt->mmio_len);
mhi_log(MHI_MSG_INFO,
"Initializing event ring %d\n", event_ring_index);
for (i = 0; i < nr_ev_el - 1; ++i) {
ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
if (MHI_STATUS_SUCCESS != ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to insert el in ev ctxt\n");
ret_val = MHI_STATUS_ERROR;
break;
}
}
spin_unlock_irqrestore(lock, flags);
return ret_val;
}
/**
* @brief Initialize the channel context and shadow context
*
* @cc_list: Context to initialize
* @trb_list_phy: Physical base address for the TRE ring
* @trb_list_virt: Virtual base address for the TRE ring
* @el_per_ring: Number of TREs this ring will contain
* @chan_type: Type of channel IN/OUT
* @event_ring: Event ring to be mapped to this channel context
* @ring: Shadow context to be initialized alongside
*
* @Return MHI_STATUS
*/
enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
uintptr_t trb_list_phy, uintptr_t trb_list_virt,
u64 el_per_ring, enum MHI_CHAN_TYPE chan_type,
u32 event_ring, struct mhi_ring *ring)
{
cc_list->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
cc_list->mhi_chan_type = chan_type;
cc_list->mhi_event_ring_index = event_ring;
cc_list->mhi_trb_ring_base_addr = trb_list_phy;
cc_list->mhi_trb_ring_len =
((size_t)(el_per_ring)*sizeof(struct mhi_tx_pkt));
cc_list->mhi_trb_read_ptr = trb_list_phy;
cc_list->mhi_trb_write_ptr = trb_list_phy;
ring->rp = (void *)(trb_list_virt);
ring->ack_rp = ring->rp;
ring->wp = (void *)(trb_list_virt);
ring->base = (void *)(trb_list_virt);
ring->len = ((size_t)(el_per_ring)*sizeof(struct mhi_tx_pkt));
ring->el_size = sizeof(struct mhi_tx_pkt);
ring->overwrite_en = 0;
return MHI_STATUS_SUCCESS;
}
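/*
 * Note (illustrative): mhi_init_contexts() above passes alternating
 * directions, (i % 2) ? MHI_IN : MHI_OUT, so odd-numbered channels are
 * initialized as IN rings and even-numbered ones as OUT rings.
 */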
enum MHI_STATUS mhi_reset_all_thread_queues(
struct mhi_device_ctxt *mhi_dev_ctxt)
{
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
ret_val = mhi_init_state_change_thread_work_queue(
&mhi_dev_ctxt->state_change_work_item_list);
if (MHI_STATUS_SUCCESS != ret_val) {
mhi_log(MHI_MSG_ERROR, "Failed to reset STT work queue\n");
return ret_val;
}
return ret_val;
}
enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 ret_val;
if (NULL == mhi_dev_ctxt)
return MHI_STATUS_ERROR;
mhi_dev_ctxt->mhi_cpu_notifier.notifier_call = mhi_cpu_notifier_cb;
ret_val = register_cpu_notifier(&mhi_dev_ctxt->mhi_cpu_notifier);
if (ret_val)
return MHI_STATUS_ERROR;
else
return MHI_STATUS_SUCCESS;
}
| gpl-2.0 |
sensarliar/zfcs_linux | drivers/lguest/x86/core.c | 559 | 23543 | /*
* Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
* Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*P:450
* This file contains the x86-specific lguest code. It used to be all
* mixed in with drivers/lguest/core.c but several foolhardy code slashers
* wrestled most of the dependencies out to here in preparation for porting
* lguest to other architectures (see what I mean by foolhardy?).
*
* This also contains a couple of non-obvious setup and teardown pieces which
* were implemented after days of debugging pain.
:*/
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/lguest.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include "../lg.h"
static int cpu_had_pge;
static struct {
unsigned long offset;
unsigned short segment;
} lguest_entry;
/* Offset from where switcher.S was compiled to where we've copied it */
static unsigned long switcher_offset(void)
{
return SWITCHER_ADDR - (unsigned long)start_switcher_text;
}
/* This cpu's struct lguest_pages. */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
return &(((struct lguest_pages *)
(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
/*S:010
* We approach the Switcher.
*
* Remember that each CPU has two pages which are visible to the Guest when it
* runs on that CPU. This has to contain the state for that Guest: we copy the
* state in just before we run the Guest.
*
* Each Guest has "changed" flags which indicate what has changed in the Guest
* since it last ran. We saw this set in interrupts_and_traps.c and
* segments.c.
*/
static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
{
/*
* Copying all this data can be quite expensive. We usually run the
* same Guest we ran last time (and that Guest hasn't run anywhere else
* meanwhile). If that's not the case, we pretend everything in the
* Guest has changed.
*/
if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
__this_cpu_write(lg_last_cpu, cpu);
cpu->last_pages = pages;
cpu->changed = CHANGED_ALL;
}
/* These copies are pretty cheap, so we do them unconditionally: */
/* Save the current Host top-level page directory. */
pages->state.host_cr3 = __pa(current->mm->pgd);
/*
* Set up the Guest's page tables to see this CPU's pages (and no
* other CPU's pages).
*/
map_switcher_in_guest(cpu, pages);
/*
* Set up the two "TSS" members which tell the CPU what stack to use
* for traps which go directly into the Guest (ie. traps at privilege
* level 1).
*/
pages->state.guest_tss.sp1 = cpu->esp1;
pages->state.guest_tss.ss1 = cpu->ss1;
/* Copy direct-to-Guest trap entries. */
if (cpu->changed & CHANGED_IDT)
copy_traps(cpu, pages->state.guest_idt, default_idt_entries);
/* Copy all GDT entries which the Guest can change. */
if (cpu->changed & CHANGED_GDT)
copy_gdt(cpu, pages->state.guest_gdt);
/* If only the TLS entries have changed, copy them. */
else if (cpu->changed & CHANGED_GDT_TLS)
copy_gdt_tls(cpu, pages->state.guest_gdt);
/* Mark the Guest as unchanged for next time. */
cpu->changed = 0;
}
/* Finally: the code to actually call into the Switcher to run the Guest. */
static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
{
/* This is a dummy value we need for GCC's sake. */
unsigned int clobber;
/*
* Copy the guest-specific information into this CPU's "struct
* lguest_pages".
*/
copy_in_guest_info(cpu, pages);
/*
* Set the trap number to 256 (impossible value). If we fault while
* switching to the Guest (bad segment registers or bug), this will
* cause us to abort the Guest.
*/
cpu->regs->trapnum = 256;
/*
* Now: we push the "eflags" register on the stack, then do an "lcall".
* This is how we change from using the kernel code segment to using
* the dedicated lguest code segment, as well as jumping into the
* Switcher.
*
* The lcall also pushes the old code segment (KERNEL_CS) onto the
* stack, then the address of this call. This stack layout happens to
* exactly match the stack layout created by an interrupt...
*/
asm volatile("pushf; lcall *lguest_entry"
/*
* This is how we tell GCC that %eax ("a") and %ebx ("b")
* are changed by this routine. The "=" means output.
*/
: "=a"(clobber), "=b"(clobber)
/*
* %eax contains the pages pointer. ("0" refers to the
* 0-th argument above, ie "a"). %ebx contains the
* physical address of the Guest's top-level page
* directory.
*/
: "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
/*
* We tell gcc that all these registers could change,
* which means we don't have to save and restore them in
* the Switcher.
*/
: "memory", "%edx", "%ecx", "%edi", "%esi");
}
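/*
 * Illustrative stack picture after the "pushf; lcall" above (highest
 * address first):
 *
 *	[ eflags ] [ KERNEL_CS ] [ return address ]
 *
 * which is exactly what the CPU pushes for an interrupt, so the
 * Switcher can "iret" straight back here.
 */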
/*:*/
/*M:002
* There are hooks in the scheduler which we can register to tell when we
* get kicked off the CPU (preempt_notifier_register()). This would allow us
* to lazily disable SYSENTER which would regain some performance, and should
* also simplify copy_in_guest_info(). Note that we'd still need to restore
* things when we exit to Launcher userspace, but that's fairly easy.
*
* We could also try using these hooks for PGE, but that might be too expensive.
*
* The hooks were designed for KVM, but we can also put them to good use.
:*/
/*H:040
* This is the i386-specific code to setup and run the Guest. Interrupts
* are disabled: we own the CPU.
*/
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
/*
* Remember the awfully-named TS bit? If the Guest has asked to set it
* we set it now, so we can trap and pass that trap to the Guest if it
* uses the FPU.
*/
if (cpu->ts)
unlazy_fpu(current);
/*
* SYSENTER is an optimized way of doing system calls. We can't allow
* it because it always jumps to privilege level 0. A normal Guest
* won't try it because we don't advertise it in CPUID, but a malicious
* Guest (or malicious Guest userspace program) could, so we tell the
* CPU to disable it before running the Guest.
*/
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
/*
* Now we actually run the Guest. It will return when something
* interesting happens, and we can examine its registers to see what it
* was doing.
*/
run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));
/*
* Note that the "regs" structure contains two extra entries which are
* not really registers: a trap number which says what interrupt or
* trap made the switcher code come back, and an error code which some
* traps set.
*/
/* Restore SYSENTER if it's supposed to be on. */
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
/*
* If the Guest page faulted, then the cr2 register will tell us the
* bad virtual address. We have to grab this now, because once we
* re-enable interrupts an interrupt could fault and thus overwrite
* cr2, or we could even move off to a different CPU.
*/
if (cpu->regs->trapnum == 14)
cpu->arch.last_pagefault = read_cr2();
/*
* Similarly, if we took a trap because the Guest used the FPU,
* we have to restore the FPU it expects to see.
* math_state_restore() may sleep and we may even move off to
* a different CPU. So all the critical stuff should be done
* before this.
*/
else if (cpu->regs->trapnum == 7)
math_state_restore();
}
/*H:130
* Now we've examined the hypercall code; our Guest can make requests.
* Our Guest is usually so well behaved; it never tries to do things it isn't
* allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual
* infrastructure isn't quite complete, because it doesn't contain replacements
* for the Intel I/O instructions. As a result, the Guest sometimes fumbles
* across one during the boot process as it probes for various things which are
* usually attached to a PC.
*
* When the Guest uses one of these instructions, we get a trap (General
* Protection Fault) and come here. We see if it's one of those troublesome
* instructions and skip over it. We return true if we did.
*/
static int emulate_insn(struct lg_cpu *cpu)
{
u8 insn;
unsigned int insnlen = 0, in = 0, small_operand = 0;
/*
* The eip contains the *virtual* address of the Guest's instruction:
* walk the Guest's page tables to find the "physical" address.
*/
unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
/*
* This must be the Guest kernel trying to do something, not userspace!
* The bottom two bits of the CS segment register are the privilege
* level.
*/
if ((cpu->regs->cs & 3) != GUEST_PL)
return 0;
/* Decoding x86 instructions is icky. */
insn = lgread(cpu, physaddr, u8);
/*
* Around 2.6.33, the kernel started using an emulation for the
* cmpxchg8b instruction in early boot on many configurations. This
* code isn't paravirtualized, and it tries to disable interrupts.
* Ignore it, which will Mostly Work.
*/
if (insn == 0xfa) {
/* "cli", or Clear Interrupt Enable instruction. Skip it. */
cpu->regs->eip++;
return 1;
}
/*
* 0x66 is an "operand prefix". It means a 16, not 32 bit in/out.
*/
if (insn == 0x66) {
small_operand = 1;
/* The instruction is 1 byte so far, read the next byte. */
insnlen = 1;
insn = lgread(cpu, physaddr + insnlen, u8);
}
/*
* We can ignore the lower bit for the moment and decode the 4 opcodes
* we need to emulate.
*/
switch (insn & 0xFE) {
case 0xE4: /* in <next byte>,%al */
insnlen += 2;
in = 1;
break;
case 0xEC: /* in (%dx),%al */
insnlen += 1;
in = 1;
break;
case 0xE6: /* out %al,<next byte> */
insnlen += 2;
break;
case 0xEE: /* out %al,(%dx) */
insnlen += 1;
break;
default:
/* OK, we don't know what this is, can't emulate. */
return 0;
}
/*
* If it was an "IN" instruction, they expect the result to be read
* into %eax, so we change %eax. We always return all-ones, which
* traditionally means "there's nothing there".
*/
if (in) {
/* Lower bit tells whether it's a 32/16 bit access */
if (insn & 0x1) {
if (small_operand)
cpu->regs->eax |= 0xFFFF;
else
cpu->regs->eax = 0xFFFFFFFF;
} else
cpu->regs->eax |= 0xFF;
}
/* Finally, we've "done" the instruction, so move past it. */
cpu->regs->eip += insnlen;
/* Success! */
return 1;
}
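/*
 * Worked example (illustrative): the byte sequence 0x66 0xED is a
 * 16-bit "in (%dx),%ax".  We read 0x66 and set small_operand, read
 * 0xED and match "case 0xEC" (in = 1), fill %ax with 0xFFFF, and
 * advance eip past both bytes.
 */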
/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lg_cpu *cpu)
{
switch (cpu->regs->trapnum) {
case 13: /* We've intercepted a General Protection Fault. */
/*
* Check if this was one of those annoying IN or OUT
* instructions which we need to emulate. If so, we just go
* back into the Guest after we've done it.
*/
if (cpu->regs->errcode == 0) {
if (emulate_insn(cpu))
return;
}
break;
case 14: /* We've intercepted a Page Fault. */
/*
* The Guest accessed a virtual address that wasn't mapped.
* This happens a lot: we don't actually set up most of the page
* tables for the Guest at all when we start: as it runs it asks
* for more and more, and we set them up as required. In this
* case, we don't even tell the Guest that the fault happened.
*
* The errcode tells whether this was a read or a write, and
* whether kernel or userspace code.
*/
if (demand_page(cpu, cpu->arch.last_pagefault,
cpu->regs->errcode))
return;
/*
* OK, it's really not there (or not OK): the Guest needs to
* know. We write out the cr2 value so it knows where the
* fault occurred.
*
* Note that if the Guest were really messed up, this could
* happen before it's done the LHCALL_LGUEST_INIT hypercall, so
* lg->lguest_data could be NULL
*/
if (cpu->lg->lguest_data &&
put_user(cpu->arch.last_pagefault,
&cpu->lg->lguest_data->cr2))
kill_guest(cpu, "Writing cr2");
break;
case 7: /* We've intercepted a Device Not Available fault. */
/*
* If the Guest doesn't want to know, we already restored the
* Floating Point Unit, so we just continue without telling it.
*/
if (!cpu->ts)
return;
break;
case 32 ... 255:
/*
* These values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again.
*/
cond_resched();
return;
case LGUEST_TRAP_ENTRY:
/*
* Our 'struct hcall_args' maps directly over our regs: we set
* up the pointer now to indicate a hypercall is pending.
*/
cpu->hcall = (struct hcall_args *)cpu->regs;
return;
}
/* We didn't handle the trap, so it needs to go to the Guest. */
if (!deliver_trap(cpu, cpu->regs->trapnum))
/*
* If the Guest doesn't have a handler (either it hasn't
* registered any yet, or it's one of the faults we don't let
* it handle), it dies with this cryptic error message.
*/
kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
cpu->regs->trapnum, cpu->regs->eip,
cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
: cpu->regs->errcode);
}
/*
* Now we can look at each of the routines this calls, in increasing order of
* complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
* deliver_trap() and demand_page(). After all those, we'll be ready to
* examine the Switcher, and our philosophical understanding of the Host/Guest
* duality will be complete.
:*/
static void adjust_pge(void *on)
{
if (on)
write_cr4(read_cr4() | X86_CR4_PGE);
else
write_cr4(read_cr4() & ~X86_CR4_PGE);
}
/*H:020
* Now the Switcher is mapped and everything else is ready, we need to do
* some more i386-specific initialization.
*/
void __init lguest_arch_host_init(void)
{
int i;
/*
* Most of the x86/switcher_32.S doesn't care that it's been moved; on
* Intel, jumps are relative, and it doesn't access any references to
* external code or data.
*
* The only exception is the interrupt handlers in switcher.S: their
* addresses are placed in a table (default_idt_entries), so we need to
* update the table with the new addresses. switcher_offset() is a
* convenience function which returns the distance between the
* compiled-in switcher code and the high-mapped copy we just made.
*/
for (i = 0; i < IDT_ENTRIES; i++)
default_idt_entries[i] += switcher_offset();
/*
* Set up the Switcher's per-cpu areas.
*
* Each CPU gets two pages of its own within the high-mapped region
* (aka. "struct lguest_pages"). Much of this can be initialized now,
* but some depends on what Guest we are running (which is set up in
* copy_in_guest_info()).
*/
for_each_possible_cpu(i) {
/* lguest_pages() returns this CPU's two pages. */
struct lguest_pages *pages = lguest_pages(i);
/* This is a convenience pointer to make the code neater. */
struct lguest_ro_state *state = &pages->state;
/*
* The Global Descriptor Table: the Host has a different one
* for each CPU. We keep a descriptor for the GDT which says
* where it is and how big it is (the size is actually the last
* byte, not the size, hence the "-1").
*/
state->host_gdt_desc.size = GDT_SIZE-1;
state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
/*
* All CPUs on the Host use the same Interrupt Descriptor
* Table, so we just use store_idt(), which gets this CPU's IDT
* descriptor.
*/
store_idt(&state->host_idt_desc);
/*
* The descriptors for the Guest's GDT and IDT can be filled
* out now, too. We copy the GDT & IDT into ->guest_gdt and
* ->guest_idt before actually running the Guest.
*/
state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
state->guest_idt_desc.address = (long)&state->guest_idt;
state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
state->guest_gdt_desc.address = (long)&state->guest_gdt;
/*
* We know where we want the stack to be when the Guest enters
* the Switcher: in pages->regs. The stack grows upwards, so
* we start it at the end of that structure.
*/
state->guest_tss.sp0 = (long)(&pages->regs + 1);
/*
* And this is the GDT entry to use for the stack: we keep a
* couple of special LGUEST entries.
*/
state->guest_tss.ss0 = LGUEST_DS;
/*
* x86 can have a finegrained bitmap which indicates what I/O
* ports the process can use. We set it to the end of our
* structure, meaning "none".
*/
state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
/*
* Some GDT entries are the same across all Guests, so we can
* set them up now.
*/
setup_default_gdt_entries(state);
/* Most IDT entries are the same for all Guests, too.*/
setup_default_idt_entries(state, default_idt_entries);
/*
* The Host needs to be able to use the LGUEST segments on this
* CPU, too, so put them in the Host GDT.
*/
get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
}
/*
* In the Switcher, we want the %cs segment register to use the
* LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
* it will be undisturbed when we switch. To change %cs and jump we
* need this structure to feed to Intel's "lcall" instruction.
*/
lguest_entry.offset = (long)switch_to_guest + switcher_offset();
lguest_entry.segment = LGUEST_CS;
/*
* Finally, we need to turn off "Page Global Enable". PGE is an
* optimization where page table entries are specially marked to show
* they never change. The Host kernel marks all the kernel pages this
* way because it's always present, even when userspace is running.
*
* Lguest breaks this: unbeknownst to the rest of the Host kernel, we
* switch to the Guest kernel. If you don't disable this on all CPUs,
* you'll get really weird bugs that you'll chase for two days.
*
* I used to turn PGE off every time we switched to the Guest and back
* on when we return, but that slowed the Switcher down noticeably.
*/
/*
* We don't need the complexity of CPUs coming and going while we're
* doing this.
*/
get_online_cpus();
if (cpu_has_pge) { /* We have a broader idea of "global". */
/* Remember that this was originally set (for cleanup). */
cpu_had_pge = 1;
/*
* adjust_pge is a helper function which sets or unsets the PGE
* bit on its CPU, depending on the argument (0 == unset).
*/
on_each_cpu(adjust_pge, (void *)0, 1);
/* Turn off the feature in the global feature set. */
clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
}
put_online_cpus();
}
/*:*/
void __exit lguest_arch_host_fini(void)
{
/* If we had PGE before we started, turn it back on now. */
get_online_cpus();
if (cpu_had_pge) {
set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
/* adjust_pge's argument "1" means set PGE. */
on_each_cpu(adjust_pge, (void *)1, 1);
}
put_online_cpus();
}
/*H:122 The i386-specific hypercalls simply farm out to the right functions. */
int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
switch (args->arg0) {
case LHCALL_LOAD_GDT_ENTRY:
load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3);
break;
case LHCALL_LOAD_IDT_ENTRY:
load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
break;
case LHCALL_LOAD_TLS:
guest_load_tls(cpu, args->arg1);
break;
default:
/* Bad Guest. Bad! */
return -EIO;
}
return 0;
}
/*H:126 i386-specific hypercall initialization: */
int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
{
u32 tsc_speed;
/*
* The pointer to the Guest's "struct lguest_data" is the only argument.
* We check that address now.
*/
if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
sizeof(*cpu->lg->lguest_data)))
return -EFAULT;
/*
* Having checked it, we simply set lg->lguest_data to point straight
* into the Launcher's memory at the right place and then use
* copy_to_user/from_user from now on, instead of lgread/write. I put
* this in to show that I'm not immune to writing stupid
* optimizations.
*/
cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;
/*
* We insist that the Time Stamp Counter exists and doesn't change with
* CPU frequency. Some devious chip manufacturers decided that TSC
* changes could be handled in software. I decided that time going
* backwards might be good for benchmarks, but it's bad for users.
*
* We also insist that the TSC be stable: the kernel detects unreliable
* TSCs for its own purposes, and we use that here.
*/
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
tsc_speed = tsc_khz;
else
tsc_speed = 0;
if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
return -EFAULT;
/* The interrupt code might not like the system call vector. */
if (!check_syscall_vector(cpu->lg))
kill_guest(cpu, "bad syscall vector");
return 0;
}
/*:*/
/*L:030
* Most of the Guest's registers are left alone: we used get_zeroed_page() to
* allocate the structure, so they will be 0.
*/
void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
{
struct lguest_regs *regs = cpu->regs;
/*
* There are four "segment" registers which the Guest needs to boot:
* The "code segment" register (cs) refers to the kernel code segment
* __KERNEL_CS, and the "data", "extra" and "stack" segment registers
* refer to the kernel data segment __KERNEL_DS.
*
* The privilege level is packed into the lower bits. The Guest runs
* at privilege level 1 (GUEST_PL).
*/
regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
regs->cs = __KERNEL_CS|GUEST_PL;
/*
* The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
* is supposed to always be "1". Bit 9 (0x200) controls whether
* interrupts are enabled. We always leave interrupts enabled while
* running the Guest.
*/
regs->eflags = X86_EFLAGS_IF | 0x2;
/*
* The "Extended Instruction Pointer" register says where the Guest is
* running.
*/
regs->eip = start;
/*
* %esi points to our boot information, at physical address 0, so don't
* touch it.
*/
/* There are a couple of GDT entries the Guest expects at boot. */
setup_guest_gdt(cpu);
}
| gpl-2.0 |
clamor95/kernel_asus_tf300tl | drivers/acpi/dock.c | 559 | 28514 | /*
* dock.c - ACPI dock station driver
*
* Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/stddef.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#define PREFIX "ACPI: "
#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
ACPI_MODULE_NAME("dock");
MODULE_AUTHOR("Kristen Carlson Accardi");
MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
static bool immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
"undock immediately when the undock button is pressed, 0 will cause"
" the driver to wait for userspace to write the undock sysfs file"
" before undocking");
static struct atomic_notifier_head dock_notifier_list;
static const struct acpi_device_id dock_device_ids[] = {
{"LNXDOCK", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, dock_device_ids);
struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
u32 flags;
spinlock_t dd_lock;
struct mutex hp_lock;
struct list_head dependent_devices;
struct list_head hotplug_devices;
struct list_head sibling;
struct platform_device *dock_device;
};
static LIST_HEAD(dock_stations);
static int dock_station_count;
struct dock_dependent_device {
struct list_head list;
struct list_head hotplug_list;
acpi_handle handle;
const struct acpi_dock_ops *ops;
void *context;
};
#define DOCK_DOCKING 0x00000001
#define DOCK_UNDOCKING 0x00000002
#define DOCK_IS_DOCK 0x00000010
#define DOCK_IS_ATA 0x00000020
#define DOCK_IS_BAT 0x00000040
#define DOCK_EVENT 3
#define UNDOCK_EVENT 2
/*****************************************************************************
* Dock Dependent device functions *
*****************************************************************************/
/**
* add_dock_dependent_device - associate a device with the dock station
* @ds: The dock station
* @handle: handle of the dependent device
*
* Add the dependent device to the dock's dependent device list.
*/
static int
add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
dd->handle = handle;
INIT_LIST_HEAD(&dd->list);
INIT_LIST_HEAD(&dd->hotplug_list);
spin_lock(&ds->dd_lock);
list_add_tail(&dd->list, &ds->dependent_devices);
spin_unlock(&ds->dd_lock);
return 0;
}
/**
* dock_add_hotplug_device - associate a hotplug handler with the dock station
* @ds: The dock station
* @dd: The dependent device struct
*
* Add the dependent device to the dock's hotplug device list
*/
static void
dock_add_hotplug_device(struct dock_station *ds,
struct dock_dependent_device *dd)
{
mutex_lock(&ds->hp_lock);
list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
mutex_unlock(&ds->hp_lock);
}
/**
* dock_del_hotplug_device - remove a hotplug handler from the dock station
* @ds: The dock station
* @dd: the dependent device struct
*
* Delete the dependent device from the dock's hotplug device list
*/
static void
dock_del_hotplug_device(struct dock_station *ds,
struct dock_dependent_device *dd)
{
mutex_lock(&ds->hp_lock);
list_del(&dd->hotplug_list);
mutex_unlock(&ds->hp_lock);
}
/**
* find_dock_dependent_device - get a device dependent on this dock
* @ds: the dock station
* @handle: the acpi_handle of the device we want
*
* Iterate over the dependent device list for this dock. If the
* dependent device matches the handle, return it.
*/
static struct dock_dependent_device *
find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
spin_lock(&ds->dd_lock);
list_for_each_entry(dd, &ds->dependent_devices, list) {
if (handle == dd->handle) {
spin_unlock(&ds->dd_lock);
return dd;
}
}
spin_unlock(&ds->dd_lock);
return NULL;
}
/*****************************************************************************
* Dock functions *
*****************************************************************************/
/**
* is_dock - see if a device is a dock station
* @handle: acpi handle of the device
*
* If an acpi object has a _DCK method, then it is by definition a dock
* station, so return true.
*/
static int is_dock(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
status = acpi_get_handle(handle, "_DCK", &tmp);
if (ACPI_FAILURE(status))
return 0;
return 1;
}
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
status = acpi_get_handle(handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status))
return 0;
return 1;
}
static int is_ata(acpi_handle handle)
{
acpi_handle tmp;
if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
(ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
(ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
(ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
return 1;
return 0;
}
static int is_battery(acpi_handle handle)
{
struct acpi_device_info *info;
int ret = 1;
if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
return 0;
if (!(info->valid & ACPI_VALID_HID))
ret = 0;
else
ret = !strcmp("PNP0C0A", info->hardware_id.string);
kfree(info);
return ret;
}
static int is_ejectable_bay(acpi_handle handle)
{
acpi_handle phandle;
if (!is_ejectable(handle))
return 0;
if (is_battery(handle) || is_ata(handle))
return 1;
if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
return 1;
return 0;
}
/**
* is_dock_device - see if a device is on a dock station
* @handle: acpi handle of the device
*
* If this device is either the dock station itself,
* or is a device dependent on the dock station, then it
* is a dock device
*/
int is_dock_device(acpi_handle handle)
{
struct dock_station *dock_station;
if (!dock_station_count)
return 0;
if (is_dock(handle))
return 1;
list_for_each_entry(dock_station, &dock_stations, sibling)
if (find_dock_dependent_device(dock_station, handle))
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(is_dock_device);
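/*
 * Illustrative sketch (not from the original source): a hotplug driver
 * would typically gate its dock registration on this check; my_dock_ops
 * and my_context are hypothetical names:
 *
 * if (is_dock_device(handle))
 *         register_hotplug_dock_device(handle, &my_dock_ops, my_context);
 */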
/**
* dock_present - see if the dock station is present.
* @ds: the dock station
*
* Execute the _STA method. Note that present does not
* imply that we are docked.
*/
static int dock_present(struct dock_station *ds)
{
unsigned long long sta;
acpi_status status;
if (ds) {
status = acpi_evaluate_integer(ds->handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && sta)
return 1;
}
return 0;
}
/**
* dock_create_acpi_device - add new devices to acpi
* @handle: handle of the device to add
*
* This function will create a new acpi_device for the given
* handle if one does not exist already. This should cause
* acpi to scan for drivers for the given devices, and call
* the matching driver's add routine.
*
* Returns a pointer to the acpi_device corresponding to the handle.
*/
static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
struct acpi_device *parent_device;
acpi_handle parent;
int ret;
if (acpi_bus_get_device(handle, &device)) {
/*
* no device created for this object,
* so we should create one.
*/
acpi_get_parent(handle, &parent);
if (acpi_bus_get_device(parent, &parent_device))
parent_device = NULL;
ret = acpi_bus_add(&device, parent_device, handle,
ACPI_BUS_TYPE_DEVICE);
if (ret) {
pr_debug("error adding bus, %x\n", -ret);
return NULL;
}
}
return device;
}
/**
* dock_remove_acpi_device - remove the acpi_device struct from acpi
* @handle: the handle of the device to remove
*
* Tell acpi to remove the acpi_device. This should cause any loaded
* driver to have its remove routine called.
*/
static void dock_remove_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
int ret;
if (!acpi_bus_get_device(handle, &device)) {
ret = acpi_bus_trim(device, 1);
if (ret)
pr_debug("error removing bus, %x\n", -ret);
}
}
/**
* hotplug_dock_devices - insert or remove devices on the dock station
* @ds: the dock station
* @event: either bus check or eject request
*
* Some devices on the dock station need to have drivers called
* to perform hotplug operations after a dock event has occurred.
* Traverse the list of dock devices that have registered a
* hotplug handler, and call the handler.
*/
static void hotplug_dock_devices(struct dock_station *ds, u32 event)
{
struct dock_dependent_device *dd;
mutex_lock(&ds->hp_lock);
/*
* First call driver specific hotplug functions
*/
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->handler)
dd->ops->handler(dd->handle, event, dd->context);
/*
* Now make sure that an acpi_device is created for each
* dependent device, or removed if this is an eject request.
* This will cause acpi_drivers to be stopped/started if they
* exist
*/
list_for_each_entry(dd, &ds->dependent_devices, list) {
if (event == ACPI_NOTIFY_EJECT_REQUEST)
dock_remove_acpi_device(dd->handle);
else
dock_create_acpi_device(dd->handle);
}
mutex_unlock(&ds->hp_lock);
}
static void dock_event(struct dock_station *ds, u32 event, int num)
{
struct device *dev = &ds->dock_device->dev;
char event_string[13];
char *envp[] = { event_string, NULL };
struct dock_dependent_device *dd;
if (num == UNDOCK_EVENT)
sprintf(event_string, "EVENT=undock");
else
sprintf(event_string, "EVENT=dock");
/*
* Indicate that the status of the dock station has
* changed.
*/
if (num == DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->uevent)
dd->ops->uevent(dd->handle, event, dd->context);
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
/**
* eject_dock - respond to a dock eject request
* @ds: the dock station
*
* This is called after _DCK is called, to execute the dock station's
* _EJ0 method.
*/
static void eject_dock(struct dock_station *ds)
{
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
acpi_handle tmp;
/* all dock devices should have _EJ0, but check anyway */
status = acpi_get_handle(ds->handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status)) {
pr_debug("No _EJ0 support for dock device\n");
return;
}
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = 1;
status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status))
pr_debug("Failed to evaluate _EJ0!\n");
}
/**
* handle_dock - handle a dock event
* @ds: the dock station
* @dock: to dock, or undock - that is the question
*
* Execute the _DCK method in response to an acpi event
*/
static void handle_dock(struct dock_station *ds, int dock)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_INFO PREFIX "%s - %s\n",
(char *)name_buffer.pointer, dock ? "docking" : "undocking");
/* _DCK method has one argument */
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
" _DCK\n", (char *)name_buffer.pointer));
kfree(buffer.pointer);
kfree(name_buffer.pointer);
}
static inline void dock(struct dock_station *ds)
{
handle_dock(ds, 1);
}
static inline void undock(struct dock_station *ds)
{
handle_dock(ds, 0);
}
static inline void begin_dock(struct dock_station *ds)
{
ds->flags |= DOCK_DOCKING;
}
static inline void complete_dock(struct dock_station *ds)
{
ds->flags &= ~(DOCK_DOCKING);
ds->last_dock_time = jiffies;
}
static inline void begin_undock(struct dock_station *ds)
{
ds->flags |= DOCK_UNDOCKING;
}
static inline void complete_undock(struct dock_station *ds)
{
ds->flags &= ~(DOCK_UNDOCKING);
}
static void dock_lock(struct dock_station *ds, int lock)
{
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = !!lock;
status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
if (lock)
printk(KERN_WARNING PREFIX "Locking device failed\n");
else
printk(KERN_WARNING PREFIX "Unlocking device failed\n");
}
}
/**
* dock_in_progress - see if we are in the middle of handling a dock event
* @ds: the dock station
*
* Sometimes while docking, false dock events can be sent to the driver
* because good connections aren't made, or for some other reason. Ignore
* these if we are in the middle of doing something.
*/
static int dock_in_progress(struct dock_station *ds)
{
if ((ds->flags & DOCK_DOCKING) ||
time_before(jiffies, (ds->last_dock_time + HZ)))
return 1;
return 0;
}
/**
* register_dock_notifier - add yourself to the dock notifier list
* @nb: the caller's notifier block
*
* If a driver wishes to be notified about dock events, they can
* use this function to put a notifier block on the dock notifier list.
* This notifier call chain will be called after a dock event, but
* before hotplugging any new devices.
*/
int register_dock_notifier(struct notifier_block *nb)
{
if (!dock_station_count)
return -ENODEV;
return atomic_notifier_chain_register(&dock_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_dock_notifier);
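/*
 * Illustrative sketch (not part of the original source) of a client of
 * this interface; my_dock_event is a hypothetical callback:
 *
 * static int my_dock_event(struct notifier_block *nb,
 *                          unsigned long event, void *data)
 * {
 *         ... react to the dock event here, before hotplug runs ...
 *         return NOTIFY_OK;
 * }
 * static struct notifier_block my_dock_nb = {
 *         .notifier_call = my_dock_event,
 * };
 *
 * register_dock_notifier(&my_dock_nb);
 */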
/**
* unregister_dock_notifier - remove yourself from the dock notifier list
* @nb: the caller's notifier block
*/
void unregister_dock_notifier(struct notifier_block *nb)
{
if (!dock_station_count)
return;
atomic_notifier_chain_unregister(&dock_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_dock_notifier);
/**
* register_hotplug_dock_device - register a hotplug function
* @handle: the handle of the device
* @ops: handlers to call after docking
* @context: device specific data
*
* If a driver would like to perform a hotplug operation after a dock
* event, they can register an acpi_notify_handler to be called by
* the dock driver after _DCK is executed.
*/
int
register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
void *context)
{
struct dock_dependent_device *dd;
struct dock_station *dock_station;
int ret = -EINVAL;
if (!dock_station_count)
return -ENODEV;
/*
* make sure this handle is for a device dependent on the dock,
* this would include the dock station itself
*/
list_for_each_entry(dock_station, &dock_stations, sibling) {
/*
* An ATA bay can be in a dock and itself can be ejected
* separately, so there are two 'dock stations' which need the
* ops
*/
dd = find_dock_dependent_device(dock_station, handle);
if (dd) {
dd->ops = ops;
dd->context = context;
dock_add_hotplug_device(dock_station, dd);
ret = 0;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
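/*
 * Illustrative sketch (not from the original source): a bay or PCI slot
 * driver supplies its callbacks via acpi_dock_ops. The names below are
 * hypothetical; .handler and .uevent match the fields used by
 * hotplug_dock_devices() and dock_event() above:
 *
 * static const struct acpi_dock_ops my_dock_ops = {
 *         .handler = my_dock_handler,
 *         .uevent = my_dock_uevent,
 * };
 *
 * ret = register_hotplug_dock_device(handle, &my_dock_ops, my_context);
 */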
/**
* unregister_hotplug_dock_device - remove yourself from the hotplug list
* @handle: the acpi handle of the device
*/
void unregister_hotplug_dock_device(acpi_handle handle)
{
struct dock_dependent_device *dd;
struct dock_station *dock_station;
if (!dock_station_count)
return;
list_for_each_entry(dock_station, &dock_stations, sibling) {
dd = find_dock_dependent_device(dock_station, handle);
if (dd)
dock_del_hotplug_device(dock_station, dd);
}
}
EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
/**
* handle_eject_request - handle an undock request checking for error conditions
*
* Check to make sure the dock device is still present, then undock and
* hotremove all the devices that may need removing.
*/
static int handle_eject_request(struct dock_station *ds, u32 event)
{
if (dock_in_progress(ds))
return -EBUSY;
/*
* here we need to generate the undock
* event prior to actually doing the undock
* so that the device struct still exists.
* Also, send the event even if the
* device is not present anymore.
*/
dock_event(ds, event, UNDOCK_EVENT);
hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
undock(ds);
dock_lock(ds, 0);
eject_dock(ds);
if (dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to undock!\n");
return -EBUSY;
}
complete_undock(ds);
return 0;
}
/**
* dock_notify - act upon an acpi dock notification
* @handle: the dock station handle
* @event: the acpi event
* @data: our driver data struct
*
* If we are notified to dock, then check to see if the dock is
* present and then dock. Notify all drivers of the dock event,
* and then hotplug any devices that may need hotplugging.
*/
static void dock_notify(acpi_handle handle, u32 event, void *data)
{
struct dock_station *ds = data;
struct acpi_device *tmp;
int surprise_removal = 0;
/*
* According to the ACPI spec 3.0a, if a DEVICE_CHECK notification
* is sent and _DCK is present, it is assumed to mean an undock
* request.
*/
if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK)
event = ACPI_NOTIFY_EJECT_REQUEST;
/*
* dock station: BUS_CHECK - docked or surprise removal
* DEVICE_CHECK - undocked
* other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal
*
* To simplify event handling, dock dependent device handlers always
* get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
* ACPI_NOTIFY_EJECT_REQUEST for removal.
*/
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
&tmp)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to dock!\n");
complete_dock(ds);
break;
}
atomic_notifier_call_chain(&dock_notifier_list,
event, NULL);
hotplug_dock_devices(ds, event);
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
dock_lock(ds, 1);
acpi_update_all_gpes();
break;
}
if (dock_present(ds) || dock_in_progress(ds))
break;
/* This is a surprise removal */
surprise_removal = 1;
event = ACPI_NOTIFY_EJECT_REQUEST;
/* Fall through */
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
|| surprise_removal)
handle_eject_request(ds, event);
else
dock_event(ds, event, UNDOCK_EVENT);
break;
default:
printk(KERN_ERR PREFIX "Unknown dock event %d\n", event);
}
}
struct dock_data {
acpi_handle handle;
unsigned long event;
struct dock_station *ds;
};
static void acpi_dock_deferred_cb(void *context)
{
struct dock_data *data = context;
dock_notify(data->handle, data->event, data->ds);
kfree(data);
}
static int acpi_dock_notifier_call(struct notifier_block *this,
unsigned long event, void *data)
{
struct dock_station *dock_station;
acpi_handle handle = data;
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return 0;
list_for_each_entry(dock_station, &dock_stations, sibling) {
if (dock_station->handle == handle) {
struct dock_data *dd;
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
return 0;
dd->handle = handle;
dd->event = event;
dd->ds = dock_station;
acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
return 0;
}
}
return 0;
}
static struct notifier_block dock_acpi_notifier = {
.notifier_call = acpi_dock_notifier_call,
};
/**
* find_dock_devices - find devices on the dock station
* @handle: the handle of the device we are examining
* @lvl: unused
* @context: the dock station private data
* @rv: unused
*
* This function is called by acpi_walk_namespace. It will
* check to see if an object has an _EJD method. If it does, then it
* will see if it is dependent on the dock station.
*/
static acpi_status
find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
acpi_handle tmp, parent;
struct dock_station *ds = context;
status = acpi_bus_get_ejd(handle, &tmp);
if (ACPI_FAILURE(status)) {
/* try the parent device as well */
status = acpi_get_parent(handle, &parent);
if (ACPI_FAILURE(status))
goto fdd_out;
/* see if parent is dependent on dock */
status = acpi_bus_get_ejd(parent, &tmp);
if (ACPI_FAILURE(status))
goto fdd_out;
}
if (tmp == ds->handle)
add_dock_dependent_device(ds, handle);
fdd_out:
return AE_OK;
}
/*
* show_docked - read method for "docked" file in sysfs
*/
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *tmp;
struct dock_station *dock_station = dev->platform_data;
if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
return snprintf(buf, PAGE_SIZE, "1\n");
return snprintf(buf, PAGE_SIZE, "0\n");
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
/*
* show_flags - read method for flags file in sysfs
*/
static ssize_t show_flags(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
/*
* write_undock - write method for "undock" file in sysfs
*/
static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
struct dock_station *dock_station = dev->platform_data;
if (!count)
return -EINVAL;
begin_undock(dock_station);
ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
return ret ? ret : count;
}
static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
*/
static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
struct dock_station *dock_station = dev->platform_data;
acpi_status status = acpi_evaluate_integer(dock_station->handle,
"_UID", NULL, &lbuf);
if (ACPI_FAILURE(status))
return 0;
return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
}
static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
static ssize_t show_dock_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
char *type;
if (dock_station->flags & DOCK_IS_DOCK)
type = "dock_station";
else if (dock_station->flags & DOCK_IS_ATA)
type = "ata_bay";
else if (dock_station->flags & DOCK_IS_BAT)
type = "battery_bay";
else
type = "unknown";
return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
static struct attribute *dock_attributes[] = {
&dev_attr_docked.attr,
&dev_attr_flags.attr,
&dev_attr_undock.attr,
&dev_attr_uid.attr,
&dev_attr_type.attr,
NULL
};
static struct attribute_group dock_attribute_group = {
.attrs = dock_attributes
};
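/*
 * Note added for clarity (not in the original source): given the
 * platform_device_register_data(NULL, "dock", id, ...) call in dock_add()
 * below, these attributes should show up in sysfs as
 * /sys/devices/platform/dock.<N>/{docked,flags,undock,uid,type}, with
 * "undock" being the only writable file (see write_undock() above).
 */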
/**
* dock_add - add a new dock station
* @handle: the dock station handle
*
* Allocate and initialize a new dock station device. Find all devices
* that are on the dock station, and register for dock event notifications.
*/
static int __init dock_add(acpi_handle handle)
{
int ret, id;
struct dock_station ds, *dock_station;
struct platform_device *dd;
id = dock_station_count;
memset(&ds, 0, sizeof(ds));
dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
if (IS_ERR(dd))
return PTR_ERR(dd);
dock_station = dd->dev.platform_data;
dock_station->handle = handle;
dock_station->dock_device = dd;
dock_station->last_dock_time = jiffies - HZ;
mutex_init(&dock_station->hp_lock);
spin_lock_init(&dock_station->dd_lock);
INIT_LIST_HEAD(&dock_station->sibling);
INIT_LIST_HEAD(&dock_station->hotplug_devices);
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
INIT_LIST_HEAD(&dock_station->dependent_devices);
/* we want the dock device to send uevents */
dev_set_uevent_suppress(&dd->dev, 0);
if (is_dock(handle))
dock_station->flags |= DOCK_IS_DOCK;
if (is_ata(handle))
dock_station->flags |= DOCK_IS_ATA;
if (is_battery(handle))
dock_station->flags |= DOCK_IS_BAT;
ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
goto err_unregister;
/* Find dependent devices */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_dock_devices, NULL,
dock_station, NULL);
/* add the dock station as a device dependent on itself */
ret = add_dock_dependent_device(dock_station, handle);
if (ret)
goto err_rmgroup;
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
return 0;
err_rmgroup:
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
err_unregister:
platform_device_unregister(dd);
printk(KERN_ERR "%s encountered error %d\n", __func__, ret);
return ret;
}
/**
* dock_remove - free up resources related to the dock station
*/
static int dock_remove(struct dock_station *ds)
{
struct dock_dependent_device *dd, *tmp;
struct platform_device *dock_device = ds->dock_device;
if (!dock_station_count)
return 0;
/* remove dependent devices */
list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
kfree(dd);
list_del(&ds->sibling);
/* cleanup sysfs */
sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
platform_device_unregister(dock_device);
return 0;
}
/**
* find_dock - look for a dock station
* @handle: acpi handle of a device
* @lvl: unused
* @context: counter of dock stations found
* @rv: unused
*
* This is called by acpi_walk_namespace to look for dock stations.
*/
static __init acpi_status
find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
if (is_dock(handle))
dock_add(handle);
return AE_OK;
}
static __init acpi_status
find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
/* If bay is a dock, it's already handled */
if (is_ejectable_bay(handle) && !is_dock(handle))
dock_add(handle);
return AE_OK;
}
static int __init dock_init(void)
{
if (acpi_disabled)
return 0;
/* look for a dock station */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_dock, NULL, NULL, NULL);
/* look for bay */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_bay, NULL, NULL, NULL);
if (!dock_station_count) {
printk(KERN_INFO PREFIX "No dock devices found.\n");
return 0;
}
register_acpi_bus_notifier(&dock_acpi_notifier);
printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
return 0;
}
static void __exit dock_exit(void)
{
struct dock_station *tmp, *dock_station;
unregister_acpi_bus_notifier(&dock_acpi_notifier);
list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
dock_remove(dock_station);
}
/*
* Must be called before the drivers of devices in the dock, otherwise we
* can't know which devices are in a dock.
*/
subsys_initcall(dock_init);
module_exit(dock_exit);
| gpl-2.0 |
ikarosdev/triumph-kernel-msm7x30 | drivers/isdn/hisax/jade.c | 815 | 8560 | /* $Id: jade.c,v 1.9.2.4 2004/01/14 16:04:48 keil Exp $
*
* JADE stuff (derived from original hscx.c)
*
* Author Roland Klabunde
* Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "hscx.h"
#include "jade.h"
#include "isdnl1.h"
#include <linux/interrupt.h>
int
JadeVersion(struct IsdnCardState *cs, char *s)
{
int ver,i;
int to = 50;
cs->BC_Write_Reg(cs, -1, 0x50, 0x19);
i=0;
while (to) {
udelay(1);
ver = cs->BC_Read_Reg(cs, -1, 0x60);
to--;
if (ver)
break;
if (!to) {
printk(KERN_INFO "%s JADE version not obtainable\n", s);
return (0);
}
}
/* Wait for the JADE */
udelay(10);
/* Read version */
ver = cs->BC_Read_Reg(cs, -1, 0x60);
printk(KERN_INFO "%s JADE version: %d\n", s, ver);
return (1);
}
/* Write to indirect accessible jade register set */
static void
jade_write_indirect(struct IsdnCardState *cs, u_char reg, u_char value)
{
int to = 50;
u_char ret;
/* Write the data */
cs->BC_Write_Reg(cs, -1, COMM_JADE+1, value);
/* Tell the JADE we want to write indirect reg 'reg' */
cs->BC_Write_Reg(cs, -1, COMM_JADE, reg);
to = 50;
/* Wait for RDY to go high */
while (to) {
udelay(1);
ret = cs->BC_Read_Reg(cs, -1, COMM_JADE);
to--;
if (ret & 1)
/* Got acknowledge */
break;
if (!to) {
printk(KERN_INFO "Can not see ready bit from JADE DSP (reg=0x%X, value=0x%X)\n", reg, value);
return;
}
}
}
static void
modejade(struct BCState *bcs, int mode, int bc)
{
struct IsdnCardState *cs = bcs->cs;
int jade = bcs->hw.hscx.hscx;
if (cs->debug & L1_DEB_HSCX) {
char tmp[40];
sprintf(tmp, "jade %c mode %d ichan %d",
'A' + jade, mode, bc);
debugl1(cs, tmp);
}
bcs->mode = mode;
bcs->channel = bc;
cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, (mode == L1_MODE_TRANS ? jadeMODE_TMO:0x00));
cs->BC_Write_Reg(cs, jade, jade_HDLC_CCR0, (jadeCCR0_PU|jadeCCR0_ITF));
cs->BC_Write_Reg(cs, jade, jade_HDLC_CCR1, 0x00);
jade_write_indirect(cs, jade_HDLC1SERRXPATH, 0x08);
jade_write_indirect(cs, jade_HDLC2SERRXPATH, 0x08);
jade_write_indirect(cs, jade_HDLC1SERTXPATH, 0x00);
jade_write_indirect(cs, jade_HDLC2SERTXPATH, 0x00);
cs->BC_Write_Reg(cs, jade, jade_HDLC_XCCR, 0x07);
cs->BC_Write_Reg(cs, jade, jade_HDLC_RCCR, 0x07);
if (bc == 0) {
cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAX, 0x00);
cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAR, 0x00);
} else {
cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAX, 0x04);
cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAR, 0x04);
}
switch (mode) {
case (L1_MODE_NULL):
cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, jadeMODE_TMO);
break;
case (L1_MODE_TRANS):
cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, (jadeMODE_TMO|jadeMODE_RAC|jadeMODE_XAC));
break;
case (L1_MODE_HDLC):
cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, (jadeMODE_RAC|jadeMODE_XAC));
break;
}
if (mode) {
cs->BC_Write_Reg(cs, jade, jade_HDLC_RCMD, (jadeRCMD_RRES|jadeRCMD_RMC));
cs->BC_Write_Reg(cs, jade, jade_HDLC_XCMD, jadeXCMD_XRES);
/* Unmask ints */
cs->BC_Write_Reg(cs, jade, jade_HDLC_IMR, 0xF8);
}
else
/* Mask ints */
cs->BC_Write_Reg(cs, jade, jade_HDLC_IMR, 0x00);
}
static void
jade_l2l1(struct PStack *st, int pr, void *arg)
{
struct BCState *bcs = st->l1.bcs;
struct sk_buff *skb = arg;
u_long flags;
switch (pr) {
case (PH_DATA | REQUEST):
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->tx_skb) {
skb_queue_tail(&bcs->squeue, skb);
} else {
bcs->tx_skb = skb;
test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->hw.hscx.count = 0;
bcs->cs->BC_Send_Data(bcs);
}
spin_unlock_irqrestore(&bcs->cs->lock, flags);
break;
case (PH_PULL | INDICATION):
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->tx_skb) {
printk(KERN_WARNING "jade_l2l1: this shouldn't happen\n");
} else {
test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->tx_skb = skb;
bcs->hw.hscx.count = 0;
bcs->cs->BC_Send_Data(bcs);
}
spin_unlock_irqrestore(&bcs->cs->lock, flags);
break;
case (PH_PULL | REQUEST):
if (!bcs->tx_skb) {
test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
} else
test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
break;
case (PH_ACTIVATE | REQUEST):
spin_lock_irqsave(&bcs->cs->lock, flags);
test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
modejade(bcs, st->l1.mode, st->l1.bc);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
l1_msg_b(st, pr, arg);
break;
case (PH_DEACTIVATE | REQUEST):
l1_msg_b(st, pr, arg);
break;
case (PH_DEACTIVATE | CONFIRM):
spin_lock_irqsave(&bcs->cs->lock, flags);
test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
modejade(bcs, 0, st->l1.bc);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
break;
}
}
static void
close_jadestate(struct BCState *bcs)
{
modejade(bcs, 0, bcs->channel);
if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
kfree(bcs->hw.hscx.rcvbuf);
bcs->hw.hscx.rcvbuf = NULL;
kfree(bcs->blog);
bcs->blog = NULL;
skb_queue_purge(&bcs->rqueue);
skb_queue_purge(&bcs->squeue);
if (bcs->tx_skb) {
dev_kfree_skb_any(bcs->tx_skb);
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
}
}
}
static int
open_jadestate(struct IsdnCardState *cs, struct BCState *bcs)
{
if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
printk(KERN_WARNING
"HiSax: No memory for hscx.rcvbuf\n");
test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
return (1);
}
if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
printk(KERN_WARNING
"HiSax: No memory for bcs->blog\n");
test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
kfree(bcs->hw.hscx.rcvbuf);
bcs->hw.hscx.rcvbuf = NULL;
return (2);
}
skb_queue_head_init(&bcs->rqueue);
skb_queue_head_init(&bcs->squeue);
}
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->event = 0;
bcs->hw.hscx.rcvidx = 0;
bcs->tx_cnt = 0;
return (0);
}
static int
setstack_jade(struct PStack *st, struct BCState *bcs)
{
bcs->channel = st->l1.bc;
if (open_jadestate(st->l1.hardware, bcs))
return (-1);
st->l1.bcs = bcs;
st->l2.l2l1 = jade_l2l1;
setstack_manager(st);
bcs->st = st;
setstack_l1_B(st);
return (0);
}
void
clear_pending_jade_ints(struct IsdnCardState *cs)
{
int val;
char tmp[64];
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
sprintf(tmp, "jade B ISTA %x", val);
debugl1(cs, tmp);
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
sprintf(tmp, "jade A ISTA %x", val);
debugl1(cs, tmp);
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
sprintf(tmp, "jade B STAR %x", val);
debugl1(cs, tmp);
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
sprintf(tmp, "jade A STAR %x", val);
debugl1(cs, tmp);
/* Unmask ints */
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
}
void
initjade(struct IsdnCardState *cs)
{
cs->bcs[0].BC_SetStack = setstack_jade;
cs->bcs[1].BC_SetStack = setstack_jade;
cs->bcs[0].BC_Close = close_jadestate;
cs->bcs[1].BC_Close = close_jadestate;
cs->bcs[0].hw.hscx.hscx = 0;
cs->bcs[1].hw.hscx.hscx = 1;
/* Stop DSP audio tx/rx */
jade_write_indirect(cs, 0x11, 0x0f);
jade_write_indirect(cs, 0x17, 0x2f);
/* Transparent Mode, RxTx inactive, No Test, No RFS/TFS */
cs->BC_Write_Reg(cs, 0, jade_HDLC_MODE, jadeMODE_TMO);
cs->BC_Write_Reg(cs, 1, jade_HDLC_MODE, jadeMODE_TMO);
/* Power down, 1-Idle, RxTx least significant bit first */
cs->BC_Write_Reg(cs, 0, jade_HDLC_CCR0, 0x00);
cs->BC_Write_Reg(cs, 1, jade_HDLC_CCR0, 0x00);
/* Mask all interrupts */
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
/* Setup host access to hdlc controller */
jade_write_indirect(cs, jade_HDLCCNTRACCESS, (jadeINDIRECT_HAH1|jadeINDIRECT_HAH2));
/* Unmask HDLC int (don't forget DSP int later on)*/
cs->BC_Write_Reg(cs, -1,jade_INT, (jadeINT_HDLC1|jadeINT_HDLC2));
/* once again TRANSPARENT */
modejade(cs->bcs, 0, 0);
modejade(cs->bcs + 1, 0, 0);
}
| gpl-2.0 |
xiaoleigua/linux | drivers/media/platform/vsp1/vsp1_hsit.c | 815 | 5623 | /*
* vsp1_hsit.c -- R-Car VSP1 Hue Saturation value (Inverse) Transform
*
* Copyright (C) 2013 Renesas Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/gfp.h>
#include <media/v4l2-subdev.h>
#include "vsp1.h"
#include "vsp1_hsit.h"
#define HSIT_MIN_SIZE 4U
#define HSIT_MAX_SIZE 8190U
/* -----------------------------------------------------------------------------
* Device Access
*/
static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
{
vsp1_write(hsit->entity.vsp1, reg, data);
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Core Operations
*/
static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
if (!enable)
return 0;
if (hsit->inverse)
vsp1_hsit_write(hsit, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
else
vsp1_hsit_write(hsit, VI6_HST_CTRL, VI6_HST_CTRL_EN);
return 0;
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations
*/
static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
if (code->index > 0)
return -EINVAL;
if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) ||
(code->pad == HSIT_PAD_SOURCE && hsit->inverse))
code->code = MEDIA_BUS_FMT_ARGB8888_1X32;
else
code->code = MEDIA_BUS_FMT_AHSV8888_1X32;
return 0;
}
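/*
 * Summary of the pad/format relationship implemented above (added for
 * clarity, not in the original source):
 *
 * HST (inverse == false): sink = ARGB8888, source = AHSV8888
 * HSI (inverse == true): sink = AHSV8888, source = ARGB8888
 *
 * i.e. the forward transform converts RGB to HSV and the inverse
 * transform converts HSV back to RGB.
 */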
static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
struct v4l2_mbus_framefmt *format;
format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fse->pad,
fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
if (fse->pad == HSIT_PAD_SINK) {
fse->min_width = HSIT_MIN_SIZE;
fse->max_width = HSIT_MAX_SIZE;
fse->min_height = HSIT_MIN_SIZE;
fse->max_height = HSIT_MAX_SIZE;
} else {
/* The sizes on the source pad are fixed and always identical to
* the sizes on the sink pad.
*/
fse->min_width = format->width;
fse->max_width = format->width;
fse->min_height = format->height;
fse->max_height = format->height;
}
return 0;
}
static int hsit_get_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
static int hsit_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
struct v4l2_mbus_framefmt *format;
format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
fmt->which);
if (fmt->pad == HSIT_PAD_SOURCE) {
/* The HST and HSI output format code and resolution can't be
* modified.
*/
fmt->format = *format;
return 0;
}
format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
: MEDIA_BUS_FMT_ARGB8888_1X32;
format->width = clamp_t(unsigned int, fmt->format.width,
HSIT_MIN_SIZE, HSIT_MAX_SIZE);
format->height = clamp_t(unsigned int, fmt->format.height,
HSIT_MIN_SIZE, HSIT_MAX_SIZE);
format->field = V4L2_FIELD_NONE;
format->colorspace = V4L2_COLORSPACE_SRGB;
fmt->format = *format;
/* Propagate the format to the source pad. */
format = vsp1_entity_get_pad_format(&hsit->entity, cfg, HSIT_PAD_SOURCE,
fmt->which);
*format = fmt->format;
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
: MEDIA_BUS_FMT_AHSV8888_1X32;
return 0;
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
static struct v4l2_subdev_video_ops hsit_video_ops = {
.s_stream = hsit_s_stream,
};
static struct v4l2_subdev_pad_ops hsit_pad_ops = {
.enum_mbus_code = hsit_enum_mbus_code,
.enum_frame_size = hsit_enum_frame_size,
.get_fmt = hsit_get_format,
.set_fmt = hsit_set_format,
};
static struct v4l2_subdev_ops hsit_ops = {
.video = &hsit_video_ops,
.pad = &hsit_pad_ops,
};
/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
{
struct v4l2_subdev *subdev;
struct vsp1_hsit *hsit;
int ret;
hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL);
if (hsit == NULL)
return ERR_PTR(-ENOMEM);
hsit->inverse = inverse;
if (inverse)
hsit->entity.type = VSP1_ENTITY_HSI;
else
hsit->entity.type = VSP1_ENTITY_HST;
ret = vsp1_entity_init(vsp1, &hsit->entity, 2);
if (ret < 0)
return ERR_PTR(ret);
/* Initialize the V4L2 subdev. */
subdev = &hsit->entity.subdev;
v4l2_subdev_init(subdev, &hsit_ops);
subdev->entity.ops = &vsp1_media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s %s",
dev_name(vsp1->dev), inverse ? "hsi" : "hst");
v4l2_set_subdevdata(subdev, hsit);
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
vsp1_entity_init_formats(subdev, NULL);
return hsit;
}
| gpl-2.0 |
Stane1983/amlogic-m6_m8 | net/mac80211/tkip.c | 2351 | 10849 | /*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "driver-ops.h"
#include "key.h"
#include "tkip.h"
#include "wep.h"
#define PHASE1_LOOP_COUNT 8
/*
* 2-byte by 2-byte subset of the full AES S-box table; second part of this
* table is identical to first part but byte-swapped
*/
static const u16 tkip_sbox[256] =
{
0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
};
static u16 tkipS(u16 val)
{
return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]);
}
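/*
 * Comment added for clarity (not in the original source): the three IV
 * octets written below follow the TKIP layout from IEEE 802.11: TSC1,
 * then the "WEPSeed" dummy byte (TSC1 | 0x20) & 0x7f, chosen to avoid
 * the known weak RC4 key classes, then TSC0.
 */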
static u8 *write_tkip_iv(u8 *pos, u16 iv16)
{
*pos++ = iv16 >> 8;
*pos++ = ((iv16 >> 8) | 0x20) & 0x7f;
*pos++ = iv16 & 0xFF;
return pos;
}
/*
* P1K := Phase1(TA, TK, TSC)
* TA = transmitter address (48 bits)
* TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits)
* TSC = TKIP sequence counter (48 bits, only 32 msb bits used)
* P1K: 80 bits
*/
static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
const u8 *ta, u32 tsc_IV32)
{
int i, j;
u16 *p1k = ctx->p1k;
p1k[0] = tsc_IV32 & 0xFFFF;
p1k[1] = tsc_IV32 >> 16;
p1k[2] = get_unaligned_le16(ta + 0);
p1k[3] = get_unaligned_le16(ta + 2);
p1k[4] = get_unaligned_le16(ta + 4);
for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
j = 2 * (i & 1);
p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j));
p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j));
p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j));
p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
}
ctx->state = TKIP_STATE_PHASE1_DONE;
ctx->p1k_iv32 = tsc_IV32;
}
static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
u16 tsc_IV16, u8 *rc4key)
{
u16 ppk[6];
const u16 *p1k = ctx->p1k;
int i;
ppk[0] = p1k[0];
ppk[1] = p1k[1];
ppk[2] = p1k[2];
ppk[3] = p1k[3];
ppk[4] = p1k[4];
ppk[5] = p1k[4] + tsc_IV16;
ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0));
ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2));
ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4));
ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6));
ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8));
ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10));
ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1);
ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1);
ppk[2] += ror16(ppk[1], 1);
ppk[3] += ror16(ppk[2], 1);
ppk[4] += ror16(ppk[3], 1);
ppk[5] += ror16(ppk[4], 1);
rc4key = write_tkip_iv(rc4key, tsc_IV16);
*rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF;
for (i = 0; i < 6; i++)
put_unaligned_le16(ppk[i], rc4key + 2 * i);
}
/* Add TKIP IV and Ext. IV at @pos. Returns a pointer to the octet
* following the IVs (i.e., the beginning of the packet payload). */
u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key)
{
lockdep_assert_held(&key->u.tkip.txlock);
pos = write_tkip_iv(pos, key->u.tkip.tx.iv16);
*pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
put_unaligned_le32(key->u.tkip.tx.iv32, pos);
return pos + 4;
}
static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32)
{
struct ieee80211_sub_if_data *sdata = key->sdata;
struct tkip_ctx *ctx = &key->u.tkip.tx;
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
lockdep_assert_held(&key->u.tkip.txlock);
/*
* Update the P1K when the IV32 is different from the value it
* had when we last computed it (or when not initialised yet).
* This might flip-flop back and forth if packets are processed
* out-of-order due to the different ACs, but then we have to
* just compute the P1K more often.
*/
if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)
tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);
}
void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
u32 iv32, u16 *p1k)
{
struct ieee80211_key *key = (struct ieee80211_key *)
container_of(keyconf, struct ieee80211_key, conf);
struct tkip_ctx *ctx = &key->u.tkip.tx;
spin_lock_bh(&key->u.tkip.txlock);
ieee80211_compute_tkip_p1k(key, iv32);
memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
spin_unlock_bh(&key->u.tkip.txlock);
}
EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
const u8 *ta, u32 iv32, u16 *p1k)
{
const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
struct tkip_ctx ctx;
tkip_mixing_phase1(tk, &ctx, ta, iv32);
memcpy(p1k, ctx.p1k, sizeof(ctx.p1k));
}
EXPORT_SYMBOL(ieee80211_get_tkip_rx_p1k);
void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k)
{
struct ieee80211_key *key = (struct ieee80211_key *)
container_of(keyconf, struct ieee80211_key, conf);
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
struct tkip_ctx *ctx = &key->u.tkip.tx;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
u32 iv32 = get_unaligned_le32(&data[4]);
u16 iv16 = data[2] | (data[0] << 8);
spin_lock(&key->u.tkip.txlock);
ieee80211_compute_tkip_p1k(key, iv32);
tkip_mixing_phase2(tk, ctx, iv16, p2k);
spin_unlock(&key->u.tkip.txlock);
}
EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
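/*
 * Illustrative sketch (not part of the original source): a driver that
 * offloads the RC4 cipher but not the TKIP key mixing could derive the
 * per-packet key for each outgoing frame like this (the p2k buffer is
 * 16 octets, as in ieee80211_tkip_encrypt_data() below):
 *
 * u8 p2k[16];
 *
 * ieee80211_get_tkip_p2k(&key->conf, skb, p2k);
 * ... program p2k into the hardware RC4 engine ...
 */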
/*
* Encrypt packet payload with TKIP using @key. @payload is a pointer to
* the beginning of the buffer containing the payload. This payload must
* include the IV/Ext.IV and space (tailroom) for the four-octet ICV.
* @payload_len is the length of the payload (_not_ including IV/ICV
* length).
*/
int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
struct ieee80211_key *key,
struct sk_buff *skb,
u8 *payload, size_t payload_len)
{
u8 rc4key[16];
ieee80211_get_tkip_p2k(&key->conf, skb, rc4key);
return ieee80211_wep_encrypt_data(tfm, rc4key, 16,
payload, payload_len);
}
/* Decrypt packet payload with TKIP using @key. @payload is a pointer to
* the beginning of the buffer containing the IEEE 802.11 header payload,
* i.e., including IV, Ext. IV, real data, Michael MIC, ICV.
* @payload_len is the length of the payload, including IV, Ext. IV, MIC,
* ICV. */
int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
struct ieee80211_key *key,
u8 *payload, size_t payload_len, u8 *ta,
u8 *ra, int only_iv, int queue,
u32 *out_iv32, u16 *out_iv16)
{
u32 iv32;
u32 iv16;
u8 rc4key[16], keyid, *pos = payload;
int res;
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
if (payload_len < 12)
return -1;
iv16 = (pos[0] << 8) | pos[2];
keyid = pos[3];
iv32 = get_unaligned_le32(pos + 4);
pos += 8;
if (!(keyid & (1 << 5)))
return TKIP_DECRYPT_NO_EXT_IV;
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
(iv32 < key->u.tkip.rx[queue].iv32 ||
(iv32 == key->u.tkip.rx[queue].iv32 &&
iv16 <= key->u.tkip.rx[queue].iv16)))
return TKIP_DECRYPT_REPLAY;
if (only_iv) {
res = TKIP_DECRYPT_OK;
key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
goto done;
}
if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
key->u.tkip.rx[queue].iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
}
if (key->local->ops->update_tkip_key &&
key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
struct ieee80211_sub_if_data *sdata = key->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(key->sdata->bss,
struct ieee80211_sub_if_data, u.ap);
drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
iv32, key->u.tkip.rx[queue].p1k);
key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
}
tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
done:
if (res == TKIP_DECRYPT_OK) {
/*
* Record previously received IV, will be copied into the
* key information after MIC verification. It is possible
* that we don't catch replays of fragments but that's ok
* because the Michael MIC verification will then fail.
*/
*out_iv32 = iv32;
*out_iv16 = iv16;
}
return res;
}
| gpl-2.0 |
varigit/VAR-SOM-AM33-SDK7-Kernel | drivers/fmc/fmc-match.c | 2351 | 3166 | /*
* Copyright (C) 2012 CERN (www.cern.ch)
* Author: Alessandro Rubini <rubini@gnudd.com>
*
* Released according to the GNU GPL, version 2 or any later version.
*
* This work is part of the White Rabbit project, a research effort led
* by CERN, the European Institute for Nuclear Research.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fmc.h>
#include <linux/ipmi-fru.h>
/* The fru parser is both user and kernel capable: it needs alloc */
void *fru_alloc(size_t size)
{
return kzalloc(size, GFP_KERNEL);
}
/* The actual match function */
int fmc_match(struct device *dev, struct device_driver *drv)
{
struct fmc_driver *fdrv = to_fmc_driver(drv);
struct fmc_device *fdev = to_fmc_device(dev);
struct fmc_fru_id *fid;
int i, matched = 0;
/* This currently only matches the EEPROM (FRU id) */
fid = fdrv->id_table.fru_id;
if (!fid) {
dev_warn(&fdev->dev, "Driver has no ID: matches all\n");
matched = 1;
} else {
if (!fdev->id.manufacturer || !fdev->id.product_name)
return 0; /* the device has no FRU information */
for (i = 0; i < fdrv->id_table.fru_id_nr; i++, fid++) {
if (fid->manufacturer &&
strcmp(fid->manufacturer, fdev->id.manufacturer))
continue;
if (fid->product_name &&
strcmp(fid->product_name, fdev->id.product_name))
continue;
matched = 1;
break;
}
}
/* FIXME: match SDB contents */
return matched;
}
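/*
 * Illustrative sketch (not from the original source): the id_table that
 * fmc_match() consumes would be filled in by a mezzanine driver roughly
 * like this; the strings and names are hypothetical:
 *
 * static struct fmc_fru_id my_fru_id[] = {
 *         { .manufacturer = "fmc-example", .product_name = "mezz-foo" },
 * };
 *
 * static struct fmc_driver my_fmc_driver = {
 *         .id_table = {
 *                 .fru_id = my_fru_id,
 *                 .fru_id_nr = ARRAY_SIZE(my_fru_id),
 *         },
 *         ...
 * };
 */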
/* This function creates ID info for a newly registered device */
int fmc_fill_id_info(struct fmc_device *fmc)
{
struct fru_common_header *h;
struct fru_board_info_area *bia;
int ret, allocated = 0;
/* If we know the eeprom length, try to read it off the device */
if (fmc->eeprom_len && !fmc->eeprom) {
fmc->eeprom = kzalloc(fmc->eeprom_len, GFP_KERNEL);
if (!fmc->eeprom)
return -ENOMEM;
allocated = 1;
ret = fmc->op->read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
if (ret < 0)
goto out;
}
/* If no eeprom, continue with other matches */
if (!fmc->eeprom)
return 0;
dev_info(fmc->hwdev, "mezzanine %i\n", fmc->slot_id); /* header */
/* So we have the eeprom: parse the FRU part (if any) */
h = (void *)fmc->eeprom;
if (h->format != 1) {
pr_info(" EEPROM has no FRU information\n");
goto out;
}
if (!fru_header_cksum_ok(h)) {
pr_info(" FRU: wrong header checksum\n");
goto out;
}
bia = fru_get_board_area(h);
if (!fru_bia_cksum_ok(bia)) {
pr_info(" FRU: wrong board area checksum\n");
goto out;
}
fmc->id.manufacturer = fru_get_board_manufacturer(h);
fmc->id.product_name = fru_get_product_name(h);
pr_info(" Manufacturer: %s\n", fmc->id.manufacturer);
pr_info(" Product name: %s\n", fmc->id.product_name);
/* Create the short name (FIXME: look in sdb as well) */
fmc->mezzanine_name = kstrdup(fmc->id.product_name, GFP_KERNEL);
out:
if (allocated) {
kfree(fmc->eeprom);
fmc->eeprom = NULL;
}
return 0; /* no error: let other identification work */
}
/* Some ID data is allocated using fru_alloc() above, so release it */
void fmc_free_id_info(struct fmc_device *fmc)
{
kfree(fmc->mezzanine_name);
kfree(fmc->id.manufacturer);
kfree(fmc->id.product_name);
}
| gpl-2.0 |
xhteam/kernel_imx | arch/powerpc/platforms/pseries/eeh_sysfs.c | 2863 | 3539 | /*
* Sysfs entries for PCI Error Recovery for PAPR-compliant platform.
* Copyright IBM Corporation 2007
* Copyright Linas Vepstas <linas@austin.ibm.com> 2007
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
#include <linux/pci.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
/**
* EEH_SHOW_ATTR -- create sysfs entry for an EEH statistic
* @_name: name of file in sysfs directory
* @_memb: name of member in struct pci_dn to access
* @_format: printf format for display
*
* All of the attributes look very similar, so just
* auto-gen a cut-n-paste routine to display them.
*/
#define EEH_SHOW_ATTR(_name,_memb,_format) \
static ssize_t eeh_show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
struct device_node *dn = pci_device_to_OF_node(pdev); \
struct pci_dn *pdn; \
\
if (!dn || PCI_DN(dn) == NULL) \
return 0; \
\
pdn = PCI_DN(dn); \
return sprintf(buf, _format "\n", pdn->_memb); \
} \
static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
EEH_SHOW_ATTR(eeh_mode, eeh_mode, "0x%x");
EEH_SHOW_ATTR(eeh_config_addr, eeh_config_addr, "0x%x");
EEH_SHOW_ATTR(eeh_pe_config_addr, eeh_pe_config_addr, "0x%x");
EEH_SHOW_ATTR(eeh_check_count, eeh_check_count, "%d");
EEH_SHOW_ATTR(eeh_freeze_count, eeh_freeze_count, "%d");
EEH_SHOW_ATTR(eeh_false_positives, eeh_false_positives, "%d");
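/*
* For reference, EEH_SHOW_ATTR(eeh_mode, eeh_mode, "0x%x") above expands
* to approximately the following (a sketch of the generated code, not an
* extra definition):
*
* static ssize_t eeh_show_eeh_mode(struct device *dev,
* struct device_attribute *attr, char *buf)
* {
* ...
* return sprintf(buf, "0x%x\n", pdn->eeh_mode);
* }
* static DEVICE_ATTR(eeh_mode, S_IRUGO, eeh_show_eeh_mode, NULL);
*/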
void eeh_sysfs_add_device(struct pci_dev *pdev)
{
int rc = 0;
rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count);
if (rc)
printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
}
void eeh_sysfs_remove_device(struct pci_dev *pdev)
{
device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_check_count);
device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives);
device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count);
}
| gpl-2.0 |
LeMaker/linux-actions | drivers/media/platform/davinci/isif.c | 3119 | 30441 | /*
* Copyright (C) 2008-2009 Texas Instruments Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Image Sensor Interface (ISIF) driver
*
* This driver configures the ISIF IP available on the DM365 and other
* TI SoCs. It is used for capturing YUV or Bayer video or image data
* from a decoder or sensor. This IP is similar to the CCDC IP on the
* DM355 and DM6446, but with enhanced or additional IP blocks. The
* driver configures the ISIF upon commands from the vpfe bridge driver
* through the ccdc_hw_device interface.
*
* TODO: 1) Raw bayer parameter settings and bayer capture
* 2) Add support for control ioctl
*/
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/videodev2.h>
#include <linux/err.h>
#include <linux/module.h>
#include <mach/mux.h>
#include <media/davinci/isif.h>
#include <media/davinci/vpss.h>
#include "isif_regs.h"
#include "ccdc_hw_device.h"
/* Defaults for module configuration parameters */
static struct isif_config_params_raw isif_config_defaults = {
.linearize = {
.en = 0,
.corr_shft = ISIF_NO_SHIFT,
.scale_fact = {1, 0},
},
.df_csc = {
.df_or_csc = 0,
.csc = {
.en = 0,
},
},
.dfc = {
.en = 0,
},
.bclamp = {
.en = 0,
},
.gain_offset = {
.gain = {
.r_ye = {1, 0},
.gr_cy = {1, 0},
.gb_g = {1, 0},
.b_mg = {1, 0},
},
},
.culling = {
.hcpat_odd = 0xff,
.hcpat_even = 0xff,
.vcpat = 0xff,
},
.compress = {
.alg = ISIF_ALAW,
},
};
/* ISIF operation configuration */
static struct isif_oper_config {
struct device *dev;
enum vpfe_hw_if_type if_type;
struct isif_ycbcr_config ycbcr;
struct isif_params_raw bayer;
enum isif_data_pack data_pack;
/* ISIF base address */
void __iomem *base_addr;
/* ISIF Linear Table 0 */
void __iomem *linear_tbl0_addr;
/* ISIF Linear Table 1 */
void __iomem *linear_tbl1_addr;
} isif_cfg = {
.ycbcr = {
.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
.frm_fmt = CCDC_FRMFMT_INTERLACED,
.win = ISIF_WIN_NTSC,
.fid_pol = VPFE_PINPOL_POSITIVE,
.vd_pol = VPFE_PINPOL_POSITIVE,
.hd_pol = VPFE_PINPOL_POSITIVE,
.pix_order = CCDC_PIXORDER_CBYCRY,
.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED,
},
.bayer = {
.pix_fmt = CCDC_PIXFMT_RAW,
.frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
.win = ISIF_WIN_VGA,
.fid_pol = VPFE_PINPOL_POSITIVE,
.vd_pol = VPFE_PINPOL_POSITIVE,
.hd_pol = VPFE_PINPOL_POSITIVE,
.gain = {
.r_ye = {1, 0},
.gr_cy = {1, 0},
.gb_g = {1, 0},
.b_mg = {1, 0},
},
.cfa_pat = ISIF_CFA_PAT_MOSAIC,
.data_msb = ISIF_BIT_MSB_11,
.config_params = {
.data_shift = ISIF_NO_SHIFT,
.col_pat_field0 = {
.olop = ISIF_GREEN_BLUE,
.olep = ISIF_BLUE,
.elop = ISIF_RED,
.elep = ISIF_GREEN_RED,
},
.col_pat_field1 = {
.olop = ISIF_GREEN_BLUE,
.olep = ISIF_BLUE,
.elop = ISIF_RED,
.elep = ISIF_GREEN_RED,
},
.test_pat_gen = 0,
},
},
.data_pack = ISIF_DATA_PACK8,
};
/* Raw Bayer formats */
static const u32 isif_raw_bayer_pix_formats[] = {
V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
/* Raw YUV formats */
static const u32 isif_raw_yuv_pix_formats[] = {
V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
/* register access routines */
static inline u32 regr(u32 offset)
{
return __raw_readl(isif_cfg.base_addr + offset);
}
static inline void regw(u32 val, u32 offset)
{
__raw_writel(val, isif_cfg.base_addr + offset);
}
/* reg_modify() - read, modify and write register */
static inline u32 reg_modify(u32 mask, u32 val, u32 offset)
{
u32 new_val = (regr(offset) & ~mask) | (val & mask);
regw(new_val, offset);
return new_val;
}
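/*
* Example (illustrative values): if a register currently reads 0x8f01,
* then reg_modify(0x0f00, 0x0300, offset) writes back
* (0x8f01 & ~0x0f00) | (0x0300 & 0x0f00) == 0x8301; only the masked
* bits change, everything else is preserved.
*/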
static inline void regw_lin_tbl(u32 val, u32 offset, int i)
{
if (!i)
__raw_writel(val, isif_cfg.linear_tbl0_addr + offset);
else
__raw_writel(val, isif_cfg.linear_tbl1_addr + offset);
}
static void isif_disable_all_modules(void)
{
/* disable BC */
regw(0, CLAMPCFG);
/* disable vdfc */
regw(0, DFCCTL);
/* disable CSC */
regw(0, CSCCTL);
/* disable linearization */
regw(0, LINCFG0);
/* disable other modules here as they are supported */
}
static void isif_enable(int en)
{
if (!en) {
/* Before disabling the ISIF, disable all ISIF modules */
isif_disable_all_modules();
/*
* wait for next VD. Assume lowest scan rate is 12 Hz. So
* 100 msec delay is good enough
*/
msleep(100);
}
reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN);
}
static void isif_enable_output_to_sdram(int en)
{
reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
}
static void isif_config_culling(struct isif_cul *cul)
{
u32 val;
/* Horizontal pattern */
val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd;
regw(val, CULH);
/* vertical pattern */
regw(cul->vcpat, CULV);
/* LPF */
reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT,
cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
}
static void isif_config_gain_offset(void)
{
struct isif_gain_offsets_adj *gain_off_p =
&isif_cfg.bayer.config_params.gain_offset;
u32 val;
val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) |
(!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) |
(!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) |
(!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) |
(!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) |
(!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT);
reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) |
gain_off_p->gain.r_ye.decimal;
regw(val, CRGAIN);
val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) |
gain_off_p->gain.gr_cy.decimal;
regw(val, CGRGAIN);
val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) |
gain_off_p->gain.gb_g.decimal;
regw(val, CGBGAIN);
val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) |
gain_off_p->gain.b_mg.decimal;
regw(val, CBGAIN);
regw(gain_off_p->offset, COFSTA);
}
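/*
* The gain registers hold fixed-point values: the integer part sits
* above GAIN_INTEGER_SHIFT and the fractional part fills the low bits.
* A sketch, assuming a 9-bit fractional field (GAIN_INTEGER_SHIFT == 9):
* a gain of 1.5 is {.integer = 1, .decimal = 256}, encoded as
* (1 << 9) | 256 == 0x300, since 256/512 == 0.5.
*/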
static void isif_restore_defaults(void)
{
enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults...");
isif_cfg.bayer.config_params = isif_config_defaults;
/* Enable clock to ISIF, IPIPEIF and BL */
vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
vpss_enable_clock(VPSS_BL_CLOCK, 1);
/* Set default offset and gain */
isif_config_gain_offset();
vpss_select_ccdc_source(source);
dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults...");
}
static int isif_open(struct device *device)
{
isif_restore_defaults();
return 0;
}
/* This function configures the window size to be captured in the ISIF registers */
static void isif_setwin(struct v4l2_rect *image_win,
enum ccdc_frmfmt frm_fmt, int ppc)
{
int horz_start, horz_nr_pixels;
int vert_start, vert_nr_lines;
int mid_img = 0;
dev_dbg(isif_cfg.dev, "\nStarting isif_setwin...");
/*
* ppc - per pixel count: how many samples are output to SDRAM for
* each pixel. For example, for YCbCr it is one Y and one C, so 2;
* for raw capture it is 1.
*/
horz_start = image_win->left << (ppc - 1);
horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
/* Writing the horizontal info into the registers */
regw(horz_start & START_PX_HOR_MASK, SPH);
regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
vert_start = image_win->top;
if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
vert_nr_lines = (image_win->height >> 1) - 1;
vert_start >>= 1;
/* To account for VD since line 0 doesn't have any data */
vert_start += 1;
} else {
/* To account for VD since line 0 doesn't have any data */
vert_start += 1;
vert_nr_lines = image_win->height - 1;
/* configure VDINT0 and VDINT1 */
mid_img = vert_start + (image_win->height / 2);
regw(mid_img, VDINT1);
}
regw(0, VDINT0);
regw(vert_start & START_VER_ONE_MASK, SLV0);
regw(vert_start & START_VER_TWO_MASK, SLV1);
regw(vert_nr_lines & NUM_LINES_VER, LNV);
}
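/*
* Worked example (illustrative): a 720x480 interlaced YCbCr window at
* (0,0) with ppc == 2 programs SPH = 0, LNH = (720 << 1) - 1 = 1439,
* SLV0/SLV1 = (0 >> 1) + 1 = 1 and LNV = (480 >> 1) - 1 = 239: pixel
* counts are doubled for the Y/C pairs and line counts are per field.
*/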
static void isif_config_bclamp(struct isif_black_clamp *bc)
{
u32 val;
/*
* DC Offset is always added to image data irrespective of bc enable
* status
*/
regw(bc->dc_offset, CLDCOFST);
if (bc->en) {
val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
/* Enable BC and horizontal clamp calculation parameters */
val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
regw(val, CLAMPCFG);
if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) {
/*
* Window count for calculation
* Base window selection
* pixel limit
* Horizontal size of window
* vertical size of the window
* Horizontal start position of the window
* Vertical start position of the window
*/
val = bc->horz.win_count_calc |
((!!bc->horz.base_win_sel_calc) <<
ISIF_HORZ_BC_WIN_SEL_SHIFT) |
((!!bc->horz.clamp_pix_limit) <<
ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
(bc->horz.win_h_sz_calc <<
ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
(bc->horz.win_v_sz_calc <<
ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
regw(val, CLHWIN0);
regw(bc->horz.win_start_h_calc, CLHWIN1);
regw(bc->horz.win_start_v_calc, CLHWIN2);
}
/* vertical clamp calculation parameters */
/* Reset clamp value sel for previous line */
val |=
(bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) |
(bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT);
regw(val, CLVWIN0);
/* Optical Black horizontal start position */
regw(bc->vert.ob_start_h, CLVWIN1);
/* Optical Black vertical start position */
regw(bc->vert.ob_start_v, CLVWIN2);
/* Optical Black vertical size for calculation */
regw(bc->vert.ob_v_sz_calc, CLVWIN3);
/* Vertical start position for BC subtraction */
regw(bc->vert_start_sub, CLSV);
}
}
static void isif_config_linearization(struct isif_linearize *linearize)
{
u32 val, i;
if (!linearize->en) {
regw(0, LINCFG0);
return;
}
/* shift value for correction & enable linearization (set lsb) */
val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1;
regw(val, LINCFG0);
/* Scale factor */
val = ((!!linearize->scale_fact.integer) <<
ISIF_LIN_SCALE_FACT_INTEG_SHIFT) |
linearize->scale_fact.decimal;
regw(val, LINCFG1);
for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) {
if (i % 2)
regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1);
else
regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0);
}
}
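/*
* Note on the loop above: even-indexed table entries go to linearization
* table 0 and odd-indexed entries to table 1, each at byte offset
* (i >> 1) << 2, so consecutive entries are interleaved across the two
* tables (table[0] and table[1] both land at offset 0, table[2] and
* table[3] at offset 4, and so on).
*/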
static int isif_config_dfc(struct isif_dfc *vdfc)
{
/* initialize retries to loop for max ~ 250 usec */
u32 val, count, retries = loops_per_jiffy / (4000/HZ);
int i;
if (!vdfc->en)
return 0;
/* Correction mode */
val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT);
/* Correct whole line or partial */
if (vdfc->corr_whole_line)
val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
/* level shift value */
val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT;
regw(val, DFCCTL);
/* Defect saturation level */
regw(vdfc->def_sat_level, VDFSATLV);
regw(vdfc->table[0].pos_vert, DFCMEM0);
regw(vdfc->table[0].pos_horz, DFCMEM1);
if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
regw(vdfc->table[0].level_at_pos, DFCMEM2);
regw(vdfc->table[0].level_up_pixels, DFCMEM3);
regw(vdfc->table[0].level_low_pixels, DFCMEM4);
}
/* set DFCMARST and set DFCMWR */
val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1;
regw(val, DFCMEMCTL);
count = retries;
while (count && (regr(DFCMEMCTL) & 0x1))
count--;
if (!count) {
dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n");
return -1;
}
for (i = 1; i < vdfc->num_vdefects; i++) {
regw(vdfc->table[i].pos_vert, DFCMEM0);
regw(vdfc->table[i].pos_horz, DFCMEM1);
if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
regw(vdfc->table[i].level_at_pos, DFCMEM2);
regw(vdfc->table[i].level_up_pixels, DFCMEM3);
regw(vdfc->table[i].level_low_pixels, DFCMEM4);
}
val = regr(DFCMEMCTL);
/* clear DFCMARST and set DFCMWR */
val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
val |= 1;
regw(val, DFCMEMCTL);
count = retries;
while (count && (regr(DFCMEMCTL) & 0x1))
count--;
if (!count) {
dev_err(isif_cfg.dev,
"defect table write timeout !!!\n");
return -1;
}
}
if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) {
/* Extra cycle needed */
regw(0, DFCMEM0);
regw(0x1FFF, DFCMEM1);
regw(1, DFCMEMCTL);
}
/* enable VDFC */
reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT),
DFCCTL);
return 0;
}
static void isif_config_csc(struct isif_df_csc *df_csc)
{
u32 val1 = 0, val2 = 0, i;
if (!df_csc->csc.en) {
regw(0, CSCCTL);
return;
}
for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) {
if ((i % 2) == 0) {
/* CSCM - LSB */
val1 = (df_csc->csc.coeff[i].integer <<
ISIF_CSC_COEF_INTEG_SHIFT) |
df_csc->csc.coeff[i].decimal;
} else {
/* CSCM - MSB */
val2 = (df_csc->csc.coeff[i].integer <<
ISIF_CSC_COEF_INTEG_SHIFT) |
df_csc->csc.coeff[i].decimal;
val2 <<= ISIF_CSCM_MSB_SHIFT;
val2 |= val1;
regw(val2, (CSCM0 + ((i - 1) << 1)));
}
}
/* program the active area */
regw(df_csc->start_pix, FMTSPH);
/*
* One extra pixel is required for CSC. Strictly, the number of
* pixels minus 1 should be configured in this register, so we
* would normally subtract 1 before writing it, but we do not,
* since CSC requires one extra pixel.
*/
regw(df_csc->num_pixels, FMTLNH);
regw(df_csc->start_line, FMTSLV);
/*
* one extra line is required for CSC; see the reasoning documented
* for num_pixels above
*/
regw(df_csc->num_lines, FMTLNV);
/* Enable CSC */
regw(1, CSCCTL);
}
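/*
* Coefficient packing in the loop above: coefficients are written in
* pairs, the even-indexed one in the low half-word and the odd-indexed
* one shifted up by ISIF_CSCM_MSB_SHIFT, so coeff[0]/coeff[1] share
* CSCM0, coeff[2]/coeff[3] share CSCM0 + 4, and so on.
*/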
static int isif_config_raw(void)
{
struct isif_params_raw *params = &isif_cfg.bayer;
struct isif_config_params_raw *module_params =
&isif_cfg.bayer.config_params;
struct vpss_pg_frame_size frame_size;
struct vpss_sync_pol sync;
u32 val;
dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n");
/*
* Configure CCDCFG register:-
* Set CCD Not to swap input since input is RAW data
* Set FID detection function to Latch at V-Sync
* Set WENLOG - isif valid area
* Set TRGSEL
* Set EXTRG
* Packed to 8 or 16 bits
*/
val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack;
dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val);
regw(val, CCDCFG);
/*
* Configure the vertical sync polarity(MODESET.VDPOL)
* Configure the horizontal sync polarity (MODESET.HDPOL)
* Configure frame id polarity (MODESET.FLDPOL)
* Configure data polarity
* Configure External WEN Selection
* Configure frame format(progressive or interlace)
* Configure pixel format (Input mode)
* Configure the data shift
*/
val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) |
(params->hd_pol << ISIF_HD_POL_SHIFT) |
(params->fid_pol << ISIF_FID_POL_SHIFT) |
(ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) |
(ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) |
(params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
(params->pix_fmt << ISIF_INPUT_SHIFT) |
(params->config_params.data_shift << ISIF_DATASFT_SHIFT);
regw(val, MODESET);
dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val);
/*
* Configure GAMMAWD register
* CFA pattern setting
*/
val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT;
/* Gamma msb */
if (module_params->compress.alg == ISIF_ALAW)
val |= ISIF_ALAW_ENABLE;
val |= (params->data_msb << ISIF_ALAW_GAMMA_WD_SHIFT);
regw(val, CGAMMAWD);
/* Configure DPCM compression settings */
if (module_params->compress.alg == ISIF_DPCM) {
val = BIT(ISIF_DPCM_EN_SHIFT) |
(module_params->compress.pred <<
ISIF_DPCM_PREDICTOR_SHIFT);
}
regw(val, MISC);
/* Configure Gain & Offset */
isif_config_gain_offset();
/* Configure Color pattern */
val = (params->config_params.col_pat_field0.olop) |
(params->config_params.col_pat_field0.olep << 2) |
(params->config_params.col_pat_field0.elop << 4) |
(params->config_params.col_pat_field0.elep << 6) |
(params->config_params.col_pat_field1.olop << 8) |
(params->config_params.col_pat_field1.olep << 10) |
(params->config_params.col_pat_field1.elop << 12) |
(params->config_params.col_pat_field1.elep << 14);
regw(val, CCOLP);
dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val);
/* Configure HSIZE register */
val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT;
/* calculate line offset in units of 32 bytes based on pack value */
if (isif_cfg.data_pack == ISIF_PACK_8BIT)
val |= ((params->win.width + 31) >> 5);
else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
val |= (((params->win.width +
(params->win.width >> 2)) + 31) >> 5);
else
val |= (((params->win.width * 2) + 31) >> 5);
regw(val, HSIZE);
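/*
* Worked example (illustrative): with 8-bit packing and a 640-pixel
* wide window the offset above is (640 + 31) >> 5 == 20 units of 32
* bytes (640 bytes per line); 16-bit packing gives
* ((640 * 2) + 31) >> 5 == 40 units (1280 bytes per line).
*/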
/* Configure SDOFST register */
if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
if (params->image_invert_en) {
/* For interlace inverse mode */
regw(0x4B6D, SDOFST);
dev_dbg(isif_cfg.dev, "Writing 0x4B6D to SDOFST...\n");
} else {
/* For interlace non inverse mode */
regw(0x0B6D, SDOFST);
dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n");
}
} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
if (params->image_invert_en) {
/* For progressive inverse mode */
regw(0x4000, SDOFST);
dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n");
} else {
/* For progressive non inverse mode */
regw(0x0000, SDOFST);
dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n");
}
}
/* Configure video window */
isif_setwin(¶ms->win, params->frm_fmt, 1);
/* Configure Black Clamp */
isif_config_bclamp(&module_params->bclamp);
/* Configure Vertical Defect Pixel Correction */
if (isif_config_dfc(&module_params->dfc) < 0)
return -EFAULT;
if (!module_params->df_csc.df_or_csc)
/* Configure Color Space Conversion */
isif_config_csc(&module_params->df_csc);
isif_config_linearization(&module_params->linearize);
/* Configure Culling */
isif_config_culling(&module_params->culling);
/* Configure horizontal and vertical offsets(DFC,LSC,Gain) */
regw(module_params->horz_offset, DATAHOFST);
regw(module_params->vert_offset, DATAVOFST);
/* Setup test pattern if enabled */
if (params->config_params.test_pat_gen) {
/* Use the HD/VD pol settings from user */
sync.ccdpg_hdpol = params->hd_pol;
sync.ccdpg_vdpol = params->vd_pol;
dm365_vpss_set_sync_pol(sync);
frame_size.hlpfr = isif_cfg.bayer.win.width;
frame_size.pplen = isif_cfg.bayer.win.height;
dm365_vpss_set_pg_frame_size(frame_size);
vpss_select_ccdc_source(VPSS_PGLPBK);
}
dev_dbg(isif_cfg.dev, "\nEnd of isif_config_ycbcr...\n");
return 0;
}
static int isif_set_buftype(enum ccdc_buftype buf_type)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER)
isif_cfg.bayer.buf_type = buf_type;
else
isif_cfg.ycbcr.buf_type = buf_type;
return 0;
}
static enum ccdc_buftype isif_get_buftype(void)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER)
return isif_cfg.bayer.buf_type;
return isif_cfg.ycbcr.buf_type;
}
static int isif_enum_pix(u32 *pix, int i)
{
int ret = -EINVAL;
if (isif_cfg.if_type == VPFE_RAW_BAYER) {
if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) {
*pix = isif_raw_bayer_pix_formats[i];
ret = 0;
}
} else {
if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) {
*pix = isif_raw_yuv_pix_formats[i];
ret = 0;
}
}
return ret;
}
static int isif_set_pixel_format(unsigned int pixfmt)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER) {
if (pixfmt == V4L2_PIX_FMT_SBGGR8) {
if ((isif_cfg.bayer.config_params.compress.alg !=
ISIF_ALAW) &&
(isif_cfg.bayer.config_params.compress.alg !=
ISIF_DPCM)) {
dev_dbg(isif_cfg.dev,
"Either configure A-Law or DPCM\n");
return -EINVAL;
}
isif_cfg.data_pack = ISIF_PACK_8BIT;
} else if (pixfmt == V4L2_PIX_FMT_SBGGR16) {
isif_cfg.bayer.config_params.compress.alg =
ISIF_NO_COMPRESSION;
isif_cfg.data_pack = ISIF_PACK_16BIT;
} else
return -EINVAL;
isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
} else {
if (pixfmt == V4L2_PIX_FMT_YUYV)
isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
else if (pixfmt == V4L2_PIX_FMT_UYVY)
isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
else
return -EINVAL;
isif_cfg.data_pack = ISIF_PACK_8BIT;
}
return 0;
}
static u32 isif_get_pixel_format(void)
{
u32 pixfmt;
if (isif_cfg.if_type == VPFE_RAW_BAYER)
if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW ||
isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM)
pixfmt = V4L2_PIX_FMT_SBGGR8;
else
pixfmt = V4L2_PIX_FMT_SBGGR16;
else {
if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
pixfmt = V4L2_PIX_FMT_YUYV;
else
pixfmt = V4L2_PIX_FMT_UYVY;
}
return pixfmt;
}
static int isif_set_image_window(struct v4l2_rect *win)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER) {
isif_cfg.bayer.win.top = win->top;
isif_cfg.bayer.win.left = win->left;
isif_cfg.bayer.win.width = win->width;
isif_cfg.bayer.win.height = win->height;
} else {
isif_cfg.ycbcr.win.top = win->top;
isif_cfg.ycbcr.win.left = win->left;
isif_cfg.ycbcr.win.width = win->width;
isif_cfg.ycbcr.win.height = win->height;
}
return 0;
}
static void isif_get_image_window(struct v4l2_rect *win)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER)
*win = isif_cfg.bayer.win;
else
*win = isif_cfg.ycbcr.win;
}
static unsigned int isif_get_line_length(void)
{
unsigned int len;
if (isif_cfg.if_type == VPFE_RAW_BAYER) {
if (isif_cfg.data_pack == ISIF_PACK_8BIT)
len = isif_cfg.bayer.win.width;
else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
len = (isif_cfg.bayer.win.width * 2) +
(isif_cfg.bayer.win.width >> 2);
else
len = isif_cfg.bayer.win.width * 2;
} else
len = isif_cfg.ycbcr.win.width * 2;
return ALIGN(len, 32);
}
static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER)
isif_cfg.bayer.frm_fmt = frm_fmt;
else
isif_cfg.ycbcr.frm_fmt = frm_fmt;
return 0;
}
static enum ccdc_frmfmt isif_get_frame_format(void)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER)
return isif_cfg.bayer.frm_fmt;
return isif_cfg.ycbcr.frm_fmt;
}
static int isif_getfid(void)
{
return (regr(MODESET) >> 15) & 0x1;
}
/* misc operations */
static void isif_setfbaddr(unsigned long addr)
{
regw((addr >> 21) & 0x07ff, CADU);
regw((addr >> 5) & 0x0ffff, CADL);
}
static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
{
isif_cfg.if_type = params->if_type;
switch (params->if_type) {
case VPFE_BT656:
case VPFE_BT656_10BIT:
case VPFE_YCBCR_SYNC_8:
isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
break;
case VPFE_BT1120:
case VPFE_YCBCR_SYNC_16:
isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT;
isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
break;
case VPFE_RAW_BAYER:
isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
break;
default:
dev_dbg(isif_cfg.dev, "Invalid interface type\n");
return -EINVAL;
}
return 0;
}
/* This function will configure ISIF for YCbCr parameters. */
static int isif_config_ycbcr(void)
{
struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
struct vpss_pg_frame_size frame_size;
u32 modeset = 0, ccdcfg = 0;
struct vpss_sync_pol sync;
dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
/* configure pixel format or input mode */
modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) |
(params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
(params->fid_pol << ISIF_FID_POL_SHIFT) |
(params->hd_pol << ISIF_HD_POL_SHIFT) |
(params->vd_pol << ISIF_VD_POL_SHIFT);
/* pack the data to 8-bit ISIFCFG */
switch (isif_cfg.if_type) {
case VPFE_BT656:
if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
return -EINVAL;
}
modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT);
regw(3, REC656IF);
ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR;
break;
case VPFE_BT656_10BIT:
if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
return -EINVAL;
}
/* setup BT.656, embedded sync */
regw(3, REC656IF);
/* enable 10 bit mode in ccdcfg */
ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR |
ISIF_BW656_ENABLE;
break;
case VPFE_BT1120:
if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
return -EINVAL;
}
regw(3, REC656IF);
break;
case VPFE_YCBCR_SYNC_8:
ccdcfg |= ISIF_DATA_PACK8;
ccdcfg |= ISIF_YCINSWP_YCBCR;
if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
return -EINVAL;
}
break;
case VPFE_YCBCR_SYNC_16:
if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
return -EINVAL;
}
break;
default:
/* should never come here */
dev_dbg(isif_cfg.dev, "Invalid interface type\n");
return -EINVAL;
}
regw(modeset, MODESET);
/* Set up pix order */
ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT;
regw(ccdcfg, CCDCFG);
/* configure video window */
if ((isif_cfg.if_type == VPFE_BT1120) ||
(isif_cfg.if_type == VPFE_YCBCR_SYNC_16))
isif_setwin(¶ms->win, params->frm_fmt, 1);
else
isif_setwin(¶ms->win, params->frm_fmt, 2);
/*
* configure the horizontal line offset
* this is done by rounding up width to a multiple of 16 pixels
* and multiply by two to account for y:cb:cr 4:2:2 data
*/
regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE);
/* configure the memory line offset */
if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) &&
(params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED))
/* two fields are interleaved in memory */
regw(0x00000249, SDOFST);
/* Setup test pattern if enabled */
if (isif_cfg.bayer.config_params.test_pat_gen) {
sync.ccdpg_hdpol = params->hd_pol;
sync.ccdpg_vdpol = params->vd_pol;
dm365_vpss_set_sync_pol(sync);
frame_size.hlpfr = params->win.width;
frame_size.pplen = params->win.height;
dm365_vpss_set_pg_frame_size(frame_size);
}
return 0;
}
static int isif_configure(void)
{
if (isif_cfg.if_type == VPFE_RAW_BAYER)
return isif_config_raw();
return isif_config_ycbcr();
}
static int isif_close(struct device *device)
{
/* copy defaults to module params */
isif_cfg.bayer.config_params = isif_config_defaults;
return 0;
}
static struct ccdc_hw_device isif_hw_dev = {
.name = "ISIF",
.owner = THIS_MODULE,
.hw_ops = {
.open = isif_open,
.close = isif_close,
.enable = isif_enable,
.enable_out_to_sdram = isif_enable_output_to_sdram,
.set_hw_if_params = isif_set_hw_if_params,
.configure = isif_configure,
.set_buftype = isif_set_buftype,
.get_buftype = isif_get_buftype,
.enum_pix = isif_enum_pix,
.set_pixel_format = isif_set_pixel_format,
.get_pixel_format = isif_get_pixel_format,
.set_frame_format = isif_set_frame_format,
.get_frame_format = isif_get_frame_format,
.set_image_window = isif_set_image_window,
.get_image_window = isif_get_image_window,
.get_line_length = isif_get_line_length,
.setfbaddr = isif_setfbaddr,
.getfid = isif_getfid,
},
};
static int isif_probe(struct platform_device *pdev)
{
void (*setup_pinmux)(void);
struct resource *res;
void __iomem *addr;
int status = 0, i;
/* Platform data holds setup_pinmux function ptr */
if (!pdev->dev.platform_data)
return -ENODEV;
/*
* first try to register with vpfe. If not correct platform, then we
* don't have to iomap
*/
status = vpfe_register_ccdc_device(&isif_hw_dev);
if (status < 0)
return status;
setup_pinmux = pdev->dev.platform_data;
/*
* setup Mux configuration for ccdc which may be different for
* different SoCs using this CCDC
*/
setup_pinmux();
i = 0;
/* Get the ISIF base address, linearization table0 and table1 addr. */
while (i < 3) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res) {
status = -ENODEV;
goto fail_nobase_res;
}
res = request_mem_region(res->start, resource_size(res),
res->name);
if (!res) {
status = -EBUSY;
goto fail_nobase_res;
}
addr = ioremap_nocache(res->start, resource_size(res));
if (!addr) {
status = -ENOMEM;
goto fail_base_iomap;
}
switch (i) {
case 0:
/* ISIF base address */
isif_cfg.base_addr = addr;
break;
case 1:
/* ISIF linear tbl0 address */
isif_cfg.linear_tbl0_addr = addr;
break;
default:
/* ISIF linear tbl1 address */
isif_cfg.linear_tbl1_addr = addr;
break;
}
i++;
}
isif_cfg.dev = &pdev->dev;
printk(KERN_NOTICE "%s is registered with vpfe.\n",
isif_hw_dev.name);
return 0;
fail_base_iomap:
release_mem_region(res->start, resource_size(res));
i--;
fail_nobase_res:
if (isif_cfg.base_addr)
iounmap(isif_cfg.base_addr);
if (isif_cfg.linear_tbl0_addr)
iounmap(isif_cfg.linear_tbl0_addr);
while (i >= 0) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
release_mem_region(res->start, resource_size(res));
i--;
}
vpfe_unregister_ccdc_device(&isif_hw_dev);
return status;
}
static int isif_remove(struct platform_device *pdev)
{
struct resource *res;
int i = 0;
iounmap(isif_cfg.base_addr);
iounmap(isif_cfg.linear_tbl0_addr);
iounmap(isif_cfg.linear_tbl1_addr);
while (i < 3) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (res)
release_mem_region(res->start, resource_size(res));
i++;
}
vpfe_unregister_ccdc_device(&isif_hw_dev);
return 0;
}
static struct platform_driver isif_driver = {
.driver = {
.name = "isif",
.owner = THIS_MODULE,
},
.remove = isif_remove,
.probe = isif_probe,
};
module_platform_driver(isif_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
RenderKernels/android_kernel_asus_grouper | security/apparmor/file.c | 3375 | 13084 | /*
* AppArmor security module
*
* This file contains AppArmor mediation of files
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/file.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
struct file_perms nullperms;
/**
* audit_file_mask - convert mask to permission string
* @ab: audit buffer to write the permission string to (NOT NULL)
* @mask: permission mask to convert
*/
static void audit_file_mask(struct audit_buffer *ab, u32 mask)
{
char str[10];
char *m = str;
if (mask & AA_EXEC_MMAP)
*m++ = 'm';
if (mask & (MAY_READ | AA_MAY_META_READ))
*m++ = 'r';
if (mask & (MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CHMOD |
AA_MAY_CHOWN))
*m++ = 'w';
else if (mask & MAY_APPEND)
*m++ = 'a';
if (mask & AA_MAY_CREATE)
*m++ = 'c';
if (mask & AA_MAY_DELETE)
*m++ = 'd';
if (mask & AA_MAY_LINK)
*m++ = 'l';
if (mask & AA_MAY_LOCK)
*m++ = 'k';
if (mask & MAY_EXEC)
*m++ = 'x';
*m = '\0';
audit_log_string(ab, str);
}
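/*
* Example (illustrative): a request of MAY_READ | MAY_WRITE | MAY_EXEC
* is logged as "rwx", and AA_EXEC_MMAP | MAY_READ as "mr". Note that
* 'a' (append) is only reported when no write-class permission is in
* the mask, since the two cases are exclusive above.
*/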
/**
* file_audit_cb - call back for file specific audit fields
* @ab: audit_buffer (NOT NULL)
* @va: audit struct to audit values of (NOT NULL)
*/
static void file_audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
uid_t fsuid = current_fsuid();
if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " requested_mask=");
audit_file_mask(ab, sa->aad.fs.request);
}
if (sa->aad.fs.denied & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " denied_mask=");
audit_file_mask(ab, sa->aad.fs.denied);
}
if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " fsuid=%d", fsuid);
audit_log_format(ab, " ouid=%d", sa->aad.fs.ouid);
}
if (sa->aad.fs.target) {
audit_log_format(ab, " target=");
audit_log_untrustedstring(ab, sa->aad.fs.target);
}
}
/**
* aa_audit_file - handle the auditing of file operations
* @profile: the profile being enforced (NOT NULL)
* @perms: the permissions computed for the request (NOT NULL)
* @gfp: allocation flags
* @op: operation being mediated
* @request: permissions requested
* @name: name of object being mediated (MAYBE NULL)
* @target: name of target (MAYBE NULL)
* @ouid: object uid
* @info: extra information message (MAYBE NULL)
* @error: 0 if operation allowed else failure error code
*
* Returns: %0 or error on failure
*/
int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
gfp_t gfp, int op, u32 request, const char *name,
const char *target, uid_t ouid, const char *info, int error)
{
int type = AUDIT_APPARMOR_AUTO;
struct common_audit_data sa;
COMMON_AUDIT_DATA_INIT(&sa, NONE);
sa.aad.op = op,
sa.aad.fs.request = request;
sa.aad.name = name;
sa.aad.fs.target = target;
sa.aad.fs.ouid = ouid;
sa.aad.info = info;
sa.aad.error = error;
if (likely(!sa.aad.error)) {
u32 mask = perms->audit;
if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
mask = 0xffff;
/* mask off perms that are not being force audited */
sa.aad.fs.request &= mask;
if (likely(!sa.aad.fs.request))
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else {
/* only report permissions that were denied */
sa.aad.fs.request = sa.aad.fs.request & ~perms->allow;
if (sa.aad.fs.request & perms->kill)
type = AUDIT_APPARMOR_KILL;
/* quiet known rejects, assumes quiet and kill do not overlap */
if ((sa.aad.fs.request & perms->quiet) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL)
sa.aad.fs.request &= ~perms->quiet;
if (!sa.aad.fs.request)
return COMPLAIN_MODE(profile) ? 0 : sa.aad.error;
}
sa.aad.fs.denied = sa.aad.fs.request & ~perms->allow;
return aa_audit(type, profile, gfp, &sa, file_audit_cb);
}
/**
* map_old_perms - map old file perms layout to the new layout
* @old: permission set in old mapping
*
* Returns: new permission mapping
*/
static u32 map_old_perms(u32 old)
{
u32 new = old & 0xf;
if (old & MAY_READ)
new |= AA_MAY_META_READ;
if (old & MAY_WRITE)
new |= AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE |
AA_MAY_CHMOD | AA_MAY_CHOWN;
if (old & 0x10)
new |= AA_MAY_LINK;
/* the old mapping lock and link_subset flags were overlaid
* and their use was determined by which part of a pair they
* were in
*/
if (old & 0x20)
new |= AA_MAY_LOCK | AA_LINK_SUBSET;
if (old & 0x40) /* AA_EXEC_MMAP */
new |= AA_EXEC_MMAP;
new |= AA_MAY_META_READ;
return new;
}
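/*
* Example (illustrative): an old-format mask of MAY_READ | MAY_WRITE
* maps to MAY_READ | MAY_WRITE | AA_MAY_META_READ | AA_MAY_META_WRITE |
* AA_MAY_CREATE | AA_MAY_DELETE | AA_MAY_CHMOD | AA_MAY_CHOWN: the low
* nibble is carried over, the implied meta/ownership permissions are
* added, and AA_MAY_META_READ is always granted.
*/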
/**
* compute_perms - convert dfa compressed perms to internal perms
* @dfa: dfa to compute perms for (NOT NULL)
* @state: state in dfa
* @cond: conditions to consider (NOT NULL)
*
* TODO: convert from dfa + state to permission entry, do computation conversion
* at load time.
*
* Returns: computed permission set
*/
static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
struct path_cond *cond)
{
struct file_perms perms;
/* FIXME: change over to new dfa format
* currently file perms are encoded in the dfa, new format
* splits the permissions from the dfa. This mapping can be
* done at profile load
*/
perms.kill = 0;
if (current_fsuid() == cond->uid) {
perms.allow = map_old_perms(dfa_user_allow(dfa, state));
perms.audit = map_old_perms(dfa_user_audit(dfa, state));
perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
perms.xindex = dfa_user_xindex(dfa, state);
} else {
perms.allow = map_old_perms(dfa_other_allow(dfa, state));
perms.audit = map_old_perms(dfa_other_audit(dfa, state));
perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
perms.xindex = dfa_other_xindex(dfa, state);
}
/* change_profile wasn't determined by ownership in old mapping */
if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
perms.allow |= AA_MAY_CHANGE_PROFILE;
return perms;
}
/**
* aa_str_perms - find permissions that match @name
* @dfa: to match against (MAYBE NULL)
* @state: state to start matching in
* @name: string to match against dfa (NOT NULL)
* @cond: conditions to consider for permission set computation (NOT NULL)
* @perms: Returns - the permissions found when matching @name
*
* Returns: the final state in @dfa when beginning at @start and walking @name
*/
unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
const char *name, struct path_cond *cond,
struct file_perms *perms)
{
unsigned int state;
if (!dfa) {
*perms = nullperms;
return DFA_NOMATCH;
}
state = aa_dfa_match(dfa, start, name);
*perms = compute_perms(dfa, state, cond);
return state;
}
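/*
* Typical usage (see aa_path_perm() below): callers pass
* profile->file.dfa and profile->file.start, then test the requested
* mask against perms->allow. A profile with no file dfa yields
* nullperms, i.e. deny by default.
*/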
/**
* is_deleted - test if a file has been completely unlinked
* @dentry: dentry of file to test for deletion (NOT NULL)
*
* Returns: %1 if deleted else %0
*/
static inline bool is_deleted(struct dentry *dentry)
{
if (d_unlinked(dentry) && dentry->d_inode->i_nlink == 0)
return 1;
return 0;
}
/**
* aa_path_perm - do permissions check & audit for @path
* @op: operation being checked
* @profile: profile being enforced (NOT NULL)
* @path: path to check permissions of (NOT NULL)
* @flags: any additional path flags beyond what the profile specifies
* @request: requested permissions
* @cond: conditional info for this request (NOT NULL)
*
* Returns: %0 else error if access denied or other error
*/
int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
int flags, u32 request, struct path_cond *cond)
{
char *buffer = NULL;
struct file_perms perms = {};
const char *name, *info = NULL;
int error;
flags |= profile->path_flags | (S_ISDIR(cond->mode) ? PATH_IS_DIR : 0);
error = aa_get_name(path, flags, &buffer, &name);
if (error) {
if (error == -ENOENT && is_deleted(path->dentry)) {
/* Accesses to open files that have been deleted are
* given a pass (implicit delegation)
*/
error = 0;
perms.allow = request;
} else if (error == -ENOENT)
info = "Failed name lookup - deleted entry";
else if (error == -ESTALE)
info = "Failed name lookup - disconnected path";
else if (error == -ENAMETOOLONG)
info = "Failed name lookup - name too long";
else
info = "Failed name lookup";
} else {
aa_str_perms(profile->file.dfa, profile->file.start, name, cond,
&perms);
if (request & ~perms.allow)
error = -EACCES;
}
error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request, name,
NULL, cond->uid, info, error);
kfree(buffer);
return error;
}
/**
* xindex_is_subset - helper for aa_path_link
* @link: link permission set
* @target: target permission set
*
* Test that the link's x permissions are equal to, or a subset of,
* the target's x permissions. This is done as part of the subset
* test, where a hard link must have a subset of the permissions
* that the target has.
*
* Returns: %1 if subset else %0
*/
static inline bool xindex_is_subset(u32 link, u32 target)
{
if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
return 0;
return 1;
}
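/*
* Example (illustrative): the test passes when the non-AA_X_UNSAFE bits
* match and the link only carries AA_X_UNSAFE if the target does too;
* it fails when the link is AA_X_UNSAFE but the target is not, or when
* any other xindex bit differs.
*/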
/**
* aa_path_link - Handle hard link permission check
* @profile: the profile being enforced (NOT NULL)
* @old_dentry: the target dentry (NOT NULL)
* @new_dir: directory the new link will be created in (NOT NULL)
* @new_dentry: the link being created (NOT NULL)
*
* Handle the permission test for a link & target pair. Permission
* is encoded as a pair where the link permission is determined
* first, and if allowed, the target is tested. The target test
* is done from the point of the link match (not start of DFA)
* making the target permission dependent on the link permission match.
*
* The subset test, if required, forces the permissions granted
* on the link to be a subset of the permissions granted on the
* target.
*
* Returns: %0 if allowed else error
*/
int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry)
{
struct path link = { new_dir->mnt, new_dentry };
struct path target = { new_dir->mnt, old_dentry };
struct path_cond cond = {
old_dentry->d_inode->i_uid,
old_dentry->d_inode->i_mode
};
char *buffer = NULL, *buffer2 = NULL;
const char *lname, *tname = NULL, *info = NULL;
struct file_perms lperms, perms;
u32 request = AA_MAY_LINK;
unsigned int state;
int error;
lperms = nullperms;
/* buffer freed below, lname is pointer in buffer */
error = aa_get_name(&link, profile->path_flags, &buffer, &lname);
if (error)
goto audit;
/* buffer2 freed below, tname is pointer in buffer2 */
error = aa_get_name(&target, profile->path_flags, &buffer2, &tname);
if (error)
goto audit;
error = -EACCES;
/* aa_str_perms - handles the case of the dfa being NULL */
state = aa_str_perms(profile->file.dfa, profile->file.start, lname,
&cond, &lperms);
if (!(lperms.allow & AA_MAY_LINK))
goto audit;
/* test to see if target can be paired with link */
state = aa_dfa_null_transition(profile->file.dfa, state);
aa_str_perms(profile->file.dfa, state, tname, &cond, &perms);
/* The force-audit/quiet masks for the link are stored in the
* second entry of the link pair.
*/
lperms.audit = perms.audit;
lperms.quiet = perms.quiet;
lperms.kill = perms.kill;
if (!(perms.allow & AA_MAY_LINK)) {
info = "target restricted";
goto audit;
}
/* done if link subset test is not required */
if (!(perms.allow & AA_LINK_SUBSET))
goto done_tests;
/* Do link perm subset test requiring allowed permission on link are a
* subset of the allowed permissions on target.
*/
aa_str_perms(profile->file.dfa, profile->file.start, tname, &cond,
&perms);
/* AA_MAY_LINK is not considered in the subset test */
request = lperms.allow & ~AA_MAY_LINK;
lperms.allow &= perms.allow | AA_MAY_LINK;
request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow);
if (request & ~lperms.allow) {
goto audit;
} else if ((lperms.allow & MAY_EXEC) &&
!xindex_is_subset(lperms.xindex, perms.xindex)) {
lperms.allow &= ~MAY_EXEC;
request |= MAY_EXEC;
info = "link not subset of target";
goto audit;
}
done_tests:
error = 0;
audit:
error = aa_audit_file(profile, &lperms, GFP_KERNEL, OP_LINK, request,
lname, tname, cond.uid, info, error);
kfree(buffer);
kfree(buffer2);
return error;
}
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
* @profile: profile being enforced (NOT NULL)
* @file: file to revalidate access permissions on (NOT NULL)
* @request: requested permissions
*
* Returns: %0 if access allowed else error
*/
int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
u32 request)
{
struct path_cond cond = {
.uid = file->f_path.dentry->d_inode->i_uid,
.mode = file->f_path.dentry->d_inode->i_mode
};
return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
request, &cond);
}
| gpl-2.0 |
GlobalscaleTechnologiesInc/D3-Linux | drivers/gpu/drm/i915/i915_dma.c | 3887 | 60445 | /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
*/
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
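/*
* Note (illustrative numbers): on gen4+ the status page may live above
* 4GiB, so bits 35:32 of the bus address are folded into bits 7:4 of
* HWS_PGA. E.g. a busaddr of 0x123456000 is written as
* 0x23456000 | 0x10 == 0x23456010.
*/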
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
*/
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
0, PAGE_SIZE);
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
/* Need to rewrite hardware status page */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
/*
* We should never lose context on the ring with modesetting
* as we don't expose it to userspace
*/
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
if (ring->head == ring->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < I915_NUM_RINGS; i++)
intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
} else {
DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
}
if (init->ring_size != 0) {
if (LP_RING(dev_priv)->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
ret = intel_render_ring_init_dri(dev,
init->ring_start,
init->ring_size);
if (ret) {
i915_dma_cleanup(dev);
return ret;
}
}
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
dev_priv->front_offset = init->front_offset;
dev_priv->current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
/* Program Hardware Status Page */
if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_init_t *init = data;
int retcode = 0;
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
break;
case I915_RESUME_DMA:
retcode = i915_dma_resume(dev);
break;
default:
retcode = -EINVAL;
break;
}
return retcode;
}
/* Implement basically the same security restrictions as hardware does
* for MI_BATCH_NON_SECURE. These can be made stricter at any time.
*
* Most of the calculations below involve calculating the size of a
* particular instruction. It's important to get the size right as
* that tells us where the next instruction to check is. Any illegal
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
switch ((cmd >> 23) & 0x3f) {
case 0x0:
return 1; /* MI_NOOP */
case 0x4:
return 1; /* MI_FLUSH */
default:
return 0; /* disallow everything else */
}
break;
case 0x1:
return 0; /* reserved */
case 0x2:
return (cmd & 0xff) + 2; /* 2d commands */
case 0x3:
if (((cmd >> 24) & 0x1f) <= 0x18)
return 1;
switch ((cmd >> 24) & 0x1f) {
case 0x1c:
return 1;
case 0x1d:
switch ((cmd >> 16) & 0xff) {
case 0x3:
return (cmd & 0x1f) + 2;
case 0x4:
return (cmd & 0xf) + 2;
default:
return (cmd & 0xffff) + 2;
}
case 0x1e:
if (cmd & (1 << 23))
return (cmd & 0xffff) + 1;
else
return 1;
case 0x1f:
if ((cmd & (1 << 23)) == 0) /* inline vertices */
return (cmd & 0x1ffff) + 2;
else if (cmd & (1 << 17)) /* indirect random */
if ((cmd & 0xffff) == 0)
return 0; /* unknown length, too hard */
else
return (((cmd & 0xffff) + 1) / 2) + 1;
else
return 2; /* indirect sequential */
default:
return 0;
}
default:
return 0;
}
return 0;
}
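/*
* Example (illustrative): a 2D command word such as 0x54000004 has
* opcode type 0x2 in bits 31:29, so validate_cmd() reports a length of
* (cmd & 0xff) + 2 == 6 dwords; i915_emit_cmds() below uses that size
* to step to the next instruction, and a size of 0 aborts the buffer.
*/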
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
}
ret = BEGIN_LP_RING((dwords+1)&~1);
if (ret)
return ret;
for (i = 0; i < dwords; i++)
OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
ADVANCE_LP_RING();
return 0;
}
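/*
* Note on the padding above: the ring tail is kept qword-aligned, so an
* odd dword count is rounded up via (dwords + 1) & ~1 and the spare
* slot is filled with a zero dword (MI_NOOP).
*/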
int
i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
ret = BEGIN_LP_RING(4);
if (ret)
return ret;
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
} else {
ret = BEGIN_LP_RING(6);
if (ret)
return ret;
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
}
ADVANCE_LP_RING();
return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
* emit. For now, do it in both places:
*/
static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
{
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
if (cmd->sz & 0x3) {
DRM_ERROR("alignment");
return -EINVAL;
}
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
}
ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
if (ret)
return ret;
}
i915_emit_breadcrumb(dev);
return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
int i, count, ret;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
return -EINVAL;
}
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, &cliprects[i],
batch->DR1, batch->DR4);
if (ret)
return ret;
}
if (!IS_I830(dev) && !IS_845G(dev)) {
ret = BEGIN_LP_RING(2);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
} else {
ret = BEGIN_LP_RING(4);
if (ret)
return ret;
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
}
ADVANCE_LP_RING();
}
if (IS_G4X(dev) || IS_GEN5(dev)) {
if (BEGIN_LP_RING(2) == 0) {
OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
i915_emit_breadcrumb(dev);
return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->current_page,
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
ret = BEGIN_LP_RING(10);
if (ret)
return ret;
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->current_page == 0) {
OUT_RING(dev_priv->back_offset);
dev_priv->current_page = 1;
} else {
OUT_RING(dev_priv->front_offset);
dev_priv->current_page = 0;
}
OUT_RING(0);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
ADVANCE_LP_RING();
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
return intel_wait_ring_idle(ring);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
ret = i915_quiescent(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (batch->num_cliprects < 0)
return -EINVAL;
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
sizeof(struct drm_clip_rect),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0) {
ret = -EFAULT;
goto fail_free;
}
}
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
mutex_unlock(&dev->struct_mutex);
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
fail_free:
kfree(cliprects);
return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
int ret;
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
if (batch_data == NULL)
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
if (ret != 0) {
ret = -EFAULT;
goto fail_batch_free;
}
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
}
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0) {
ret = -EFAULT;
goto fail_clip_free;
}
}
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
goto fail_clip_free;
}
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
fail_clip_free:
kfree(cliprects);
fail_batch_free:
kfree(batch_data);
return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_flip(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_getparam_t *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
value = dev_priv->has_gem;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
value = dev_priv->has_gem;
break;
case I915_PARAM_HAS_BSD:
value = HAS_BSD(dev);
break;
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
break;
case I915_PARAM_HAS_COHERENT_RINGS:
value = 1;
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
case I915_PARAM_HAS_GEN7_SOL_RESET:
value = 1;
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
return -EINVAL;
}
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
return -EFAULT;
}
return 0;
}
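/*
 * Illustrative userspace counterpart (a sketch, not part of this driver):
 * a libdrm client queries the parameters above through
 * DRM_IOCTL_I915_GETPARAM.  The fd and the error handling are assumptions
 * made for the example, not something this file provides.
 */
#if 0
#include <xf86drm.h>		/* drmIoctl() from libdrm */
#include <drm/i915_drm.h>	/* drm_i915_getparam_t */

static int query_chipset_id(int fd)
{
	drm_i915_getparam_t gp;
	int devid = 0;

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;	/* i915_getparam() copies one int back */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;	/* e.g. -EINVAL for an unknown parameter */
	return devid;		/* PCI device id, as reported above */
}
#endif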
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
dev_priv->allow_batchbuffer = param->value;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
return -EINVAL;
/* Userspace can use first N regs */
dev_priv->fence_reg_start = param->value;
break;
default:
DRM_DEBUG_DRIVER("unknown parameter %d\n",
param->param);
return -EINVAL;
}
return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
WARN(1, "tried to set status page when mode setting active\n");
return 0;
}
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
dev_priv->hws_map.mtrr = 0;
drm_core_ioremap_wc(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
ring->status_page.page_addr =
(void __force __iomem *)dev_priv->hws_map.handle;
memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
ring->status_page.page_addr);
return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
}
return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)
#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
if (INTEL_INFO(dev)->gen >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
return 0;
#endif
/* Get some space for it */
dev_priv->mch_res.name = "i915 MCHBAR";
dev_priv->mch_res.flags = IORESOURCE_MEM;
ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
&dev_priv->mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
return ret;
}
if (INTEL_INFO(dev)->gen >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
pci_write_config_dword(dev_priv->bridge_dev, reg,
lower_32_bits(dev_priv->mch_res.start));
return 0;
}
/* Set up MCHBAR if possible; record in dev_priv->mchbar_need_disable whether teardown must disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
enabled = temp & 1;
}
/* If it's already enabled, don't have to do anything */
if (enabled)
return;
if (intel_alloc_mchbar_resource(dev))
return;
dev_priv->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
}
}
if (dev_priv->mch_res.start)
release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
/**
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
* a physical one
* @dev: drm device
* @offset: address to translate
*
* Some chip functions require allocations from stolen space and need the
* physical address of the memory in question.
*/
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so compute the base by subtracting the stolen memory
* from the Top of Low Usable DRAM which is where the BIOS places
* the graphics stolen memory.
*/
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* top 32bits are reserved = 0 */
pci_read_config_dword(pdev, 0xA4, &base);
} else {
/* XXX presume 8xx is the same as i915 */
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
}
#else
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
u16 val;
pci_read_config_word(pdev, 0xb0, &val);
base = val >> 4 << 20;
} else {
u8 val;
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
}
base -= dev_priv->mm.gtt->stolen_size;
#endif
return base + offset;
}
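/*
 * Worked example with illustrative numbers: on gen4+/G33 the u16 read from
 * config offset 0xb0 holds the top of low usable DRAM in its upper 12 bits,
 * in MB units.  val = 0x0800 gives base = (0x0800 >> 4) << 20 = 128 MiB =
 * 0x08000000; with an 8 MiB stolen region this becomes 0x07800000, so
 * stolen offset 0 translates to that physical address.
 */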
static void i915_warn_stolen(struct drm_device *dev)
{
DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
if (!cfb_base)
goto err_fb;
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
compressed_llb = drm_mm_get_block(compressed_llb,
4096, 4096);
if (!compressed_llb)
goto err_fb;
ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
if (!ll_base)
goto err_llb;
}
dev_priv->cfb_size = size;
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
cfb_base, ll_base, size >> 20);
return;
err_llb:
drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
intel_modeset_vga_set_state(dev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "i915: switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "i915: switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
bool can_switch;
spin_lock(&dev->count_lock);
can_switch = (dev->open_count == 0);
spin_unlock(&dev->count_lock);
return can_switch;
}
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
#endif
return true;
}
static int i915_load_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size, gtt_size, mappable_size;
int ret;
prealloc_size = dev_priv->mm.gtt->stolen_size;
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
mutex_lock(&dev->struct_mutex);
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
/* For paranoia keep the guard page in between. */
gtt_size -= PAGE_SIZE;
i915_gem_do_init(dev, 0, mappable_size, gtt_size);
ret = i915_gem_init_aliasing_ppgtt(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
} else {
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch
* page. There are a number of places where the hardware
* apparently prefetches past the end of the object, and we've
* seen multiple hangs with the GPU head pointer stuck in a
* batchbuffer bound at the last page of the aperture. One page
* should be enough to keep any prefetching inside of the
* aperture.
*/
i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
}
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
i915_gem_cleanup_aliasing_ppgtt(dev);
return ret;
}
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
/* Leave 1M for line length buffer & misc. */
/* Try to get a 32M buffer... */
if (prealloc_size > (36*1024*1024))
cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
/* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
return 0;
}
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = intel_parse_bios(dev);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
/* If we have > 1 VGA cards, then we need to arbitrate access
* to the common VGA resources.
*
* If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
if (ret && ret != -ENODEV)
goto out;
intel_register_dsm_handler();
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
NULL,
i915_switcheroo_can_switch);
if (ret)
goto cleanup_vga_client;
/* IIR "flip pending" bit means done if this bit is set */
if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
dev_priv->flip_pending_is_done = true;
intel_modeset_init(dev);
ret = i915_load_gem_init(dev);
if (ret)
goto cleanup_vga_switcheroo;
intel_modeset_gem_init(dev);
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
drm_kms_helper_poll_init(dev);
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
return 0;
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
struct drm_i915_master_private *master_priv;
master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
if (!master_priv)
return -ENOMEM;
master->driver_priv = master_priv;
return 0;
}
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
struct drm_i915_master_private *master_priv = master->driver_priv;
if (!master_priv)
return;
kfree(master_priv);
master->driver_priv = NULL;
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
tmp = I915_READ(CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
dev_priv->fsb_freq = 533; /* 133*4 */
break;
case CLKCFG_FSB_800:
dev_priv->fsb_freq = 800; /* 200*4 */
break;
case CLKCFG_FSB_667:
dev_priv->fsb_freq = 667; /* 167*4 */
break;
case CLKCFG_FSB_400:
dev_priv->fsb_freq = 400; /* 100*4 */
break;
}
switch (tmp & CLKCFG_MEM_MASK) {
case CLKCFG_MEM_533:
dev_priv->mem_freq = 533;
break;
case CLKCFG_MEM_667:
dev_priv->mem_freq = 667;
break;
case CLKCFG_MEM_800:
dev_priv->mem_freq = 800;
break;
}
/* detect pineview DDR3 setting */
tmp = I915_READ(CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
csipll = I915_READ16(CSIPLL0);
switch (ddrpll & 0xff) {
case 0xc:
dev_priv->mem_freq = 800;
break;
case 0x10:
dev_priv->mem_freq = 1066;
break;
case 0x14:
dev_priv->mem_freq = 1333;
break;
case 0x18:
dev_priv->mem_freq = 1600;
break;
default:
DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
}
dev_priv->r_t = dev_priv->mem_freq;
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
break;
case 0x00e:
dev_priv->fsb_freq = 3733;
break;
case 0x010:
dev_priv->fsb_freq = 4266;
break;
case 0x012:
dev_priv->fsb_freq = 4800;
break;
case 0x014:
dev_priv->fsb_freq = 5333;
break;
case 0x016:
dev_priv->fsb_freq = 5866;
break;
case 0x018:
dev_priv->fsb_freq = 6400;
break;
default:
DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
}
if (dev_priv->fsb_freq == 3200) {
dev_priv->c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
dev_priv->c_m = 1;
} else {
dev_priv->c_m = 2;
}
}
static const struct cparams {
u16 i;
u16 t;
u16 m;
u16 c;
} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
{ 0, 1333, 276, 27605 },
{ 0, 1066, 276, 27605 },
{ 0, 800, 231, 23784 },
};
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
diff1 = now - dev_priv->last_time1;
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
*/
if (diff1 <= 10)
return dev_priv->chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
if (total_count < dev_priv->last_count1) {
diff = ~0UL - dev_priv->last_count1;
diff += total_count;
} else {
diff = total_count - dev_priv->last_count1;
}
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == dev_priv->c_m &&
cparams[i].t == dev_priv->r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
}
}
diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
ret = div_u64(ret, 10);
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
dev_priv->chipset_power = ret;
return ret;
}
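/*
 * Worked example with illustrative numbers: if c_m/r_t select the cparams
 * row { 1, 1066, 294, 24460 } and the summed event counters advance by
 * 1000 over a 100 ms window, then diff = 1000 / 100 = 10 counts/ms and
 * the cached result is (294 * 10 + 24460) / 10 = 2740.
 */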
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
u32 tsfs;
tsfs = I915_READ(TSFS);
m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
x = I915_READ8(TR1);
b = tsfs & TSFS_INTR_MASK;
return ((m * x) / 127) - b;
}
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
static const struct v_table {
u16 vd; /* in .1 mil */
u16 vm; /* in .1 mil */
} v_table[] = {
{ 0, 0, },
{ 375, 0, },
{ 500, 0, },
{ 625, 0, },
{ 750, 0, },
{ 875, 0, },
{ 1000, 0, },
{ 1125, 0, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4250, 3125, },
{ 4375, 3250, },
{ 4500, 3375, },
{ 4625, 3500, },
{ 4750, 3625, },
{ 4875, 3750, },
{ 5000, 3875, },
{ 5125, 4000, },
{ 5250, 4125, },
{ 5375, 4250, },
{ 5500, 4375, },
{ 5625, 4500, },
{ 5750, 4625, },
{ 5875, 4750, },
{ 6000, 4875, },
{ 6125, 5000, },
{ 6250, 5125, },
{ 6375, 5250, },
{ 6500, 5375, },
{ 6625, 5500, },
{ 6750, 5625, },
{ 6875, 5750, },
{ 7000, 5875, },
{ 7125, 6000, },
{ 7250, 6125, },
{ 7375, 6250, },
{ 7500, 6375, },
{ 7625, 6500, },
{ 7750, 6625, },
{ 7875, 6750, },
{ 8000, 6875, },
{ 8125, 7000, },
{ 8250, 7125, },
{ 8375, 7250, },
{ 8500, 7375, },
{ 8625, 7500, },
{ 8750, 7625, },
{ 8875, 7750, },
{ 9000, 7875, },
{ 9125, 8000, },
{ 9250, 8125, },
{ 9375, 8250, },
{ 9500, 8375, },
{ 9625, 8500, },
{ 9750, 8625, },
{ 9875, 8750, },
{ 10000, 8875, },
{ 10125, 9000, },
{ 10250, 9125, },
{ 10375, 9250, },
{ 10500, 9375, },
{ 10625, 9500, },
{ 10750, 9625, },
{ 10875, 9750, },
{ 11000, 9875, },
{ 11125, 10000, },
{ 11250, 10125, },
{ 11375, 10250, },
{ 11500, 10375, },
{ 11625, 10500, },
{ 11750, 10625, },
{ 11875, 10750, },
{ 12000, 10875, },
{ 12125, 11000, },
{ 12250, 11125, },
{ 12375, 11250, },
{ 12500, 11375, },
{ 12625, 11500, },
{ 12750, 11625, },
{ 12875, 11750, },
{ 13000, 11875, },
{ 13125, 12000, },
{ 13250, 12125, },
{ 13375, 12250, },
{ 13500, 12375, },
{ 13625, 12500, },
{ 13750, 12625, },
{ 13875, 12750, },
{ 14000, 12875, },
{ 14125, 13000, },
{ 14250, 13125, },
{ 14375, 13250, },
{ 14500, 13375, },
{ 14625, 13500, },
{ 14750, 13625, },
{ 14875, 13750, },
{ 15000, 13875, },
{ 15125, 14000, },
{ 15250, 14125, },
{ 15375, 14250, },
{ 15500, 14375, },
{ 15625, 14500, },
{ 15750, 14625, },
{ 15875, 14750, },
{ 16000, 14875, },
{ 16125, 15000, },
};
if (dev_priv->info->is_mobile)
return v_table[pxvid].vm;
else
return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u32 count;
if (dev_priv->info->gen != 5)
return;
getrawmonotonic(&now);
diff1 = timespec_sub(now, dev_priv->last_time2);
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
if (!diffms)
return;
count = I915_READ(GFXEC);
if (count < dev_priv->last_count2) {
diff = ~0UL - dev_priv->last_count2;
diff += count;
} else {
diff = count - dev_priv->last_count2;
}
dev_priv->last_count2 = count;
dev_priv->last_time2 = now;
/* More magic constants... */
diff = diff * 1181;
diff = div_u64(diff, diffms * 10);
dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
state1 = ext_v;
t = i915_mch_val(dev_priv);
/* Revel in the empirically derived constants */
/* Correction factor in 1/100000 units */
if (t > 80)
corr = ((t * 2349) + 135940);
else if (t >= 50)
corr = ((t * 964) + 29317);
else /* < 50 */
corr = ((t * 301) + 1004);
corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
corr2 = (corr * dev_priv->corr);
state2 = (corr2 * state1) / 10000;
state2 /= 100; /* convert to mW */
i915_update_gfx_val(dev_priv);
return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
* Lock protecting IPS related data structures
* - i915_mch_dev
* - dev_priv->max_delay
* - dev_priv->min_delay
* - dev_priv->fmax
* - dev_priv->gpu_busy
*/
static DEFINE_SPINLOCK(mchdev_lock);
/**
* i915_read_mch_val - return value for IPS use
*
* Calculate and return a value for the IPS driver to use when deciding whether
* we have thermal and power headroom to increase CPU or GPU power budget.
*/
unsigned long i915_read_mch_val(void)
{
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
spin_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
chipset_val = i915_chipset_val(dev_priv);
graphics_val = i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
out_unlock:
spin_unlock(&mchdev_lock);
return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
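/*
 * A minimal sketch of the consumer side (assumptions: a separate module
 * with the i915_read_mch_val() declaration in scope), mirroring how
 * intel_ips can bind to this export without a hard module dependency:
 */
#if 0
static unsigned long query_mch_val(void)
{
	unsigned long (*read_mch)(void);
	unsigned long val = 0;

	read_mch = symbol_get(i915_read_mch_val); /* NULL if i915 absent */
	if (read_mch) {
		val = read_mch();
		symbol_put(i915_read_mch_val);
	}
	return val;
}
#endif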
/**
* i915_gpu_raise - raise GPU frequency limit
*
* Raise the limit; IPS indicates we have thermal headroom.
*/
bool i915_gpu_raise(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
if (dev_priv->max_delay > dev_priv->fmax)
dev_priv->max_delay--;
out_unlock:
spin_unlock(&mchdev_lock);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
* i915_gpu_lower - lower GPU frequency limit
*
* IPS indicates we're close to a thermal limit, so throttle back the GPU
* frequency maximum.
*/
bool i915_gpu_lower(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
if (dev_priv->max_delay < dev_priv->min_delay)
dev_priv->max_delay++;
out_unlock:
spin_unlock(&mchdev_lock);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
* i915_gpu_busy - indicate GPU business to IPS
*
* Tell the IPS driver whether or not the GPU is busy.
*/
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
bool ret = false;
spin_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
ret = dev_priv->busy;
out_unlock:
spin_unlock(&mchdev_lock);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
* i915_gpu_turbo_disable - disable graphics turbo
*
* Disable graphics turbo by resetting the max frequency and setting the
* current frequency to the default.
*/
bool i915_gpu_turbo_disable(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
dev_priv->max_delay = dev_priv->fstart;
if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
ret = false;
out_unlock:
spin_unlock(&mchdev_lock);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
* ips_ping_for_i915_load() - notify intel_ips that i915 is now loaded
*
* Tells the intel_ips driver that the i915 driver is now loaded, if
* IPS got loaded first.
*
* This awkward dance is so that neither module has to depend on the
* other in order for IPS to do the appropriate communication of
* GPU turbo limits to i915.
*/
static void
ips_ping_for_i915_load(void)
{
void (*link)(void);
link = symbol_get(ips_link_to_i915_driver);
if (link) {
link();
symbol_put(ips_link_to_i915_driver);
}
}
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
int ret = 0, mmio_bar;
uint32_t agp_size;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
}
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
* above 4GB.
*
* The documentation also mentions an issue with undefined
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto out_rmmap;
}
agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
/* Set up a WC MTRR for non-PAT systems. This is more common than
* one would think, because the kernel disables PAT on first-generation
* Core chips: an erratum makes a WC PAT mapping behave as if it were
* overridden by a UC MTRR, even when no UC MTRR is present.
*/
dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
agp_size,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
DRM_INFO("MTRR allocation failed. Graphics "
"performance may suffer.\n");
}
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
* It is also used for periodic low-priority events, such as
* idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
* workqueue at any time: max_active = 1 and NON_REENTRANT.
*/
dev_priv->wq = alloc_workqueue("i915",
WQ_UNBOUND | WQ_NON_REENTRANT,
1);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
goto out_mtrrfree;
}
/* enable GEM by default */
dev_priv->has_gem = 1;
intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
/* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret)
goto out_gem_unload;
}
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
else if (IS_GEN5(dev))
i915_ironlake_get_mem_freq(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
* be lost or delayed, but we use them anyway to avoid
* stuck interrupts on some machines.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
if (IS_IVYBRIDGE(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_gem_unload;
/* Start out suspended */
dev_priv->mm.suspended = 1;
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
}
}
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
if (IS_GEN5(dev)) {
spin_lock(&mchdev_lock);
i915_mch_dev = dev_priv;
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
ips_ping_for_i915_load();
}
return 0;
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
dev->agp->agp_info.aper_size * 1024 * 1024);
dev_priv->mm.gtt_mtrr = -1;
}
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
kfree(dev_priv);
return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
spin_lock(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev, true);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
dev->agp->agp_info.aper_size * 1024 * 1024);
dev_priv->mm.gtt_mtrr = -1;
}
acpi_video_unregister();
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
/*
* free the memory space allocated for the child device
* config parsed from VBT
*/
if (dev_priv->child_dev && dev_priv->child_dev_num) {
kfree(dev_priv->child_dev);
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
}
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
/* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->hangcheck_timer);
cancel_work_sync(&dev_priv->error_work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_opregion_fini(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
mutex_lock(&dev->struct_mutex);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
}
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
DRM_DEBUG_DRIVER("\n");
file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
file->driver_priv = file_priv;
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
return 0;
}
/**
* i915_driver_lastclose - clean up after all DRM clients have exited
* @dev: DRM device
*
* Take care of cleaning up after all DRM clients have exited. In the
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
* Additionally, in the non-mode setting case, we'll tear down the AGP
* and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fb_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
i915_gem_lastclose(dev);
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
i915_gem_release(dev, file_priv);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
* A value of 1 is always returned to indicate that every i9x5 is AGP.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
| gpl-2.0 |
MiCode/mi1_kernel | arch/um/drivers/hostaudio_kern.c | 7983 | 7805 | /*
* Copyright (C) 2002 Steve Schmidtke
* Licensed under the GPL
*/
#include "linux/fs.h"
#include "linux/module.h"
#include "linux/slab.h"
#include "linux/sound.h"
#include "linux/soundcard.h"
#include "linux/mutex.h"
#include "asm/uaccess.h"
#include "init.h"
#include "os.h"
struct hostaudio_state {
int fd;
};
struct hostmixer_state {
int fd;
};
#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp"
#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer"
/*
* Changed either at boot time or module load time. At boot, this is
* single-threaded; at module load, multiple modules would each have
* their own copy of these variables.
*/
static char *dsp = HOSTAUDIO_DEV_DSP;
static char *mixer = HOSTAUDIO_DEV_MIXER;
#define DSP_HELP \
" This is used to specify the host dsp device to the hostaudio driver.\n" \
" The default is \"" HOSTAUDIO_DEV_DSP "\".\n\n"
#define MIXER_HELP \
" This is used to specify the host mixer device to the hostaudio driver.\n"\
" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
module_param(dsp, charp, 0644);
MODULE_PARM_DESC(dsp, DSP_HELP);
module_param(mixer, charp, 0644);
MODULE_PARM_DESC(mixer, MIXER_HELP);
#ifndef MODULE
static int set_dsp(char *name, int *add)
{
dsp = name;
return 0;
}
__uml_setup("dsp=", set_dsp, "dsp=<dsp device>\n" DSP_HELP);
static int set_mixer(char *name, int *add)
{
mixer = name;
return 0;
}
__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);
#endif
static DEFINE_MUTEX(hostaudio_mutex);
/* /dev/dsp file operations */
static ssize_t hostaudio_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
void *kbuf;
int err;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: read called, count = %d\n", count);
#endif
kbuf = kmalloc(count, GFP_KERNEL);
if (kbuf == NULL)
return -ENOMEM;
err = os_read_file(state->fd, kbuf, count);
if (err < 0)
goto out;
if (copy_to_user(buffer, kbuf, err))
err = -EFAULT;
out:
kfree(kbuf);
return err;
}
static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
void *kbuf;
int err;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: write called, count = %d\n", count);
#endif
kbuf = kmalloc(count, GFP_KERNEL);
if (kbuf == NULL)
return -ENOMEM;
err = -EFAULT;
if (copy_from_user(kbuf, buffer, count))
goto out;
err = os_write_file(state->fd, kbuf, count);
if (err < 0)
goto out;
*ppos += err;
out:
kfree(kbuf);
return err;
}
static unsigned int hostaudio_poll(struct file *file,
struct poll_table_struct *wait)
{
unsigned int mask = 0;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
#endif
return mask;
}
static long hostaudio_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hostaudio_state *state = file->private_data;
unsigned long data = 0;
int err;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: ioctl called, cmd = %u\n", cmd);
#endif
switch(cmd){
case SNDCTL_DSP_SPEED:
case SNDCTL_DSP_STEREO:
case SNDCTL_DSP_GETBLKSIZE:
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
if (get_user(data, (int __user *) arg))
return -EFAULT;
break;
default:
break;
}
err = os_ioctl_generic(state->fd, cmd, (unsigned long) &data);
switch(cmd){
case SNDCTL_DSP_SPEED:
case SNDCTL_DSP_STEREO:
case SNDCTL_DSP_GETBLKSIZE:
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
if (put_user(data, (int __user *) arg))
return -EFAULT;
break;
default:
break;
}
return err;
}
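/*
 * Illustrative userspace use inside the UML guest (a sketch, not compiled
 * with this module): the OSS ioctls above are simply forwarded to the
 * host, so a plain /dev/dsp client works unchanged.  The 8 kHz rate and
 * the buffer are assumptions made for the example.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>

static int play_samples(const unsigned char *buf, size_t len)
{
	int fd = open("/dev/dsp", O_WRONLY);
	int rate = 8000;

	if (fd < 0)
		return -1;
	/* Travels through hostaudio_ioctl() -> os_ioctl_generic() */
	if (ioctl(fd, SNDCTL_DSP_SPEED, &rate) < 0) {
		close(fd);
		return -1;
	}
	write(fd, buf, len);	/* relayed to the host by hostaudio_write() */
	close(fd);
	return 0;
}
#endif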
static int hostaudio_open(struct inode *inode, struct file *file)
{
struct hostaudio_state *state;
int r = 0, w = 0;
int ret;
#ifdef DEBUG
kparam_block_sysfs_write(dsp);
printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp);
kparam_unblock_sysfs_write(dsp);
#endif
state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
if (file->f_mode & FMODE_READ)
r = 1;
if (file->f_mode & FMODE_WRITE)
w = 1;
kparam_block_sysfs_write(dsp);
mutex_lock(&hostaudio_mutex);
ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
mutex_unlock(&hostaudio_mutex);
kparam_unblock_sysfs_write(dsp);
if (ret < 0) {
kfree(state);
return ret;
}
state->fd = ret;
file->private_data = state;
return 0;
}
static int hostaudio_release(struct inode *inode, struct file *file)
{
struct hostaudio_state *state = file->private_data;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: release called\n");
#endif
os_close_file(state->fd);
kfree(state);
return 0;
}
/* /dev/mixer file operations */
static long hostmixer_ioctl_mixdev(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hostmixer_state *state = file->private_data;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: ioctl called\n");
#endif
return os_ioctl_generic(state->fd, cmd, arg);
}
static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
{
struct hostmixer_state *state;
int r = 0, w = 0;
int ret;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: open called (host: %s)\n", mixer);
#endif
state = kmalloc(sizeof(struct hostmixer_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
if (file->f_mode & FMODE_READ)
r = 1;
if (file->f_mode & FMODE_WRITE)
w = 1;
kparam_block_sysfs_write(mixer);
mutex_lock(&hostaudio_mutex);
ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
mutex_unlock(&hostaudio_mutex);
kparam_unblock_sysfs_write(mixer);
if (ret < 0) {
kparam_block_sysfs_write(dsp);
printk(KERN_ERR "hostaudio_open_mixdev failed to open '%s', "
"err = %d\n", dsp, -ret);
kparam_unblock_sysfs_write(dsp);
kfree(state);
return ret;
}
file->private_data = state;
return 0;
}
static int hostmixer_release(struct inode *inode, struct file *file)
{
struct hostmixer_state *state = file->private_data;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: release called\n");
#endif
os_close_file(state->fd);
kfree(state);
return 0;
}
/* kernel module operations */
static const struct file_operations hostaudio_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = hostaudio_read,
.write = hostaudio_write,
.poll = hostaudio_poll,
.unlocked_ioctl = hostaudio_ioctl,
.mmap = NULL,
.open = hostaudio_open,
.release = hostaudio_release,
};
static const struct file_operations hostmixer_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = hostmixer_ioctl_mixdev,
.open = hostmixer_open_mixdev,
.release = hostmixer_release,
};
static struct {
int dev_audio;
int dev_mixer;
} module_data;
MODULE_AUTHOR("Steve Schmidtke");
MODULE_DESCRIPTION("UML Audio Relay");
MODULE_LICENSE("GPL");
static int __init hostaudio_init_module(void)
{
__kernel_param_lock();
printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
dsp, mixer);
__kernel_param_unlock();
module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
if (module_data.dev_audio < 0) {
printk(KERN_ERR "hostaudio: couldn't register DSP device!\n");
return -ENODEV;
}
module_data.dev_mixer = register_sound_mixer(&hostmixer_fops, -1);
if (module_data.dev_mixer < 0) {
printk(KERN_ERR "hostmixer: couldn't register mixer "
"device!\n");
unregister_sound_dsp(module_data.dev_audio);
return -ENODEV;
}
return 0;
}
static void __exit hostaudio_cleanup_module (void)
{
unregister_sound_mixer(module_data.dev_mixer);
unregister_sound_dsp(module_data.dev_audio);
}
module_init(hostaudio_init_module);
module_exit(hostaudio_cleanup_module);
| gpl-2.0 |
Kra1o5/android_kernel_huawei_u8815-gb | drivers/hid/hid-a4tech.c | 8239 | 3731 | /*
* HID driver for some a4tech "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "hid-ids.h"
#define A4_2WHEEL_MOUSE_HACK_7 0x01
#define A4_2WHEEL_MOUSE_HACK_B8 0x02
struct a4tech_sc {
unsigned long quirks;
unsigned int hw_wheel;
__s32 delayed_value;
};
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
if (usage->type == EV_REL && usage->code == REL_WHEEL)
set_bit(REL_HWHEEL, *bit);
if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
return -1;
return 0;
}
static int a4_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
struct input_dev *input;
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
!usage->type)
return 0;
input = field->hidinput->input;
if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
if (usage->type == EV_REL && usage->code == REL_WHEEL) {
a4->delayed_value = value;
return 1;
}
if (usage->hid == 0x000100b8) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
return 1;
}
}
if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) {
a4->hw_wheel = !!value;
return 1;
}
if (usage->code == REL_WHEEL && a4->hw_wheel) {
input_event(input, usage->type, REL_HWHEEL, value);
return 1;
}
return 0;
}
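/*
 * Example event flow for the B8 hack (illustrative values): the device
 * first reports REL_WHEEL with the scroll delta, which is cached in
 * a4->delayed_value and swallowed; the 0x000100b8 usage that follows
 * carries 0 or 1 and replays that delta as REL_WHEEL (vertical, value 0)
 * or REL_HWHEEL (horizontal, value 1).
 */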
static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct a4tech_sc *a4;
int ret;
a4 = kzalloc(sizeof(*a4), GFP_KERNEL);
if (a4 == NULL) {
hid_err(hdev, "can't alloc device descriptor\n");
ret = -ENOMEM;
goto err_free;
}
a4->quirks = id->driver_data;
hid_set_drvdata(hdev, a4);
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
goto err_free;
}
return 0;
err_free:
kfree(a4);
return ret;
}
static void a4_remove(struct hid_device *hdev)
{
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
hid_hw_stop(hdev);
kfree(a4);
}
static const struct hid_device_id a4_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU),
.driver_data = A4_2WHEEL_MOUSE_HACK_7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D),
.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
{ }
};
MODULE_DEVICE_TABLE(hid, a4_devices);
static struct hid_driver a4_driver = {
.name = "a4tech",
.id_table = a4_devices,
.input_mapped = a4_input_mapped,
.event = a4_event,
.probe = a4_probe,
.remove = a4_remove,
};
static int __init a4_init(void)
{
return hid_register_driver(&a4_driver);
}
static void __exit a4_exit(void)
{
hid_unregister_driver(&a4_driver);
}
module_init(a4_init);
module_exit(a4_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |